/*
* Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
/* not supported currently */
static int wq_signature;
enum {
MLX5_IB_ACK_REQ_FREQ = 8,
};
enum {
MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
MLX5_IB_LINK_TYPE_IB = 0,
MLX5_IB_LINK_TYPE_ETH = 1
};
enum {
MLX5_IB_SQ_STRIDE = 6,
};
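/* Send WQEs are addressed in basic blocks of 2^MLX5_IB_SQ_STRIDE = 64
* bytes (MLX5_SEND_WQE_BB); mlx5_get_send_wqe() below turns a basic-block
* index into a byte offset with "n << MLX5_IB_SQ_STRIDE".
*/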
static const u32 mlx5_ib_opcode[] = {
[IB_WR_SEND] = MLX5_OPCODE_SEND,
[IB_WR_LSO] = MLX5_OPCODE_LSO,
[IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
[IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
[IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
[IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
[IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
[IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
[IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
[IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
[IB_WR_REG_MR] = MLX5_OPCODE_UMR,
[IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
[MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
};
struct mlx5_wqe_eth_pad {
u8 rsvd0[16];
};
enum raw_qp_set_mask_map {
MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID = 1UL << 0,
MLX5_RAW_QP_RATE_LIMIT = 1UL << 1,
};
struct mlx5_modify_raw_qp_param {
u16 operation;
u32 set_mask; /* raw_qp_set_mask_map */
u32 rate_limit;
u8 rq_q_ctr_id;
};
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq);
static int is_qp0(enum ib_qp_type qp_type)
{
return qp_type == IB_QPT_SMI;
}
static int is_sqp(enum ib_qp_type qp_type)
{
return is_qp0(qp_type) || is_qp1(qp_type);
}
static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
return mlx5_buf_offset(&qp->buf, offset);
}
static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}
void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
/**
* mlx5_ib_read_user_wqe() - Copy a user-space WQE to kernel space.
*
* @qp: QP to copy from.
* @send: copy from the send queue when non-zero, use the receive queue
* otherwise.
* @wqe_index: index to start copying from. For send work queues, the
* wqe_index is in units of MLX5_SEND_WQE_BB.
* For receive work queues, it is the index of the work
* queue element in the queue.
* @buffer: destination buffer.
* @length: maximum number of bytes to copy.
*
* Copies at least a single WQE, but may copy more data.
*
* Return: the number of bytes copied, or an error code.
*/
int mlx5_ib_read_user_wqe(struct mlx5_ib_qp *qp, int send, int wqe_index,
void *buffer, u32 length,
struct mlx5_ib_qp_base *base)
{
struct ib_device *ibdev = qp->ibqp.device;
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq;
size_t offset;
size_t wq_end;
struct ib_umem *umem = base->ubuffer.umem;
u32 first_copy_length;
int wqe_length;
int ret;
if (wq->wqe_cnt == 0) {
mlx5_ib_dbg(dev, "mlx5_ib_read_user_wqe for a QP with wqe_cnt == 0. qp_type: 0x%x\n",
qp->ibqp.qp_type);
return -EINVAL;
}
offset = wq->offset + ((wqe_index % wq->wqe_cnt) << wq->wqe_shift);
wq_end = wq->offset + (wq->wqe_cnt << wq->wqe_shift);
if (send && length < sizeof(struct mlx5_wqe_ctrl_seg))
return -EINVAL;
if (offset > umem->length ||
(send && offset + sizeof(struct mlx5_wqe_ctrl_seg) > umem->length))
return -EINVAL;
first_copy_length = min_t(u32, offset + length, wq_end) - offset;
ret = ib_umem_copy_from(buffer, umem, offset, first_copy_length);
if (ret)
return ret;
if (send) {
struct mlx5_wqe_ctrl_seg *ctrl = buffer;
int ds = be32_to_cpu(ctrl->qpn_ds) & MLX5_WQE_CTRL_DS_MASK;
wqe_length = ds * MLX5_WQE_DS_UNITS;
} else {
wqe_length = 1 << wq->wqe_shift;
}
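/* If the WQE fits entirely within the first contiguous chunk we are done;
* otherwise it wraps past the end of the ring and the remainder is found
* back at the start of the queue (wq->offset).
*/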
if (wqe_length <= first_copy_length)
return first_copy_length;
ret = ib_umem_copy_from(buffer + first_copy_length, umem, wq->offset,
wqe_length - first_copy_length);
if (ret)
return ret;
return wqe_length;
}
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
struct ib_event event;
if (type == MLX5_EVENT_TYPE_PATH_MIG) {
/* This event is only valid for trans_qps */
to_mibqp(qp)->port = to_mibqp(qp)->trans_qp.alt_port;
}
if (ibqp->event_handler) {
event.device = ibqp->device;
event.element.qp = ibqp;
switch (type) {
case MLX5_EVENT_TYPE_PATH_MIG:
event.event = IB_EVENT_PATH_MIG;
break;
case MLX5_EVENT_TYPE_COMM_EST:
event.event = IB_EVENT_COMM_EST;
break;
case MLX5_EVENT_TYPE_SQ_DRAINED:
event.event = IB_EVENT_SQ_DRAINED;
break;
case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
event.event = IB_EVENT_QP_LAST_WQE_REACHED;
break;
case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
event.event = IB_EVENT_QP_FATAL;
break;
case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
event.event = IB_EVENT_PATH_MIG_ERR;
break;
case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
event.event = IB_EVENT_QP_REQ_ERR;
break;
case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
event.event = IB_EVENT_QP_ACCESS_ERR;
break;
default:
pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
return;
}
ibqp->event_handler(&event, ibqp->qp_context);
}
}
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
int wqe_size;
int wq_size;
/* Sanity check RQ size before proceeding */
if (cap->max_recv_wr > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz)))
return -EINVAL;
if (!has_rq) {
qp->rq.max_gs = 0;
qp->rq.wqe_cnt = 0;
qp->rq.wqe_shift = 0;
cap->max_recv_wr = 0;
cap->max_recv_sge = 0;
} else {
if (ucmd) {
qp->rq.wqe_cnt = ucmd->rq_wqe_count;
if (ucmd->rq_wqe_shift > BITS_PER_BYTE * sizeof(ucmd->rq_wqe_shift))
return -EINVAL;
qp->rq.wqe_shift = ucmd->rq_wqe_shift;
if ((1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) < qp->wq_sig)
return -EINVAL;
qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
} else {
wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
wqe_size = roundup_pow_of_two(wqe_size);
wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
qp->rq.wqe_cnt = wq_size / wqe_size;
if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq)) {
mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
wqe_size,
MLX5_CAP_GEN(dev->mdev,
max_wqe_sz_rq));
return -EINVAL;
}
qp->rq.wqe_shift = ilog2(wqe_size);
qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
qp->rq.max_post = qp->rq.wqe_cnt;
}
}
return 0;
}
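/* Illustrative RQ sizing for the kernel path above (example numbers only):
* with wq_sig == 0, max_recv_sge == 3 and max_recv_wr == 100, assuming a
* 16-byte mlx5_wqe_data_seg, wqe_size = roundup_pow_of_two(3 * 16) = 64,
* wq_size = roundup_pow_of_two(100) * 64 = 8192, so wqe_cnt = 128,
* wqe_shift = 6 and max_gs = 64 / 16 = 4.
*/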
static int sq_overhead(struct ib_qp_init_attr *attr)
{
int size = 0;
switch (attr->qp_type) {
case IB_QPT_XRC_INI:
size += sizeof(struct mlx5_wqe_xrc_seg);
/* fall through */
case IB_QPT_RC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
max(sizeof(struct mlx5_wqe_atomic_seg) +
sizeof(struct mlx5_wqe_raddr_seg),
sizeof(struct mlx5_wqe_umr_ctrl_seg) +
sizeof(struct mlx5_mkey_seg));
break;
case IB_QPT_XRC_TGT:
return 0;
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
max(sizeof(struct mlx5_wqe_raddr_seg),
sizeof(struct mlx5_wqe_umr_ctrl_seg) +
sizeof(struct mlx5_mkey_seg));
break;
case IB_QPT_UD:
if (attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)
size += sizeof(struct mlx5_wqe_eth_pad) +
sizeof(struct mlx5_wqe_eth_seg);
/* fall through */
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_datagram_seg);
break;
case MLX5_IB_QPT_REG_UMR:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
sizeof(struct mlx5_wqe_umr_ctrl_seg) +
sizeof(struct mlx5_mkey_seg);
break;
default:
return -EINVAL;
}
return size;
}
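/* Rough overhead example (segment sizes as defined by the mlx5 WQE
* structures, quoted here for illustration only): an RC QP reserves a
* 16-byte ctrl segment plus the larger of an atomic + raddr pair
* (16 + 16) or a UMR ctrl + mkey pair (48 + 64), i.e. 16 + 112 bytes of
* per-WQE overhead before any data segments are added.
*/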
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
int inl_size = 0;
int size;
size = sq_overhead(attr);
if (size < 0)
return size;
if (attr->cap.max_inline_data) {
inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
attr->cap.max_inline_data;
}
size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
return MLX5_SIG_WQE_SIZE;
else
return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int get_send_sge(struct ib_qp_init_attr *attr, int wqe_size)
{
int max_sge;
if (attr->qp_type == IB_QPT_RC)
max_sge = (min_t(int, wqe_size, 512) -
sizeof(struct mlx5_wqe_ctrl_seg) -
sizeof(struct mlx5_wqe_raddr_seg)) /
sizeof(struct mlx5_wqe_data_seg);
else if (attr->qp_type == IB_QPT_XRC_INI)
max_sge = (min_t(int, wqe_size, 512) -
sizeof(struct mlx5_wqe_ctrl_seg) -
sizeof(struct mlx5_wqe_xrc_seg) -
sizeof(struct mlx5_wqe_raddr_seg)) /
sizeof(struct mlx5_wqe_data_seg);
else
max_sge = (wqe_size - sq_overhead(attr)) /
sizeof(struct mlx5_wqe_data_seg);
return min_t(int, max_sge, wqe_size - sq_overhead(attr) /
sizeof(struct mlx5_wqe_data_seg));
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
struct mlx5_ib_qp *qp)
{
int wqe_size;
int wq_size;
if (!attr->cap.max_send_wr)
return 0;
wqe_size = calc_send_wqe(attr);
mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
if (wqe_size < 0)
return wqe_size;
if (wqe_size > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
wqe_size, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
return -EINVAL;
}
qp->max_inline_data = wqe_size - sq_overhead(attr) -
sizeof(struct mlx5_wqe_inline_seg);
attr->cap.max_inline_data = qp->max_inline_data;
if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
qp->signature_en = true;
wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
mlx5_ib_dbg(dev, "send queue size (%d * %d / %d -> %d) exceeds limits(%d)\n",
attr->cap.max_send_wr, wqe_size, MLX5_SEND_WQE_BB,
qp->sq.wqe_cnt,
1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
return -ENOMEM;
}
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.max_gs = get_send_sge(attr, wqe_size);
if (qp->sq.max_gs < attr->cap.max_send_sge)
return -ENOMEM;
attr->cap.max_send_sge = qp->sq.max_gs;
qp->sq.max_post = wq_size / wqe_size;
attr->cap.max_send_wr = qp->sq.max_post;
return wq_size;
}
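/* Illustrative SQ sizing: for max_send_wr == 100 and a 192-byte WQE
* (3 basic blocks), wq_size = roundup_pow_of_two(100 * 192) = 32768, so
* sq.wqe_cnt = 32768 / 64 = 512 basic blocks and sq.max_post =
* 32768 / 192 = 170 work requests.
*/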
static int set_user_buf_size(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
struct mlx5_ib_create_qp *ucmd,
struct mlx5_ib_qp_base *base,
struct ib_qp_init_attr *attr)
{
int desc_sz = 1 << qp->sq.wqe_shift;
if (desc_sz > MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq)) {
mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
desc_sz, MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq));
return -EINVAL;
}
if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
ucmd->sq_wqe_count);
return -EINVAL;
}
qp->sq.wqe_cnt = ucmd->sq_wqe_count;
if (qp->sq.wqe_cnt > (1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz))) {
mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
qp->sq.wqe_cnt,
1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz));
return -EINVAL;
}
if (attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
base->ubuffer.buf_size = qp->rq.wqe_cnt << qp->rq.wqe_shift;
qp->raw_packet_qp.sq.ubuffer.buf_size = qp->sq.wqe_cnt << 6;
} else {
base->ubuffer.buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
(qp->sq.wqe_cnt << 6);
}
return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
if (attr->qp_type == IB_QPT_XRC_INI ||
attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
attr->qp_type == MLX5_IB_QPT_REG_UMR ||
!attr->cap.max_recv_wr)
return 0;
return 1;
}
static int first_med_bfreg(void)
{
return 1;
}
enum {
/* This is the first blue flame register in the array of bfregs assigned
* to a process. Since we do not use it for blue flame but rather for
* regular 64-bit doorbells, we do not need a lock for maintaining
* "odd/even" order.
*/
NUM_NON_BLUE_FLAME_BFREGS = 1,
};
static int max_bfregs(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi)
{
return get_num_uars(dev, bfregi) * MLX5_NON_FP_BFREGS_PER_UAR;
}
static int num_med_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int n;
n = max_bfregs(dev, bfregi) - bfregi->num_low_latency_bfregs -
NUM_NON_BLUE_FLAME_BFREGS;
return n >= 0 ? n : 0;
}
static int first_hi_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int med;
med = num_med_bfreg(dev, bfregi);
return ++med;
}
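/* Layout of the non-fast-path bfreg index space: index 0 is the single
* non-blue-flame register used for regular doorbells, indices
* 1..num_med_bfreg() form the medium (shared) class, and the remaining
* indices up to max_bfregs() - 1 form the high (dedicated) class.
*/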
static int alloc_high_class_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int i;
for (i = first_hi_bfreg(dev, bfregi); i < max_bfregs(dev, bfregi); i++) {
if (!bfregi->count[i]) {
bfregi->count[i]++;
return i;
}
}
return -ENOMEM;
}
static int alloc_med_class_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi)
{
int minidx = first_med_bfreg();
int i;
for (i = first_med_bfreg(); i < first_hi_bfreg(dev, bfregi); i++) {
if (bfregi->count[i] < bfregi->count[minidx])
minidx = i;
if (!bfregi->count[minidx])
break;
}
bfregi->count[minidx]++;
return minidx;
}
static int alloc_bfreg(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi,
enum mlx5_ib_latency_class lat)
{
int bfregn = -EINVAL;
mutex_lock(&bfregi->lock);
switch (lat) {
case MLX5_IB_LATENCY_CLASS_LOW:
BUILD_BUG_ON(NUM_NON_BLUE_FLAME_BFREGS != 1);
bfregn = 0;
bfregi->count[bfregn]++;
break;
case MLX5_IB_LATENCY_CLASS_MEDIUM:
if (bfregi->ver < 2)
bfregn = -ENOMEM;
else
bfregn = alloc_med_class_bfreg(dev, bfregi);
break;
case MLX5_IB_LATENCY_CLASS_HIGH:
if (bfregi->ver < 2)
bfregn = -ENOMEM;
else
bfregn = alloc_high_class_bfreg(dev, bfregi);
break;
}
mutex_unlock(&bfregi->lock);
return bfregn;
}
static void free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi, int bfregn)
{
mutex_lock(&bfregi->lock);
bfregi->count[bfregn]--;
mutex_unlock(&bfregi->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
switch (state) {
case IB_QPS_RESET: return MLX5_QP_STATE_RST;
case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
default: return -1;
}
}
static int to_mlx5_st(enum ib_qp_type type)
{
switch (type) {
case IB_QPT_RC: return MLX5_QP_ST_RC;
case IB_QPT_UC: return MLX5_QP_ST_UC;
case IB_QPT_UD: return MLX5_QP_ST_UD;
case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
case IB_QPT_XRC_INI:
case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
case IB_QPT_SMI: return MLX5_QP_ST_QP0;
case MLX5_IB_QPT_HW_GSI: return MLX5_QP_ST_QP1;
case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
case IB_QPT_RAW_PACKET:
case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
case IB_QPT_MAX:
default: return -EINVAL;
}
}
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq,
struct mlx5_ib_cq *recv_cq);
static int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi, int bfregn)
{
int bfregs_per_sys_page;
int index_of_sys_page;
int offset;
bfregs_per_sys_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k) *
MLX5_NON_FP_BFREGS_PER_UAR;
index_of_sys_page = bfregn / bfregs_per_sys_page;
offset = bfregn % bfregs_per_sys_page / MLX5_NON_FP_BFREGS_PER_UAR;
return bfregi->sys_pages[index_of_sys_page] + offset;
}
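/* Illustrative mapping (assuming MLX5_NON_FP_BFREGS_PER_UAR == 2): if a
* system page holds 2 UARs, then bfregs_per_sys_page == 4 and bfregn 5
* lands on sys page 1 at offset (5 % 4) / 2 == 0, i.e. UAR index
* bfregi->sys_pages[1] + 0.
*/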
static int mlx5_ib_umem_get(struct mlx5_ib_dev *dev,
struct ib_pd *pd,
unsigned long addr, size_t size,
struct ib_umem **umem,
int *npages, int *page_shift, int *ncont,
u32 *offset)
{
int err;
*umem = ib_umem_get(pd->uobject->context, addr, size, 0, 0);
if (IS_ERR(*umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
return PTR_ERR(*umem);
}
mlx5_ib_cont_pages(*umem, addr, 0, npages, page_shift, ncont, NULL);
err = mlx5_ib_get_buf_offset(addr, *page_shift, offset);
if (err) {
mlx5_ib_warn(dev, "bad offset\n");
goto err_umem;
}
mlx5_ib_dbg(dev, "addr 0x%lx, size %zu, npages %d, page_shift %d, ncont %d, offset %d\n",
addr, size, *npages, *page_shift, *ncont, *offset);
return 0;
err_umem:
ib_umem_release(*umem);
*umem = NULL;
return err;
}
static void destroy_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_rwq *rwq)
{
struct mlx5_ib_ucontext *context;
if (rwq->create_flags & MLX5_IB_WQ_FLAGS_DELAY_DROP)
atomic_dec(&dev->delay_drop.rqs_cnt);
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &rwq->db);
if (rwq->umem)
ib_umem_release(rwq->umem);
}
static int create_user_rq(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_rwq *rwq,
struct mlx5_ib_create_wq *ucmd)
{
struct mlx5_ib_ucontext *context;
int page_shift = 0;
int npages;
u32 offset = 0;
int ncont = 0;
int err;
if (!ucmd->buf_addr)
return -EINVAL;
context = to_mucontext(pd->uobject->context);
rwq->umem = ib_umem_get(pd->uobject->context, ucmd->buf_addr,
rwq->buf_size, 0, 0);
if (IS_ERR(rwq->umem)) {
mlx5_ib_dbg(dev, "umem_get failed\n");
err = PTR_ERR(rwq->umem);
return err;
}
mlx5_ib_cont_pages(rwq->umem, ucmd->buf_addr, 0, &npages, &page_shift,
&ncont, NULL);
err = mlx5_ib_get_buf_offset(ucmd->buf_addr, page_shift,
&rwq->rq_page_offset);
if (err) {
mlx5_ib_warn(dev, "bad offset\n");
goto err_umem;
}
rwq->rq_num_pas = ncont;
rwq->page_shift = page_shift;
rwq->log_page_size = page_shift - MLX5_ADAPTER_PAGE_SHIFT;
rwq->wq_sig = !!(ucmd->flags & MLX5_WQ_FLAG_SIGNATURE);
mlx5_ib_dbg(dev, "addr 0x%llx, size %zd, npages %d, page_shift %d, ncont %d, offset %d\n",
(unsigned long long)ucmd->buf_addr, rwq->buf_size,
npages, page_shift, ncont, offset);
err = mlx5_ib_db_map_user(context, ucmd->db_addr, &rwq->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_umem;
}
rwq->create_type = MLX5_WQ_USER;
return 0;
err_umem:
ib_umem_release(rwq->umem);
return err;
}
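/* Convert a driver-internal bfreg number, counted over non-fast-path
* registers only, into an index over the full per-UAR register space
* (fast-path and non-fast-path) as reported back to user space.
*/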
static int adjust_bfregn(struct mlx5_ib_dev *dev,
struct mlx5_bfreg_info *bfregi, int bfregn)
{
return bfregn / MLX5_NON_FP_BFREGS_PER_UAR * MLX5_BFREGS_PER_UAR +
bfregn % MLX5_NON_FP_BFREGS_PER_UAR;
}
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct ib_udata *udata,
struct ib_qp_init_attr *attr,
u32 **in,
struct mlx5_ib_create_qp_resp *resp, int *inlen,
struct mlx5_ib_qp_base *base)
{
struct mlx5_ib_ucontext *context;
struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_ubuffer *ubuffer = &base->ubuffer;
int page_shift = 0;
int uar_index;
int npages;
u32 offset = 0;
int bfregn;
int ncont = 0;
__be64 *pas;
void *qpc;
int err;
err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
if (err) {
mlx5_ib_dbg(dev, "copy failed\n");
return err;
}
context = to_mucontext(pd->uobject->context);
/*
* TBD: should come from the verbs when we have the API
*/
if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
/* In CROSS_CHANNEL CQ and QP must use the same UAR */
bfregn = MLX5_CROSS_CHANNEL_BFREG;
else {
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_HIGH);
if (bfregn < 0) {
mlx5_ib_dbg(dev, "failed to allocate low latency BFREG\n");
mlx5_ib_dbg(dev, "reverting to medium latency\n");
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_MEDIUM);
if (bfregn < 0) {
mlx5_ib_dbg(dev, "failed to allocate medium latency BFREG\n");
mlx5_ib_dbg(dev, "reverting to high latency\n");
bfregn = alloc_bfreg(dev, &context->bfregi, MLX5_IB_LATENCY_CLASS_LOW);
if (bfregn < 0) {
mlx5_ib_warn(dev, "bfreg allocation failed\n");
return bfregn;
}
}
}
}
uar_index = bfregn_to_uar_index(dev, &context->bfregi, bfregn);
mlx5_ib_dbg(dev, "bfregn 0x%x, uar_index 0x%x\n", bfregn, uar_index);
qp->rq.offset = 0;
qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
err = set_user_buf_size(dev, qp, &ucmd, base, attr);
if (err)
goto err_bfreg;
if (ucmd.buf_addr && ubuffer->buf_size) {
ubuffer->buf_addr = ucmd.buf_addr;
err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr,
ubuffer->buf_size,
&ubuffer->umem, &npages, &page_shift,
&ncont, &offset);
if (err)
goto err_bfreg;
} else {
ubuffer->umem = NULL;
}
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * ncont;
*in = kvzalloc(*inlen, GFP_KERNEL);
if (!*in) {
err = -ENOMEM;
goto err_umem;
}
pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas);
if (ubuffer->umem)
mlx5_ib_populate_pas(dev, ubuffer->umem, page_shift, pas, 0);
qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
MLX5_SET(qpc, qpc, log_page_size, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(qpc, qpc, page_offset, offset);
MLX5_SET(qpc, qpc, uar_page, uar_index);
resp->bfreg_index = adjust_bfregn(dev, &context->bfregi, bfregn);
qp->bfregn = bfregn;
err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "map failed\n");
goto err_free;
}
err = ib_copy_to_udata(udata, resp, sizeof(*resp));
if (err) {
mlx5_ib_dbg(dev, "copy failed\n");
goto err_unmap;
}
qp->create_type = MLX5_QP_USER;
return 0;
err_unmap:
mlx5_ib_db_unmap_user(context, &qp->db);
err_free:
kvfree(*in);
err_umem:
if (ubuffer->umem)
ib_umem_release(ubuffer->umem);
err_bfreg:
free_bfreg(dev, &context->bfregi, bfregn);
return err;
}
static void destroy_qp_user(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct mlx5_ib_qp *qp, struct mlx5_ib_qp_base *base)
{
struct mlx5_ib_ucontext *context;
context = to_mucontext(pd->uobject->context);
mlx5_ib_db_unmap_user(context, &qp->db);
if (base->ubuffer.umem)
ib_umem_release(base->ubuffer.umem);
free_bfreg(dev, &context->bfregi, qp->bfregn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
struct ib_qp_init_attr *init_attr,
struct mlx5_ib_qp *qp,
u32 **in, int *inlen,
struct mlx5_ib_qp_base *base)
{
int uar_index;
void *qpc;
int err;
if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN |
IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK |
IB_QP_CREATE_IPOIB_UD_LSO |
IB_QP_CREATE_NETIF_QP |
mlx5_ib_create_qp_sqpn_qp1()))
return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
qp->bf.bfreg = &dev->fp_bfreg;
else
qp->bf.bfreg = &dev->bfreg;
/* We need to divide by two since each register is comprised of
* two buffers of identical size, namely odd and even
*/
qp->bf.buf_size = (1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size)) / 2;
uar_index = qp->bf.bfreg->index;
err = calc_sq_size(dev, init_attr, qp);
if (err < 0) {
mlx5_ib_dbg(dev, "err %d\n", err);
return err;
}
qp->rq.offset = 0;
qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
base->ubuffer.buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
err = mlx5_buf_alloc(dev->mdev, base->ubuffer.buf_size, &qp->buf);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
return err;
}
qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
*inlen = MLX5_ST_SZ_BYTES(create_qp_in) +
MLX5_FLD_SZ_BYTES(create_qp_in, pas[0]) * qp->buf.npages;
*in = kvzalloc(*inlen, GFP_KERNEL);
if (!*in) {
err = -ENOMEM;
goto err_buf;
}
qpc = MLX5_ADDR_OF(create_qp_in, *in, qpc);
MLX5_SET(qpc, qpc, uar_page, uar_index);
MLX5_SET(qpc, qpc, log_page_size, qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT);
/* Set "fast registration enabled" for all kernel QPs */
MLX5_SET(qpc, qpc, fre, 1);
MLX5_SET(qpc, qpc, rlky, 1);
if (init_attr->create_flags & mlx5_ib_create_qp_sqpn_qp1()) {
MLX5_SET(qpc, qpc, deth_sqpn, 1);
qp->flags |= MLX5_IB_QP_SQPN_QP1;
}
mlx5_fill_page_array(&qp->buf,
(__be64 *)MLX5_ADDR_OF(create_qp_in, *in, pas));
err = mlx5_db_alloc(dev->mdev, &qp->db);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
goto err_free;
}
qp->sq.wrid = kvmalloc_array(qp->sq.wqe_cnt,
sizeof(*qp->sq.wrid), GFP_KERNEL);
qp->sq.wr_data = kvmalloc_array(qp->sq.wqe_cnt,
sizeof(*qp->sq.wr_data), GFP_KERNEL);
qp->rq.wrid = kvmalloc_array(qp->rq.wqe_cnt,
sizeof(*qp->rq.wrid), GFP_KERNEL);
qp->sq.w_list = kvmalloc_array(qp->sq.wqe_cnt,
sizeof(*qp->sq.w_list), GFP_KERNEL);
qp->sq.wqe_head = kvmalloc_array(qp->sq.wqe_cnt,
sizeof(*qp->sq.wqe_head), GFP_KERNEL);
if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
!qp->sq.w_list || !qp->sq.wqe_head) {
err = -ENOMEM;
goto err_wrid;
}
qp->create_type = MLX5_QP_KERNEL;
return 0;
err_wrid:
kvfree(qp->sq.wqe_head);
kvfree(qp->sq.w_list);
kvfree(qp->sq.wrid);
kvfree(qp->sq.wr_data);
kvfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);
err_free:
kvfree(*in);
err_buf:
mlx5_buf_free(dev->mdev, &qp->buf);
return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
kvfree(qp->sq.wqe_head);
kvfree(qp->sq.w_list);
kvfree(qp->sq.wrid);
kvfree(qp->sq.wr_data);
kvfree(qp->rq.wrid);
mlx5_db_free(dev->mdev, &qp->db);
mlx5_buf_free(dev->mdev, &qp->buf);
}
static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
(attr->qp_type == IB_QPT_XRC_INI))
return MLX5_SRQ_RQ;
else if (!qp->has_rq)
return MLX5_ZERO_LEN_RQ;
else
return MLX5_NON_ZERO_RQ;
}
static int is_connected(enum ib_qp_type qp_type)
{
if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
return 1;
return 0;
}
static int create_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp,
struct mlx5_ib_sq *sq, u32 tdn)
{
u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
MLX5_SET(tisc, tisc, transport_domain, tdn);
if (qp->flags & MLX5_IB_QP_UNDERLAY)
MLX5_SET(tisc, tisc, underlay_qpn, qp->underlay_qpn);
return mlx5_core_create_tis(dev->mdev, in, sizeof(in), &sq->tisn);
}
static void destroy_raw_packet_qp_tis(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
mlx5_core_destroy_tis(dev->mdev, sq->tisn);
}
static int create_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq, void *qpin,
struct ib_pd *pd)
{
struct mlx5_ib_ubuffer *ubuffer = &sq->ubuffer;
__be64 *pas;
void *in;
void *sqc;
void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
void *wq;
int inlen;
int err;
int page_shift = 0;
int npages;
int ncont = 0;
u32 offset = 0;
err = mlx5_ib_umem_get(dev, pd, ubuffer->buf_addr, ubuffer->buf_size,
&sq->ubuffer.umem, &npages, &page_shift,
&ncont, &offset);
if (err)
return err;
inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * ncont;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in) {
err = -ENOMEM;
goto err_umem;
}
sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
MLX5_SET(sqc, sqc, flush_in_error_en, 1);
if (MLX5_CAP_ETH(dev->mdev, multi_pkt_send_wqe))
MLX5_SET(sqc, sqc, allow_multi_pkt_send_wqe, 1);
MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
MLX5_SET(sqc, sqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(sqc, sqc, cqn, MLX5_GET(qpc, qpc, cqn_snd));
MLX5_SET(sqc, sqc, tis_lst_sz, 1);
MLX5_SET(sqc, sqc, tis_num_0, sq->tisn);
if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
MLX5_CAP_ETH(dev->mdev, swp))
MLX5_SET(sqc, sqc, allow_swp, 1);
wq = MLX5_ADDR_OF(sqc, sqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
MLX5_SET(wq, wq, uar_page, MLX5_GET(qpc, qpc, uar_page));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_sq_size));
MLX5_SET(wq, wq, log_wq_pg_sz, page_shift - MLX5_ADAPTER_PAGE_SHIFT);
MLX5_SET(wq, wq, page_offset, offset);
pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, sq->ubuffer.umem, page_shift, pas, 0);
err = mlx5_core_create_sq_tracked(dev->mdev, in, inlen, &sq->base.mqp);
kvfree(in);
if (err)
goto err_umem;
return 0;
err_umem:
ib_umem_release(sq->ubuffer.umem);
sq->ubuffer.umem = NULL;
return err;
}
static void destroy_raw_packet_qp_sq(struct mlx5_ib_dev *dev,
struct mlx5_ib_sq *sq)
{
mlx5_core_destroy_sq_tracked(dev->mdev, &sq->base.mqp);
ib_umem_release(sq->ubuffer.umem);
}
static size_t get_rq_pas_size(void *qpc)
{
u32 log_page_size = MLX5_GET(qpc, qpc, log_page_size) + 12;
u32 log_rq_stride = MLX5_GET(qpc, qpc, log_rq_stride);
u32 log_rq_size = MLX5_GET(qpc, qpc, log_rq_size);
u32 page_offset = MLX5_GET(qpc, qpc, page_offset);
u32 po_quanta = 1 << (log_page_size - 6);
u32 rq_sz = 1 << (log_rq_size + 4 + log_rq_stride);
u32 page_size = 1 << log_page_size;
u32 rq_sz_po = rq_sz + (page_offset * po_quanta);
u32 rq_num_pas = (rq_sz_po + page_size - 1) / page_size;
return rq_num_pas * sizeof(u64);
}
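/* Illustrative PAS sizing: with a log_page_size field of 0 (4KB pages),
* log_rq_stride 2 (64-byte WQEs), log_rq_size 7 (128 WQEs) and
* page_offset 0, rq_sz = 1 << (7 + 4 + 2) = 8192 bytes, which spans two
* 4KB pages and therefore needs 2 * sizeof(u64) bytes of PAS.
*/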
static int create_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, void *qpin,
size_t qpinlen)
{
struct mlx5_ib_qp *mqp = rq->base.container_mibqp;
__be64 *pas;
__be64 *qp_pas;
void *in;
void *rqc;
void *wq;
void *qpc = MLX5_ADDR_OF(create_qp_in, qpin, qpc);
size_t rq_pas_size = get_rq_pas_size(qpc);
size_t inlen;
int err;
if (qpinlen < rq_pas_size + MLX5_BYTE_OFF(create_qp_in, pas))
return -EINVAL;
inlen = MLX5_ST_SZ_BYTES(create_rq_in) + rq_pas_size;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
if (!(rq->flags & MLX5_IB_RQ_CVLAN_STRIPPING))
MLX5_SET(rqc, rqc, vsd, 1);
MLX5_SET(rqc, rqc, mem_rq_type, MLX5_RQC_MEM_RQ_TYPE_MEMORY_RQ_INLINE);
MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
MLX5_SET(rqc, rqc, flush_in_error_en, 1);
MLX5_SET(rqc, rqc, user_index, MLX5_GET(qpc, qpc, user_index));
MLX5_SET(rqc, rqc, cqn, MLX5_GET(qpc, qpc, cqn_rcv));
if (mqp->flags & MLX5_IB_QP_CAP_SCATTER_FCS)
MLX5_SET(rqc, rqc, scatter_fcs, 1);
wq = MLX5_ADDR_OF(rqc, rqc, wq);
MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
MLX5_SET(wq, wq, end_padding_mode,
MLX5_GET(qpc, qpc, end_padding_mode));
MLX5_SET(wq, wq, page_offset, MLX5_GET(qpc, qpc, page_offset));
MLX5_SET(wq, wq, pd, MLX5_GET(qpc, qpc, pd));
MLX5_SET64(wq, wq, dbr_addr, MLX5_GET64(qpc, qpc, dbr_addr));
MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(qpc, qpc, log_rq_stride) + 4);
MLX5_SET(wq, wq, log_wq_pg_sz, MLX5_GET(qpc, qpc, log_page_size));
MLX5_SET(wq, wq, log_wq_sz, MLX5_GET(qpc, qpc, log_rq_size));
pas = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
qp_pas = (__be64 *)MLX5_ADDR_OF(create_qp_in, qpin, pas);
memcpy(pas, qp_pas, rq_pas_size);
err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rq->base.mqp);
kvfree(in);
return err;
}
static void destroy_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq)
{
mlx5_core_destroy_rq_tracked(dev->mdev, &rq->base.mqp);
}
static int create_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, u32 tdn)
{
u32 *in;
void *tirc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_DIRECT);
MLX5_SET(tirc, tirc, inline_rqn, rq->base.mqp.qpn);
MLX5_SET(tirc, tirc, transport_domain, tdn);
err = mlx5_core_create_tir(dev->mdev, in, inlen, &rq->tirn);
kvfree(in);
return err;
}
static void destroy_raw_packet_qp_tir(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq)
{
mlx5_core_destroy_tir(dev->mdev, rq->tirn);
}
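/* A raw packet QP is built from separate hardware objects: the send side
* is an SQ attached to a TIS, and the receive side is an RQ exposed
* through a directly-dispatched TIR. The QP number reported to the user
* is taken from the SQ when one exists, otherwise from the RQ.
*/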
static int create_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
u32 *in, size_t inlen,
struct ib_pd *pd)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
struct ib_uobject *uobj = pd->uobject;
struct ib_ucontext *ucontext = uobj->context;
struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
int err;
u32 tdn = mucontext->tdn;
if (qp->sq.wqe_cnt) {
err = create_raw_packet_qp_tis(dev, qp, sq, tdn);
if (err)
return err;
err = create_raw_packet_qp_sq(dev, sq, in, pd);
if (err)
goto err_destroy_tis;
sq->base.container_mibqp = qp;
sq->base.mqp.event = mlx5_ib_qp_event;
}
if (qp->rq.wqe_cnt) {
rq->base.container_mibqp = qp;
if (qp->flags & MLX5_IB_QP_CVLAN_STRIPPING)
rq->flags |= MLX5_IB_RQ_CVLAN_STRIPPING;
err = create_raw_packet_qp_rq(dev, rq, in, inlen);
if (err)
goto err_destroy_sq;
err = create_raw_packet_qp_tir(dev, rq, tdn);
if (err)
goto err_destroy_rq;
}
qp->trans_qp.base.mqp.qpn = qp->sq.wqe_cnt ? sq->base.mqp.qpn :
rq->base.mqp.qpn;
return 0;
err_destroy_rq:
destroy_raw_packet_qp_rq(dev, rq);
err_destroy_sq:
if (!qp->sq.wqe_cnt)
return err;
destroy_raw_packet_qp_sq(dev, sq);
err_destroy_tis:
destroy_raw_packet_qp_tis(dev, sq);
return err;
}
static void destroy_raw_packet_qp(struct mlx5_ib_dev *dev,
struct mlx5_ib_qp *qp)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
if (qp->rq.wqe_cnt) {
destroy_raw_packet_qp_tir(dev, rq);
destroy_raw_packet_qp_rq(dev, rq);
}
if (qp->sq.wqe_cnt) {
destroy_raw_packet_qp_sq(dev, sq);
destroy_raw_packet_qp_tis(dev, sq);
}
}
static void raw_packet_qp_copy_info(struct mlx5_ib_qp *qp,
struct mlx5_ib_raw_packet_qp *raw_packet_qp)
{
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
sq->sq = &qp->sq;
rq->rq = &qp->rq;
sq->doorbell = &qp->db;
rq->doorbell = &qp->db;
}
static void destroy_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
mlx5_core_destroy_tir(dev->mdev, qp->rss_qp.tirn);
}
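/* RSS raw packet QPs have no send or receive queues of their own; they
* are realized as an indirectly-dispatched TIR that spreads traffic over
* the receive WQs of the supplied indirection table, hashing (Toeplitz)
* on the L3/L4 fields requested in ucmd.rx_hash_fields_mask. IPv4 and
* IPv6 selections are mutually exclusive, as are TCP and UDP.
*/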
static int create_rss_raw_qp_tir(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
struct ib_uobject *uobj = pd->uobject;
struct ib_ucontext *ucontext = uobj->context;
struct mlx5_ib_ucontext *mucontext = to_mucontext(ucontext);
struct mlx5_ib_create_qp_resp resp = {};
int inlen;
int err;
u32 *in;
void *tirc;
void *hfso;
u32 selected_fields = 0;
size_t min_resp_len;
u32 tdn = mucontext->tdn;
struct mlx5_ib_create_qp_rss ucmd = {};
size_t required_cmd_sz;
if (init_attr->qp_type != IB_QPT_RAW_PACKET)
return -EOPNOTSUPP;
if (init_attr->create_flags || init_attr->send_cq)
return -EINVAL;
min_resp_len = offsetof(typeof(resp), bfreg_index) + sizeof(resp.bfreg_index);
if (udata->outlen < min_resp_len)
return -EINVAL;
required_cmd_sz = offsetof(typeof(ucmd), reserved1) + sizeof(ucmd.reserved1);
if (udata->inlen < required_cmd_sz) {
mlx5_ib_dbg(dev, "invalid inlen\n");
return -EINVAL;
}
if (udata->inlen > sizeof(ucmd) &&
!ib_is_udata_cleared(udata, sizeof(ucmd),
udata->inlen - sizeof(ucmd))) {
mlx5_ib_dbg(dev, "inlen is not supported\n");
return -EOPNOTSUPP;
}
if (ib_copy_from_udata(&ucmd, udata, min(sizeof(ucmd), udata->inlen))) {
mlx5_ib_dbg(dev, "copy failed\n");
return -EFAULT;
}
if (ucmd.comp_mask) {
mlx5_ib_dbg(dev, "invalid comp mask\n");
return -EOPNOTSUPP;
}
if (memchr_inv(ucmd.reserved, 0, sizeof(ucmd.reserved)) || ucmd.reserved1) {
mlx5_ib_dbg(dev, "invalid reserved\n");
return -EOPNOTSUPP;
}
err = ib_copy_to_udata(udata, &resp, min_resp_len);
if (err) {
mlx5_ib_dbg(dev, "copy failed\n");
return -EINVAL;
}
inlen = MLX5_ST_SZ_BYTES(create_tir_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
MLX5_SET(tirc, tirc, disp_type,
MLX5_TIRC_DISP_TYPE_INDIRECT);
MLX5_SET(tirc, tirc, indirect_table,
init_attr->rwq_ind_tbl->ind_tbl_num);
MLX5_SET(tirc, tirc, transport_domain, tdn);
hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
switch (ucmd.rx_hash_function) {
case MLX5_RX_HASH_FUNC_TOEPLITZ:
{
void *rss_key = MLX5_ADDR_OF(tirc, tirc, rx_hash_toeplitz_key);
size_t len = MLX5_FLD_SZ_BYTES(tirc, rx_hash_toeplitz_key);
if (len != ucmd.rx_key_len) {
err = -EINVAL;
goto err;
}
MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_TOEPLITZ);
MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
memcpy(rss_key, ucmd.rx_hash_key, len);
break;
}
default:
err = -EOPNOTSUPP;
goto err;
}
if (!ucmd.rx_hash_fields_mask) {
/* special case when this TIR serves as steering entry without hashing */
if (!init_attr->rwq_ind_tbl->log_ind_tbl_size)
goto create_tir;
err = -EINVAL;
goto err;
}
if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4)) &&
((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))) {
err = -EINVAL;
goto err;
}
/* If none of IPV4 & IPV6 SRC/DST was set - this bit field is ignored */
if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4))
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV4);
else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
MLX5_L3_PROT_TYPE_IPV6);
if (((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP)) &&
((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))) {
err = -EINVAL;
goto err;
}
/* If none of TCP & UDP SRC/DST was set - this bit field is ignored */
if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP))
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_TCP);
else if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
MLX5_L4_PROT_TYPE_UDP);
if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV4) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_IPV6))
selected_fields |= MLX5_HASH_FIELD_SEL_SRC_IP;
if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV4) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_IPV6))
selected_fields |= MLX5_HASH_FIELD_SEL_DST_IP;
if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_TCP) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_SRC_PORT_UDP))
selected_fields |= MLX5_HASH_FIELD_SEL_L4_SPORT;
if ((ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_TCP) ||
(ucmd.rx_hash_fields_mask & MLX5_RX_HASH_DST_PORT_UDP))
selected_fields |= MLX5_HASH_FIELD_SEL_L4_DPORT;
MLX5_SET(rx_hash_field_select, hfso, selected_fields, selected_fields);
create_tir:
err = mlx5_core_create_tir(dev->mdev, in, inlen, &qp->rss_qp.tirn);
if (err)
goto err;
kvfree(in);
/* qpn is reserved for that QP */
qp->trans_qp.base.mqp.qpn = 0;
qp->flags |= MLX5_IB_QP_RSS;
return 0;
err:
kvfree(in);
return err;
}
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_resources *devr = &dev->devr;
int inlen = MLX5_ST_SZ_BYTES(create_qp_in);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_ib_create_qp_resp resp;
struct mlx5_ib_cq *send_cq;
struct mlx5_ib_cq *recv_cq;
unsigned long flags;
u32 uidx = MLX5_IB_DEFAULT_UIDX;
struct mlx5_ib_create_qp ucmd;
struct mlx5_ib_qp_base *base;
int mlx5_st;
void *qpc;
u32 *in;
int err;
mutex_init(&qp->mutex);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
mlx5_st = to_mlx5_st(init_attr->qp_type);
if (mlx5_st < 0)
return -EINVAL;
if (init_attr->rwq_ind_tbl) {
if (!udata)
return -ENOSYS;
err = create_rss_raw_qp_tir(dev, qp, pd, init_attr, udata);
return err;
}
if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
if (!MLX5_CAP_GEN(mdev, block_lb_mc)) {
mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
return -EINVAL;
} else {
qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
}
}
if (init_attr->create_flags &
(IB_QP_CREATE_CROSS_CHANNEL |
IB_QP_CREATE_MANAGED_SEND |
IB_QP_CREATE_MANAGED_RECV)) {
if (!MLX5_CAP_GEN(mdev, cd)) {
mlx5_ib_dbg(dev, "cross-channel isn't supported\n");
return -EINVAL;
}
if (init_attr->create_flags & IB_QP_CREATE_CROSS_CHANNEL)
qp->flags |= MLX5_IB_QP_CROSS_CHANNEL;
if (init_attr->create_flags & IB_QP_CREATE_MANAGED_SEND)
qp->flags |= MLX5_IB_QP_MANAGED_SEND;
if (init_attr->create_flags & IB_QP_CREATE_MANAGED_RECV)
qp->flags |= MLX5_IB_QP_MANAGED_RECV;
}
if (init_attr->qp_type == IB_QPT_UD &&
(init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO))
if (!MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
mlx5_ib_dbg(dev, "ipoib UD lso qp isn't supported\n");
return -EOPNOTSUPP;
}
if (init_attr->create_flags & IB_QP_CREATE_SCATTER_FCS) {
if (init_attr->qp_type != IB_QPT_RAW_PACKET) {
mlx5_ib_dbg(dev, "Scatter FCS is supported only for Raw Packet QPs\n");
return -EOPNOTSUPP;
}
if (!MLX5_CAP_GEN(dev->mdev, eth_net_offloads) ||
!MLX5_CAP_ETH(dev->mdev, scatter_fcs)) {
mlx5_ib_dbg(dev, "Scatter FCS isn't supported\n");
return -EOPNOTSUPP;
}
qp->flags |= MLX5_IB_QP_CAP_SCATTER_FCS;
}
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
if (init_attr->create_flags & IB_QP_CREATE_CVLAN_STRIPPING) {
if (!(MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
MLX5_CAP_ETH(dev->mdev, vlan_cap)) ||
(init_attr->qp_type != IB_QPT_RAW_PACKET))
return -EOPNOTSUPP;
qp->flags |= MLX5_IB_QP_CVLAN_STRIPPING;
}
if (pd && pd->uobject) {
if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
mlx5_ib_dbg(dev, "copy failed\n");
return -EFAULT;
}
err = get_qp_user_index(to_mucontext(pd->uobject->context),
&ucmd, udata->inlen, &uidx);
if (err)
return err;
qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
if (init_attr->create_flags & IB_QP_CREATE_SOURCE_QPN) {
if (init_attr->qp_type != IB_QPT_UD ||
(MLX5_CAP_GEN(dev->mdev, port_type) !=
MLX5_CAP_PORT_TYPE_IB) ||
!mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS)) {
mlx5_ib_dbg(dev, "Source QP option isn't supported\n");
return -EOPNOTSUPP;
}
qp->flags |= MLX5_IB_QP_UNDERLAY;
qp->underlay_qpn = init_attr->source_qpn;
}
} else {
qp->wq_sig = !!wq_signature;
}
base = (init_attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
qp->has_rq = qp_has_rq(init_attr);
err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
qp, (pd && pd->uobject) ? &ucmd : NULL);
if (err) {
mlx5_ib_dbg(dev, "err %d\n", err);
return err;
}
if (pd) {
if (pd->uobject) {
__u32 max_wqes =
1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
mlx5_ib_dbg(dev, "invalid rq params\n");
return -EINVAL;
}
if (ucmd.sq_wqe_count > max_wqes) {
mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
ucmd.sq_wqe_count, max_wqes);
return -EINVAL;
}
if (init_attr->create_flags &
mlx5_ib_create_qp_sqpn_qp1()) {
mlx5_ib_dbg(dev, "user-space is not allowed to create UD QPs spoofing as QP1\n");
return -EINVAL;
}
err = create_user_qp(dev, pd, qp, udata, init_attr, &in,
&resp, &inlen, base);
if (err)
mlx5_ib_dbg(dev, "err %d\n", err);
} else {
err = create_kernel_qp(dev, init_attr, qp, &in, &inlen,
base);
if (err)
mlx5_ib_dbg(dev, "err %d\n", err);
}
if (err)
return err;
} else {
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
qp->create_type = MLX5_QP_EMPTY;
}
if (is_sqp(init_attr->qp_type))
qp->port = init_attr->port_num;
qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
MLX5_SET(qpc, qpc, st, mlx5_st);
MLX5_SET(qpc, qpc, pm_state, MLX5_QP_PM_MIGRATED);
if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
MLX5_SET(qpc, qpc, pd, to_mpd(pd ? pd : devr->p0)->pdn);
else
MLX5_SET(qpc, qpc, latency_sensitive, 1);
if (qp->wq_sig)
MLX5_SET(qpc, qpc, wq_signature, 1);
if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
MLX5_SET(qpc, qpc, block_lb_mc, 1);
if (qp->flags & MLX5_IB_QP_CROSS_CHANNEL)
MLX5_SET(qpc, qpc, cd_master, 1);
if (qp->flags & MLX5_IB_QP_MANAGED_SEND)
MLX5_SET(qpc, qpc, cd_slave_send, 1);
if (qp->flags & MLX5_IB_QP_MANAGED_RECV)
MLX5_SET(qpc, qpc, cd_slave_receive, 1);
if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
int rcqe_sz;
int scqe_sz;
rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
if (rcqe_sz == 128)
MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
else
MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
if (scqe_sz == 128)
MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
else
MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
}
}
if (qp->rq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_rq_stride, qp->rq.wqe_shift - 4);
MLX5_SET(qpc, qpc, log_rq_size, ilog2(qp->rq.wqe_cnt));
}
MLX5_SET(qpc, qpc, rq_type, get_rx_type(qp, init_attr));
if (qp->sq.wqe_cnt) {
MLX5_SET(qpc, qpc, log_sq_size, ilog2(qp->sq.wqe_cnt));
} else {
MLX5_SET(qpc, qpc, no_sq, 1);
if (init_attr->srq &&
init_attr->srq->srq_type == IB_SRQT_TM)
MLX5_SET(qpc, qpc, offload_type,
MLX5_QPC_OFFLOAD_TYPE_RNDV);
}
/* Set default resources */
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
MLX5_SET(qpc, qpc, cqn_snd, to_mcq(devr->c0)->mcq.cqn);
MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
MLX5_SET(qpc, qpc, xrcd, to_mxrcd(init_attr->xrcd)->xrcdn);
break;
case IB_QPT_XRC_INI:
MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(devr->c0)->mcq.cqn);
MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s0)->msrq.srqn);
break;
default:
if (init_attr->srq) {
MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x0)->xrcdn);
MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(init_attr->srq)->msrq.srqn);
} else {
MLX5_SET(qpc, qpc, xrcd, to_mxrcd(devr->x1)->xrcdn);
MLX5_SET(qpc, qpc, srqn_rmpn_xrqn, to_msrq(devr->s1)->msrq.srqn);
}
}
if (init_attr->send_cq)
MLX5_SET(qpc, qpc, cqn_snd, to_mcq(init_attr->send_cq)->mcq.cqn);
if (init_attr->recv_cq)
MLX5_SET(qpc, qpc, cqn_rcv, to_mcq(init_attr->recv_cq)->mcq.cqn);
MLX5_SET64(qpc, qpc, dbr_addr, qp->db.dma);
/* 0xffffff means we ask to work with cqe version 0 */
if (MLX5_CAP_GEN(mdev, cqe_version) == MLX5_CQE_VERSION_V1)
MLX5_SET(qpc, qpc, user_index, uidx);
/* we use IB_QP_CREATE_IPOIB_UD_LSO to indicate an IPoIB QP */
if (init_attr->qp_type == IB_QPT_UD &&
(init_attr->create_flags & IB_QP_CREATE_IPOIB_UD_LSO)) {
MLX5_SET(qpc, qpc, ulp_stateless_offload_mode, 1);
qp->flags |= MLX5_IB_QP_LSO;
}
if (inlen < 0) {
err = -EINVAL;
goto err;
}
if (init_attr->qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
qp->raw_packet_qp.sq.ubuffer.buf_addr = ucmd.sq_buf_addr;
raw_packet_qp_copy_info(qp, &qp->raw_packet_qp);
err = create_raw_packet_qp(dev, qp, in, inlen, pd);
} else {
err = mlx5_core_create_qp(dev->mdev, &base->mqp, in, inlen);
}
if (err) {
mlx5_ib_dbg(dev, "create qp failed\n");
goto err_create;
}
kvfree(in);
base->container_mibqp = qp;
base->mqp.event = mlx5_ib_qp_event;
get_cqs(init_attr->qp_type, init_attr->send_cq, init_attr->recv_cq,
&send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
/* Maintain device to QPs access, needed for further handling via reset
* flow
*/
list_add_tail(&qp->qps_list, &dev->qp_list);
/* Maintain CQ to QPs access, needed for further handling via reset flow
*/
if (send_cq)
list_add_tail(&qp->cq_send_list, &send_cq->list_send_qp);
if (recv_cq)
list_add_tail(&qp->cq_recv_list, &recv_cq->list_recv_qp);
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
return 0;
err_create:
if (qp->create_type == MLX5_QP_USER)
destroy_qp_user(dev, pd, qp, base);
else if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
err:
kvfree(in);
return err;
}
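/* Lock the send and receive CQs in a consistent order (by CQN) so that
* concurrent lockers cannot deadlock. The __acquire()/__release()
* annotations keep sparse happy in the degenerate cases where a CQ is
* missing or both pointers name the same CQ.
*/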
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
if (send_cq) {
if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_lock(&send_cq->lock);
spin_lock_nested(&recv_cq->lock,
SINGLE_DEPTH_NESTING);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
} else {
spin_lock(&recv_cq->lock);
spin_lock_nested(&send_cq->lock,
SINGLE_DEPTH_NESTING);
}
} else {
spin_lock(&send_cq->lock);
__acquire(&recv_cq->lock);
}
} else if (recv_cq) {
spin_lock(&recv_cq->lock);
__acquire(&send_cq->lock);
} else {
__acquire(&send_cq->lock);
__acquire(&recv_cq->lock);
}
}
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
if (send_cq) {
if (recv_cq) {
if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
spin_unlock(&recv_cq->lock);
spin_unlock(&send_cq->lock);
} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
__release(&recv_cq->lock);
spin_unlock(&send_cq->lock);
} else {
spin_unlock(&send_cq->lock);
spin_unlock(&recv_cq->lock);
}
} else {
__release(&recv_cq->lock);
spin_unlock(&send_cq->lock);
}
} else if (recv_cq) {
__release(&send_cq->lock);
spin_unlock(&recv_cq->lock);
} else {
__release(&recv_cq->lock);
__release(&send_cq->lock);
}
}
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
return to_mpd(qp->ibqp.pd);
}
static void get_cqs(enum ib_qp_type qp_type,
struct ib_cq *ib_send_cq, struct ib_cq *ib_recv_cq,
struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
switch (qp_type) {
case IB_QPT_XRC_TGT:
*send_cq = NULL;
*recv_cq = NULL;
break;
case MLX5_IB_QPT_REG_UMR:
case IB_QPT_XRC_INI:
*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = NULL;
break;
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
case IB_QPT_RAW_IPV6:
case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_RAW_PACKET:
*send_cq = ib_send_cq ? to_mcq(ib_send_cq) : NULL;
*recv_cq = ib_recv_cq ? to_mcq(ib_recv_cq) : NULL;
break;
case IB_QPT_MAX:
default:
*send_cq = NULL;
*recv_cq = NULL;
break;
}
}
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct mlx5_modify_raw_qp_param *raw_qp_param,
u8 lag_tx_affinity);
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_ib_qp_base *base;
unsigned long flags;
int err;
if (qp->ibqp.rwq_ind_tbl) {
destroy_rss_raw_qp_tir(dev, qp);
return;
}
base = (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) ?
&qp->raw_packet_qp.rq.base :
&qp->trans_qp.base;
if (qp->state != IB_QPS_RESET) {
if (qp->ibqp.qp_type != IB_QPT_RAW_PACKET &&
!(qp->flags & MLX5_IB_QP_UNDERLAY)) {
err = mlx5_core_qp_modify(dev->mdev,
MLX5_CMD_OP_2RST_QP, 0,
NULL, &base->mqp);
} else {
struct mlx5_modify_raw_qp_param raw_qp_param = {
.operation = MLX5_CMD_OP_2RST_QP
};
err = modify_raw_packet_qp(dev, qp, &raw_qp_param, 0);
}
if (err)
mlx5_ib_warn(dev, "mlx5_ib: modify QP 0x%06x to RESET failed\n",
base->mqp.qpn);
}
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
spin_lock_irqsave(&dev->reset_flow_resource_lock, flags);
mlx5_ib_lock_cqs(send_cq, recv_cq);
/* del from lists under both locks above to protect reset flow paths */
list_del(&qp->qps_list);
if (send_cq)
list_del(&qp->cq_send_list);
if (recv_cq)
list_del(&qp->cq_recv_list);
if (qp->create_type == MLX5_QP_KERNEL) {
__mlx5_ib_cq_clean(recv_cq, base->mqp.qpn,
qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
if (send_cq != recv_cq)
__mlx5_ib_cq_clean(send_cq, base->mqp.qpn,
NULL);
}
mlx5_ib_unlock_cqs(send_cq, recv_cq);
spin_unlock_irqrestore(&dev->reset_flow_resource_lock, flags);
if (qp->ibqp.qp_type == IB_QPT_RAW_PACKET ||
qp->flags & MLX5_IB_QP_UNDERLAY) {
destroy_raw_packet_qp(dev, qp);
} else {
err = mlx5_core_destroy_qp(dev->mdev, &base->mqp);
if (err)
mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n",
base->mqp.qpn);
}
if (qp->create_type == MLX5_QP_KERNEL)
destroy_qp_kernel(dev, qp);
else if (qp->create_type == MLX5_QP_USER)
destroy_qp_user(dev, &get_pd(qp)->ibpd, qp, base);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
switch (type) {
case IB_QPT_SMI:
return "IB_QPT_SMI";
case IB_QPT_GSI:
return "IB_QPT_GSI";
case IB_QPT_RC:
return "IB_QPT_RC";
case IB_QPT_UC:
return "IB_QPT_UC";
case IB_QPT_UD:
return "IB_QPT_UD";
case IB_QPT_RAW_IPV6:
return "IB_QPT_RAW_IPV6";
case IB_QPT_RAW_ETHERTYPE:
return "IB_QPT_RAW_ETHERTYPE";
case IB_QPT_XRC_INI:
return "IB_QPT_XRC_INI";
case IB_QPT_XRC_TGT:
return "IB_QPT_XRC_TGT";
case IB_QPT_RAW_PACKET:
return "IB_QPT_RAW_PACKET";
case MLX5_IB_QPT_REG_UMR:
return "MLX5_IB_QPT_REG_UMR";
case IB_QPT_MAX:
default:
return "Invalid QP type";
}
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
struct ib_qp_init_attr *init_attr,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev;
struct mlx5_ib_qp *qp;
u16 xrcdn = 0;
int err;
if (pd) {
dev = to_mdev(pd->device);
if (init_attr->qp_type == IB_QPT_RAW_PACKET) {
if (!pd->uobject) {
mlx5_ib_dbg(dev, "Raw Packet QP is not supported for kernel consumers\n");
return ERR_PTR(-EINVAL);
} else if (!to_mucontext(pd->uobject->context)->cqe_version) {
mlx5_ib_dbg(dev, "Raw Packet QP is only supported for CQE version > 0\n");
return ERR_PTR(-EINVAL);
}
}
} else {
/* being cautious here */
if (init_attr->qp_type != IB_QPT_XRC_TGT &&
init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
pr_warn("%s: no PD for transport %s\n", __func__,
ib_qp_type_str(init_attr->qp_type));
return ERR_PTR(-EINVAL);
}
dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
}
switch (init_attr->qp_type) {
case IB_QPT_XRC_TGT:
case IB_QPT_XRC_INI:
if (!MLX5_CAP_GEN(dev->mdev, xrc)) {
mlx5_ib_dbg(dev, "XRC not supported\n");
return ERR_PTR(-ENOSYS);
}
init_attr->recv_cq = NULL;
if (init_attr->qp_type == IB_QPT_XRC_TGT) {
xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
init_attr->send_cq = NULL;
}
/* fall through */
case IB_QPT_RAW_PACKET:
case IB_QPT_RC:
case IB_QPT_UC:
case IB_QPT_UD:
case IB_QPT_SMI:
case MLX5_IB_QPT_HW_GSI:
case MLX5_IB_QPT_REG_UMR:
qp = kzalloc(sizeof(*qp), GFP_KERNEL);
if (!qp)
return ERR_PTR(-ENOMEM);
err = create_qp_common(dev, pd, init_attr, udata, qp);
if (err) {
mlx5_ib_dbg(dev, "create_qp_common failed\n");
kfree(qp);
return ERR_PTR(err);
}
if (is_qp0(init_attr->qp_type))
qp->ibqp.qp_num = 0;
else if (is_qp1(init_attr->qp_type))
qp->ibqp.qp_num = 1;
else
qp->ibqp.qp_num = qp->trans_qp.base.mqp.qpn;
mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
qp->ibqp.qp_num, qp->trans_qp.base.mqp.qpn,
init_attr->recv_cq ? to_mcq(init_attr->recv_cq)->mcq.cqn : -1,
init_attr->send_cq ? to_mcq(init_attr->send_cq)->mcq.cqn : -1);
qp->trans_qp.xrcdn = xrcdn;
break;
case IB_QPT_GSI:
return mlx5_ib_gsi_create_qp(pd, init_attr);
case IB_QPT_RAW_IPV6:
case IB_QPT_RAW_ETHERTYPE:
case IB_QPT_MAX:
default:
mlx5_ib_dbg(dev, "unsupported qp type %d\n",
init_attr->qp_type);
/* Don't support raw QPs */
return ERR_PTR(-EINVAL);
}
return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
if (unlikely(qp->qp_type == IB_QPT_GSI))
return mlx5_ib_gsi_destroy_qp(qp);
destroy_qp_common(dev, mqp);
kfree(mqp);
return 0;
}
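/* Build the RRE/RAE/RWE access bits for the QPC from the requested (or
* current) access flags. If the responder has no RDMA read/atomic
* resources (dest_rd_atomic == 0), only remote write can be honored, so
* the other access bits are masked off.
*/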
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
int attr_mask)
{
u32 hw_access_flags = 0;
u8 dest_rd_atomic;
u32 access_flags;
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
dest_rd_atomic = attr->max_dest_rd_atomic;
else
dest_rd_atomic = qp->trans_qp.resp_depth;
if (attr_mask & IB_QP_ACCESS_FLAGS)
access_flags = attr->qp_access_flags;
else
access_flags = qp->trans_qp.atomic_rd_en;
if (!dest_rd_atomic)
access_flags &= IB_ACCESS_REMOTE_WRITE;
if (access_flags & IB_ACCESS_REMOTE_READ)
hw_access_flags |= MLX5_QP_BIT_RRE;
if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
if (access_flags & IB_ACCESS_REMOTE_WRITE)
hw_access_flags |= MLX5_QP_BIT_RWE;
return cpu_to_be32(hw_access_flags);
}
enum {
MLX5_PATH_FLAG_FL = 1 << 0,
MLX5_PATH_FLAG_FREE_AR = 1 << 1,
MLX5_PATH_FLAG_COUNTER = 1 << 2,
};
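/* Translate an ib_rate value into the device's stat_rate encoding:
* IB_RATE_PORT_CURRENT maps to 0, and any other rate is stepped down to
* the nearest lower rate the device advertises in stat_rate_support
* before the MLX5_STAT_RATE_OFFSET bias is applied.
*/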
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
if (rate == IB_RATE_PORT_CURRENT)
return 0;
if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS)
return -EINVAL;
while (rate != IB_RATE_PORT_CURRENT &&
!(1 << (rate + MLX5_STAT_RATE_OFFSET) &
MLX5_CAP_GEN(dev->mdev, stat_rate_support)))
--rate;
return rate ? rate + MLX5_STAT_RATE_OFFSET : rate;
}
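/* Program the Ethernet priority (derived from the SL) on the SQ's TIS. */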
static int modify_raw_packet_eth_prio(struct mlx5_core_dev *dev,
struct mlx5_ib_sq *sq, u8 sl)
{
void *in;
void *tisc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_tis_in, in, bitmask.prio, 1);
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, prio, ((sl & 0x7) << 1));
err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
kvfree(in);
return err;
}
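/* Set the LAG tx port affinity on the SQ's TIS. */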
static int modify_raw_packet_tx_affinity(struct mlx5_core_dev *dev,
struct mlx5_ib_sq *sq, u8 tx_affinity)
{
void *in;
void *tisc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_tis_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_tis_in, in, bitmask.lag_tx_port_affinity, 1);
tisc = MLX5_ADDR_OF(modify_tis_in, in, ctx);
MLX5_SET(tisc, tisc, lag_tx_port_affinity, tx_affinity);
err = mlx5_core_modify_tis(dev, sq->tisn, in, inlen);
kvfree(in);
return err;
}
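/*
* Fill a primary or alternate address path from an rdma_ah_attr. RoCE
* paths carry the destination MAC, the UDP source port and, for RoCE v2,
* the DSCP; IB paths carry the remote LID, path bits and the FL/free-AR
* flags. GRH fields are copied for both. For RAW_PACKET QPs the SL is
* programmed on the TIS rather than in the QP context.
*/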
static int mlx5_set_path(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct rdma_ah_attr *ah,
struct mlx5_qp_path *path, u8 port, int attr_mask,
u32 path_flags, const struct ib_qp_attr *attr,
bool alt)
{
const struct ib_global_route *grh = rdma_ah_read_grh(ah);
int err;
enum ib_gid_type gid_type;
u8 ah_flags = rdma_ah_get_ah_flags(ah);
u8 sl = rdma_ah_get_sl(ah);
if (attr_mask & IB_QP_PKEY_INDEX)
path->pkey_index = cpu_to_be16(alt ? attr->alt_pkey_index :
attr->pkey_index);
if (ah_flags & IB_AH_GRH) {
if (grh->sgid_index >=
dev->mdev->port_caps[port - 1].gid_table_len) {
pr_err("sgid_index (%u) too large. max is %d\n",
grh->sgid_index,
dev->mdev->port_caps[port - 1].gid_table_len);
return -EINVAL;
}
}
if (ah->type == RDMA_AH_ATTR_TYPE_ROCE) {
if (!(ah_flags & IB_AH_GRH))
return -EINVAL;
err = mlx5_get_roce_gid_type(dev, port, grh->sgid_index,
&gid_type);
if (err)
return err;
memcpy(path->rmac, ah->roce.dmac, sizeof(ah->roce.dmac));
path->udp_sport = mlx5_get_roce_udp_sport(dev, port,
grh->sgid_index);
path->dci_cfi_prio_sl = (sl & 0x7) << 4;
if (gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP)
path->ecn_dscp = (grh->traffic_class >> 2) & 0x3f;
} else {
path->fl_free_ar = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
path->fl_free_ar |=
(path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x40 : 0;
path->rlid = cpu_to_be16(rdma_ah_get_dlid(ah));
path->grh_mlid = rdma_ah_get_path_bits(ah) & 0x7f;
if (ah_flags & IB_AH_GRH)
path->grh_mlid |= 1 << 7;
path->dci_cfi_prio_sl = sl & 0xf;
}
if (ah_flags & IB_AH_GRH) {
path->mgid_index = grh->sgid_index;
path->hop_limit = grh->hop_limit;
path->tclass_flowlabel =
cpu_to_be32((grh->traffic_class << 20) |
(grh->flow_label));
memcpy(path->rgid, grh->dgid.raw, 16);
}
err = ib_rate_to_mlx5(dev, rdma_ah_get_static_rate(ah));
if (err < 0)
return err;
path->static_rate = err;
path->port = port;
if (attr_mask & IB_QP_TIMEOUT)
path->ackto_lt = (alt ? attr->alt_timeout : attr->timeout) << 3;
if ((qp->ibqp.qp_type == IB_QPT_RAW_PACKET) && qp->sq.wqe_cnt)
return modify_raw_packet_eth_prio(dev->mdev,
&qp->raw_packet_qp.sq,
sl & 0xf);
return 0;
}
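/*
* Optional parameters the firmware accepts for each supported
* (current state, new state, transport type) combination.
*/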
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
[MLX5_QP_STATE_INIT] = {
[MLX5_QP_STATE_INIT] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_PRI_PORT,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_PRI_PORT,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_PRI_PORT,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PKEY_INDEX,
},
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_STATE_RTS] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PM_STATE |
MLX5_QP_OPTPAR_RNR_TIMEOUT,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PM_STATE,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
},
},
[MLX5_QP_STATE_RTS] = {
[MLX5_QP_STATE_RTS] = {
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_RNR_TIMEOUT |
MLX5_QP_OPTPAR_PM_STATE |
MLX5_QP_OPTPAR_ALT_ADDR_PATH,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_PM_STATE |
MLX5_QP_OPTPAR_ALT_ADDR_PATH,
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
MLX5_QP_OPTPAR_SRQN |
MLX5_QP_OPTPAR_CQN_RCV,
},
},
[MLX5_QP_STATE_SQER] = {
[MLX5_QP_STATE_RTS] = {
[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_RAE |
MLX5_QP_OPTPAR_RRE,
},
},
};
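/* Map a single IB_QP_* attribute mask bit to the mlx5 optional parameter bits. */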
static int ib_nr_to_mlx5_nr(int ib_mask)
{
switch (ib_mask) {
case IB_QP_STATE:
return 0;
case IB_QP_CUR_STATE:
return 0;
case IB_QP_EN_SQD_ASYNC_NOTIFY:
return 0;
case IB_QP_ACCESS_FLAGS:
return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
MLX5_QP_OPTPAR_RAE;
case IB_QP_PKEY_INDEX:
return MLX5_QP_OPTPAR_PKEY_INDEX;
case IB_QP_PORT:
return MLX5_QP_OPTPAR_PRI_PORT;
case IB_QP_QKEY:
return MLX5_QP_OPTPAR_Q_KEY;
case IB_QP_AV:
return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
MLX5_QP_OPTPAR_PRI_PORT;
case IB_QP_PATH_MTU:
return 0;
case IB_QP_TIMEOUT:
return MLX5_QP_OPTPAR_ACK_TIMEOUT;
case IB_QP_RETRY_CNT:
return MLX5_QP_OPTPAR_RETRY_COUNT;
case IB_QP_RNR_RETRY:
return MLX5_QP_OPTPAR_RNR_RETRY;
case IB_QP_RQ_PSN:
return 0;
case IB_QP_MAX_QP_RD_ATOMIC:
return MLX5_QP_OPTPAR_SRA_MAX;
case IB_QP_ALT_PATH:
return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
case IB_QP_MIN_RNR_TIMER:
return MLX5_QP_OPTPAR_RNR_TIMEOUT;
case IB_QP_SQ_PSN:
return 0;
case IB_QP_MAX_DEST_RD_ATOMIC:
return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
case IB_QP_PATH_MIG_STATE:
return MLX5_QP_OPTPAR_PM_STATE;
case IB_QP_CAP:
return 0;
case IB_QP_DEST_QPN:
return 0;
}
return 0;
}
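/* Fold a full IB attribute mask into the mlx5 optpar bitmap, bit by bit. */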
static int ib_mask_to_mlx5_opt(int ib_mask)
{
int result = 0;
int i;
for (i = 0; i < 8 * sizeof(int); i++) {
if ((1 << i) & ib_mask)
result |= ib_nr_to_mlx5_nr(1 << i);
}
return result;
}
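/*
* Move the RQ of a RAW_PACKET QP to a new state and, when requested and
* supported by the firmware, bind it to a queue counter set id.
*/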
static int modify_raw_packet_qp_rq(struct mlx5_ib_dev *dev,
struct mlx5_ib_rq *rq, int new_state,
const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
void *in;
void *rqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_rq_in, in, rq_state, rq->state);
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(rqc, rqc, state, new_state);
if (raw_qp_param->set_mask & MLX5_RAW_QP_MOD_SET_RQ_Q_CTR_ID) {
if (MLX5_CAP_GEN(dev->mdev, modify_rq_counter_set_id)) {
MLX5_SET64(modify_rq_in, in, modify_bitmask,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID);
MLX5_SET(rqc, rqc, counter_set_id, raw_qp_param->rq_q_ctr_id);
} else {
pr_info_once("%s: RAW PACKET QP counters are not supported on current FW\n",
dev->ib_dev.name);
}
}
err = mlx5_core_modify_rq(dev->mdev, rq->base.mqp.qpn, in, inlen);
if (err)
goto out;
rq->state = new_state;
out:
kvfree(in);
return err;
}
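/*
* Move the SQ of a RAW_PACKET QP to a new state and update its packet
* pacing rate limit: a new rate is added to the rate table before the
* MODIFY_SQ command, and the old rate is released only after the new
* one has been applied (or when the SQ leaves the RDY state).
*/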
static int modify_raw_packet_qp_sq(struct mlx5_core_dev *dev,
struct mlx5_ib_sq *sq,
int new_state,
const struct mlx5_modify_raw_qp_param *raw_qp_param)
{
struct mlx5_ib_qp *ibqp = sq->base.container_mibqp;
u32 old_rate = ibqp->rate_limit;
u32 new_rate = old_rate;
u16 rl_index = 0;
void *in;
void *sqc;
int inlen;
int err;
inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
return -ENOMEM;
MLX5_SET(modify_sq_in, in, sq_state, sq->state);
sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
MLX5_SET(sqc, sqc, state, new_state);
if (raw_qp_param->set_mask & MLX5_RAW_QP_RATE_LIMIT) {
if (new_state != MLX5_SQC_STATE_RDY)
pr_warn("%s: Rate limit can only be changed when SQ is moving to RDY\n",
__func__);
else
new_rate = raw_qp_param->rate_limit;
}
if (old_rate != new_rate) {
if (new_rate) {
err = mlx5_rl_add_rate(dev, new_rate, &rl_index);
if (err) {
pr_err("Failed configuring rate %u: %d\n",
new_rate, err);
goto out;
}
}
MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
}
err = mlx5_core_modify_sq(dev, sq->base.mqp.qpn, in, inlen);
if (err) {
/* Remove new rate from table if failed */
if (new_rate &&
old_rate != new_rate)
mlx5_rl_remove_rate(dev, new_rate);
goto out;
}
/* Only remove the old rate after new rate was set */
if ((old_rate &&
(old_rate != new_rate)) ||
(new_state != MLX5_SQC_STATE_RDY))
mlx5_rl_remove_rate(dev, old_rate);
ibqp->rate_limit = new_rate;
sq->state = new_state;
out:
kvfree(in);
return err;
}
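/*
* A RAW_PACKET QP is backed by separate RQ and SQ objects, so QP state
* transitions are translated into the corresponding RQ/SQ state changes
* here instead of a single MODIFY_QP command.
*/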
static int modify_raw_packet_qp(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp,
const struct mlx5_modify_raw_qp_param *raw_qp_param,
u8 tx_affinity)
{
struct mlx5_ib_raw_packet_qp *raw_packet_qp = &qp->raw_packet_qp;
struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
struct mlx5_ib_sq *sq = &raw_packet_qp->sq;
int modify_rq = !!qp->rq.wqe_cnt;
int modify_sq = !!qp->sq.wqe_cnt;
int rq_state;
int sq_state;
int err;
switch (raw_qp_param->operation) {
case MLX5_CMD_OP_RST2INIT_QP:
rq_state = MLX5_RQC_STATE_RDY;
sq_state = MLX5_SQC_STATE_RDY;
break;
case MLX5_CMD_OP_2ERR_QP:
rq_state = MLX5_RQC_STATE_ERR;
sq_state = MLX5_SQC_STATE_ERR;
break;
case MLX5_CMD_OP_2RST_QP:
rq_state = MLX5_RQC_STATE_RST;
sq_state = MLX5_SQC_STATE_RST;
break;
case MLX5_CMD_OP_RTR2RTS_QP:
case MLX5_CMD_OP_RTS2RTS_QP:
if (raw_qp_param->set_mask ==
MLX5_RAW_QP_RATE_LIMIT) {
modify_rq = 0;
sq_state = sq->state;
} else {
return raw_qp_param->set_mask ? -EINVAL : 0;
}
break;
case MLX5_CMD_OP_INIT2INIT_QP:
case MLX5_CMD_OP_INIT2RTR_QP:
if (raw_qp_param->set_mask)
return -EINVAL;
else
return 0;
default:
WARN_ON(1);
return -EINVAL;
}
if (modify_rq) {
err = modify_raw_packet_qp_rq(dev, rq, rq_state, raw_qp_param);
if (err)
return err;
}
if (modify_sq) {
if (tx_affinity) {
err = modify_raw_packet_tx_affinity(dev->mdev, sq,
tx_affinity);
if (err)
return err;
}
return modify_raw_packet_qp_sq(dev->mdev, sq, sq_state, raw_qp_param);
}
return 0;
}
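/*
* Build the mlx5 QP context from the IB attributes and issue the state
* transition command selected by optab[cur_state][new_state].
*/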
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
const struct ib_qp_attr *attr, int attr_mask,
enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
static const u16 optab[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE] = {
[MLX5_QP_STATE_RST] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_INIT] = MLX5_CMD_OP_RST2INIT_QP,
},
[MLX5_QP_STATE_INIT] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_INIT] = MLX5_CMD_OP_INIT2INIT_QP,
[MLX5_QP_STATE_RTR] = MLX5_CMD_OP_INIT2RTR_QP,
},
[MLX5_QP_STATE_RTR] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTR2RTS_QP,
},
[MLX5_QP_STATE_RTS] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_RTS] = MLX5_CMD_OP_RTS2RTS_QP,
},
[MLX5_QP_STATE_SQD] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
},
[MLX5_QP_STATE_SQER] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
[MLX5_QP_STATE_RTS] = MLX5_CMD_OP_SQERR2RTS_QP,
},
[MLX5_QP_STATE_ERR] = {
[MLX5_QP_STATE_RST] = MLX5_CMD_OP_2RST_QP,
[MLX5_QP_STATE_ERR] = MLX5_CMD_OP_2ERR_QP,
}
};
struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
struct mlx5_ib_qp *qp = to_mqp(ibqp);
struct mlx5_ib_qp_base *base = &qp->trans_qp.base;
struct mlx5_ib_cq *send_cq, *recv_cq;
struct mlx5_qp_context *context;
struct mlx5_ib_pd *pd;
struct mlx5_ib_port *mibport = NULL;
enum mlx5_qp_state mlx5_cur, mlx5_new;
enum mlx5_qp_optpar optpar;
int mlx5_st;
int err;
u16 op;
u8 tx_affinity = 0;
context = kzalloc(sizeof(*context), GFP_KERNEL);
if (!context)
return -ENOMEM;
err = to_mlx5_st(ibqp->qp_type);
if (err < 0) {
mlx5_ib_dbg(dev, "unsupported qp type %d\n", ibqp->qp_type);
goto out;
}
context->flags = cpu_to_be32(err << 16);
if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
} else {
switch (attr->path_mig_state) {
case IB_MIG_MIGRATED:
context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
break;
case IB_MIG_REARM:
context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
break;
case IB_MIG_ARMED:
context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
break;
}
}
if ((cur_state == IB_QPS_RESET) && (new_state == IB_QPS_INIT)) {
if ((ibqp->qp_type == IB_QPT_RC) ||
(ibqp->qp_type == IB_QPT_UD &&
!(qp->flags & MLX5_IB_QP_SQPN_QP1)) ||
(ibqp->qp_type == IB_QPT_UC) ||
(ibqp->qp_type == IB_QPT_RAW_PACKET) ||
(ibqp->qp_type == IB_QPT_XRC_INI) ||
(ibqp->qp_type == IB_QPT_XRC_TGT)) {
if (mlx5_lag_is_active(dev->mdev)) {
tx_affinity = (unsigned int)atomic_add_return(1,
&dev->roce.next_port) %
MLX5_MAX_PORTS + 1;
context->flags |= cpu_to_be32(tx_affinity << 24);
}
}
}
if (is_sqp(ibqp->qp_type)) {
context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
} else if ((ibqp->qp_type == IB_QPT_UD &&
!(qp->flags & MLX5_IB_QP_UNDERLAY)) ||
ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
} else if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > IB_MTU_4096) {
mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
err = -EINVAL;
goto out;
}
context->mtu_msgmax = (attr->path_mtu << 5) |
(u8)MLX5_CAP_GEN(dev->mdev, log_max_msg);
}
if (attr_mask & IB_QP_DEST_QPN)
context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
if (attr_mask & IB_QP_PKEY_INDEX)
context->pri_path.pkey_index = cpu_to_be16(attr->pkey_index);
/* TODO: implement counter_index functionality */
if (is_sqp(ibqp->qp_type))
context->pri_path.port = qp->port;
if (attr_mask & IB_QP_PORT)
context->pri_path.port = attr->port_num;
if (attr_mask & IB_QP_AV) {
err = mlx5_set_path(dev, qp, &attr->ah_attr, &context->pri_path,
attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
attr_mask, 0, attr, false);
if (err)
goto out;
}
if (attr_mask & IB_QP_TIMEOUT)
context->pri_path.ackto_lt |= attr->timeout << 3;
if (attr_mask & IB_QP_ALT_PATH) {
err = mlx5_set_path(dev, qp, &attr->alt_ah_attr,
&context->alt_path,
attr->alt_port_num,
attr_mask | IB_QP_PKEY_INDEX | IB_QP_TIMEOUT,
0, attr, true);
if (err)
goto out;
}
pd = get_pd(qp);
get_cqs(qp->ibqp.qp_type, qp->ibqp.send_cq, qp->ibqp.recv_cq,
&send_cq, &recv_cq);
context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
if (attr_mask & IB_QP_RNR_RETRY)
context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
if (attr_mask & IB_QP_RETRY_CNT)
context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
if (attr->max_rd_atomic)
context->params1 |=
cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
}
if (attr_mask & IB_QP_SQ_PSN)
context->next_send_psn = cpu_to_be32(attr->sq_psn);
if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
if (attr->max_dest_rd_atomic)
context->params2 |=
cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
}
if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
context->params2 |= to_mlx5_access_flags(qp, attr,