/*
 * Copyright (c) 2013, Mellanox Technologies inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/module.h>
#include <rdma/ib_umem.h>
#include "mlx5_ib.h"

/* not supported currently */
static int wq_signature;
enum {
	MLX5_IB_ACK_REQ_FREQ	= 8,
};

enum {
	MLX5_IB_DEFAULT_SCHED_QUEUE	= 0x83,
	MLX5_IB_DEFAULT_QP0_SCHED_QUEUE	= 0x3f,
	MLX5_IB_LINK_TYPE_IB		= 0,
	MLX5_IB_LINK_TYPE_ETH		= 1
};

enum {
	MLX5_IB_SQ_STRIDE	= 6,
	MLX5_IB_CACHE_LINE_SIZE	= 64,
};
static const u32 mlx5_ib_opcode[] = {
	[IB_WR_SEND]				= MLX5_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]			= MLX5_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]			= MLX5_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]		= MLX5_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]			= MLX5_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]		= MLX5_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD]		= MLX5_OPCODE_ATOMIC_FA,
	[IB_WR_SEND_WITH_INV]			= MLX5_OPCODE_SEND_INVAL,
	[IB_WR_LOCAL_INV]			= MLX5_OPCODE_UMR,
	[IB_WR_FAST_REG_MR]			= MLX5_OPCODE_UMR,
	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= MLX5_OPCODE_ATOMIC_MASKED_CS,
	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= MLX5_OPCODE_ATOMIC_MASKED_FA,
	[MLX5_IB_WR_UMR]			= MLX5_OPCODE_UMR,
};
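
/*
 * Driver-private control block for MLX5_IB_WR_UMR work requests.  The
 * UMR post path overlays this on wr->wr.fast_reg (see the cast in
 * set_reg_umr_segment() below), so the field layout is what that cast
 * relies on.
 */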
struct umr_wr {
	u64				virt_addr;
	struct ib_pd		       *pd;
	unsigned int			page_shift;
	unsigned int			npages;
	u32				length;
	int				access_flags;
	u32				mkey;
};
static int is_qp0(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_SMI;
}

static int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == IB_QPT_GSI;
}

static int is_sqp(enum ib_qp_type qp_type)
{
	return is_qp0(qp_type) || is_qp1(qp_type);
}
static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
{
	return mlx5_buf_offset(&qp->buf, offset);
}

static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
}

void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
{
	return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
}
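
/*
 * Work-queue entries are located by shifting the index: receive WQEs
 * use the per-QP rq.wqe_shift, while send WQEs are always addressed in
 * 64-byte basic blocks (1 << MLX5_IB_SQ_STRIDE).  For example, with
 * rq.wqe_shift == 6, receive WQE n lives at rq.offset + n * 64.  A send
 * WQE larger than one basic block simply spans several consecutive
 * 64-byte slots.
 */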
static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
{
	struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
	struct ib_event event;

	if (type == MLX5_EVENT_TYPE_PATH_MIG)
		to_mibqp(qp)->port = to_mibqp(qp)->alt_port;

	if (ibqp->event_handler) {
		event.device     = ibqp->device;
		event.element.qp = ibqp;
		switch (type) {
		case MLX5_EVENT_TYPE_PATH_MIG:
			event.event = IB_EVENT_PATH_MIG;
			break;
		case MLX5_EVENT_TYPE_COMM_EST:
			event.event = IB_EVENT_COMM_EST;
			break;
		case MLX5_EVENT_TYPE_SQ_DRAINED:
			event.event = IB_EVENT_SQ_DRAINED;
			break;
		case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
			event.event = IB_EVENT_QP_LAST_WQE_REACHED;
			break;
		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
			event.event = IB_EVENT_QP_FATAL;
			break;
		case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
			event.event = IB_EVENT_PATH_MIG_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
			event.event = IB_EVENT_QP_REQ_ERR;
			break;
		case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
			event.event = IB_EVENT_QP_ACCESS_ERR;
			break;
		default:
			pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
			return;
		}

		ibqp->event_handler(&event, ibqp->qp_context);
	}
}
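
/*
 * Rough sketch of the RQ sizing rules below: for userspace QPs the WQE
 * count and stride arrive in the create_qp command and are only
 * validated; for kernel QPs the stride is derived from max_recv_sge
 * (16-byte scatter entries, plus an optional signature segment),
 * rounded up to a power of two, and the ring must hold max_recv_wr
 * entries.
 */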
static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
		       int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
{
	int wqe_size;
	int wq_size;

	/* Sanity check RQ size before proceeding */
	if (cap->max_recv_wr > dev->mdev->caps.max_wqes)
		return -EINVAL;

	if (!has_rq) {
		qp->rq.max_gs = 0;
		qp->rq.wqe_cnt = 0;
		qp->rq.wqe_shift = 0;
	} else {
		if (ucmd) {
			qp->rq.wqe_cnt = ucmd->rq_wqe_count;
			qp->rq.wqe_shift = ucmd->rq_wqe_shift;
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		} else {
			wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
			wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
			wqe_size = roundup_pow_of_two(wqe_size);
			wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
			wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
			qp->rq.wqe_cnt = wq_size / wqe_size;
			if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
				mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
					    wqe_size,
					    dev->mdev->caps.max_rq_desc_sz);
				return -EINVAL;
			}
			qp->rq.wqe_shift = ilog2(wqe_size);
			qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
			qp->rq.max_post = qp->rq.wqe_cnt;
		}
	}

	return 0;
}
static int sq_overhead(enum ib_qp_type qp_type)
{
	int size = 0;

	switch (qp_type) {
	case IB_QPT_XRC_INI:
		size += sizeof(struct mlx5_wqe_xrc_seg);
		/* fall through */
	case IB_QPT_RC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_atomic_seg) +
			sizeof(struct mlx5_wqe_raddr_seg);
		break;

	case IB_QPT_XRC_TGT:
		return 0;

	case IB_QPT_UC:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_raddr_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_datagram_seg);
		break;

	case MLX5_IB_QPT_REG_UMR:
		size += sizeof(struct mlx5_wqe_ctrl_seg) +
			sizeof(struct mlx5_wqe_umr_ctrl_seg) +
			sizeof(struct mlx5_mkey_seg);
		break;

	default:
		return -EINVAL;
	}

	return size;
}
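
/*
 * Worked example for calc_send_wqe() below, assuming the segment sizes
 * in mlx5 qp.h at the time (ctrl, raddr, atomic and data segments are
 * 16 bytes each): an RC QP with max_send_sge = 3 has 48 bytes of
 * overhead plus 48 bytes of scatter entries, i.e. 96 bytes, which
 * ALIGN()s up to two 64-byte basic blocks (128 bytes).  Inline data is
 * sized against the same overhead, and the larger of the two wins.
 */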
static int calc_send_wqe(struct ib_qp_init_attr *attr)
{
	int inl_size = 0;
	int size;

	size = sq_overhead(attr->qp_type);
	if (size < 0)
		return size;

	if (attr->cap.max_inline_data) {
		inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
			attr->cap.max_inline_data;
	}

	size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN &&
	    ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB) < MLX5_SIG_WQE_SIZE)
		return MLX5_SIG_WQE_SIZE;
	else
		return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
}
static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
			struct mlx5_ib_qp *qp)
{
	int wqe_size;
	int wq_size;

	if (!attr->cap.max_send_wr)
		return 0;

	wqe_size = calc_send_wqe(attr);
	mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
	if (wqe_size < 0)
		return wqe_size;

	if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
		mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
			    wqe_size, dev->mdev->caps.max_sq_desc_sz);
		return -EINVAL;
	}

	qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
		sizeof(struct mlx5_wqe_inline_seg);
	attr->cap.max_inline_data = qp->max_inline_data;

	if (attr->create_flags & IB_QP_CREATE_SIGNATURE_EN)
		qp->signature_en = true;

	wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
	qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
		mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
			    qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
		return -ENOMEM;
	}
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.max_gs = attr->cap.max_send_sge;
	qp->sq.max_post = wq_size / wqe_size;
	attr->cap.max_send_wr = qp->sq.max_post;

	return wq_size;
}
static int set_user_buf_size(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_qp *qp,
			     struct mlx5_ib_create_qp *ucmd)
{
	int desc_sz = 1 << qp->sq.wqe_shift;

	if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
		mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
			     desc_sz, dev->mdev->caps.max_sq_desc_sz);
		return -EINVAL;
	}

	if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
		mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
			     ucmd->sq_wqe_count);
		return -EINVAL;
	}

	qp->sq.wqe_cnt = ucmd->sq_wqe_count;

	if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
		mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
			     qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
		return -EINVAL;
	}

	qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
		       (qp->sq.wqe_cnt << 6);

	return 0;
}
static int qp_has_rq(struct ib_qp_init_attr *attr)
{
	if (attr->qp_type == IB_QPT_XRC_INI ||
	    attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
	    attr->qp_type == MLX5_IB_QPT_REG_UMR ||
	    !attr->cap.max_recv_wr)
		return 0;

	return 1;
}
static int first_med_uuar(void)
{
	return 1;
}

static int next_uuar(int n)
{
	n++;

	while (((n % 4) & 2))
		n++;

	return n;
}

static int num_med_uuar(struct mlx5_uuar_info *uuari)
{
	int n;

	n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
		uuari->num_low_latency_uuars - 1;

	return n >= 0 ? n : 0;
}
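
/*
 * Rough map of the UUAR space assumed by the helpers above and below:
 * each UAR page carries four BlueFlame registers, of which only the
 * first two are usable for regular send doorbells (next_uuar() skips
 * indices 2 and 3 modulo 4, the fast-path registers).  Index 0 is the
 * shared low-latency-class register, the medium class starts at index
 * 1, and the high (low-latency) class occupies the tail of the space
 * tracked in uuari->bitmap.
 */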
static int max_uuari(struct mlx5_uuar_info *uuari)
{
	return uuari->num_uars * 4;
}

static int first_hi_uuar(struct mlx5_uuar_info *uuari)
{
	int med;
	int i;
	int t;

	med = num_med_uuar(uuari);
	for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
		t++;
		if (t == med)
			return next_uuar(i);
	}

	return 0;
}
static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
{
	int i;

	for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
		if (!test_bit(i, uuari->bitmap)) {
			set_bit(i, uuari->bitmap);
			uuari->count[i]++;
			return i;
		}
	}

	return -ENOMEM;
}
static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
{
	int minidx = first_med_uuar();
	int i;

	for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
		if (uuari->count[i] < uuari->count[minidx])
			minidx = i;
	}

	uuari->count[minidx]++;
	return minidx;
}
static int alloc_uuar(struct mlx5_uuar_info *uuari,
		      enum mlx5_ib_latency_class lat)
{
	int uuarn = -EINVAL;

	mutex_lock(&uuari->lock);
	switch (lat) {
	case MLX5_IB_LATENCY_CLASS_LOW:
		uuarn = 0;
		uuari->count[uuarn]++;
		break;

	case MLX5_IB_LATENCY_CLASS_MEDIUM:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_med_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_HIGH:
		if (uuari->ver < 2)
			uuarn = -ENOMEM;
		else
			uuarn = alloc_high_class_uuar(uuari);
		break;

	case MLX5_IB_LATENCY_CLASS_FAST_PATH:
		uuarn = 2;
		break;
	}
	mutex_unlock(&uuari->lock);

	return uuarn;
}
static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}

static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	clear_bit(uuarn, uuari->bitmap);
	--uuari->count[uuarn];
}
static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
{
	int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
	int high_uuar = nuuars - uuari->num_low_latency_uuars;

	mutex_lock(&uuari->lock);
	if (uuarn == 0) {
		--uuari->count[uuarn];
		goto out;
	}

	if (uuarn < high_uuar) {
		free_med_class_uuar(uuari, uuarn);
		goto out;
	}

	free_high_class_uuar(uuari, uuarn);

out:
	mutex_unlock(&uuari->lock);
}
static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
{
	switch (state) {
	case IB_QPS_RESET:	return MLX5_QP_STATE_RST;
	case IB_QPS_INIT:	return MLX5_QP_STATE_INIT;
	case IB_QPS_RTR:	return MLX5_QP_STATE_RTR;
	case IB_QPS_RTS:	return MLX5_QP_STATE_RTS;
	case IB_QPS_SQD:	return MLX5_QP_STATE_SQD;
	case IB_QPS_SQE:	return MLX5_QP_STATE_SQER;
	case IB_QPS_ERR:	return MLX5_QP_STATE_ERR;
	default:		return -1;
	}
}
static int to_mlx5_st(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_RC:			return MLX5_QP_ST_RC;
	case IB_QPT_UC:			return MLX5_QP_ST_UC;
	case IB_QPT_UD:			return MLX5_QP_ST_UD;
	case MLX5_IB_QPT_REG_UMR:	return MLX5_QP_ST_REG_UMR;
	case IB_QPT_XRC_INI:
	case IB_QPT_XRC_TGT:		return MLX5_QP_ST_XRC;
	case IB_QPT_SMI:		return MLX5_QP_ST_QP0;
	case IB_QPT_GSI:		return MLX5_QP_ST_QP1;
	case IB_QPT_RAW_IPV6:		return MLX5_QP_ST_RAW_IPV6;
	case IB_QPT_RAW_ETHERTYPE:	return MLX5_QP_ST_RAW_ETHERTYPE;
	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:		return -EINVAL;
	}
}
static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
{
	return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
}
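
/*
 * Outline of user QP creation below: copy the create command from
 * userspace, grab a UUAR (preferring the high latency class and
 * falling back to medium, then to the shared register 0), pin the
 * user's WQ buffer with ib_umem_get(), translate it into PAS entries
 * in the create mailbox, and map the user doorbell record.
 */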
static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			  struct mlx5_ib_qp *qp, struct ib_udata *udata,
			  struct mlx5_create_qp_mbox_in **in,
			  struct mlx5_ib_create_qp_resp *resp, int *inlen)
{
	struct mlx5_ib_ucontext *context;
	struct mlx5_ib_create_qp ucmd;
	int page_shift = 0;
	int uar_index;
	int npages;
	u32 offset = 0;
	int uuarn;
	int ncont = 0;
	int err;

	err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		return err;
	}

	context = to_mucontext(pd->uobject->context);
	/*
	 * TBD: should come from the verbs when we have the API
	 */
	uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
		mlx5_ib_dbg(dev, "reverting to medium latency\n");
		uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
		if (uuarn < 0) {
			mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
			mlx5_ib_dbg(dev, "reverting to high latency\n");
			uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
			if (uuarn < 0) {
				mlx5_ib_warn(dev, "uuar allocation failed\n");
				return uuarn;
			}
		}
	}

	uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
	mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);

	qp->rq.offset = 0;
	qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;

	err = set_user_buf_size(dev, qp, &ucmd);
	if (err)
		goto err_uuar;

	if (ucmd.buf_addr && qp->buf_size) {
		qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
				       qp->buf_size, 0, 0);
		if (IS_ERR(qp->umem)) {
			mlx5_ib_dbg(dev, "umem_get failed\n");
			err = PTR_ERR(qp->umem);
			goto err_uuar;
		}
	} else {
		qp->umem = NULL;
	}

	if (qp->umem) {
		mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
				   &ncont, NULL);
		err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
		if (err) {
			mlx5_ib_warn(dev, "bad offset\n");
			goto err_umem;
		}
		mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
			    ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
	}

	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_umem;
	}
	if (qp->umem)
		mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	(*in)->ctx.params2 = cpu_to_be32(offset << 6);

	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	resp->uuar_index = uuarn;
	qp->uuarn = uuarn;

	err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "map failed\n");
		goto err_free;
	}

	err = ib_copy_to_udata(udata, resp, sizeof(*resp));
	if (err) {
		mlx5_ib_dbg(dev, "copy failed\n");
		goto err_unmap;
	}
	qp->create_type = MLX5_QP_USER;

	return 0;

err_unmap:
	mlx5_ib_db_unmap_user(context, &qp->db);

err_free:
	mlx5_vfree(*in);

err_umem:
	if (qp->umem)
		ib_umem_release(qp->umem);

err_uuar:
	free_uuar(&context->uuari, uuarn);
	return err;
}
static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_ucontext *context;

	context = to_mucontext(pd->uobject->context);
	mlx5_ib_db_unmap_user(context, &qp->db);
	if (qp->umem)
		ib_umem_release(qp->umem);
	free_uuar(&context->uuari, qp->uuarn);
}
static int create_kernel_qp(struct mlx5_ib_dev *dev,
			    struct ib_qp_init_attr *init_attr,
			    struct mlx5_ib_qp *qp,
			    struct mlx5_create_qp_mbox_in **in, int *inlen)
{
	enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
	struct mlx5_uuar_info *uuari;
	int uar_index;
	int uuarn;
	int err;

	uuari = &dev->mdev->priv.uuari;
	if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
		return -EINVAL;

	if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
		lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;

	uuarn = alloc_uuar(uuari, lc);
	if (uuarn < 0) {
		mlx5_ib_dbg(dev, "\n");
		return -ENOMEM;
	}

	qp->bf = &uuari->bfs[uuarn];
	uar_index = qp->bf->uar->index;

	err = calc_sq_size(dev, init_attr, qp);
	if (err < 0) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->rq.offset = 0;
	qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
	qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);

	err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_uuar;
	}

	qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
	*inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
	*in = mlx5_vzalloc(*inlen);
	if (!*in) {
		err = -ENOMEM;
		goto err_buf;
	}
	(*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
	(*in)->ctx.log_pg_sz_remote_qpn =
		cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
	/* Set "fast registration enabled" for all kernel QPs */
	(*in)->ctx.params1 |= cpu_to_be32(1 << 11);
	(*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);

	mlx5_fill_page_array(&qp->buf, (*in)->pas);

	err = mlx5_db_alloc(dev->mdev, &qp->db);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		goto err_free;
	}

	qp->db.db[0] = 0;
	qp->db.db[1] = 0;

	qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
	qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
	qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
	qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
	qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);

	if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
	    !qp->sq.w_list || !qp->sq.wqe_head) {
		err = -ENOMEM;
		goto err_wrid;
	}
	qp->create_type = MLX5_QP_KERNEL;

	return 0;

err_wrid:
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);

err_free:
	mlx5_vfree(*in);

err_buf:
	mlx5_buf_free(dev->mdev, &qp->buf);

err_uuar:
	free_uuar(&dev->mdev->priv.uuari, uuarn);
	return err;
}
static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	mlx5_db_free(dev->mdev, &qp->db);
	kfree(qp->sq.wqe_head);
	kfree(qp->sq.w_list);
	kfree(qp->sq.wrid);
	kfree(qp->sq.wr_data);
	kfree(qp->rq.wrid);
	mlx5_buf_free(dev->mdev, &qp->buf);
	free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
}
static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
{
	if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
	    (attr->qp_type == IB_QPT_XRC_INI))
		return cpu_to_be32(MLX5_SRQ_RQ);
	else if (!qp->has_rq)
		return cpu_to_be32(MLX5_ZERO_LEN_RQ);
	else
		return cpu_to_be32(MLX5_NON_ZERO_RQ);
}
static int is_connected(enum ib_qp_type qp_type)
{
	if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
		return 1;

	return 0;
}
static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
			    struct ib_qp_init_attr *init_attr,
			    struct ib_udata *udata, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_resources *devr = &dev->devr;
	struct mlx5_ib_create_qp_resp resp;
	struct mlx5_create_qp_mbox_in *in;
	struct mlx5_ib_create_qp ucmd;
	int inlen = sizeof(*in);
	int err;

	mutex_init(&qp->mutex);
	spin_lock_init(&qp->sq.lock);
	spin_lock_init(&qp->rq.lock);

	if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
			mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
			return -EINVAL;
		} else {
			qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
		}
	}

	if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
		qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;

	if (pd && pd->uobject) {
		if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
			mlx5_ib_dbg(dev, "copy failed\n");
			return -EFAULT;
		}

		qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
		qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
	} else {
		qp->wq_sig = !!wq_signature;
	}

	qp->has_rq = qp_has_rq(init_attr);
	err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
			  qp, (pd && pd->uobject) ? &ucmd : NULL);
	if (err) {
		mlx5_ib_dbg(dev, "err %d\n", err);
		return err;
	}

	if (pd) {
		if (pd->uobject) {
			mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
			if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
			    ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
				mlx5_ib_dbg(dev, "invalid rq params\n");
				return -EINVAL;
			}
			if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
				mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
					    ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
				return -EINVAL;
			}
			err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
		} else {
			err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
			if (err)
				mlx5_ib_dbg(dev, "err %d\n", err);
			else
				qp->pa_lkey = to_mpd(pd)->pa_lkey;
		}

		if (err)
			return err;
	} else {
		in = mlx5_vzalloc(sizeof(*in));
		if (!in)
			return -ENOMEM;

		qp->create_type = MLX5_QP_EMPTY;
	}

	if (is_sqp(init_attr->qp_type))
		qp->port = init_attr->port_num;

	in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
				    MLX5_QP_PM_MIGRATED << 11);

	if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
		in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
	else
		in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);

	if (qp->wq_sig)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);

	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_BLOCK_MCAST);

	if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
		int rcqe_sz;
		int scqe_sz;

		rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
		scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);

		if (rcqe_sz == 128)
			in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
		else
			in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;

		if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
			if (scqe_sz == 128)
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
			else
				in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
		}
	}

	if (qp->rq.wqe_cnt) {
		in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
		in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
	}

	in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);

	if (qp->sq.wqe_cnt)
		in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
	else
		in->ctx.sq_crq_size |= cpu_to_be16(0x8000);

	/* Set default resources */
	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
		break;
	case IB_QPT_XRC_INI:
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
		in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
		in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		break;
	default:
		if (init_attr->srq) {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
		} else {
			in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
			in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
		}
	}

	if (init_attr->send_cq)
		in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);

	if (init_attr->recv_cq)
		in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);

	in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);

	err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
	if (err) {
		mlx5_ib_dbg(dev, "create qp failed\n");
		goto err_create;
	}

	mlx5_vfree(in);
	/* Hardware wants QPN written in big-endian order (after
	 * shifting) for send doorbell.  Precompute this value to save
	 * a little bit when posting sends.
	 */
	qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);

	qp->mqp.event = mlx5_ib_qp_event;

	return 0;

err_create:
	if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(pd, qp);
	else if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);

	mlx5_vfree(in);
	return err;
}
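
/*
 * The CQ locks below are always taken in ascending CQN order (with
 * spin_lock_nested() on the inner lock), so a QP whose send and
 * receive CQs differ can never deadlock against another QP using the
 * same pair of CQs in the opposite roles.
 */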
static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				spin_lock_nested(&recv_cq->lock,
						 SINGLE_DEPTH_NESTING);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				spin_lock_irq(&send_cq->lock);
				__acquire(&recv_cq->lock);
			} else {
				spin_lock_irq(&recv_cq->lock);
				spin_lock_nested(&send_cq->lock,
						 SINGLE_DEPTH_NESTING);
			}
		} else {
			spin_lock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		spin_lock_irq(&recv_cq->lock);
	}
}
static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq) {
		if (recv_cq) {
			if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
				spin_unlock(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
				__release(&recv_cq->lock);
				spin_unlock_irq(&send_cq->lock);
			} else {
				spin_unlock(&send_cq->lock);
				spin_unlock_irq(&recv_cq->lock);
			}
		} else {
			spin_unlock_irq(&send_cq->lock);
		}
	} else if (recv_cq) {
		spin_unlock_irq(&recv_cq->lock);
	}
}
static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
{
	return to_mpd(qp->ibqp.pd);
}
static void get_cqs(struct mlx5_ib_qp *qp,
		    struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
{
	switch (qp->ibqp.qp_type) {
	case IB_QPT_XRC_TGT:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	case MLX5_IB_QPT_REG_UMR:
	case IB_QPT_XRC_INI:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = NULL;
		break;

	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
		*send_cq = to_mcq(qp->ibqp.send_cq);
		*recv_cq = to_mcq(qp->ibqp.recv_cq);
		break;

	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:
		*send_cq = NULL;
		*recv_cq = NULL;
		break;
	}
}
static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
{
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_modify_qp_mbox_in *in;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return;
	if (qp->state != IB_QPS_RESET)
		if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
					MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
			mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
				     qp->mqp.qpn);

	get_cqs(qp, &send_cq, &recv_cq);

	if (qp->create_type == MLX5_QP_KERNEL) {
		mlx5_ib_lock_cqs(send_cq, recv_cq);
		__mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				   qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			__mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
		mlx5_ib_unlock_cqs(send_cq, recv_cq);
	}

	err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
	if (err)
		mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
	kfree(in);

	if (qp->create_type == MLX5_QP_KERNEL)
		destroy_qp_kernel(dev, qp);
	else if (qp->create_type == MLX5_QP_USER)
		destroy_qp_user(&get_pd(qp)->ibpd, qp);
}
static const char *ib_qp_type_str(enum ib_qp_type type)
{
	switch (type) {
	case IB_QPT_SMI:
		return "IB_QPT_SMI";
	case IB_QPT_GSI:
		return "IB_QPT_GSI";
	case IB_QPT_RC:
		return "IB_QPT_RC";
	case IB_QPT_UC:
		return "IB_QPT_UC";
	case IB_QPT_UD:
		return "IB_QPT_UD";
	case IB_QPT_RAW_IPV6:
		return "IB_QPT_RAW_IPV6";
	case IB_QPT_RAW_ETHERTYPE:
		return "IB_QPT_RAW_ETHERTYPE";
	case IB_QPT_XRC_INI:
		return "IB_QPT_XRC_INI";
	case IB_QPT_XRC_TGT:
		return "IB_QPT_XRC_TGT";
	case IB_QPT_RAW_PACKET:
		return "IB_QPT_RAW_PACKET";
	case MLX5_IB_QPT_REG_UMR:
		return "MLX5_IB_QPT_REG_UMR";
	case IB_QPT_MAX:
	default:
		return "Invalid QP type";
	}
}
struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
				struct ib_qp_init_attr *init_attr,
				struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_qp *qp;
	u16 xrcdn = 0;
	int err;

	if (pd) {
		dev = to_mdev(pd->device);
	} else {
		/* being cautious here */
		if (init_attr->qp_type != IB_QPT_XRC_TGT &&
		    init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
			pr_warn("%s: no PD for transport %s\n", __func__,
				ib_qp_type_str(init_attr->qp_type));
			return ERR_PTR(-EINVAL);
		}
		dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
	}

	switch (init_attr->qp_type) {
	case IB_QPT_XRC_TGT:
	case IB_QPT_XRC_INI:
		if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
			mlx5_ib_dbg(dev, "XRC not supported\n");
			return ERR_PTR(-ENOSYS);
		}
		init_attr->recv_cq = NULL;
		if (init_attr->qp_type == IB_QPT_XRC_TGT) {
			xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
			init_attr->send_cq = NULL;
		}

		/* fall through */
	case IB_QPT_RC:
	case IB_QPT_UC:
	case IB_QPT_UD:
	case IB_QPT_SMI:
	case IB_QPT_GSI:
	case MLX5_IB_QPT_REG_UMR:
		qp = kzalloc(sizeof(*qp), GFP_KERNEL);
		if (!qp)
			return ERR_PTR(-ENOMEM);

		err = create_qp_common(dev, pd, init_attr, udata, qp);
		if (err) {
			mlx5_ib_dbg(dev, "create_qp_common failed\n");
			kfree(qp);
			return ERR_PTR(err);
		}

		if (is_qp0(init_attr->qp_type))
			qp->ibqp.qp_num = 0;
		else if (is_qp1(init_attr->qp_type))
			qp->ibqp.qp_num = 1;
		else
			qp->ibqp.qp_num = qp->mqp.qpn;

		mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
			    qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
			    to_mcq(init_attr->send_cq)->mcq.cqn);

		qp->xrcdn = xrcdn;

		break;

	case IB_QPT_RAW_IPV6:
	case IB_QPT_RAW_ETHERTYPE:
	case IB_QPT_RAW_PACKET:
	case IB_QPT_MAX:
	default:
		mlx5_ib_dbg(dev, "unsupported qp type %d\n",
			    init_attr->qp_type);
		/* Don't support raw QPs */
		return ERR_PTR(-EINVAL);
	}

	return &qp->ibqp;
}
int mlx5_ib_destroy_qp(struct ib_qp *qp)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_qp *mqp = to_mqp(qp);

	destroy_qp_common(dev, mqp);

	kfree(mqp);

	return 0;
}
static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
				   int attr_mask)
{
	u32 hw_access_flags = 0;
	u8 dest_rd_atomic;
	u32 access_flags;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MLX5_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MLX5_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
enum {
	MLX5_PATH_FLAG_FL	= 1 << 0,
	MLX5_PATH_FLAG_FREE_AR	= 1 << 1,
	MLX5_PATH_FLAG_COUNTER	= 1 << 2,
};
static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
{
	if (rate == IB_RATE_PORT_CURRENT) {
		return 0;
	} else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
		return -EINVAL;
	} else {
		while (rate != IB_RATE_2_5_GBPS &&
		       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
			 dev->mdev->caps.stat_rate_support))
			--rate;
	}

	return rate + MLX5_STAT_RATE_OFFSET;
}
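
/*
 * The hardware encodes the static rate as an index offset by
 * MLX5_STAT_RATE_OFFSET from the IB rate enum; ib_rate_to_mlx5() walks
 * down from the requested rate until it finds one advertised in
 * caps.stat_rate_support, so the device is never asked for a rate it
 * cannot do.
 */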
static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
			 struct mlx5_qp_path *path, u8 port, int attr_mask,
			 u32 path_flags, const struct ib_qp_attr *attr)
{
	int err;

	path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
	path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;

	if (attr_mask & IB_QP_PKEY_INDEX)
		path->pkey_index = attr->pkey_index;

	path->grh_mlid	= ah->src_path_bits & 0x7f;
	path->rlid	= cpu_to_be16(ah->dlid);

	err = ib_rate_to_mlx5(dev, ah->static_rate);
	if (err < 0)
		return err;
	path->static_rate = err;
	path->port = port;

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
			pr_err("sgid_index (%u) too large. max is %d\n",
			       ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
			return -EINVAL;
		}

		path->grh_mlid |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->tclass_flowlabel =
			cpu_to_be32((ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	}

	if (attr_mask & IB_QP_TIMEOUT)
		path->ackto_lt = attr->timeout << 3;

	path->sl = ah->sl & 0xf;

	return 0;
}
static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
	[MLX5_QP_STATE_INIT] = {
		[MLX5_QP_STATE_INIT] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_PRI_PORT,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_PRI_PORT,
		},
		[MLX5_QP_STATE_RTR] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PKEY_INDEX,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					  MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX	|
					   MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					   MLX5_QP_OPTPAR_RRE		|
					   MLX5_QP_OPTPAR_RAE		|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_PKEY_INDEX,
		},
	},
	[MLX5_QP_STATE_RTR] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH	|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
		},
	},
	[MLX5_QP_STATE_RTS] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE		|
					  MLX5_QP_OPTPAR_RAE		|
					  MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE		|
					  MLX5_QP_OPTPAR_PM_STATE	|
					  MLX5_QP_OPTPAR_ALT_ADDR_PATH,
			[MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY		|
					  MLX5_QP_OPTPAR_SRQN		|
					  MLX5_QP_OPTPAR_CQN_RCV,
		},
	},
	[MLX5_QP_STATE_SQER] = {
		[MLX5_QP_STATE_RTS] = {
			[MLX5_QP_ST_UD]	 = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
			[MLX5_QP_ST_UC]	 = MLX5_QP_OPTPAR_RWE,
			[MLX5_QP_ST_RC]	 = MLX5_QP_OPTPAR_RNR_TIMEOUT	|
					   MLX5_QP_OPTPAR_RWE		|
					   MLX5_QP_OPTPAR_RAE,
		},
	},
};
static int ib_nr_to_mlx5_nr(int ib_mask)
{
	switch (ib_mask) {
	case IB_QP_STATE:
		return 0;
	case IB_QP_CUR_STATE:
		return 0;
	case IB_QP_EN_SQD_ASYNC_NOTIFY:
		return 0;
	case IB_QP_ACCESS_FLAGS:
		return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
			MLX5_QP_OPTPAR_RAE;
	case IB_QP_PKEY_INDEX:
		return MLX5_QP_OPTPAR_PKEY_INDEX;
	case IB_QP_PORT:
		return MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_QKEY:
		return MLX5_QP_OPTPAR_Q_KEY;
	case IB_QP_AV:
		return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
			MLX5_QP_OPTPAR_PRI_PORT;
	case IB_QP_PATH_MTU:
		return 0;
	case IB_QP_TIMEOUT:
		return MLX5_QP_OPTPAR_ACK_TIMEOUT;
	case IB_QP_RETRY_CNT:
		return MLX5_QP_OPTPAR_RETRY_COUNT;
	case IB_QP_RNR_RETRY:
		return MLX5_QP_OPTPAR_RNR_RETRY;
	case IB_QP_RQ_PSN:
		return 0;
	case IB_QP_MAX_QP_RD_ATOMIC:
		return MLX5_QP_OPTPAR_SRA_MAX;
	case IB_QP_ALT_PATH:
		return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
	case IB_QP_MIN_RNR_TIMER:
		return MLX5_QP_OPTPAR_RNR_TIMEOUT;
	case IB_QP_SQ_PSN:
		return 0;
	case IB_QP_MAX_DEST_RD_ATOMIC:
		return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
			MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
	case IB_QP_PATH_MIG_STATE:
		return MLX5_QP_OPTPAR_PM_STATE;
	case IB_QP_CAP:
		return 0;
	case IB_QP_DEST_QPN:
		return 0;
	}
	return 0;
}
static int ib_mask_to_mlx5_opt(int ib_mask)
{
	int result = 0;
	int i;

	for (i = 0; i < 8 * sizeof(int); i++) {
		if ((1 << i) & ib_mask)
			result |= ib_nr_to_mlx5_nr(1 << i);
	}

	return result;
}
static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
			       const struct ib_qp_attr *attr, int attr_mask,
			       enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_cq *send_cq, *recv_cq;
	struct mlx5_qp_context *context;
	struct mlx5_modify_qp_mbox_in *in;
	struct mlx5_ib_pd *pd;
	enum mlx5_qp_state mlx5_cur, mlx5_new;
	enum mlx5_qp_optpar optpar;
	int sqd_event;
	int mlx5_st;
	int err;

	in = kzalloc(sizeof(*in), GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	context = &in->ctx;
	err = to_mlx5_st(ibqp->qp_type);
	if (err < 0)
		goto out;

	context->flags = cpu_to_be32(err << 16);

	if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
		context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
	} else {
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
			break;
		}
	}

	if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
		context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
	} else if (ibqp->qp_type == IB_QPT_UD ||
		   ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
		context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
	} else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 ||
		    attr->path_mtu > IB_MTU_4096) {
			mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
			err = -EINVAL;
			goto out;
		}
		context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
	}

	if (attr_mask & IB_QP_DEST_QPN)
		context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);

	if (attr_mask & IB_QP_PKEY_INDEX)
		context->pri_path.pkey_index = attr->pkey_index;

	/* todo implement counter_index functionality */

	if (is_sqp(ibqp->qp_type))
		context->pri_path.port = qp->port;

	if (attr_mask & IB_QP_PORT)
		context->pri_path.port = attr->port_num;

	if (attr_mask & IB_QP_AV) {
		err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
				    attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
				    attr_mask, 0, attr);
		if (err)
			goto out;
	}

	if (attr_mask & IB_QP_TIMEOUT)
		context->pri_path.ackto_lt |= attr->timeout << 3;

	if (attr_mask & IB_QP_ALT_PATH) {
		err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
				    attr->alt_port_num, attr_mask, 0, attr);
		if (err)
			goto out;
	}

	pd = get_pd(qp);
	get_cqs(qp, &send_cq, &recv_cq);

	context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
	context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
	context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
	context->params1  = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);

	if (attr_mask & IB_QP_RNR_RETRY)
		context->params1 |= cpu_to_be32(attr->rnr_retry << 13);

	if (attr_mask & IB_QP_RETRY_CNT)
		context->params1 |= cpu_to_be32(attr->retry_cnt << 16);

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic)
			context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		context->next_send_psn = cpu_to_be32(attr->sq_psn);

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
		context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);

	if (attr_mask & IB_QP_MIN_RNR_TIMER)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);

	if (attr_mask & IB_QP_RQ_PSN)
		context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	if (attr_mask & IB_QP_QKEY)
		context->qkey = cpu_to_be32(attr->qkey);

	if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->db_rec_addr = cpu_to_be64(qp->db.dma);

	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
		sqd_event = 1;
	else
		sqd_event = 0;

	if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
		context->sq_crq_size |= cpu_to_be16(1 << 4);


	mlx5_cur = to_mlx5_state(cur_state);
	mlx5_new = to_mlx5_state(new_state);
	mlx5_st = to_mlx5_st(ibqp->qp_type);
	if (mlx5_st < 0)
		goto out;

	optpar = ib_mask_to_mlx5_opt(attr_mask);
	optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
	in->optparam = cpu_to_be32(optpar);
	err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
				  to_mlx5_state(new_state), in, sqd_event,
				  &qp->mqp);
	if (err)
		goto out;

	qp->state = new_state;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !ibqp->uobject) {
		mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
				 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
		if (send_cq != recv_cq)
			mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);

		qp->rq.head = 0;
		qp->rq.tail = 0;
		qp->sq.head = 0;
		qp->sq.tail = 0;
		qp->sq.cur_post = 0;
		qp->sq.last_poll = 0;
		qp->db.db[MLX5_RCV_DBR] = 0;
		qp->db.db[MLX5_SND_DBR] = 0;
	}

out:
	kfree(in);
	return err;
}
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;
	int port;

	mutex_lock(&qp->mutex);

	cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
	    !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
				IB_LINK_LAYER_UNSPECIFIED))
		goto out;

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
		goto out;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
		if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
			goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
		goto out;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
		goto out;

	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
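
/*
 * mlx5_wq_overflow() below first tests the cached head/tail; only on
 * apparent overflow does it retake the CQ lock and re-read, since
 * wq->tail is advanced by the CQ polling path and the cheap check may
 * be stale.
 */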
static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
{
	struct mlx5_ib_cq *cq;
	unsigned cur;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max_post))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max_post;
}
static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->raddr    = cpu_to_be64(remote_addr);
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->reserved = 0;
}
static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
			     struct ib_send_wr *wr)
{
	memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
	dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
	dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
}
static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
{
	dseg->byte_count = cpu_to_be32(sg->length);
	dseg->lkey       = cpu_to_be32(sg->lkey);
	dseg->addr       = cpu_to_be64(sg->addr);
}
static __be16 get_klm_octo(int npages)
{
	return cpu_to_be16(ALIGN(npages, 8) / 2);
}
static __be64 frwr_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_A		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE;

	return cpu_to_be64(result);
}
static __be64 sig_mkey_mask(void)
{
	u64 result;

	result = MLX5_MKEY_MASK_LEN		|
		MLX5_MKEY_MASK_PAGE_SIZE	|
		MLX5_MKEY_MASK_START_ADDR	|
		MLX5_MKEY_MASK_EN_SIGERR	|
		MLX5_MKEY_MASK_EN_RINVAL	|
		MLX5_MKEY_MASK_KEY		|
		MLX5_MKEY_MASK_LR		|
		MLX5_MKEY_MASK_LW		|
		MLX5_MKEY_MASK_RR		|
		MLX5_MKEY_MASK_RW		|
		MLX5_MKEY_MASK_SMALL_FENCE	|
		MLX5_MKEY_MASK_FREE		|
		MLX5_MKEY_MASK_BSF_EN;

	return cpu_to_be64(result);
}
static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				 struct ib_send_wr *wr, int li)
{
	memset(umr, 0, sizeof(*umr));

	if (li) {
		umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
		umr->flags = 1 << 7;
		return;
	}

	umr->flags = (1 << 5); /* fail if not free */
	umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
	umr->mkey_mask = frwr_mkey_mask();
}
static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr)
{
	struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
	u64 mask;

	memset(umr, 0, sizeof(*umr));

	if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
		umr->flags = 1 << 5; /* fail if not free */
		umr->klm_octowords = get_klm_octo(umrwr->npages);
		mask =  MLX5_MKEY_MASK_LEN		|
			MLX5_MKEY_MASK_PAGE_SIZE	|
			MLX5_MKEY_MASK_START_ADDR	|
			MLX5_MKEY_MASK_PD		|
			MLX5_MKEY_MASK_LR		|
			MLX5_MKEY_MASK_LW		|
			MLX5_MKEY_MASK_KEY		|
			MLX5_MKEY_MASK_RR		|
			MLX5_MKEY_MASK_RW		|
			MLX5_MKEY_MASK_A		|
			MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	} else {
		umr->flags = 2 << 5; /* fail if free */
		mask = MLX5_MKEY_MASK_FREE;
		umr->mkey_mask = cpu_to_be64(mask);
	}

	if (!wr->num_sge)
		umr->flags |= (1 << 7); /* inline */
}
static u8 get_umr_flags(int acc)
{
	return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC       : 0) |
	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX5_PERM_REMOTE_WRITE : 0) |
	       (acc & IB_ACCESS_REMOTE_READ   ? MLX5_PERM_REMOTE_READ  : 0) |
	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX5_PERM_LOCAL_WRITE  : 0) |
		MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN;
}
static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
			     int li, int *writ)
{
	memset(seg, 0, sizeof(*seg));
	if (li) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags) |
		     MLX5_ACCESS_MODE_MTT;
	*writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
	seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
}
static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
{
	memset(seg, 0, sizeof(*seg));
	if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
		seg->status = 1 << 6;
		return;
	}

	seg->flags = convert_access(wr->wr.fast_reg.access_flags);
	seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
	seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
	seg->len = cpu_to_be64(wr->wr.fast_reg.length);
	seg->log2_page_size = wr->wr.fast_reg.page_shift;
	seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
				       mlx5_mkey_variant(wr->wr.fast_reg.rkey));
}
static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
			   struct ib_send_wr *wr,
			   struct mlx5_core_dev *mdev,
			   struct mlx5_ib_pd *pd,
			   int writ)
{
	struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
	u64 *page_list = wr->wr.fast_reg.page_list->page_list;
	u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
	int i;

	for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
		mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
	dseg->addr = cpu_to_be64(mfrpl->map);
	dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
	dseg->lkey = cpu_to_be32(pd->pa_lkey);
}
static __be32 send_ieth(struct ib_send_wr *wr)
{
	switch (wr->opcode) {
	case IB_WR_SEND_WITH_IMM:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		return wr->ex.imm_data;

	case IB_WR_SEND_WITH_INV:
		return cpu_to_be32(wr->ex.invalidate_rkey);

	default:
		return 0;
	}
}
static u8 calc_sig(void *wqe, int size)
{
	u8 *p = wqe;
	u8 res = 0;
	int i;

	for (i = 0; i < size; i++)
		res ^= p[i];

	return ~res;
}

static u8 wq_sig(void *wqe)
{
	return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
}
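
/*
 * Inline data below is copied directly into the send queue and may
 * wrap: when the copy would run past sq.qend, the remainder continues
 * at the start of the queue buffer.  The 16-byte unit count returned in
 * *sz feeds the ds field of the control segment.
 */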
static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
			    void *wqe, int *sz)
{
	struct mlx5_wqe_inline_seg *seg;
	void *qend = qp->sq.qend;
	void *addr;
	int inl = 0;
	int copy;
	int len;
	int i;

	seg = wqe;
	wqe += sizeof(*seg);
	for (i = 0; i < wr->num_sge; i++) {
		addr = (void *)(unsigned long)(wr->sg_list[i].addr);
		len  = wr->sg_list[i].length;
		inl += len;

		if (unlikely(inl > qp->max_inline_data))
			return -ENOMEM;

		if (unlikely(wqe + len > qend)) {
			copy = qend - wqe;
			memcpy(wqe, addr, copy);
			addr += copy;
			len -= copy;
			wqe = mlx5_get_send_wqe(qp, 0);
		}
		memcpy(wqe, addr, len);
		wqe += len;
	}

	seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);

	*sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;

	return 0;
}
static u16 prot_field_size(enum ib_signature_type type)
{
	switch (type) {
	case IB_SIG_TYPE_T10_DIF:
		return MLX5_DIF_SIZE;
	default:
		return 0;
	}
}
static u8 bs_selector(int block_size)
{
	switch (block_size) {
	case 512:	    return 0x1;
	case 520:	    return 0x2;
	case 4096:	    return 0x3;
	case 4160:	    return 0x4;
	case 1073741824:    return 0x5;
	default:	    return 0;
	}
}
static int format_selector(struct ib_sig_attrs *attr,
			   struct ib_sig_domain *domain,
			   int *selector)
{

#define FORMAT_DIF_NONE		0
#define FORMAT_DIF_CRC_INC	8
#define FORMAT_DIF_CRC_NO_INC	12
#define FORMAT_DIF_CSUM_INC	13
#define FORMAT_DIF_CSUM_NO_INC	14

	switch (domain->sig.dif.type) {
	case IB_T10DIF_NONE:
		/* No DIF */
		*selector = FORMAT_DIF_NONE;
		break;
	case IB_T10DIF_TYPE1: /* Fall through */
	case IB_T10DIF_TYPE2:
		switch (domain->sig.dif.bg_type) {
		case IB_T10DIF_CRC:
			*selector = FORMAT_DIF_CRC_INC;
			break;
		case IB_T10DIF_CSUM:
			*selector = FORMAT_DIF_CSUM_INC;
			break;
		default:
			return 1;
		}
		break;
	case IB_T10DIF_TYPE3:
		switch (domain->sig.dif.bg_type) {
		case IB_T10DIF_CRC:
			*selector = domain->sig.dif.type3_inc_reftag ?
					   FORMAT_DIF_CRC_INC :
					   FORMAT_DIF_CRC_NO_INC;
			break;
		case IB_T10DIF_CSUM:
			*selector = domain->sig.dif.type3_inc_reftag ?
					   FORMAT_DIF_CSUM_INC :
					   FORMAT_DIF_CSUM_NO_INC;
			break;
		default:
			return 1;
		}
		break;
	default:
		return 1;
	}

	return 0;
}
static int mlx5_set_bsf(struct ib_mr *sig_mr,
			struct ib_sig_attrs *sig_attrs,
			struct mlx5_bsf *bsf, u32 data_size)
{
	struct mlx5_core_sig_ctx *msig = to_mmr(sig_mr)->sig;
	struct mlx5_bsf_basic *basic = &bsf->basic;
	struct ib_sig_domain *mem = &sig_attrs->mem;
	struct ib_sig_domain *wire = &sig_attrs->wire;
	int ret, selector;

	memset(bsf, 0, sizeof(*bsf));
	switch (sig_attrs->mem.sig_type) {
	case IB_SIG_TYPE_T10_DIF:
		if (sig_attrs->wire.sig_type != IB_SIG_TYPE_T10_DIF)
			return -EINVAL;

		/* Input domain check byte mask */
		basic->check_byte_mask = sig_attrs->check_mask;
		if (mem->sig.dif.pi_interval == wire->sig.dif.pi_interval &&
		    mem->sig.dif.type == wire->sig.dif.type) {
			/* Same block structure */
			basic->bsf_size_sbs = 1 << 4;
			if (mem->sig.dif.bg_type == wire->sig.dif.bg_type)
				basic->wire.copy_byte_mask |= 0xc0;
			if (mem->sig.dif.app_tag == wire->sig.dif.app_tag)
				basic->wire.copy_byte_mask |= 0x30;
			if (mem->sig.dif.ref_tag == wire->sig.dif.ref_tag)
				basic->wire.copy_byte_mask |= 0x0f;
		} else
			basic->wire.bs_selector = bs_selector(wire->sig.dif.pi_interval);

		basic->mem.bs_selector = bs_selector(mem->sig.dif.pi_interval);
		basic->raw_data_size = cpu_to_be32(data_size);

		ret = format_selector(sig_attrs, mem, &selector);
		if (ret)
			return -EINVAL;
		basic->m_bfs_psv = cpu_to_be32(selector << 24 |
					       msig->psv_memory.psv_idx);

		ret = format_selector(sig_attrs, wire, &selector);
		if (ret)
			return -EINVAL;
		basic->w_bfs_psv = cpu_to_be32(selector << 24 |
					       msig->psv_wire.psv_idx);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int set_sig_data_segment(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
				void **seg, int *size)
{
	struct ib_sig_attrs *sig_attrs = wr->wr.sig_handover.sig_attrs;
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	struct mlx5_bsf *bsf;
	u32 data_len = wr->sg_list->length;
	u32 data_key = wr->sg_list->lkey;
	u64 data_va = wr->sg_list->addr;
	int ret;
	int wqe_size;

	if (!wr->wr.sig_handover.prot ||
	    (data_key == wr->wr.sig_handover.prot->lkey &&
	     data_va == wr->wr.sig_handover.prot->addr &&
	     data_len == wr->wr.sig_handover.prot->length)) {
		/**
		 * Source domain doesn't contain signature information
		 * or data and protection are interleaved in memory.
		 * So need construct:
		 *                  ------------------
		 *                 |     data_klm     |
		 *                  ------------------
		 *                 |       BSF        |
		 *                  ------------------
		 **/
		struct mlx5_klm *data_klm = *seg;

		data_klm->bcount = cpu_to_be32(data_len);
		data_klm->key = cpu_to_be32(data_key);
		data_klm->va = cpu_to_be64(data_va);
		wqe_size = ALIGN(sizeof(*data_klm), 64);
	} else {
		/**
		 * Source domain contains signature information
		 * So need construct a strided block format:
		 *               ---------------------------
		 *              |     stride_block_ctrl     |
		 *               ---------------------------
		 *              |          data_klm         |
		 *               ---------------------------
		 *              |          prot_klm         |
		 *               ---------------------------
		 *              |             BSF           |
		 *               ---------------------------
		 **/
		struct mlx5_stride_block_ctrl_seg *sblock_ctrl;
		struct mlx5_stride_block_entry *data_sentry;
		struct mlx5_stride_block_entry *prot_sentry;
		u32 prot_key = wr->wr.sig_handover.prot->lkey;
		u64 prot_va = wr->wr.sig_handover.prot->addr;
		u16 block_size = sig_attrs->mem.sig.dif.pi_interval;
		int prot_size;

		sblock_ctrl = *seg;
		data_sentry = (void *)sblock_ctrl + sizeof(*sblock_ctrl);
		prot_sentry = (void *)data_sentry + sizeof(*data_sentry);

		prot_size = prot_field_size(sig_attrs->mem.sig_type);
		if (!prot_size) {
			pr_err("Bad block size given: %u\n", block_size);
			return -EINVAL;
		}
		sblock_ctrl->bcount_per_cycle = cpu_to_be32(block_size +
							    prot_size);
		sblock_ctrl->op = cpu_to_be32(MLX5_STRIDE_BLOCK_OP);
		sblock_ctrl->repeat_count = cpu_to_be32(data_len / block_size);
		sblock_ctrl->num_entries = cpu_to_be16(2);

		data_sentry->bcount = cpu_to_be16(block_size);
		data_sentry->key = cpu_to_be32(data_key);
		data_sentry->va = cpu_to_be64(data_va);
		data_sentry->stride = cpu_to_be16(block_size);

		prot_sentry->bcount = cpu_to_be16(prot_size);
		prot_sentry->key = cpu_to_be32(prot_key);
		prot_sentry->va = cpu_to_be64(prot_va);
		prot_sentry->stride = cpu_to_be16(prot_size);

		wqe_size = ALIGN(sizeof(*sblock_ctrl) + sizeof(*data_sentry) +
				 sizeof(*prot_sentry), 64);
	}

	*seg += wqe_size;
	*size += wqe_size / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	bsf = *seg;
	ret = mlx5_set_bsf(sig_mr, sig_attrs, bsf, data_len);
	if (ret)
		return -EINVAL;

	*seg += sizeof(*bsf);
	*size += sizeof(*bsf) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	return 0;
}
static void set_sig_mkey_segment(struct mlx5_mkey_seg *seg,
				 struct ib_send_wr *wr, u32 nelements,
				 u32 length, u32 pdn)
{
	struct ib_mr *sig_mr = wr->wr.sig_handover.sig_mr;
	u32 sig_key = sig_mr->rkey;
	u8 sigerr = to_mmr(sig_mr)->sig->sigerr_count & 1;

	memset(seg, 0, sizeof(*seg));

	seg->flags = get_umr_flags(wr->wr.sig_handover.access_flags) |
				   MLX5_ACCESS_MODE_KLM;
	seg->qpn_mkey7_0 = cpu_to_be32((sig_key & 0xff) | 0xffffff00);
	seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL | sigerr << 26 |
				    MLX5_MKEY_BSF_EN | pdn);
	seg->len = cpu_to_be64(length);
	seg->xlt_oct_size = cpu_to_be32(be16_to_cpu(get_klm_octo(nelements)));
	seg->bsfs_octo_size = cpu_to_be32(MLX5_MKEY_BSF_OCTO_SIZE);
}
static void set_sig_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
				struct ib_send_wr *wr, u32 nelements)
{
	memset(umr, 0, sizeof(*umr));

	umr->flags = MLX5_FLAGS_INLINE | MLX5_FLAGS_CHECK_FREE;
	umr->klm_octowords = get_klm_octo(nelements);
	umr->bsf_octowords = cpu_to_be16(MLX5_MKEY_BSF_OCTO_SIZE);
	umr->mkey_mask = sig_mkey_mask();
}
static int set_sig_umr_wr(struct ib_send_wr *wr, struct mlx5_ib_qp *qp,
			  void **seg, int *size)
{
	struct mlx5_ib_mr *sig_mr = to_mmr(wr->wr.sig_handover.sig_mr);
	u32 pdn = get_pd(qp)->pdn;
	u32 klm_oct_size;
	int region_len, ret;

	if (unlikely(wr->num_sge != 1) ||
	    unlikely(wr->wr.sig_handover.access_flags &
		     IB_ACCESS_REMOTE_ATOMIC) ||
	    unlikely(!sig_mr->sig) || unlikely(!qp->signature_en) ||
	    unlikely(!sig_mr->sig->sig_status_checked))
		return -EINVAL;

	/* length of the protected region, data + protection */
	region_len = wr->sg_list->length;
	if (wr->wr.sig_handover.prot &&
	    (wr->wr.sig_handover.prot->lkey != wr->sg_list->lkey  ||
	     wr->wr.sig_handover.prot->addr != wr->sg_list->addr  ||
	     wr->wr.sig_handover.prot->length != wr->sg_list->length))
		region_len += wr->wr.sig_handover.prot->length;

	/**
	 * KLM octoword size - if protection was provided
	 * then we use strided block format (3 octowords),
	 * else we use single KLM (1 octoword)
	 **/
	klm_oct_size = wr->wr.sig_handover.prot ? 3 : 1;

	set_sig_umr_segment(*seg, wr, klm_oct_size);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	set_sig_mkey_segment(*seg, wr, klm_oct_size, region_len, pdn);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);

	ret = set_sig_data_segment(wr, qp, seg, size);
	if (ret)
		return ret;

	sig_mr->sig->sig_status_checked = false;
	return 0;
}
static int set_psv_wr(struct ib_sig_domain *domain,
		      u32 psv_idx, void **seg, int *size)
{
	struct mlx5_seg_set_psv *psv_seg = *seg;

	memset(psv_seg, 0, sizeof(*psv_seg));
	psv_seg->psv_num = cpu_to_be32(psv_idx);
	switch (domain->sig_type) {
	case IB_SIG_TYPE_T10_DIF:
		psv_seg->transient_sig = cpu_to_be32(domain->sig.dif.bg << 16 |
						     domain->sig.dif.app_tag);
		psv_seg->ref_tag = cpu_to_be32(domain->sig.dif.ref_tag);

		*seg += sizeof(*psv_seg);
		*size += sizeof(*psv_seg) / 16;
		break;

	default:
		pr_err("Bad signature type given.\n");
		return 1;
	}

	return 0;
}
static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
			  struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
{
	int writ = 0;
	int li;

	li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
	if (unlikely(wr->send_flags & IB_SEND_INLINE))
		return -EINVAL;

	set_frwr_umr_segment(*seg, wr, li);
	*seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
	*size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	set_mkey_segment(*seg, wr, li, &writ);
	*seg += sizeof(struct mlx5_mkey_seg);
	*size += sizeof(struct mlx5_mkey_seg) / 16;
	if (unlikely((*seg == qp->sq.qend)))
		*seg = mlx5_get_send_wqe(qp, 0);
	if (!li) {
		if (unlikely(wr->wr.fast_reg.page_list_len >
			     wr->wr.fast_reg.page_list->max_page_list_len))
			return -ENOMEM;

		set_frwr_pages(*seg, wr, mdev, pd, writ);
		*seg += sizeof(struct mlx5_wqe_data_seg);
		*size += (sizeof(struct mlx5_wqe_data_seg) / 16);
	}
	return 0;
}
static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
{
	__be32 *p = NULL;
	int tidx = idx;
	int i, j;

	pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
	for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
		if ((i & 0xf) == 0) {
			void *buf = mlx5_get_send_wqe(qp, tidx);

			tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
			p = buf;
			j = 0;
		}
		pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
			 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
			 be32_to_cpu(p[j + 3]));
	}
}
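
/*
 * Copy a WQE into the BlueFlame register one 64-byte chunk at a time,
 * wrapping the source pointer back to the start of the send queue
 * buffer when it reaches the end.
 */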
static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
			 unsigned bytecnt, struct mlx5_ib_qp *qp)
{
	while (bytecnt > 0) {
		/*
		 * __iowrite64_copy() takes its count in 64-bit words,
		 * so 8 words copy one 64-byte chunk.
		 */
		__iowrite64_copy(dst, src, 8);
		dst += 8;
		src += 8;
		bytecnt -= 64;
		if (unlikely(src == qp->sq.qend))
			src = mlx5_get_send_wqe(qp, 0);
	}
}
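
/*
 * Choose the fence mode for a WQE: a fenced local invalidate needs
 * strong ordering; an IB_SEND_FENCE posted while a fence is still
 * cached escalates to small-and-fence; otherwise the cached fence (if
 * any) is inherited.
 */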
static u8 get_fence(u8 fence, struct ib_send_wr *wr)
{
	if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
		     wr->send_flags & IB_SEND_FENCE))
		return MLX5_FENCE_MODE_STRONG_ORDERING;

	if (unlikely(fence)) {
		if (wr->send_flags & IB_SEND_FENCE)
			return MLX5_FENCE_MODE_SMALL_AND_FENCE;
		else
			return fence;
	} else {
		return 0;
	}
}
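
/*
 * Reserve the next send WQE slot: check for SQ overflow against the
 * send CQ, locate the WQE by cur_post modulo the (power-of-two) queue
 * size, and prime its control segment. *size is counted in 16-byte
 * units throughout.
 */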
static int begin_wqe(struct mlx5_ib_qp *qp, void **seg,
		     struct mlx5_wqe_ctrl_seg **ctrl,
		     struct ib_send_wr *wr, int *idx,
		     int *size, int nreq)
{
	if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)))
		return -ENOMEM;

	*idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
	*seg = mlx5_get_send_wqe(qp, *idx);
	*ctrl = *seg;
	/* clear the signature and fm_ce_se bytes before OR-ing in flags */
	*(uint32_t *)(*seg + 8) = 0;
	(*ctrl)->imm = send_ieth(wr);
	(*ctrl)->fm_ce_se = qp->sq_signal_bits |
		(wr->send_flags & IB_SEND_SIGNALED ?
		 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
		(wr->send_flags & IB_SEND_SOLICITED ?
		 MLX5_WQE_CTRL_SOLICITED : 0);

	*seg += sizeof(**ctrl);
	*size = sizeof(**ctrl) / 16;

	return 0;
}
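
/*
 * Seal a WQE: write the opcode/index word and the DS (descriptor size)
 * count, then advance cur_post by the number of 64-byte basic blocks
 * the WQE occupies. For example, a ctrl segment (16 bytes) plus a raddr
 * segment (16 bytes) plus one data segment (16 bytes) gives size = 3,
 * i.e. 48 bytes, which DIV_ROUND_UP(48, MLX5_SEND_WQE_BB) rounds up to
 * one 64-byte basic block.
 */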
static void finish_wqe(struct mlx5_ib_qp *qp,
		       struct mlx5_wqe_ctrl_seg *ctrl,
		       u8 size, unsigned idx, u64 wr_id,
		       int nreq, u8 fence, u8 next_fence,
		       u32 mlx5_opcode)
{
	u8 opmod = 0;

	ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
					     mlx5_opcode | ((u32)opmod << 24));
	ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
	ctrl->fm_ce_se |= fence;
	qp->fm_cache = next_fence;
	if (unlikely(qp->wq_sig))
		ctrl->signature = wq_sig(ctrl);

	qp->sq.wrid[idx] = wr_id;
	qp->sq.w_list[idx].opcode = mlx5_opcode;
	qp->sq.wqe_head[idx] = qp->sq.head + nreq;
	qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
	qp->sq.w_list[idx].next = qp->sq.cur_post;
}
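
/*
 * Post a list of send work requests. Consumers reach this through the
 * verbs API; a minimal, hypothetical sketch (dma_addr, len and lkey are
 * placeholders for an already-registered buffer):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.opcode     = IB_WR_SEND,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.send_flags = IB_SEND_SIGNALED,
 *	}, *bad_wr;
 *	int err = ib_post_send(qp, &wr, &bad_wr);
 *
 * On failure, *bad_wr points at the first work request that was not
 * posted.
 */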
int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
		      struct ib_send_wr **bad_wr)
{
	struct mlx5_wqe_ctrl_seg *ctrl = NULL;	/* compiler warning */
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_ib_mr *mr;
	struct mlx5_wqe_data_seg *dpseg;
	struct mlx5_wqe_xrc_seg *xrc;
	struct mlx5_bf *bf = qp->bf;
	int uninitialized_var(size);
	void *qend = qp->sq.qend;
	unsigned long flags;
	unsigned idx;
	int err = 0;
	int inl = 0;
	int num_sge;
	void *seg;
	int nreq;
	int i;
	u8 next_fence = 0;
	u8 fence;

	spin_lock_irqsave(&qp->sq.lock, flags);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (unlikely(wr->opcode >= ARRAY_SIZE(mlx5_ib_opcode))) {
			mlx5_ib_warn(dev, "invalid opcode %d\n", wr->opcode);
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		fence = qp->fm_cache;
		num_sge = wr->num_sge;
		if (unlikely(num_sge > qp->sq.max_gs)) {
			mlx5_ib_warn(dev, "too many sges (%d > %d)\n",
				     num_sge, qp->sq.max_gs);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		err = begin_wqe(qp, &seg, &ctrl, wr, &idx, &size, nreq);
		if (err) {
			mlx5_ib_warn(dev, "failed to begin WQE\n");
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		switch (ibqp->qp_type) {
		case IB_QPT_XRC_INI:
			xrc = seg;
			xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
			seg += sizeof(*xrc);
			size += sizeof(*xrc) / 16;
			/* fall through to the RC opcode handling */
		case IB_QPT_RC:
			switch (wr->opcode) {
			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
				mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
				err = -ENOSYS;
				*bad_wr = wr;
				goto out;

			case IB_WR_LOCAL_INV:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
				ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev,
						     to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "failed to build local invalidate WQE\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_FAST_REG_MR:
				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
				ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
				err = set_frwr_li_wr(&seg, wr, &size, mdev,
						     to_mpd(ibqp->pd), qp);
				if (err) {
					mlx5_ib_warn(dev, "failed to build fast register WQE\n");
					*bad_wr = wr;
					goto out;
				}
				num_sge = 0;
				break;

			case IB_WR_REG_SIG_MR:
				/*
				 * Signature handover posts three WQEs: one
				 * UMR plus one SET_PSV per signature domain
				 * (memory and wire).
				 */
				qp->sq.wr_data[idx] = IB_WR_REG_SIG_MR;
				mr = to_mmr(wr->wr.sig_handover.sig_mr);

				ctrl->imm = cpu_to_be32(mr->ibmr.rkey);
				err = set_sig_umr_wr(wr, qp, &seg, &size);
				if (err) {
					mlx5_ib_warn(dev, "failed to build signature UMR WQE\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_UMR);
				/*
				 * SET_PSV WQEs are not signaled and solicited
				 * on error
				 */
				wr->send_flags &= ~IB_SEND_SIGNALED;
				wr->send_flags |= IB_SEND_SOLICITED;
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "failed to begin SET_PSV WQE\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->mem,
						 mr->sig->psv_memory.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "failed to build memory domain SET_PSV WQE\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				err = begin_wqe(qp, &seg, &ctrl, wr,
						&idx, &size, nreq);
				if (err) {
					mlx5_ib_warn(dev, "failed to begin SET_PSV WQE\n");
					err = -ENOMEM;
					*bad_wr = wr;
					goto out;
				}

				next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
				err = set_psv_wr(&wr->wr.sig_handover.sig_attrs->wire,
						 mr->sig->psv_wire.psv_idx, &seg,
						 &size);
				if (err) {
					mlx5_ib_warn(dev, "failed to build wire domain SET_PSV WQE\n");
					*bad_wr = wr;
					goto out;
				}

				finish_wqe(qp, ctrl, size, idx, wr->wr_id,
					   nreq, get_fence(fence, wr),
					   next_fence, MLX5_OPCODE_SET_PSV);
				num_sge = 0;
				goto skip_psv;

			default:
				break;
			}
			break;

		case IB_QPT_UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(seg, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				seg += sizeof(struct mlx5_wqe_raddr_seg);
				size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
				break;

			default:
				break;
			}
			break;

		case IB_QPT_SMI:
		case IB_QPT_GSI:
		case IB_QPT_UD:
			set_datagram_seg(seg, wr);
			seg += sizeof(struct mlx5_wqe_datagram_seg);
			size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
			if (unlikely(seg == qend))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		case MLX5_IB_QPT_REG_UMR:
			if (wr->opcode != MLX5_IB_WR_UMR) {
				err = -EINVAL;
				mlx5_ib_warn(dev, "bad opcode\n");
				goto out;
			}
			qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
			ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
			set_reg_umr_segment(seg, wr);
			seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
			size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
			if (unlikely(seg == qend))
				seg = mlx5_get_send_wqe(qp, 0);
			set_reg_mkey_segment(seg, wr);
			seg += sizeof(struct mlx5_mkey_seg);
			size += sizeof(struct mlx5_mkey_seg) / 16;
			if (unlikely(seg == qend))
				seg = mlx5_get_send_wqe(qp, 0);
			break;

		default:
			break;
		}

		if (wr->send_flags & IB_SEND_INLINE && num_sge) {
			int uninitialized_var(sz);

			err = set_data_inl_seg(qp, wr, seg, &sz);
			if (unlikely(err)) {
				mlx5_ib_warn(dev, "failed to build inline data segment\n");
				*bad_wr = wr;
				goto out;
			}
			inl = 1;
			size += sz;
		} else {
			dpseg = seg;
			for (i = 0; i < num_sge; i++) {
				if (unlikely(dpseg == qend)) {
					seg = mlx5_get_send_wqe(qp, 0);
					dpseg = seg;
				}
				if (likely(wr->sg_list[i].length)) {
					set_data_ptr_seg(dpseg, wr->sg_list + i);
					size += sizeof(struct mlx5_wqe_data_seg) / 16;
					dpseg++;
				}
			}
		}

		finish_wqe(qp, ctrl, size, idx, wr->wr_id, nreq,
			   get_fence(fence, wr), next_fence,
			   mlx5_ib_opcode[wr->opcode]);
skip_psv:
		if (0)
			dump_wqe(qp, idx, size);
	}

out:
	if (likely(nreq)) {
		qp->sq.head += nreq;

		/* Make sure that descriptors are written before
		 * updating doorbell record and ringing the doorbell
		 */
		wmb();

		qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);

		/* Make sure doorbell record is visible to the HCA before
		 * we hit doorbell
		 */
		wmb();

		if (bf->need_lock)
			spin_lock(&bf->lock);

		/* TBD enable WC: the BlueFlame copy path is compiled out */
		if (0 && nreq == 1 && bf->uuarn && inl && size > 1 &&
		    size <= bf->buf_size / 16) {
			mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl,
				     ALIGN(size * 16, 64), qp);
			/* wc_wmb(); */
		} else {
			mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
				     MLX5_GET_DOORBELL_LOCK(&bf->lock32));
			/* Make sure doorbells don't leak out of SQ spinlock
			 * and reach the HCA out of order.
			 */
			mmiowb();
		}
		bf->offset ^= bf->buf_size;
		if (bf->need_lock)
			spin_unlock(&bf->lock);
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);

	return err;
}

static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
{
	sig->signature = calc_sig(sig, size);
}
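
/*
 * Post a list of receive work requests. Reached through the verbs API;
 * a minimal, hypothetical sketch (dma_addr, len and lkey are
 * placeholders for a registered receive buffer):
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = lkey,
 *	};
 *	struct ib_recv_wr wr = { .sg_list = &sge, .num_sge = 1 }, *bad_wr;
 *	int err = ib_post_recv(qp, &wr, &bad_wr);
 */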
int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
		      struct ib_recv_wr **bad_wr)
{
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_wqe_data_seg *scat;
	struct mlx5_rwqe_sig *sig;
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;

	spin_lock_irqsave(&qp->rq.lock, flags);

	ind = qp->rq.head & (qp->rq.wqe_cnt - 1);

	for (nreq = 0; wr; nreq++, wr = wr->next) {
		if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		scat = get_recv_wqe(qp, ind);
		if (qp->wq_sig)
			scat++;

		for (i = 0; i < wr->num_sge; i++)
			set_data_ptr_seg(scat + i, wr->sg_list + i);

		if (i < qp->rq.max_gs) {
			scat[i].byte_count = 0;
			scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
			scat[i].addr = 0;
		}

		if (qp->wq_sig) {
			sig = (struct mlx5_rwqe_sig *)scat;
			set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
		}

		qp->rq.wrid[ind] = wr->wr_id;

		ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
	}

out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/* Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();

		*qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);

	return err;
}

static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
{
	switch (mlx5_state) {
	case MLX5_QP_STATE_RST:		return IB_QPS_RESET;
	case MLX5_QP_STATE_INIT:	return IB_QPS_INIT;
	case MLX5_QP_STATE_RTR:		return IB_QPS_RTR;
	case MLX5_QP_STATE_RTS:		return IB_QPS_RTS;
	case MLX5_QP_STATE_SQ_DRAINING:
	case MLX5_QP_STATE_SQD:		return IB_QPS_SQD;
	case MLX5_QP_STATE_SQER:	return IB_QPS_SQE;
	case MLX5_QP_STATE_ERR:		return IB_QPS_ERR;
	default:			return -1;
	}
}

static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
{
	switch (mlx5_mig_state) {
	case MLX5_QP_PM_ARMED:		return IB_MIG_ARMED;
	case MLX5_QP_PM_REARM:		return IB_MIG_REARM;
	case MLX5_QP_PM_MIGRATED:	return IB_MIG_MIGRATED;
	default:			return -1;
	}
}

static int to_ib_qp_access_flags(int mlx5_flags)
{
	int ib_flags = 0;

	if (mlx5_flags & MLX5_QP_BIT_RRE)
		ib_flags |= IB_ACCESS_REMOTE_READ;
	if (mlx5_flags & MLX5_QP_BIT_RWE)
		ib_flags |= IB_ACCESS_REMOTE_WRITE;
	if (mlx5_flags & MLX5_QP_BIT_RAE)
		ib_flags |= IB_ACCESS_REMOTE_ATOMIC;

	return ib_flags;
}

static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev,
			  struct ib_ah_attr *ib_ah_attr,
			  struct mlx5_qp_path *path)
{
	struct mlx5_core_dev *dev = ibdev->mdev;

	memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
	ib_ah_attr->port_num = path->port;

	if (ib_ah_attr->port_num == 0 ||
	    ib_ah_attr->port_num > dev->caps.num_ports)
		return;

	ib_ah_attr->sl = path->sl & 0xf;

	ib_ah_attr->dlid = be16_to_cpu(path->rlid);
	ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
	ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
	ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index;
		ib_ah_attr->grh.hop_limit = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
	}
}
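
/*
 * Query QP attributes from firmware and translate them to verbs form.
 * Consumers call this via the verbs API, e.g. (hypothetical sketch):
 *
 *	struct ib_qp_attr attr;
 *	struct ib_qp_init_attr init_attr;
 *	int err = ib_query_qp(qp, &attr, IB_QP_STATE, &init_attr);
 */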
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
		     int qp_attr_mask, struct ib_qp_init_attr *qp_init_attr)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	struct mlx5_ib_qp *qp = to_mqp(ibqp);
	struct mlx5_query_qp_mbox_out *outb;
	struct mlx5_qp_context *context;
	int mlx5_state;
	int err = 0;

	mutex_lock(&qp->mutex);
	outb = kzalloc(sizeof(*outb), GFP_KERNEL);
	if (!outb) {
		err = -ENOMEM;
		goto out;
	}
	context = &outb->ctx;
	err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
	if (err)
		goto out_free;

	mlx5_state = be32_to_cpu(context->flags) >> 28;

	qp->state = to_ib_qp_state(mlx5_state);
	qp_attr->qp_state = qp->state;
	qp_attr->path_mtu = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey = be32_to_cpu(context->qkey);
	qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
		qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
	qp_attr->port_num = context->pri_path.port;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout = context->pri_path.ackto_lt >> 3;
	qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
	qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
	qp_attr->cur_qp_state = qp_attr->qp_state;
	qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
	qp_attr->cap.max_recv_sge = qp->rq.max_gs;

	if (!ibqp->uobject) {
		qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
		qp_attr->cap.max_send_sge = qp->sq.max_gs;
	} else {
		qp_attr->cap.max_send_wr = 0;
		qp_attr->cap.max_send_sge = 0;
	}

	/* We don't support inline sends for kernel QPs (yet), and we
	 * don't know what userspace's value should be.
	 */
	qp_attr->cap.max_inline_data = 0;

	qp_init_attr->cap = qp_attr->cap;

	qp_init_attr->create_flags = 0;
	if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
		qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;

	qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
		IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;

out_free:
	kfree(outb);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
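
/*
 * XRC domain allocation: gate on device XRC support, then obtain an
 * xrcdn from mlx5_core. The returned ib_xrcd is reference-counted by
 * the IB core.
 */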
struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
				   struct ib_ucontext *context,
				   struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_xrcd *xrcd;
	int err;

	if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
		return ERR_PTR(-ENOSYS);

	xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
	if (!xrcd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
	if (err) {
		kfree(xrcd);
		return ERR_PTR(-ENOMEM);
	}

	return &xrcd->ibxrcd;
}

int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
{
	struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
	u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
	int err;

	err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
	if (err) {
		mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
		return err;
	}

	kfree(xrcd);

	return 0;
}