2 * Copyright (c) 2013, Mellanox Technologies inc. All rights reserved.
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #include <linux/module.h>
34 #include <rdma/ib_umem.h>
38 /* not supported currently */
39 static int wq_signature;
42 MLX5_IB_ACK_REQ_FREQ = 8,
46 MLX5_IB_DEFAULT_SCHED_QUEUE = 0x83,
47 MLX5_IB_DEFAULT_QP0_SCHED_QUEUE = 0x3f,
48 MLX5_IB_LINK_TYPE_IB = 0,
49 MLX5_IB_LINK_TYPE_ETH = 1
53 MLX5_IB_SQ_STRIDE = 6,
54 MLX5_IB_CACHE_LINE_SIZE = 64,
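/*
 * Send WQEs are built from 64-byte basic blocks (MLX5_SEND_WQE_BB), so the
 * stride shift of 6 above (1 << 6 == 64) converts a send WQE index into a
 * byte offset; see mlx5_get_send_wqe() below.
 */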
57 static const u32 mlx5_ib_opcode[] = {
58 [IB_WR_SEND] = MLX5_OPCODE_SEND,
59 [IB_WR_SEND_WITH_IMM] = MLX5_OPCODE_SEND_IMM,
60 [IB_WR_RDMA_WRITE] = MLX5_OPCODE_RDMA_WRITE,
61 [IB_WR_RDMA_WRITE_WITH_IMM] = MLX5_OPCODE_RDMA_WRITE_IMM,
62 [IB_WR_RDMA_READ] = MLX5_OPCODE_RDMA_READ,
63 [IB_WR_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_CS,
64 [IB_WR_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_FA,
65 [IB_WR_SEND_WITH_INV] = MLX5_OPCODE_SEND_INVAL,
66 [IB_WR_LOCAL_INV] = MLX5_OPCODE_UMR,
67 [IB_WR_FAST_REG_MR] = MLX5_OPCODE_UMR,
68 [IB_WR_MASKED_ATOMIC_CMP_AND_SWP] = MLX5_OPCODE_ATOMIC_MASKED_CS,
69 [IB_WR_MASKED_ATOMIC_FETCH_AND_ADD] = MLX5_OPCODE_ATOMIC_MASKED_FA,
70 [MLX5_IB_WR_UMR] = MLX5_OPCODE_UMR,
76 unsigned int page_shift;
83 static int is_qp0(enum ib_qp_type qp_type)
85 return qp_type == IB_QPT_SMI;
88 static int is_qp1(enum ib_qp_type qp_type)
90 return qp_type == IB_QPT_GSI;
93 static int is_sqp(enum ib_qp_type qp_type)
95 return is_qp0(qp_type) || is_qp1(qp_type);
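/* QP0 (SMI) and QP1 (GSI) are the special QPs; several paths below treat
 * them differently from ordinary transport QPs.
 */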
98 static void *get_wqe(struct mlx5_ib_qp *qp, int offset)
100 return mlx5_buf_offset(&qp->buf, offset);
103 static void *get_recv_wqe(struct mlx5_ib_qp *qp, int n)
105 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift));
108 void *mlx5_get_send_wqe(struct mlx5_ib_qp *qp, int n)
110 return get_wqe(qp, qp->sq.offset + (n << MLX5_IB_SQ_STRIDE));
113 static void mlx5_ib_qp_event(struct mlx5_core_qp *qp, int type)
115 struct ib_qp *ibqp = &to_mibqp(qp)->ibqp;
116 struct ib_event event;
118 if (type == MLX5_EVENT_TYPE_PATH_MIG)
119 to_mibqp(qp)->port = to_mibqp(qp)->alt_port;
121 if (ibqp->event_handler) {
122 event.device = ibqp->device;
123 event.element.qp = ibqp;
125 case MLX5_EVENT_TYPE_PATH_MIG:
126 event.event = IB_EVENT_PATH_MIG;
128 case MLX5_EVENT_TYPE_COMM_EST:
129 event.event = IB_EVENT_COMM_EST;
131 case MLX5_EVENT_TYPE_SQ_DRAINED:
132 event.event = IB_EVENT_SQ_DRAINED;
134 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
135 event.event = IB_EVENT_QP_LAST_WQE_REACHED;
137 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
138 event.event = IB_EVENT_QP_FATAL;
140 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
141 event.event = IB_EVENT_PATH_MIG_ERR;
143 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
144 event.event = IB_EVENT_QP_REQ_ERR;
146 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
147 event.event = IB_EVENT_QP_ACCESS_ERR;
150 pr_warn("mlx5_ib: Unexpected event type %d on QP %06x\n", type, qp->qpn);
154 ibqp->event_handler(&event, ibqp->qp_context);
158 static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
159 int has_rq, struct mlx5_ib_qp *qp, struct mlx5_ib_create_qp *ucmd)
164 /* Sanity check RQ size before proceeding */
165 if (cap->max_recv_wr > dev->mdev.caps.max_wqes)
171 qp->rq.wqe_shift = 0;
174 qp->rq.wqe_cnt = ucmd->rq_wqe_count;
175 qp->rq.wqe_shift = ucmd->rq_wqe_shift;
176 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
177 qp->rq.max_post = qp->rq.wqe_cnt;
179 wqe_size = qp->wq_sig ? sizeof(struct mlx5_wqe_signature_seg) : 0;
180 wqe_size += cap->max_recv_sge * sizeof(struct mlx5_wqe_data_seg);
181 wqe_size = roundup_pow_of_two(wqe_size);
182 wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
183 wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
184 qp->rq.wqe_cnt = wq_size / wqe_size;
185 if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
186 mlx5_ib_dbg(dev, "wqe_size %d, max %d\n", wqe_size,
188 dev->mdev.caps.max_rq_desc_sz);
191 qp->rq.wqe_shift = ilog2(wqe_size);
192 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig;
193 qp->rq.max_post = qp->rq.wqe_cnt;
200 static int sq_overhead(enum ib_qp_type qp_type)
206 size += sizeof(struct mlx5_wqe_xrc_seg);
209 size += sizeof(struct mlx5_wqe_ctrl_seg) +
210 sizeof(struct mlx5_wqe_atomic_seg) +
211 sizeof(struct mlx5_wqe_raddr_seg);
218 size += sizeof(struct mlx5_wqe_ctrl_seg) +
219 sizeof(struct mlx5_wqe_raddr_seg) +
220 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
221 sizeof(struct mlx5_mkey_seg);
227 size += sizeof(struct mlx5_wqe_ctrl_seg) +
228 sizeof(struct mlx5_wqe_datagram_seg);
231 case MLX5_IB_QPT_REG_UMR:
232 size += sizeof(struct mlx5_wqe_ctrl_seg) +
233 sizeof(struct mlx5_wqe_umr_ctrl_seg) +
234 sizeof(struct mlx5_mkey_seg);
244 static int calc_send_wqe(struct ib_qp_init_attr *attr)
249 size = sq_overhead(attr->qp_type);
253 if (attr->cap.max_inline_data) {
254 inl_size = size + sizeof(struct mlx5_wqe_inline_seg) +
255 attr->cap.max_inline_data;
258 size += attr->cap.max_send_sge * sizeof(struct mlx5_wqe_data_seg);
260 return ALIGN(max_t(int, inl_size, size), MLX5_SEND_WQE_BB);
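/*
 * Illustration (not from the original source): for an RC QP with four send
 * SGEs and no inline data, sq_overhead() above contributes ctrl (16) +
 * atomic (16) + raddr (16) = 48 bytes, the data segments add 4 * 16 = 64
 * bytes, and ALIGN(112, MLX5_SEND_WQE_BB) rounds the WQE up to 128 bytes,
 * i.e. two 64-byte basic blocks.
 */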
263 static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
264 struct mlx5_ib_qp *qp)
269 if (!attr->cap.max_send_wr)
272 wqe_size = calc_send_wqe(attr);
273 mlx5_ib_dbg(dev, "wqe_size %d\n", wqe_size);
277 if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
278 mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
279 wqe_size, dev->mdev.caps.max_sq_desc_sz);
283 qp->max_inline_data = wqe_size - sq_overhead(attr->qp_type) -
284 sizeof(struct mlx5_wqe_inline_seg);
285 attr->cap.max_inline_data = qp->max_inline_data;
287 wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
288 qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
289 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
290 mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
291 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
294 qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
295 qp->sq.max_gs = attr->cap.max_send_sge;
296 qp->sq.max_post = wq_size / wqe_size;
297 attr->cap.max_send_wr = qp->sq.max_post;
302 static int set_user_buf_size(struct mlx5_ib_dev *dev,
303 struct mlx5_ib_qp *qp,
304 struct mlx5_ib_create_qp *ucmd)
306 int desc_sz = 1 << qp->sq.wqe_shift;
308 if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
309 mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
310 desc_sz, dev->mdev.caps.max_sq_desc_sz);
314 if (ucmd->sq_wqe_count && ((1 << ilog2(ucmd->sq_wqe_count)) != ucmd->sq_wqe_count)) {
315 mlx5_ib_warn(dev, "sq_wqe_count %d is not a power of two\n",
316 ucmd->sq_wqe_count);
320 qp->sq.wqe_cnt = ucmd->sq_wqe_count;
322 if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
323 mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
324 qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
328 qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) +
329 (qp->sq.wqe_cnt << 6);
334 static int qp_has_rq(struct ib_qp_init_attr *attr)
336 if (attr->qp_type == IB_QPT_XRC_INI ||
337 attr->qp_type == IB_QPT_XRC_TGT || attr->srq ||
338 attr->qp_type == MLX5_IB_QPT_REG_UMR ||
339 !attr->cap.max_recv_wr)
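/*
 * UUAR bookkeeping, as implied by the helpers below: each UAR page carries
 * MLX5_BF_REGS_PER_PAGE blue-flame registers, and next_uuar() hands out only
 * those with (n % 4) < 2, skipping the fast-path registers.  uuarn 0 is
 * shared by all low-class users, medium-class registers start at
 * first_med_uuar() and are picked by lowest reference count, and high-class
 * registers are owned exclusively via the bitmap.
 */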
345 static int first_med_uuar(void)
350 static int next_uuar(int n)
354 while (((n % 4) & 2))
360 static int num_med_uuar(struct mlx5_uuar_info *uuari)
364 n = uuari->num_uars * MLX5_NON_FP_BF_REGS_PER_PAGE -
365 uuari->num_low_latency_uuars - 1;
367 return n >= 0 ? n : 0;
370 static int max_uuari(struct mlx5_uuar_info *uuari)
372 return uuari->num_uars * 4;
375 static int first_hi_uuar(struct mlx5_uuar_info *uuari)
381 med = num_med_uuar(uuari);
382 for (t = 0, i = first_med_uuar();; i = next_uuar(i)) {
391 static int alloc_high_class_uuar(struct mlx5_uuar_info *uuari)
395 for (i = first_hi_uuar(uuari); i < max_uuari(uuari); i = next_uuar(i)) {
396 if (!test_bit(i, uuari->bitmap)) {
397 set_bit(i, uuari->bitmap);
406 static int alloc_med_class_uuar(struct mlx5_uuar_info *uuari)
408 int minidx = first_med_uuar();
411 for (i = first_med_uuar(); i < first_hi_uuar(uuari); i = next_uuar(i)) {
412 if (uuari->count[i] < uuari->count[minidx])
416 uuari->count[minidx]++;
420 static int alloc_uuar(struct mlx5_uuar_info *uuari,
421 enum mlx5_ib_latency_class lat)
425 mutex_lock(&uuari->lock);
427 case MLX5_IB_LATENCY_CLASS_LOW:
429 uuari->count[uuarn]++;
432 case MLX5_IB_LATENCY_CLASS_MEDIUM:
433 uuarn = alloc_med_class_uuar(uuari);
436 case MLX5_IB_LATENCY_CLASS_HIGH:
437 uuarn = alloc_high_class_uuar(uuari);
440 case MLX5_IB_LATENCY_CLASS_FAST_PATH:
444 mutex_unlock(&uuari->lock);
449 static void free_med_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
451 clear_bit(uuarn, uuari->bitmap);
452 --uuari->count[uuarn];
455 static void free_high_class_uuar(struct mlx5_uuar_info *uuari, int uuarn)
457 clear_bit(uuarn, uuari->bitmap);
458 --uuari->count[uuarn];
461 static void free_uuar(struct mlx5_uuar_info *uuari, int uuarn)
463 int nuuars = uuari->num_uars * MLX5_BF_REGS_PER_PAGE;
464 int high_uuar = nuuars - uuari->num_low_latency_uuars;
466 mutex_lock(&uuari->lock);
468 --uuari->count[uuarn];
472 if (uuarn < high_uuar) {
473 free_med_class_uuar(uuari, uuarn);
477 free_high_class_uuar(uuari, uuarn);
480 mutex_unlock(&uuari->lock);
483 static enum mlx5_qp_state to_mlx5_state(enum ib_qp_state state)
486 case IB_QPS_RESET: return MLX5_QP_STATE_RST;
487 case IB_QPS_INIT: return MLX5_QP_STATE_INIT;
488 case IB_QPS_RTR: return MLX5_QP_STATE_RTR;
489 case IB_QPS_RTS: return MLX5_QP_STATE_RTS;
490 case IB_QPS_SQD: return MLX5_QP_STATE_SQD;
491 case IB_QPS_SQE: return MLX5_QP_STATE_SQER;
492 case IB_QPS_ERR: return MLX5_QP_STATE_ERR;
497 static int to_mlx5_st(enum ib_qp_type type)
500 case IB_QPT_RC: return MLX5_QP_ST_RC;
501 case IB_QPT_UC: return MLX5_QP_ST_UC;
502 case IB_QPT_UD: return MLX5_QP_ST_UD;
503 case MLX5_IB_QPT_REG_UMR: return MLX5_QP_ST_REG_UMR;
505 case IB_QPT_XRC_TGT: return MLX5_QP_ST_XRC;
506 case IB_QPT_SMI: return MLX5_QP_ST_QP0;
507 case IB_QPT_GSI: return MLX5_QP_ST_QP1;
508 case IB_QPT_RAW_IPV6: return MLX5_QP_ST_RAW_IPV6;
509 case IB_QPT_RAW_ETHERTYPE: return MLX5_QP_ST_RAW_ETHERTYPE;
510 case IB_QPT_RAW_PACKET:
512 default: return -EINVAL;
516 static int uuarn_to_uar_index(struct mlx5_uuar_info *uuari, int uuarn)
518 return uuari->uars[uuarn / MLX5_BF_REGS_PER_PAGE].index;
521 static int create_user_qp(struct mlx5_ib_dev *dev, struct ib_pd *pd,
522 struct mlx5_ib_qp *qp, struct ib_udata *udata,
523 struct mlx5_create_qp_mbox_in **in,
524 struct mlx5_ib_create_qp_resp *resp, int *inlen)
526 struct mlx5_ib_ucontext *context;
527 struct mlx5_ib_create_qp ucmd;
536 err = ib_copy_from_udata(&ucmd, udata, sizeof(ucmd));
538 mlx5_ib_dbg(dev, "copy failed\n");
542 context = to_mucontext(pd->uobject->context);
544 * TBD: should come from the verbs when we have the API
546 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_HIGH);
548 mlx5_ib_dbg(dev, "failed to allocate low latency UUAR\n");
549 mlx5_ib_dbg(dev, "reverting to medium latency\n");
550 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_MEDIUM);
552 mlx5_ib_dbg(dev, "failed to allocate medium latency UUAR\n");
553 mlx5_ib_dbg(dev, "reverting to high latency\n");
554 uuarn = alloc_uuar(&context->uuari, MLX5_IB_LATENCY_CLASS_LOW);
556 mlx5_ib_warn(dev, "uuar allocation failed\n");
562 uar_index = uuarn_to_uar_index(&context->uuari, uuarn);
563 mlx5_ib_dbg(dev, "uuarn 0x%x, uar_index 0x%x\n", uuarn, uar_index);
565 err = set_user_buf_size(dev, qp, &ucmd);
569 if (ucmd.buf_addr && qp->buf_size) {
570 qp->umem = ib_umem_get(pd->uobject->context, ucmd.buf_addr,
572 if (IS_ERR(qp->umem)) {
573 mlx5_ib_dbg(dev, "umem_get failed\n");
574 err = PTR_ERR(qp->umem);
582 mlx5_ib_cont_pages(qp->umem, ucmd.buf_addr, &npages, &page_shift,
584 err = mlx5_ib_get_buf_offset(ucmd.buf_addr, page_shift, &offset);
586 mlx5_ib_warn(dev, "bad offset\n");
589 mlx5_ib_dbg(dev, "addr 0x%llx, size %d, npages %d, page_shift %d, ncont %d, offset %d\n",
590 ucmd.buf_addr, qp->buf_size, npages, page_shift, ncont, offset);
593 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * ncont;
594 *in = mlx5_vzalloc(*inlen);
600 mlx5_ib_populate_pas(dev, qp->umem, page_shift, (*in)->pas, 0);
601 (*in)->ctx.log_pg_sz_remote_qpn =
602 cpu_to_be32((page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
603 (*in)->ctx.params2 = cpu_to_be32(offset << 6);
605 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
606 resp->uuar_index = uuarn;
609 err = mlx5_ib_db_map_user(context, ucmd.db_addr, &qp->db);
611 mlx5_ib_dbg(dev, "map failed\n");
615 err = ib_copy_to_udata(udata, resp, sizeof(*resp));
617 mlx5_ib_dbg(dev, "copy failed\n");
620 qp->create_type = MLX5_QP_USER;
625 mlx5_ib_db_unmap_user(context, &qp->db);
632 ib_umem_release(qp->umem);
635 free_uuar(&context->uuari, uuarn);
639 static void destroy_qp_user(struct ib_pd *pd, struct mlx5_ib_qp *qp)
641 struct mlx5_ib_ucontext *context;
643 context = to_mucontext(pd->uobject->context);
644 mlx5_ib_db_unmap_user(context, &qp->db);
646 ib_umem_release(qp->umem);
647 free_uuar(&context->uuari, qp->uuarn);
650 static int create_kernel_qp(struct mlx5_ib_dev *dev,
651 struct ib_qp_init_attr *init_attr,
652 struct mlx5_ib_qp *qp,
653 struct mlx5_create_qp_mbox_in **in, int *inlen)
655 enum mlx5_ib_latency_class lc = MLX5_IB_LATENCY_CLASS_LOW;
656 struct mlx5_uuar_info *uuari;
661 uuari = &dev->mdev.priv.uuari;
662 if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
663 qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
665 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
666 lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
668 uuarn = alloc_uuar(uuari, lc);
670 mlx5_ib_dbg(dev, "\n");
674 qp->bf = &uuari->bfs[uuarn];
675 uar_index = qp->bf->uar->index;
677 err = calc_sq_size(dev, init_attr, qp);
679 mlx5_ib_dbg(dev, "err %d\n", err);
684 qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
685 qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
687 err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
689 mlx5_ib_dbg(dev, "err %d\n", err);
693 qp->sq.qend = mlx5_get_send_wqe(qp, qp->sq.wqe_cnt);
694 *inlen = sizeof(**in) + sizeof(*(*in)->pas) * qp->buf.npages;
695 *in = mlx5_vzalloc(*inlen);
700 (*in)->ctx.qp_counter_set_usr_page = cpu_to_be32(uar_index);
701 (*in)->ctx.log_pg_sz_remote_qpn =
702 cpu_to_be32((qp->buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT) << 24);
703 /* Set "fast registration enabled" for all kernel QPs */
704 (*in)->ctx.params1 |= cpu_to_be32(1 << 11);
705 (*in)->ctx.sq_crq_size |= cpu_to_be16(1 << 4);
707 mlx5_fill_page_array(&qp->buf, (*in)->pas);
709 err = mlx5_db_alloc(&dev->mdev, &qp->db);
711 mlx5_ib_dbg(dev, "err %d\n", err);
718 qp->sq.wrid = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wrid), GFP_KERNEL);
719 qp->sq.wr_data = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wr_data), GFP_KERNEL);
720 qp->rq.wrid = kmalloc(qp->rq.wqe_cnt * sizeof(*qp->rq.wrid), GFP_KERNEL);
721 qp->sq.w_list = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.w_list), GFP_KERNEL);
722 qp->sq.wqe_head = kmalloc(qp->sq.wqe_cnt * sizeof(*qp->sq.wqe_head), GFP_KERNEL);
724 if (!qp->sq.wrid || !qp->sq.wr_data || !qp->rq.wrid ||
725 !qp->sq.w_list || !qp->sq.wqe_head) {
729 qp->create_type = MLX5_QP_KERNEL;
734 mlx5_db_free(&dev->mdev, &qp->db);
735 kfree(qp->sq.wqe_head);
736 kfree(qp->sq.w_list);
738 kfree(qp->sq.wr_data);
745 mlx5_buf_free(&dev->mdev, &qp->buf);
748 free_uuar(&dev->mdev.priv.uuari, uuarn);
752 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
754 mlx5_db_free(&dev->mdev, &qp->db);
755 kfree(qp->sq.wqe_head);
756 kfree(qp->sq.w_list);
758 kfree(qp->sq.wr_data);
760 mlx5_buf_free(&dev->mdev, &qp->buf);
761 free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
764 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
766 if (attr->srq || (attr->qp_type == IB_QPT_XRC_TGT) ||
767 (attr->qp_type == IB_QPT_XRC_INI))
768 return cpu_to_be32(MLX5_SRQ_RQ);
769 else if (!qp->has_rq)
770 return cpu_to_be32(MLX5_ZERO_LEN_RQ);
772 return cpu_to_be32(MLX5_NON_ZERO_RQ);
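/* RQ type selection above: QPs attached to an SRQ (and XRC QPs, which always
 * use an SRQ) get MLX5_SRQ_RQ, QPs without a receive queue get a zero-length
 * RQ, and everything else gets a regular RQ.
 */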
775 static int is_connected(enum ib_qp_type qp_type)
777 if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
783 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
784 struct ib_qp_init_attr *init_attr,
785 struct ib_udata *udata, struct mlx5_ib_qp *qp)
787 struct mlx5_ib_resources *devr = &dev->devr;
788 struct mlx5_ib_create_qp_resp resp;
789 struct mlx5_create_qp_mbox_in *in;
790 struct mlx5_ib_create_qp ucmd;
791 int inlen = sizeof(*in);
794 mutex_init(&qp->mutex);
795 spin_lock_init(&qp->sq.lock);
796 spin_lock_init(&qp->rq.lock);
798 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
799 qp->sq_signal_bits = MLX5_WQE_CTRL_CQ_UPDATE;
801 if (pd && pd->uobject) {
802 if (ib_copy_from_udata(&ucmd, udata, sizeof(ucmd))) {
803 mlx5_ib_dbg(dev, "copy failed\n");
807 qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
808 qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
810 qp->wq_sig = !!wq_signature;
813 qp->has_rq = qp_has_rq(init_attr);
814 err = set_rq_size(dev, &init_attr->cap, qp->has_rq,
815 qp, (pd && pd->uobject) ? &ucmd : NULL);
817 mlx5_ib_dbg(dev, "err %d\n", err);
823 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d)\n", ucmd.sq_wqe_count);
824 if (ucmd.rq_wqe_shift != qp->rq.wqe_shift ||
825 ucmd.rq_wqe_count != qp->rq.wqe_cnt) {
826 mlx5_ib_dbg(dev, "invalid rq params\n");
829 if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
830 mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
831 ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
834 err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
836 mlx5_ib_dbg(dev, "err %d\n", err);
838 err = create_kernel_qp(dev, init_attr, qp, &in, &inlen);
840 mlx5_ib_dbg(dev, "err %d\n", err);
842 qp->pa_lkey = to_mpd(pd)->pa_lkey;
848 in = mlx5_vzalloc(sizeof(*in));
852 qp->create_type = MLX5_QP_EMPTY;
855 if (is_sqp(init_attr->qp_type))
856 qp->port = init_attr->port_num;
858 in->ctx.flags = cpu_to_be32(to_mlx5_st(init_attr->qp_type) << 16 |
859 MLX5_QP_PM_MIGRATED << 11);
861 if (init_attr->qp_type != MLX5_IB_QPT_REG_UMR)
862 in->ctx.flags_pd = cpu_to_be32(to_mpd(pd ? pd : devr->p0)->pdn);
864 in->ctx.flags_pd = cpu_to_be32(MLX5_QP_LAT_SENSITIVE);
867 in->ctx.flags_pd |= cpu_to_be32(MLX5_QP_ENABLE_SIG);
869 if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
873 rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
874 scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
877 in->ctx.cs_res = MLX5_RES_SCAT_DATA64_CQE;
879 in->ctx.cs_res = MLX5_RES_SCAT_DATA32_CQE;
881 if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
883 in->ctx.cs_req = MLX5_REQ_SCAT_DATA64_CQE;
885 in->ctx.cs_req = MLX5_REQ_SCAT_DATA32_CQE;
889 if (qp->rq.wqe_cnt) {
890 in->ctx.rq_size_stride = (qp->rq.wqe_shift - 4);
891 in->ctx.rq_size_stride |= ilog2(qp->rq.wqe_cnt) << 3;
894 in->ctx.rq_type_srqn = get_rx_type(qp, init_attr);
897 in->ctx.sq_crq_size |= cpu_to_be16(ilog2(qp->sq.wqe_cnt) << 11);
899 in->ctx.sq_crq_size |= cpu_to_be16(0x8000);
901 /* Set default resources */
902 switch (init_attr->qp_type) {
904 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
905 in->ctx.cqn_send = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
906 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
907 in->ctx.xrcd = cpu_to_be32(to_mxrcd(init_attr->xrcd)->xrcdn);
910 in->ctx.cqn_recv = cpu_to_be32(to_mcq(devr->c0)->mcq.cqn);
911 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
912 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
915 if (init_attr->srq) {
916 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x0)->xrcdn);
917 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(init_attr->srq)->msrq.srqn);
919 in->ctx.xrcd = cpu_to_be32(to_mxrcd(devr->x1)->xrcdn);
920 in->ctx.rq_type_srqn |= cpu_to_be32(to_msrq(devr->s0)->msrq.srqn);
924 if (init_attr->send_cq)
925 in->ctx.cqn_send = cpu_to_be32(to_mcq(init_attr->send_cq)->mcq.cqn);
927 if (init_attr->recv_cq)
928 in->ctx.cqn_recv = cpu_to_be32(to_mcq(init_attr->recv_cq)->mcq.cqn);
930 in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
932 err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
934 mlx5_ib_dbg(dev, "create qp failed\n");
939 /* Hardware wants QPN written in big-endian order (after
940 * shifting) for send doorbell. Precompute this value to save
941 * a little bit when posting sends.
943 qp->doorbell_qpn = swab32(qp->mqp.qpn << 8);
945 qp->mqp.event = mlx5_ib_qp_event;
950 if (qp->create_type == MLX5_QP_USER)
951 destroy_qp_user(pd, qp);
952 else if (qp->create_type == MLX5_QP_KERNEL)
953 destroy_qp_kernel(dev, qp);
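/*
 * When a QP has two distinct CQs they are always locked in ascending CQN
 * order so that concurrent lockers cannot deadlock; when both are the same
 * CQ (or one is absent) the __acquire()/__release() annotations keep
 * sparse's context tracking balanced without taking a second lock.
 */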
959 static void mlx5_ib_lock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
960 __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
964 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
965 spin_lock_irq(&send_cq->lock);
966 spin_lock_nested(&recv_cq->lock,
967 SINGLE_DEPTH_NESTING);
968 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
969 spin_lock_irq(&send_cq->lock);
970 __acquire(&recv_cq->lock);
972 spin_lock_irq(&recv_cq->lock);
973 spin_lock_nested(&send_cq->lock,
974 SINGLE_DEPTH_NESTING);
977 spin_lock_irq(&send_cq->lock);
979 } else if (recv_cq) {
980 spin_lock_irq(&recv_cq->lock);
984 static void mlx5_ib_unlock_cqs(struct mlx5_ib_cq *send_cq, struct mlx5_ib_cq *recv_cq)
985 __releases(&send_cq->lock) __releases(&recv_cq->lock)
989 if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
990 spin_unlock(&recv_cq->lock);
991 spin_unlock_irq(&send_cq->lock);
992 } else if (send_cq->mcq.cqn == recv_cq->mcq.cqn) {
993 __release(&recv_cq->lock);
994 spin_unlock_irq(&send_cq->lock);
996 spin_unlock(&send_cq->lock);
997 spin_unlock_irq(&recv_cq->lock);
1000 spin_unlock_irq(&send_cq->lock);
1002 } else if (recv_cq) {
1003 spin_unlock_irq(&recv_cq->lock);
1007 static struct mlx5_ib_pd *get_pd(struct mlx5_ib_qp *qp)
1009 return to_mpd(qp->ibqp.pd);
1012 static void get_cqs(struct mlx5_ib_qp *qp,
1013 struct mlx5_ib_cq **send_cq, struct mlx5_ib_cq **recv_cq)
1015 switch (qp->ibqp.qp_type) {
1016 case IB_QPT_XRC_TGT:
1020 case MLX5_IB_QPT_REG_UMR:
1021 case IB_QPT_XRC_INI:
1022 *send_cq = to_mcq(qp->ibqp.send_cq);
1031 case IB_QPT_RAW_IPV6:
1032 case IB_QPT_RAW_ETHERTYPE:
1033 *send_cq = to_mcq(qp->ibqp.send_cq);
1034 *recv_cq = to_mcq(qp->ibqp.recv_cq);
1037 case IB_QPT_RAW_PACKET:
1046 static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
1048 struct mlx5_ib_cq *send_cq, *recv_cq;
1049 struct mlx5_modify_qp_mbox_in *in;
1052 in = kzalloc(sizeof(*in), GFP_KERNEL);
1055 if (qp->state != IB_QPS_RESET)
1056 if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
1057 MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
1058 mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
1059 qp->mqp.qpn);
1061 get_cqs(qp, &send_cq, &recv_cq);
1063 if (qp->create_type == MLX5_QP_KERNEL) {
1064 mlx5_ib_lock_cqs(send_cq, recv_cq);
1065 __mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1066 qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
1067 if (send_cq != recv_cq)
1068 __mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1069 mlx5_ib_unlock_cqs(send_cq, recv_cq);
1072 err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
1074 mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
1078 if (qp->create_type == MLX5_QP_KERNEL)
1079 destroy_qp_kernel(dev, qp);
1080 else if (qp->create_type == MLX5_QP_USER)
1081 destroy_qp_user(&get_pd(qp)->ibpd, qp);
1084 static const char *ib_qp_type_str(enum ib_qp_type type)
1088 return "IB_QPT_SMI";
1090 return "IB_QPT_GSI";
1097 case IB_QPT_RAW_IPV6:
1098 return "IB_QPT_RAW_IPV6";
1099 case IB_QPT_RAW_ETHERTYPE:
1100 return "IB_QPT_RAW_ETHERTYPE";
1101 case IB_QPT_XRC_INI:
1102 return "IB_QPT_XRC_INI";
1103 case IB_QPT_XRC_TGT:
1104 return "IB_QPT_XRC_TGT";
1105 case IB_QPT_RAW_PACKET:
1106 return "IB_QPT_RAW_PACKET";
1107 case MLX5_IB_QPT_REG_UMR:
1108 return "MLX5_IB_QPT_REG_UMR";
1111 return "Invalid QP type";
1115 struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
1116 struct ib_qp_init_attr *init_attr,
1117 struct ib_udata *udata)
1119 struct mlx5_ib_dev *dev;
1120 struct mlx5_ib_qp *qp;
1125 dev = to_mdev(pd->device);
1127 /* being cautious here */
1128 if (init_attr->qp_type != IB_QPT_XRC_TGT &&
1129 init_attr->qp_type != MLX5_IB_QPT_REG_UMR) {
1130 pr_warn("%s: no PD for transport %s\n", __func__,
1131 ib_qp_type_str(init_attr->qp_type));
1132 return ERR_PTR(-EINVAL);
1134 dev = to_mdev(to_mxrcd(init_attr->xrcd)->ibxrcd.device);
1137 switch (init_attr->qp_type) {
1138 case IB_QPT_XRC_TGT:
1139 case IB_QPT_XRC_INI:
1140 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
1141 mlx5_ib_dbg(dev, "XRC not supported\n");
1142 return ERR_PTR(-ENOSYS);
1144 init_attr->recv_cq = NULL;
1145 if (init_attr->qp_type == IB_QPT_XRC_TGT) {
1146 xrcdn = to_mxrcd(init_attr->xrcd)->xrcdn;
1147 init_attr->send_cq = NULL;
1156 case MLX5_IB_QPT_REG_UMR:
1157 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
1159 return ERR_PTR(-ENOMEM);
1161 err = create_qp_common(dev, pd, init_attr, udata, qp);
1163 mlx5_ib_dbg(dev, "create_qp_common failed\n");
1165 return ERR_PTR(err);
1168 if (is_qp0(init_attr->qp_type))
1169 qp->ibqp.qp_num = 0;
1170 else if (is_qp1(init_attr->qp_type))
1171 qp->ibqp.qp_num = 1;
1173 qp->ibqp.qp_num = qp->mqp.qpn;
1175 mlx5_ib_dbg(dev, "ib qpnum 0x%x, mlx qpn 0x%x, rcqn 0x%x, scqn 0x%x\n",
1176 qp->ibqp.qp_num, qp->mqp.qpn, to_mcq(init_attr->recv_cq)->mcq.cqn,
1177 to_mcq(init_attr->send_cq)->mcq.cqn);
1183 case IB_QPT_RAW_IPV6:
1184 case IB_QPT_RAW_ETHERTYPE:
1185 case IB_QPT_RAW_PACKET:
1188 mlx5_ib_dbg(dev, "unsupported qp type %d\n",
1189 init_attr->qp_type);
1190 /* Don't support raw QPs */
1191 return ERR_PTR(-EINVAL);
1197 int mlx5_ib_destroy_qp(struct ib_qp *qp)
1199 struct mlx5_ib_dev *dev = to_mdev(qp->device);
1200 struct mlx5_ib_qp *mqp = to_mqp(qp);
1202 destroy_qp_common(dev, mqp);
1209 static __be32 to_mlx5_access_flags(struct mlx5_ib_qp *qp, const struct ib_qp_attr *attr,
1212 u32 hw_access_flags = 0;
1216 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1217 dest_rd_atomic = attr->max_dest_rd_atomic;
1219 dest_rd_atomic = qp->resp_depth;
1221 if (attr_mask & IB_QP_ACCESS_FLAGS)
1222 access_flags = attr->qp_access_flags;
1224 access_flags = qp->atomic_rd_en;
1226 if (!dest_rd_atomic)
1227 access_flags &= IB_ACCESS_REMOTE_WRITE;
1229 if (access_flags & IB_ACCESS_REMOTE_READ)
1230 hw_access_flags |= MLX5_QP_BIT_RRE;
1231 if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
1232 hw_access_flags |= (MLX5_QP_BIT_RAE | MLX5_ATOMIC_MODE_CX);
1233 if (access_flags & IB_ACCESS_REMOTE_WRITE)
1234 hw_access_flags |= MLX5_QP_BIT_RWE;
1236 return cpu_to_be32(hw_access_flags);
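/* Note: a zero responder depth means remote reads and atomics cannot be
 * accepted, so only REMOTE_WRITE is kept; the RRE/RAE/RWE bits then mirror
 * the resulting IB access flags.
 */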
1240 MLX5_PATH_FLAG_FL = 1 << 0,
1241 MLX5_PATH_FLAG_FREE_AR = 1 << 1,
1242 MLX5_PATH_FLAG_COUNTER = 1 << 2,
1245 static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
1247 if (rate == IB_RATE_PORT_CURRENT) {
1249 } else if (rate < IB_RATE_2_5_GBPS || rate > IB_RATE_300_GBPS) {
1252 while (rate != IB_RATE_2_5_GBPS &&
1253 !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
1254 dev->mdev.caps.stat_rate_support))
1258 return rate + MLX5_STAT_RATE_OFFSET;
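/* IB static rate codes map to mlx5 codes by adding MLX5_STAT_RATE_OFFSET;
 * a rate the device does not advertise in stat_rate_support is rounded down
 * to the nearest supported rate.
 */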
1261 static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
1262 struct mlx5_qp_path *path, u8 port, int attr_mask,
1263 u32 path_flags, const struct ib_qp_attr *attr)
1267 path->fl = (path_flags & MLX5_PATH_FLAG_FL) ? 0x80 : 0;
1268 path->free_ar = (path_flags & MLX5_PATH_FLAG_FREE_AR) ? 0x80 : 0;
1270 if (attr_mask & IB_QP_PKEY_INDEX)
1271 path->pkey_index = attr->pkey_index;
1273 path->grh_mlid = ah->src_path_bits & 0x7f;
1274 path->rlid = cpu_to_be16(ah->dlid);
1276 if (ah->ah_flags & IB_AH_GRH) {
1277 path->grh_mlid |= 1 << 7;
1278 path->mgid_index = ah->grh.sgid_index;
1279 path->hop_limit = ah->grh.hop_limit;
1280 path->tclass_flowlabel =
1281 cpu_to_be32((ah->grh.traffic_class << 20) |
1282 (ah->grh.flow_label));
1283 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1286 err = ib_rate_to_mlx5(dev, ah->static_rate);
1289 path->static_rate = err;
1292 if (ah->ah_flags & IB_AH_GRH) {
1293 if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
1294 pr_err("sgid_index (%u) too large. max is %d\n",
1295 ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
1299 path->grh_mlid |= 1 << 7;
1300 path->mgid_index = ah->grh.sgid_index;
1301 path->hop_limit = ah->grh.hop_limit;
1302 path->tclass_flowlabel =
1303 cpu_to_be32((ah->grh.traffic_class << 20) |
1304 (ah->grh.flow_label));
1305 memcpy(path->rgid, ah->grh.dgid.raw, 16);
1308 if (attr_mask & IB_QP_TIMEOUT)
1309 path->ackto_lt = attr->timeout << 3;
1311 path->sl = ah->sl & 0xf;
1316 static enum mlx5_qp_optpar opt_mask[MLX5_QP_NUM_STATE][MLX5_QP_NUM_STATE][MLX5_QP_ST_MAX] = {
1317 [MLX5_QP_STATE_INIT] = {
1318 [MLX5_QP_STATE_INIT] = {
1319 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1320 MLX5_QP_OPTPAR_RAE |
1321 MLX5_QP_OPTPAR_RWE |
1322 MLX5_QP_OPTPAR_PKEY_INDEX |
1323 MLX5_QP_OPTPAR_PRI_PORT,
1324 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1325 MLX5_QP_OPTPAR_PKEY_INDEX |
1326 MLX5_QP_OPTPAR_PRI_PORT,
1327 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1328 MLX5_QP_OPTPAR_Q_KEY |
1329 MLX5_QP_OPTPAR_PRI_PORT,
1331 [MLX5_QP_STATE_RTR] = {
1332 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1333 MLX5_QP_OPTPAR_RRE |
1334 MLX5_QP_OPTPAR_RAE |
1335 MLX5_QP_OPTPAR_RWE |
1336 MLX5_QP_OPTPAR_PKEY_INDEX,
1337 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1338 MLX5_QP_OPTPAR_RWE |
1339 MLX5_QP_OPTPAR_PKEY_INDEX,
1340 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_PKEY_INDEX |
1341 MLX5_QP_OPTPAR_Q_KEY,
1342 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_PKEY_INDEX |
1343 MLX5_QP_OPTPAR_Q_KEY,
1344 [MLX5_QP_ST_XRC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1345 MLX5_QP_OPTPAR_RRE |
1346 MLX5_QP_OPTPAR_RAE |
1347 MLX5_QP_OPTPAR_RWE |
1348 MLX5_QP_OPTPAR_PKEY_INDEX,
1351 [MLX5_QP_STATE_RTR] = {
1352 [MLX5_QP_STATE_RTS] = {
1353 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1354 MLX5_QP_OPTPAR_RRE |
1355 MLX5_QP_OPTPAR_RAE |
1356 MLX5_QP_OPTPAR_RWE |
1357 MLX5_QP_OPTPAR_PM_STATE |
1358 MLX5_QP_OPTPAR_RNR_TIMEOUT,
1359 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_ALT_ADDR_PATH |
1360 MLX5_QP_OPTPAR_RWE |
1361 MLX5_QP_OPTPAR_PM_STATE,
1362 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1365 [MLX5_QP_STATE_RTS] = {
1366 [MLX5_QP_STATE_RTS] = {
1367 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RRE |
1368 MLX5_QP_OPTPAR_RAE |
1369 MLX5_QP_OPTPAR_RWE |
1370 MLX5_QP_OPTPAR_RNR_TIMEOUT |
1371 MLX5_QP_OPTPAR_PM_STATE |
1372 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1373 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE |
1374 MLX5_QP_OPTPAR_PM_STATE |
1375 MLX5_QP_OPTPAR_ALT_ADDR_PATH,
1376 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY |
1377 MLX5_QP_OPTPAR_SRQN |
1378 MLX5_QP_OPTPAR_CQN_RCV,
1381 [MLX5_QP_STATE_SQER] = {
1382 [MLX5_QP_STATE_RTS] = {
1383 [MLX5_QP_ST_UD] = MLX5_QP_OPTPAR_Q_KEY,
1384 [MLX5_QP_ST_MLX] = MLX5_QP_OPTPAR_Q_KEY,
1385 [MLX5_QP_ST_UC] = MLX5_QP_OPTPAR_RWE,
1386 [MLX5_QP_ST_RC] = MLX5_QP_OPTPAR_RNR_TIMEOUT |
1387 MLX5_QP_OPTPAR_RWE |
1388 MLX5_QP_OPTPAR_RAE |
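/*
 * ib_nr_to_mlx5_nr()/ib_mask_to_mlx5_opt() below translate IB_QP_* attribute
 * mask bits into MLX5_QP_OPTPAR_* bits; __mlx5_ib_modify_qp() then masks the
 * result with opt_mask[cur][new][st] above so that only parameters legal for
 * the requested transition are passed to the firmware.
 */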
1394 static int ib_nr_to_mlx5_nr(int ib_mask)
1399 case IB_QP_CUR_STATE:
1401 case IB_QP_EN_SQD_ASYNC_NOTIFY:
1403 case IB_QP_ACCESS_FLAGS:
1404 return MLX5_QP_OPTPAR_RWE | MLX5_QP_OPTPAR_RRE |
1406 case IB_QP_PKEY_INDEX:
1407 return MLX5_QP_OPTPAR_PKEY_INDEX;
1409 return MLX5_QP_OPTPAR_PRI_PORT;
1411 return MLX5_QP_OPTPAR_Q_KEY;
1413 return MLX5_QP_OPTPAR_PRIMARY_ADDR_PATH |
1414 MLX5_QP_OPTPAR_PRI_PORT;
1415 case IB_QP_PATH_MTU:
1418 return MLX5_QP_OPTPAR_ACK_TIMEOUT;
1419 case IB_QP_RETRY_CNT:
1420 return MLX5_QP_OPTPAR_RETRY_COUNT;
1421 case IB_QP_RNR_RETRY:
1422 return MLX5_QP_OPTPAR_RNR_RETRY;
1425 case IB_QP_MAX_QP_RD_ATOMIC:
1426 return MLX5_QP_OPTPAR_SRA_MAX;
1427 case IB_QP_ALT_PATH:
1428 return MLX5_QP_OPTPAR_ALT_ADDR_PATH;
1429 case IB_QP_MIN_RNR_TIMER:
1430 return MLX5_QP_OPTPAR_RNR_TIMEOUT;
1433 case IB_QP_MAX_DEST_RD_ATOMIC:
1434 return MLX5_QP_OPTPAR_RRA_MAX | MLX5_QP_OPTPAR_RWE |
1435 MLX5_QP_OPTPAR_RRE | MLX5_QP_OPTPAR_RAE;
1436 case IB_QP_PATH_MIG_STATE:
1437 return MLX5_QP_OPTPAR_PM_STATE;
1440 case IB_QP_DEST_QPN:
1446 static int ib_mask_to_mlx5_opt(int ib_mask)
1451 for (i = 0; i < 8 * sizeof(int); i++) {
1452 if ((1 << i) & ib_mask)
1453 result |= ib_nr_to_mlx5_nr(1 << i);
1459 static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
1460 const struct ib_qp_attr *attr, int attr_mask,
1461 enum ib_qp_state cur_state, enum ib_qp_state new_state)
1463 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1464 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1465 struct mlx5_ib_cq *send_cq, *recv_cq;
1466 struct mlx5_qp_context *context;
1467 struct mlx5_modify_qp_mbox_in *in;
1468 struct mlx5_ib_pd *pd;
1469 enum mlx5_qp_state mlx5_cur, mlx5_new;
1470 enum mlx5_qp_optpar optpar;
1475 in = kzalloc(sizeof(*in), GFP_KERNEL);
1480 err = to_mlx5_st(ibqp->qp_type);
1484 context->flags = cpu_to_be32(err << 16);
1486 if (!(attr_mask & IB_QP_PATH_MIG_STATE)) {
1487 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1489 switch (attr->path_mig_state) {
1490 case IB_MIG_MIGRATED:
1491 context->flags |= cpu_to_be32(MLX5_QP_PM_MIGRATED << 11);
1494 context->flags |= cpu_to_be32(MLX5_QP_PM_REARM << 11);
1497 context->flags |= cpu_to_be32(MLX5_QP_PM_ARMED << 11);
1502 if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) {
1503 context->mtu_msgmax = (IB_MTU_256 << 5) | 8;
1504 } else if (ibqp->qp_type == IB_QPT_UD ||
1505 ibqp->qp_type == MLX5_IB_QPT_REG_UMR) {
1506 context->mtu_msgmax = (IB_MTU_4096 << 5) | 12;
1507 } else if (attr_mask & IB_QP_PATH_MTU) {
1508 if (attr->path_mtu < IB_MTU_256 ||
1509 attr->path_mtu > IB_MTU_4096) {
1510 mlx5_ib_warn(dev, "invalid mtu %d\n", attr->path_mtu);
1514 context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
1517 if (attr_mask & IB_QP_DEST_QPN)
1518 context->log_pg_sz_remote_qpn = cpu_to_be32(attr->dest_qp_num);
1520 if (attr_mask & IB_QP_PKEY_INDEX)
1521 context->pri_path.pkey_index = attr->pkey_index;
1523 /* TODO: implement counter_index functionality */
1525 if (is_sqp(ibqp->qp_type))
1526 context->pri_path.port = qp->port;
1528 if (attr_mask & IB_QP_PORT)
1529 context->pri_path.port = attr->port_num;
1531 if (attr_mask & IB_QP_AV) {
1532 err = mlx5_set_path(dev, &attr->ah_attr, &context->pri_path,
1533 attr_mask & IB_QP_PORT ? attr->port_num : qp->port,
1534 attr_mask, 0, attr);
1539 if (attr_mask & IB_QP_TIMEOUT)
1540 context->pri_path.ackto_lt |= attr->timeout << 3;
1542 if (attr_mask & IB_QP_ALT_PATH) {
1543 err = mlx5_set_path(dev, &attr->alt_ah_attr, &context->alt_path,
1544 attr->alt_port_num, attr_mask, 0, attr);
1550 get_cqs(qp, &send_cq, &recv_cq);
1552 context->flags_pd = cpu_to_be32(pd ? pd->pdn : to_mpd(dev->devr.p0)->pdn);
1553 context->cqn_send = send_cq ? cpu_to_be32(send_cq->mcq.cqn) : 0;
1554 context->cqn_recv = recv_cq ? cpu_to_be32(recv_cq->mcq.cqn) : 0;
1555 context->params1 = cpu_to_be32(MLX5_IB_ACK_REQ_FREQ << 28);
1557 if (attr_mask & IB_QP_RNR_RETRY)
1558 context->params1 |= cpu_to_be32(attr->rnr_retry << 13);
1560 if (attr_mask & IB_QP_RETRY_CNT)
1561 context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
1563 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
1564 if (attr->max_rd_atomic)
1565 context->params1 |=
1566 cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
1569 if (attr_mask & IB_QP_SQ_PSN)
1570 context->next_send_psn = cpu_to_be32(attr->sq_psn);
1572 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
1573 if (attr->max_dest_rd_atomic)
1574 context->params2 |=
1575 cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);
1578 if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC))
1579 context->params2 |= to_mlx5_access_flags(qp, attr, attr_mask);
1581 if (attr_mask & IB_QP_MIN_RNR_TIMER)
1582 context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
1584 if (attr_mask & IB_QP_RQ_PSN)
1585 context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
1587 if (attr_mask & IB_QP_QKEY)
1588 context->qkey = cpu_to_be32(attr->qkey);
1590 if (qp->rq.wqe_cnt && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1591 context->db_rec_addr = cpu_to_be64(qp->db.dma);
1593 if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
1594 attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && attr->en_sqd_async_notify)
1599 if (!ibqp->uobject && cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT)
1600 context->sq_crq_size |= cpu_to_be16(1 << 4);
1603 mlx5_cur = to_mlx5_state(cur_state);
1604 mlx5_new = to_mlx5_state(new_state);
1605 mlx5_st = to_mlx5_st(ibqp->qp_type);
1609 optpar = ib_mask_to_mlx5_opt(attr_mask);
1610 optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
1611 in->optparam = cpu_to_be32(optpar);
1612 err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
1613 to_mlx5_state(new_state), in, sqd_event,
1618 qp->state = new_state;
1620 if (attr_mask & IB_QP_ACCESS_FLAGS)
1621 qp->atomic_rd_en = attr->qp_access_flags;
1622 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
1623 qp->resp_depth = attr->max_dest_rd_atomic;
1624 if (attr_mask & IB_QP_PORT)
1625 qp->port = attr->port_num;
1626 if (attr_mask & IB_QP_ALT_PATH)
1627 qp->alt_port = attr->alt_port_num;
1630 * If we moved a kernel QP to RESET, clean up all old CQ
1631 * entries and reinitialize the QP.
1633 if (new_state == IB_QPS_RESET && !ibqp->uobject) {
1634 mlx5_ib_cq_clean(recv_cq, qp->mqp.qpn,
1635 ibqp->srq ? to_msrq(ibqp->srq) : NULL);
1636 if (send_cq != recv_cq)
1637 mlx5_ib_cq_clean(send_cq, qp->mqp.qpn, NULL);
1643 qp->sq.cur_post = 0;
1644 qp->sq.last_poll = 0;
1645 qp->db.db[MLX5_RCV_DBR] = 0;
1646 qp->db.db[MLX5_SND_DBR] = 0;
1654 int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
1655 int attr_mask, struct ib_udata *udata)
1657 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
1658 struct mlx5_ib_qp *qp = to_mqp(ibqp);
1659 enum ib_qp_state cur_state, new_state;
1663 mutex_lock(&qp->mutex);
1665 cur_state = attr_mask & IB_QP_CUR_STATE ? attr->cur_qp_state : qp->state;
1666 new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;
1668 if (ibqp->qp_type != MLX5_IB_QPT_REG_UMR &&
1669 !ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask,
1670 IB_LINK_LAYER_UNSPECIFIED))
1673 if ((attr_mask & IB_QP_PORT) &&
1674 (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
1677 if (attr_mask & IB_QP_PKEY_INDEX) {
1678 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1679 if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
1683 if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
1684 attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
1687 if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
1688 attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
1691 if (cur_state == new_state && cur_state == IB_QPS_RESET) {
1696 err = __mlx5_ib_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);
1699 mutex_unlock(&qp->mutex);
1703 static int mlx5_wq_overflow(struct mlx5_ib_wq *wq, int nreq, struct ib_cq *ib_cq)
1705 struct mlx5_ib_cq *cq;
1708 cur = wq->head - wq->tail;
1709 if (likely(cur + nreq < wq->max_post))
1712 cq = to_mcq(ib_cq);
1713 spin_lock(&cq->lock);
1714 cur = wq->head - wq->tail;
1715 spin_unlock(&cq->lock);
1717 return cur + nreq >= wq->max_post;
1720 static __always_inline void set_raddr_seg(struct mlx5_wqe_raddr_seg *rseg,
1721 u64 remote_addr, u32 rkey)
1723 rseg->raddr = cpu_to_be64(remote_addr);
1724 rseg->rkey = cpu_to_be32(rkey);
1728 static void set_datagram_seg(struct mlx5_wqe_datagram_seg *dseg,
1729 struct ib_send_wr *wr)
1731 memcpy(&dseg->av, &to_mah(wr->wr.ud.ah)->av, sizeof(struct mlx5_av));
1732 dseg->av.dqp_dct = cpu_to_be32(wr->wr.ud.remote_qpn | MLX5_EXTENDED_UD_AV);
1733 dseg->av.key.qkey.qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
1736 static void set_data_ptr_seg(struct mlx5_wqe_data_seg *dseg, struct ib_sge *sg)
1738 dseg->byte_count = cpu_to_be32(sg->length);
1739 dseg->lkey = cpu_to_be32(sg->lkey);
1740 dseg->addr = cpu_to_be64(sg->addr);
1743 static __be16 get_klm_octo(int npages)
1745 return cpu_to_be16(ALIGN(npages, 8) / 2);
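/* UMR translation sizes are expressed in 16-byte octowords: each octoword
 * holds two 8-byte page entries, and the page count is padded to a multiple
 * of 8 (four octowords, one 64-byte block).
 */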
1748 static __be64 frwr_mkey_mask(void)
1752 result = MLX5_MKEY_MASK_LEN |
1753 MLX5_MKEY_MASK_PAGE_SIZE |
1754 MLX5_MKEY_MASK_START_ADDR |
1755 MLX5_MKEY_MASK_EN_RINVAL |
1756 MLX5_MKEY_MASK_KEY |
1762 MLX5_MKEY_MASK_SMALL_FENCE |
1763 MLX5_MKEY_MASK_FREE;
1765 return cpu_to_be64(result);
1768 static void set_frwr_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1769 struct ib_send_wr *wr, int li)
1771 memset(umr, 0, sizeof(*umr));
1774 umr->mkey_mask = cpu_to_be64(MLX5_MKEY_MASK_FREE);
1775 umr->flags = 1 << 7;
1779 umr->flags = (1 << 5); /* fail if not free */
1780 umr->klm_octowords = get_klm_octo(wr->wr.fast_reg.page_list_len);
1781 umr->mkey_mask = frwr_mkey_mask();
1784 static void set_reg_umr_segment(struct mlx5_wqe_umr_ctrl_seg *umr,
1785 struct ib_send_wr *wr)
1787 struct umr_wr *umrwr = (struct umr_wr *)&wr->wr.fast_reg;
1790 memset(umr, 0, sizeof(*umr));
1792 if (!(wr->send_flags & MLX5_IB_SEND_UMR_UNREG)) {
1793 umr->flags = 1 << 5; /* fail if not free */
1794 umr->klm_octowords = get_klm_octo(umrwr->npages);
1795 mask = MLX5_MKEY_MASK_LEN |
1796 MLX5_MKEY_MASK_PAGE_SIZE |
1797 MLX5_MKEY_MASK_START_ADDR |
1801 MLX5_MKEY_MASK_KEY |
1805 MLX5_MKEY_MASK_FREE;
1806 umr->mkey_mask = cpu_to_be64(mask);
1808 umr->flags = 2 << 5; /* fail if free */
1809 mask = MLX5_MKEY_MASK_FREE;
1810 umr->mkey_mask = cpu_to_be64(mask);
1814 umr->flags |= (1 << 7); /* inline */
1817 static u8 get_umr_flags(int acc)
1819 return (acc & IB_ACCESS_REMOTE_ATOMIC ? MLX5_PERM_ATOMIC : 0) |
1820 (acc & IB_ACCESS_REMOTE_WRITE ? MLX5_PERM_REMOTE_WRITE : 0) |
1821 (acc & IB_ACCESS_REMOTE_READ ? MLX5_PERM_REMOTE_READ : 0) |
1822 (acc & IB_ACCESS_LOCAL_WRITE ? MLX5_PERM_LOCAL_WRITE : 0) |
1823 MLX5_PERM_LOCAL_READ | MLX5_PERM_UMR_EN | MLX5_ACCESS_MODE_MTT;
1826 static void set_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr,
1829 memset(seg, 0, sizeof(*seg));
1831 seg->status = 1 << 6;
1835 seg->flags = get_umr_flags(wr->wr.fast_reg.access_flags);
1836 *writ = seg->flags & (MLX5_PERM_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
1837 seg->qpn_mkey7_0 = cpu_to_be32((wr->wr.fast_reg.rkey & 0xff) | 0xffffff00);
1838 seg->flags_pd = cpu_to_be32(MLX5_MKEY_REMOTE_INVAL);
1839 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1840 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1841 seg->xlt_oct_size = cpu_to_be32((wr->wr.fast_reg.page_list_len + 1) / 2);
1842 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1845 static void set_reg_mkey_segment(struct mlx5_mkey_seg *seg, struct ib_send_wr *wr)
1847 memset(seg, 0, sizeof(*seg));
1848 if (wr->send_flags & MLX5_IB_SEND_UMR_UNREG) {
1849 seg->status = 1 << 6;
1853 seg->flags = convert_access(wr->wr.fast_reg.access_flags);
1854 seg->flags_pd = cpu_to_be32(to_mpd((struct ib_pd *)wr->wr.fast_reg.page_list)->pdn);
1855 seg->start_addr = cpu_to_be64(wr->wr.fast_reg.iova_start);
1856 seg->len = cpu_to_be64(wr->wr.fast_reg.length);
1857 seg->log2_page_size = wr->wr.fast_reg.page_shift;
1858 seg->qpn_mkey7_0 = cpu_to_be32(0xffffff00 |
1859 mlx5_mkey_variant(wr->wr.fast_reg.rkey));
1862 static void set_frwr_pages(struct mlx5_wqe_data_seg *dseg,
1863 struct ib_send_wr *wr,
1864 struct mlx5_core_dev *mdev,
1865 struct mlx5_ib_pd *pd,
1868 struct mlx5_ib_fast_reg_page_list *mfrpl = to_mfrpl(wr->wr.fast_reg.page_list);
1869 u64 *page_list = wr->wr.fast_reg.page_list->page_list;
1870 u64 perm = MLX5_EN_RD | (writ ? MLX5_EN_WR : 0);
1873 for (i = 0; i < wr->wr.fast_reg.page_list_len; i++)
1874 mfrpl->mapped_page_list[i] = cpu_to_be64(page_list[i] | perm);
1875 dseg->addr = cpu_to_be64(mfrpl->map);
1876 dseg->byte_count = cpu_to_be32(ALIGN(sizeof(u64) * wr->wr.fast_reg.page_list_len, 64));
1877 dseg->lkey = cpu_to_be32(pd->pa_lkey);
1880 static __be32 send_ieth(struct ib_send_wr *wr)
1882 switch (wr->opcode) {
1883 case IB_WR_SEND_WITH_IMM:
1884 case IB_WR_RDMA_WRITE_WITH_IMM:
1885 return wr->ex.imm_data;
1887 case IB_WR_SEND_WITH_INV:
1888 return cpu_to_be32(wr->ex.invalidate_rkey);
1895 static u8 calc_sig(void *wqe, int size)
1901 for (i = 0; i < size; i++)
1907 static u8 wq_sig(void *wqe)
1909 return calc_sig(wqe, (*((u8 *)wqe + 8) & 0x3f) << 4);
1912 static int set_data_inl_seg(struct mlx5_ib_qp *qp, struct ib_send_wr *wr,
1915 struct mlx5_wqe_inline_seg *seg;
1916 void *qend = qp->sq.qend;
1924 wqe += sizeof(*seg);
1925 for (i = 0; i < wr->num_sge; i++) {
1926 addr = (void *)(unsigned long)(wr->sg_list[i].addr);
1927 len = wr->sg_list[i].length;
1930 if (unlikely(inl > qp->max_inline_data))
1933 if (unlikely(wqe + len > qend)) {
1935 memcpy(wqe, addr, copy);
1938 wqe = mlx5_get_send_wqe(qp, 0);
1940 memcpy(wqe, addr, len);
1944 seg->byte_count = cpu_to_be32(inl | MLX5_INLINE_SEG);
1946 *sz = ALIGN(inl + sizeof(seg->byte_count), 16) / 16;
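/* Inline data is copied straight into the WQE, wrapping back to the start of
 * the SQ buffer at qend; the size returned in *sz is in 16-byte units, the
 * same units used for ctrl->qpn_ds.
 */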
1951 static int set_frwr_li_wr(void **seg, struct ib_send_wr *wr, int *size,
1952 struct mlx5_core_dev *mdev, struct mlx5_ib_pd *pd, struct mlx5_ib_qp *qp)
1957 li = wr->opcode == IB_WR_LOCAL_INV ? 1 : 0;
1958 if (unlikely(wr->send_flags & IB_SEND_INLINE))
1961 set_frwr_umr_segment(*seg, wr, li);
1962 *seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
1963 *size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
1964 if (unlikely((*seg == qp->sq.qend)))
1965 *seg = mlx5_get_send_wqe(qp, 0);
1966 set_mkey_segment(*seg, wr, li, &writ);
1967 *seg += sizeof(struct mlx5_mkey_seg);
1968 *size += sizeof(struct mlx5_mkey_seg) / 16;
1969 if (unlikely((*seg == qp->sq.qend)))
1970 *seg = mlx5_get_send_wqe(qp, 0);
1972 if (unlikely(wr->wr.fast_reg.page_list_len >
1973 wr->wr.fast_reg.page_list->max_page_list_len))
1976 set_frwr_pages(*seg, wr, mdev, pd, writ);
1977 *seg += sizeof(struct mlx5_wqe_data_seg);
1978 *size += (sizeof(struct mlx5_wqe_data_seg) / 16);
1983 static void dump_wqe(struct mlx5_ib_qp *qp, int idx, int size_16)
1989 pr_debug("dump wqe at %p\n", mlx5_get_send_wqe(qp, tidx));
1990 for (i = 0, j = 0; i < size_16 * 4; i += 4, j += 4) {
1991 if ((i & 0xf) == 0) {
1992 void *buf = mlx5_get_send_wqe(qp, tidx);
1993 tidx = (tidx + 1) & (qp->sq.wqe_cnt - 1);
1997 pr_debug("%08x %08x %08x %08x\n", be32_to_cpu(p[j]),
1998 be32_to_cpu(p[j + 1]), be32_to_cpu(p[j + 2]),
1999 be32_to_cpu(p[j + 3]));
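/* Streams a WQE into the blue-flame register in 64-byte chunks (the caller
 * passes a 64-byte-aligned length), wrapping src back to the start of the SQ
 * buffer at qend so a WQE that wraps around the queue end is written out
 * contiguously.
 */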
2003 static void mlx5_bf_copy(u64 __iomem *dst, u64 *src,
2004 unsigned bytecnt, struct mlx5_ib_qp *qp)
2006 while (bytecnt > 0) {
2007 __iowrite64_copy(dst++, src++, 8);
2008 __iowrite64_copy(dst++, src++, 8);
2009 __iowrite64_copy(dst++, src++, 8);
2010 __iowrite64_copy(dst++, src++, 8);
2011 __iowrite64_copy(dst++, src++, 8);
2012 __iowrite64_copy(dst++, src++, 8);
2013 __iowrite64_copy(dst++, src++, 8);
2014 __iowrite64_copy(dst++, src++, 8);
2016 if (unlikely(src == qp->sq.qend))
2017 src = mlx5_get_send_wqe(qp, 0);
2021 static u8 get_fence(u8 fence, struct ib_send_wr *wr)
2023 if (unlikely(wr->opcode == IB_WR_LOCAL_INV &&
2024 wr->send_flags & IB_SEND_FENCE))
2025 return MLX5_FENCE_MODE_STRONG_ORDERING;
2027 if (unlikely(fence)) {
2028 if (wr->send_flags & IB_SEND_FENCE)
2029 return MLX5_FENCE_MODE_SMALL_AND_FENCE;
2038 int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
2039 struct ib_send_wr **bad_wr)
2041 struct mlx5_wqe_ctrl_seg *ctrl = NULL; /* compiler warning */
2042 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2043 struct mlx5_core_dev *mdev = &dev->mdev;
2044 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2045 struct mlx5_wqe_data_seg *dpseg;
2046 struct mlx5_wqe_xrc_seg *xrc;
2047 struct mlx5_bf *bf = qp->bf;
2048 int uninitialized_var(size);
2049 void *qend = qp->sq.qend;
2050 unsigned long flags;
2063 spin_lock_irqsave(&qp->sq.lock, flags);
2065 for (nreq = 0; wr; nreq++, wr = wr->next) {
2066 if (unlikely(wr->opcode >= sizeof(mlx5_ib_opcode) / sizeof(mlx5_ib_opcode[0]))) {
2067 mlx5_ib_warn(dev, "\n");
2073 if (unlikely(mlx5_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq))) {
2074 mlx5_ib_warn(dev, "\n");
2080 fence = qp->fm_cache;
2081 num_sge = wr->num_sge;
2082 if (unlikely(num_sge > qp->sq.max_gs)) {
2083 mlx5_ib_warn(dev, "\n");
2089 idx = qp->sq.cur_post & (qp->sq.wqe_cnt - 1);
2090 seg = mlx5_get_send_wqe(qp, idx);
2091 ctrl = seg;
2092 *(uint32_t *)(seg + 8) = 0;
2093 ctrl->imm = send_ieth(wr);
2094 ctrl->fm_ce_se = qp->sq_signal_bits |
2095 (wr->send_flags & IB_SEND_SIGNALED ?
2096 MLX5_WQE_CTRL_CQ_UPDATE : 0) |
2097 (wr->send_flags & IB_SEND_SOLICITED ?
2098 MLX5_WQE_CTRL_SOLICITED : 0);
2100 seg += sizeof(*ctrl);
2101 size = sizeof(*ctrl) / 16;
2103 switch (ibqp->qp_type) {
2104 case IB_QPT_XRC_INI:
2105 xrc = seg;
2106 xrc->xrc_srqn = htonl(wr->xrc_remote_srq_num);
2107 seg += sizeof(*xrc);
2108 size += sizeof(*xrc) / 16;
2111 switch (wr->opcode) {
2112 case IB_WR_RDMA_READ:
2113 case IB_WR_RDMA_WRITE:
2114 case IB_WR_RDMA_WRITE_WITH_IMM:
2115 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2117 seg += sizeof(struct mlx5_wqe_raddr_seg);
2118 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2121 case IB_WR_ATOMIC_CMP_AND_SWP:
2122 case IB_WR_ATOMIC_FETCH_AND_ADD:
2123 case IB_WR_MASKED_ATOMIC_CMP_AND_SWP:
2124 mlx5_ib_warn(dev, "Atomic operations are not supported yet\n");
2129 case IB_WR_LOCAL_INV:
2130 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2131 qp->sq.wr_data[idx] = IB_WR_LOCAL_INV;
2132 ctrl->imm = cpu_to_be32(wr->ex.invalidate_rkey);
2133 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2135 mlx5_ib_warn(dev, "\n");
2142 case IB_WR_FAST_REG_MR:
2143 next_fence = MLX5_FENCE_MODE_INITIATOR_SMALL;
2144 qp->sq.wr_data[idx] = IB_WR_FAST_REG_MR;
2145 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2146 err = set_frwr_li_wr(&seg, wr, &size, mdev, to_mpd(ibqp->pd), qp);
2148 mlx5_ib_warn(dev, "\n");
2161 switch (wr->opcode) {
2162 case IB_WR_RDMA_WRITE:
2163 case IB_WR_RDMA_WRITE_WITH_IMM:
2164 set_raddr_seg(seg, wr->wr.rdma.remote_addr,
2166 seg += sizeof(struct mlx5_wqe_raddr_seg);
2167 size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
2178 set_datagram_seg(seg, wr);
2179 seg += sizeof(struct mlx5_wqe_datagram_seg);
2180 size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
2181 if (unlikely((seg == qend)))
2182 seg = mlx5_get_send_wqe(qp, 0);
2185 case MLX5_IB_QPT_REG_UMR:
2186 if (wr->opcode != MLX5_IB_WR_UMR) {
2188 mlx5_ib_warn(dev, "bad opcode\n");
2191 qp->sq.wr_data[idx] = MLX5_IB_WR_UMR;
2192 ctrl->imm = cpu_to_be32(wr->wr.fast_reg.rkey);
2193 set_reg_umr_segment(seg, wr);
2194 seg += sizeof(struct mlx5_wqe_umr_ctrl_seg);
2195 size += sizeof(struct mlx5_wqe_umr_ctrl_seg) / 16;
2196 if (unlikely((seg == qend)))
2197 seg = mlx5_get_send_wqe(qp, 0);
2198 set_reg_mkey_segment(seg, wr);
2199 seg += sizeof(struct mlx5_mkey_seg);
2200 size += sizeof(struct mlx5_mkey_seg) / 16;
2201 if (unlikely((seg == qend)))
2202 seg = mlx5_get_send_wqe(qp, 0);
2209 if (wr->send_flags & IB_SEND_INLINE && num_sge) {
2210 int uninitialized_var(sz);
2212 err = set_data_inl_seg(qp, wr, seg, &sz);
2213 if (unlikely(err)) {
2214 mlx5_ib_warn(dev, "\n");
2221 dpseg = seg;
2222 for (i = 0; i < num_sge; i++) {
2223 if (unlikely(dpseg == qend)) {
2224 seg = mlx5_get_send_wqe(qp, 0);
2225 dpseg = seg;
2227 if (likely(wr->sg_list[i].length)) {
2228 set_data_ptr_seg(dpseg, wr->sg_list + i);
2229 size += sizeof(struct mlx5_wqe_data_seg) / 16;
2235 mlx5_opcode = mlx5_ib_opcode[wr->opcode];
2236 ctrl->opmod_idx_opcode = cpu_to_be32(((u32)(qp->sq.cur_post) << 8) |
2238 ((u32)opmod << 24));
2239 ctrl->qpn_ds = cpu_to_be32(size | (qp->mqp.qpn << 8));
2240 ctrl->fm_ce_se |= get_fence(fence, wr);
2241 qp->fm_cache = next_fence;
2242 if (unlikely(qp->wq_sig))
2243 ctrl->signature = wq_sig(ctrl);
2245 qp->sq.wrid[idx] = wr->wr_id;
2246 qp->sq.w_list[idx].opcode = mlx5_opcode;
2247 qp->sq.wqe_head[idx] = qp->sq.head + nreq;
2248 qp->sq.cur_post += DIV_ROUND_UP(size * 16, MLX5_SEND_WQE_BB);
2249 qp->sq.w_list[idx].next = qp->sq.cur_post;
2252 dump_wqe(qp, idx, size);
2257 qp->sq.head += nreq;
2259 /* Make sure that descriptors are written before
2260 * updating doorbell record and ringing the doorbell
2264 qp->db.db[MLX5_SND_DBR] = cpu_to_be32(qp->sq.cur_post);
2266 /* Make sure doorbell record is visible to the HCA before
2267 * we hit the doorbell */
2271 spin_lock(&bf->lock);
2274 if (0 && nreq == 1 && bf->uuarn && inl && size > 1 && size <= bf->buf_size / 16) {
2275 mlx5_bf_copy(bf->reg + bf->offset, (u64 *)ctrl, ALIGN(size * 16, 64), qp);
2278 mlx5_write64((__be32 *)ctrl, bf->regreg + bf->offset,
2279 MLX5_GET_DOORBELL_LOCK(&bf->lock32));
2280 /* Make sure doorbells don't leak out of SQ spinlock
2281 * and reach the HCA out of order.
2285 bf->offset ^= bf->buf_size;
2287 spin_unlock(&bf->lock);
2290 spin_unlock_irqrestore(&qp->sq.lock, flags);
2295 static void set_sig_seg(struct mlx5_rwqe_sig *sig, int size)
2297 sig->signature = calc_sig(sig, size);
2300 int mlx5_ib_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
2301 struct ib_recv_wr **bad_wr)
2303 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2304 struct mlx5_wqe_data_seg *scat;
2305 struct mlx5_rwqe_sig *sig;
2306 unsigned long flags;
2312 spin_lock_irqsave(&qp->rq.lock, flags);
2314 ind = qp->rq.head & (qp->rq.wqe_cnt - 1);
2316 for (nreq = 0; wr; nreq++, wr = wr->next) {
2317 if (mlx5_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
2323 if (unlikely(wr->num_sge > qp->rq.max_gs)) {
2329 scat = get_recv_wqe(qp, ind);
2333 for (i = 0; i < wr->num_sge; i++)
2334 set_data_ptr_seg(scat + i, wr->sg_list + i);
2336 if (i < qp->rq.max_gs) {
2337 scat[i].byte_count = 0;
2338 scat[i].lkey = cpu_to_be32(MLX5_INVALID_LKEY);
2343 sig = (struct mlx5_rwqe_sig *)scat;
2344 set_sig_seg(sig, (qp->rq.max_gs + 1) << 2);
2347 qp->rq.wrid[ind] = wr->wr_id;
2349 ind = (ind + 1) & (qp->rq.wqe_cnt - 1);
2354 qp->rq.head += nreq;
2356 /* Make sure that descriptors are written before
2361 *qp->db.db = cpu_to_be32(qp->rq.head & 0xffff);
2364 spin_unlock_irqrestore(&qp->rq.lock, flags);
2369 static inline enum ib_qp_state to_ib_qp_state(enum mlx5_qp_state mlx5_state)
2371 switch (mlx5_state) {
2372 case MLX5_QP_STATE_RST: return IB_QPS_RESET;
2373 case MLX5_QP_STATE_INIT: return IB_QPS_INIT;
2374 case MLX5_QP_STATE_RTR: return IB_QPS_RTR;
2375 case MLX5_QP_STATE_RTS: return IB_QPS_RTS;
2376 case MLX5_QP_STATE_SQ_DRAINING:
2377 case MLX5_QP_STATE_SQD: return IB_QPS_SQD;
2378 case MLX5_QP_STATE_SQER: return IB_QPS_SQE;
2379 case MLX5_QP_STATE_ERR: return IB_QPS_ERR;
2384 static inline enum ib_mig_state to_ib_mig_state(int mlx5_mig_state)
2386 switch (mlx5_mig_state) {
2387 case MLX5_QP_PM_ARMED: return IB_MIG_ARMED;
2388 case MLX5_QP_PM_REARM: return IB_MIG_REARM;
2389 case MLX5_QP_PM_MIGRATED: return IB_MIG_MIGRATED;
2394 static int to_ib_qp_access_flags(int mlx5_flags)
2398 if (mlx5_flags & MLX5_QP_BIT_RRE)
2399 ib_flags |= IB_ACCESS_REMOTE_READ;
2400 if (mlx5_flags & MLX5_QP_BIT_RWE)
2401 ib_flags |= IB_ACCESS_REMOTE_WRITE;
2402 if (mlx5_flags & MLX5_QP_BIT_RAE)
2403 ib_flags |= IB_ACCESS_REMOTE_ATOMIC;
2408 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
2409 struct mlx5_qp_path *path)
2411 struct mlx5_core_dev *dev = &ibdev->mdev;
2413 memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
2414 ib_ah_attr->port_num = path->port;
2416 if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->caps.num_ports)
2419 ib_ah_attr->sl = path->sl & 0xf;
2421 ib_ah_attr->dlid = be16_to_cpu(path->rlid);
2422 ib_ah_attr->src_path_bits = path->grh_mlid & 0x7f;
2423 ib_ah_attr->static_rate = path->static_rate ? path->static_rate - 5 : 0;
2424 ib_ah_attr->ah_flags = (path->grh_mlid & (1 << 7)) ? IB_AH_GRH : 0;
2425 if (ib_ah_attr->ah_flags) {
2426 ib_ah_attr->grh.sgid_index = path->mgid_index;
2427 ib_ah_attr->grh.hop_limit = path->hop_limit;
2428 ib_ah_attr->grh.traffic_class =
2429 (be32_to_cpu(path->tclass_flowlabel) >> 20) & 0xff;
2430 ib_ah_attr->grh.flow_label =
2431 be32_to_cpu(path->tclass_flowlabel) & 0xfffff;
2432 memcpy(ib_ah_attr->grh.dgid.raw,
2433 path->rgid, sizeof(ib_ah_attr->grh.dgid.raw));
2437 int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
2438 struct ib_qp_init_attr *qp_init_attr)
2440 struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
2441 struct mlx5_ib_qp *qp = to_mqp(ibqp);
2442 struct mlx5_query_qp_mbox_out *outb;
2443 struct mlx5_qp_context *context;
2447 mutex_lock(&qp->mutex);
2448 outb = kzalloc(sizeof(*outb), GFP_KERNEL);
2453 context = &outb->ctx;
2454 err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
2458 mlx5_state = be32_to_cpu(context->flags) >> 28;
2460 qp->state = to_ib_qp_state(mlx5_state);
2461 qp_attr->qp_state = qp->state;
2462 qp_attr->path_mtu = context->mtu_msgmax >> 5;
2463 qp_attr->path_mig_state =
2464 to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
2465 qp_attr->qkey = be32_to_cpu(context->qkey);
2466 qp_attr->rq_psn = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
2467 qp_attr->sq_psn = be32_to_cpu(context->next_send_psn) & 0xffffff;
2468 qp_attr->dest_qp_num = be32_to_cpu(context->log_pg_sz_remote_qpn) & 0xffffff;
2469 qp_attr->qp_access_flags =
2470 to_ib_qp_access_flags(be32_to_cpu(context->params2));
2472 if (qp->ibqp.qp_type == IB_QPT_RC || qp->ibqp.qp_type == IB_QPT_UC) {
2473 to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
2474 to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
2475 qp_attr->alt_pkey_index = context->alt_path.pkey_index & 0x7f;
2476 qp_attr->alt_port_num = qp_attr->alt_ah_attr.port_num;
2479 qp_attr->pkey_index = context->pri_path.pkey_index & 0x7f;
2480 qp_attr->port_num = context->pri_path.port;
2482 /* qp_attr->en_sqd_async_notify is only applicable in modify qp */
2483 qp_attr->sq_draining = mlx5_state == MLX5_QP_STATE_SQ_DRAINING;
2485 qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);
2487 qp_attr->max_dest_rd_atomic =
2488 1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
2489 qp_attr->min_rnr_timer =
2490 (be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
2491 qp_attr->timeout = context->pri_path.ackto_lt >> 3;
2492 qp_attr->retry_cnt = (be32_to_cpu(context->params1) >> 16) & 0x7;
2493 qp_attr->rnr_retry = (be32_to_cpu(context->params1) >> 13) & 0x7;
2494 qp_attr->alt_timeout = context->alt_path.ackto_lt >> 3;
2495 qp_attr->cur_qp_state = qp_attr->qp_state;
2496 qp_attr->cap.max_recv_wr = qp->rq.wqe_cnt;
2497 qp_attr->cap.max_recv_sge = qp->rq.max_gs;
2499 if (!ibqp->uobject) {
2500 qp_attr->cap.max_send_wr = qp->sq.wqe_cnt;
2501 qp_attr->cap.max_send_sge = qp->sq.max_gs;
2503 qp_attr->cap.max_send_wr = 0;
2504 qp_attr->cap.max_send_sge = 0;
2507 /* We don't support inline sends for kernel QPs (yet), and we
2508 * don't know what userspace's value should be.
2510 qp_attr->cap.max_inline_data = 0;
2512 qp_init_attr->cap = qp_attr->cap;
2514 qp_init_attr->create_flags = 0;
2515 if (qp->flags & MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK)
2516 qp_init_attr->create_flags |= IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK;
2518 qp_init_attr->sq_sig_type = qp->sq_signal_bits & MLX5_WQE_CTRL_CQ_UPDATE ?
2519 IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
2525 mutex_unlock(&qp->mutex);
2529 struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
2530 struct ib_ucontext *context,
2531 struct ib_udata *udata)
2533 struct mlx5_ib_dev *dev = to_mdev(ibdev);
2534 struct mlx5_ib_xrcd *xrcd;
2537 if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
2538 return ERR_PTR(-ENOSYS);
2540 xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
2542 return ERR_PTR(-ENOMEM);
2544 err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
2547 return ERR_PTR(-ENOMEM);
2550 return &xrcd->ibxrcd;
2553 int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
2555 struct mlx5_ib_dev *dev = to_mdev(xrcd->device);
2556 u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
2559 err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
2561 mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);