/*******************************************************************
 * This file is part of the Emulex RoCE Device Driver for          *
 * RoCE (RDMA over Converged Ethernet) CNA Adapters.               *
 * Copyright (C) 2008-2012 Emulex. All rights reserved.            *
 * EMULEX and SLI are trademarks of Emulex.                        *
 *                                                                 *
 * This program is free software; you can redistribute it and/or  *
 * modify it under the terms of version 2 of the GNU General      *
 * Public License as published by the Free Software Foundation.   *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND         *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE     *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID. See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING *
 * included with this package.                                    *
 *
 * Contact Information:
 * linux-drivers@emulex.com
 * Costa Mesa, CA 92626
 *******************************************************************/
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/log2.h>
#include <linux/dma-mapping.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "ocrdma_hw.h"
#include "ocrdma_verbs.h"
#include "ocrdma_ah.h"
	OCRDMA_MBX_STATUS_FAILED = 1,
	OCRDMA_MBX_STATUS_ILLEGAL_FIELD = 3,
	OCRDMA_MBX_STATUS_OOR = 100,
	OCRDMA_MBX_STATUS_INVALID_PD = 101,
	OCRDMA_MBX_STATUS_PD_INUSE = 102,
	OCRDMA_MBX_STATUS_INVALID_CQ = 103,
	OCRDMA_MBX_STATUS_INVALID_QP = 104,
	OCRDMA_MBX_STATUS_INVALID_LKEY = 105,
	OCRDMA_MBX_STATUS_ORD_EXCEEDS = 106,
	OCRDMA_MBX_STATUS_IRD_EXCEEDS = 107,
	OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS = 108,
	OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS = 109,
	OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS = 110,
	OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS = 111,
	OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS = 112,
	OCRDMA_MBX_STATUS_INVALID_STATE_CHANGE = 113,
	OCRDMA_MBX_STATUS_MW_BOUND = 114,
	OCRDMA_MBX_STATUS_INVALID_VA = 115,
	OCRDMA_MBX_STATUS_INVALID_LENGTH = 116,
	OCRDMA_MBX_STATUS_INVALID_FBO = 117,
	OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS = 118,
	OCRDMA_MBX_STATUS_INVALID_PBE_SIZE = 119,
	OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY = 120,
	OCRDMA_MBX_STATUS_INVALID_PBL_SHIFT = 121,
	OCRDMA_MBX_STATUS_INVALID_SRQ_ID = 129,
	OCRDMA_MBX_STATUS_SRQ_ERROR = 133,
	OCRDMA_MBX_STATUS_RQE_EXCEEDS = 134,
	OCRDMA_MBX_STATUS_MTU_EXCEEDS = 135,
	OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS = 136,
	OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS = 137,
	OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS = 138,
	OCRDMA_MBX_STATUS_QP_BOUND = 130,
	OCRDMA_MBX_STATUS_INVALID_CHANGE = 139,
	OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP = 140,
	OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER = 141,
	OCRDMA_MBX_STATUS_MW_STILL_BOUND = 142,
	OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID = 143,
	OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS = 144

enum additional_status {
	OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES = 22

	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES = 1,
	OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER = 2,
	OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES = 3,
	OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING = 4,
	OCRDMA_MBX_CQE_STATUS_DMA_FAILED = 5
static inline void *ocrdma_get_eqe(struct ocrdma_eq *eq)
	return eq->q.va + (eq->q.tail * sizeof(struct ocrdma_eqe));

static inline void ocrdma_eq_inc_tail(struct ocrdma_eq *eq)
	eq->q.tail = (eq->q.tail + 1) & (OCRDMA_EQ_LEN - 1);

static inline void *ocrdma_get_mcqe(struct ocrdma_dev *dev)
	struct ocrdma_mcqe *cqe = (struct ocrdma_mcqe *)
		(dev->mq.cq.va + (dev->mq.cq.tail * sizeof(struct ocrdma_mcqe)));

	if (!(le32_to_cpu(cqe->valid_ae_cmpl_cons) & OCRDMA_MCQE_VALID_MASK))

static inline void ocrdma_mcq_inc_tail(struct ocrdma_dev *dev)
	dev->mq.cq.tail = (dev->mq.cq.tail + 1) & (OCRDMA_MQ_CQ_LEN - 1);

static inline struct ocrdma_mqe *ocrdma_get_mqe(struct ocrdma_dev *dev)
	return dev->mq.sq.va + (dev->mq.sq.head * sizeof(struct ocrdma_mqe));

static inline void ocrdma_mq_inc_head(struct ocrdma_dev *dev)
	dev->mq.sq.head = (dev->mq.sq.head + 1) & (OCRDMA_MQ_LEN - 1);

static inline void *ocrdma_get_mqe_rsp(struct ocrdma_dev *dev)
	return dev->mq.sq.va + (dev->mqe_ctx.tag * sizeof(struct ocrdma_mqe));
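/*
 * Note: ocrdma_post_mqe() below stamps each command with the MQ head index
 * at post time (dev->mqe_ctx.tag = dev->mq.sq.head), so ocrdma_get_mqe_rsp()
 * above locates the response slot of the single outstanding command by that
 * same tag.
 */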
enum ib_qp_state get_ibqp_state(enum ocrdma_qp_state qps)
	case OCRDMA_QPS_INIT:
	case OCRDMA_QPS_SQ_DRAINING:

static enum ocrdma_qp_state get_ocrdma_qp_state(enum ib_qp_state qps)
		return OCRDMA_QPS_RST;
		return OCRDMA_QPS_INIT;
		return OCRDMA_QPS_RTR;
		return OCRDMA_QPS_RTS;
		return OCRDMA_QPS_SQD;
		return OCRDMA_QPS_SQE;
		return OCRDMA_QPS_ERR;
	return OCRDMA_QPS_ERR;
static int ocrdma_get_mbx_errno(u32 status)
	u8 mbox_status = (status & OCRDMA_MBX_RSP_STATUS_MASK) >>
				OCRDMA_MBX_RSP_STATUS_SHIFT;
	u8 add_status = (status & OCRDMA_MBX_RSP_ASTATUS_MASK) >>
				OCRDMA_MBX_RSP_ASTATUS_SHIFT;

	switch (mbox_status) {
	case OCRDMA_MBX_STATUS_OOR:
	case OCRDMA_MBX_STATUS_MAX_QP_EXCEEDS:

	case OCRDMA_MBX_STATUS_INVALID_PD:
	case OCRDMA_MBX_STATUS_INVALID_CQ:
	case OCRDMA_MBX_STATUS_INVALID_SRQ_ID:
	case OCRDMA_MBX_STATUS_INVALID_QP:
	case OCRDMA_MBX_STATUS_INVALID_CHANGE:
	case OCRDMA_MBX_STATUS_MTU_EXCEEDS:
	case OCRDMA_MBX_STATUS_INVALID_RNR_NAK_TIMER:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_INVALID:
	case OCRDMA_MBX_STATUS_PKEY_INDEX_EXCEEDS:
	case OCRDMA_MBX_STATUS_ILLEGAL_FIELD:
	case OCRDMA_MBX_STATUS_INVALID_PBL_ENTRY:
	case OCRDMA_MBX_STATUS_INVALID_LKEY:
	case OCRDMA_MBX_STATUS_INVALID_VA:
	case OCRDMA_MBX_STATUS_INVALID_LENGTH:
	case OCRDMA_MBX_STATUS_INVALID_FBO:
	case OCRDMA_MBX_STATUS_INVALID_ACC_RIGHTS:
	case OCRDMA_MBX_STATUS_INVALID_PBE_SIZE:
	case OCRDMA_MBX_STATUS_ATOMIC_OPS_UNSUP:
	case OCRDMA_MBX_STATUS_SRQ_ERROR:
	case OCRDMA_MBX_STATUS_SRQ_SIZE_UNDERUNS:

	case OCRDMA_MBX_STATUS_PD_INUSE:
	case OCRDMA_MBX_STATUS_QP_BOUND:
	case OCRDMA_MBX_STATUS_MW_STILL_BOUND:
	case OCRDMA_MBX_STATUS_MW_BOUND:

	case OCRDMA_MBX_STATUS_RECVQ_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_RECV_EXCEEDS:
	case OCRDMA_MBX_STATUS_RQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SRQ_LIMIT_EXCEEDS:
	case OCRDMA_MBX_STATUS_ORD_EXCEEDS:
	case OCRDMA_MBX_STATUS_IRD_EXCEEDS:
	case OCRDMA_MBX_STATUS_SENDQ_WQE_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_SEND_EXCEEDS:
	case OCRDMA_MBX_STATUS_SGE_WRITE_EXCEEDS:

	case OCRDMA_MBX_STATUS_FAILED:
		switch (add_status) {
		case OCRDMA_MBX_ADDI_STATUS_INSUFFICIENT_RESOURCES:
char *port_speed_string(struct ocrdma_dev *dev)
	u16 speeds_supported;

	speeds_supported = dev->phy.fixed_speeds_supported |
				dev->phy.auto_speeds_supported;
	if (speeds_supported & OCRDMA_PHY_SPEED_40GBPS)
	else if (speeds_supported & OCRDMA_PHY_SPEED_10GBPS)
	else if (speeds_supported & OCRDMA_PHY_SPEED_1GBPS)
static int ocrdma_get_mbx_cqe_errno(u16 cqe_status)
	int err_num = -EINVAL;

	switch (cqe_status) {
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_PRIVILEDGES:
	case OCRDMA_MBX_CQE_STATUS_INVALID_PARAMETER:
	case OCRDMA_MBX_CQE_STATUS_INSUFFICIENT_RESOURCES:
	case OCRDMA_MBX_CQE_STATUS_QUEUE_FLUSHING:
	case OCRDMA_MBX_CQE_STATUS_DMA_FAILED:
void ocrdma_ring_cq_db(struct ocrdma_dev *dev, u16 cq_id, bool armed,
		       bool solicited, u16 cqe_popped)
	u32 val = cq_id & OCRDMA_DB_CQ_RING_ID_MASK;

	val |= ((cq_id & OCRDMA_DB_CQ_RING_ID_EXT_MASK) <<
		OCRDMA_DB_CQ_RING_ID_EXT_MASK_SHIFT);

		val |= (1 << OCRDMA_DB_CQ_REARM_SHIFT);
		val |= (1 << OCRDMA_DB_CQ_SOLICIT_SHIFT);
	val |= (cqe_popped << OCRDMA_DB_CQ_NUM_POPPED_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_CQ_OFFSET);

static void ocrdma_ring_mq_db(struct ocrdma_dev *dev)
	val |= dev->mq.sq.id & OCRDMA_MQ_ID_MASK;
	val |= 1 << OCRDMA_MQ_NUM_MQE_SHIFT;
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_MQ_OFFSET);

static void ocrdma_ring_eq_db(struct ocrdma_dev *dev, u16 eq_id,
			      bool arm, bool clear_int, u16 num_eqe)
	val |= eq_id & OCRDMA_EQ_ID_MASK;
	val |= ((eq_id & OCRDMA_EQ_ID_EXT_MASK) << OCRDMA_EQ_ID_EXT_MASK_SHIFT);
		val |= (1 << OCRDMA_REARM_SHIFT);
		val |= (1 << OCRDMA_EQ_CLR_SHIFT);
	val |= (1 << OCRDMA_EQ_TYPE_SHIFT);
	val |= (num_eqe << OCRDMA_NUM_EQE_SHIFT);
	iowrite32(val, dev->nic_info.db + OCRDMA_DB_EQ_OFFSET);
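/*
 * Doorbell usage, as seen elsewhere in this file: after draining the MQ CQ,
 * ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped) re-arms the
 * CQ and acknowledges the consumed CQEs, while ocrdma_ring_eq_db(dev,
 * eq->q.id, false, true, 1) acknowledges a single EQE without re-arming
 * the EQ.
 */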
static void ocrdma_init_mch(struct ocrdma_mbx_hdr *cmd_hdr,
			    u8 opcode, u8 subsys, u32 cmd_len)
	cmd_hdr->subsys_op = (opcode | (subsys << OCRDMA_MCH_SUBSYS_SHIFT));
	cmd_hdr->timeout = 20; /* seconds */
	cmd_hdr->cmd_len = cmd_len - sizeof(struct ocrdma_mbx_hdr);

static void *ocrdma_init_emb_mqe(u8 opcode, u32 cmd_len)
	struct ocrdma_mqe *mqe;

	mqe = kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);

	mqe->hdr.spcl_sge_cnt_emb |=
		(OCRDMA_MQE_EMBEDDED << OCRDMA_MQE_HDR_EMB_SHIFT) &
		OCRDMA_MQE_HDR_EMB_MASK;
	mqe->hdr.pyld_len = cmd_len - sizeof(struct ocrdma_mqe_hdr);

	ocrdma_init_mch(&mqe->u.emb_req.mch, opcode, OCRDMA_SUBSYS_ROCE,

static void *ocrdma_alloc_mqe(void)
	return kzalloc(sizeof(struct ocrdma_mqe), GFP_KERNEL);
static void ocrdma_free_q(struct ocrdma_dev *dev, struct ocrdma_queue_info *q)
	dma_free_coherent(&dev->nic_info.pdev->dev, q->size, q->va, q->dma);

static int ocrdma_alloc_q(struct ocrdma_dev *dev,
			  struct ocrdma_queue_info *q, u16 len, u16 entry_size)
	memset(q, 0, sizeof(*q));
	q->entry_size = entry_size;
	q->size = len * entry_size;
	q->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, q->size,
				   &q->dma, GFP_KERNEL);
	memset(q->va, 0, q->size);

static void ocrdma_build_q_pages(struct ocrdma_pa *q_pa, int cnt,
				 dma_addr_t host_pa, int hw_page_size)
	for (i = 0; i < cnt; i++) {
		q_pa[i].lo = (u32) (host_pa & 0xffffffff);
		q_pa[i].hi = (u32) upper_32_bits(host_pa);
		host_pa += hw_page_size;
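/*
 * For example, a 16 KB queue described with 4 KB hardware pages yields four
 * ocrdma_pa entries, each 64-bit bus address split into the lo/hi words
 * above.
 */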
static int ocrdma_mbx_delete_q(struct ocrdma_dev *dev,
			       struct ocrdma_queue_info *q, int queue_type)
	struct ocrdma_delete_q_req *cmd = dev->mbx_cmd;

	switch (queue_type) {
		opcode = OCRDMA_CMD_DELETE_MQ;
		opcode = OCRDMA_CMD_DELETE_CQ;
		opcode = OCRDMA_CMD_DELETE_EQ;
	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, opcode, OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
static int ocrdma_mbx_create_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
	struct ocrdma_create_eq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_eq_rsp *rsp = dev->mbx_cmd;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_EQ, OCRDMA_SUBSYS_COMMON,

	cmd->req.rsvd_version = 2;
	cmd->valid = OCRDMA_CREATE_EQ_VALID;
	cmd->cnt = 4 << OCRDMA_CREATE_EQ_CNT_SHIFT;

	ocrdma_build_q_pages(&cmd->pa[0], cmd->num_pages, eq->q.dma,
	status = be_roce_mcc_cmd(dev->nic_info.netdev, cmd, sizeof(*cmd), NULL,
	eq->q.id = rsp->vector_eqid & 0xffff;
	eq->vector = (rsp->vector_eqid >> 16) & 0xffff;
	eq->q.created = true;

static int ocrdma_create_eq(struct ocrdma_dev *dev,
			    struct ocrdma_eq *eq, u16 q_len)
	status = ocrdma_alloc_q(dev, &eq->q, OCRDMA_EQ_LEN,
				sizeof(struct ocrdma_eqe));
	status = ocrdma_mbx_create_eq(dev, eq);
	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
	ocrdma_free_q(dev, &eq->q);
int ocrdma_get_irq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX)
		irq = dev->nic_info.pdev->irq;
		irq = dev->nic_info.msix.vector_list[eq->vector];

static void _ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
	ocrdma_mbx_delete_q(dev, &eq->q, QTYPE_EQ);
	ocrdma_free_q(dev, &eq->q);

static void ocrdma_destroy_eq(struct ocrdma_dev *dev, struct ocrdma_eq *eq)
	/* disarm the EQ so that interrupts are not generated
	 * during freeing while the EQ delete is in progress.
	 */
	ocrdma_ring_eq_db(dev, eq->q.id, false, false, 0);

	irq = ocrdma_get_irq(dev, eq);
	_ocrdma_destroy_eq(dev, eq);

static void ocrdma_destroy_eqs(struct ocrdma_dev *dev)
	for (i = 0; i < dev->eq_cnt; i++)
		ocrdma_destroy_eq(dev, &dev->eq_tbl[i]);
static int ocrdma_mbx_mq_cq_create(struct ocrdma_dev *dev,
				   struct ocrdma_queue_info *cq,
				   struct ocrdma_queue_info *eq)
	struct ocrdma_create_cq_cmd *cmd = dev->mbx_cmd;
	struct ocrdma_create_cq_cmd_rsp *rsp = dev->mbx_cmd;

	memset(cmd, 0, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->req.rsvd_version = OCRDMA_CREATE_CQ_VER2;
	cmd->pgsz_pgcnt = (cq->size / OCRDMA_MIN_Q_PAGE_SIZE) <<
		OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->pgsz_pgcnt |= PAGES_4K_SPANNED(cq->va, cq->size);

	cmd->ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;
	cmd->pdid_cqecnt = cq->size / sizeof(struct ocrdma_mcqe);

	ocrdma_build_q_pages(&cmd->pa[0], cq->size / OCRDMA_MIN_Q_PAGE_SIZE,
			     cq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
	cq->id = (u16) (rsp->cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);

static u32 ocrdma_encoded_q_len(int q_len)
	u32 len_encoded = fls(q_len); /* log2(len) + 1 */

	if (len_encoded == 16)
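/*
 * Worked example: for a power-of-two queue length such as q_len = 128,
 * fls() returns 8 (log2(128) + 1), which is the ring-size encoding placed
 * in the CREATE_MQ request below.
 */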
static int ocrdma_mbx_create_mq(struct ocrdma_dev *dev,
				struct ocrdma_queue_info *mq,
				struct ocrdma_queue_info *cq)
	int num_pages, status;
	struct ocrdma_create_mq_req *cmd = dev->mbx_cmd;
	struct ocrdma_create_mq_rsp *rsp = dev->mbx_cmd;
	struct ocrdma_pa *pa;

	memset(cmd, 0, sizeof(*cmd));
	num_pages = PAGES_4K_SPANNED(mq->va, mq->size);

	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_CREATE_MQ_EXT,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cmd->req.rsvd_version = 1;
	cmd->cqid_pages = num_pages;
	cmd->cqid_pages |= (cq->id << OCRDMA_CREATE_MQ_CQ_ID_SHIFT);
	cmd->async_cqid_valid = OCRDMA_CREATE_MQ_ASYNC_CQ_VALID;

	cmd->async_event_bitmap = Bit(OCRDMA_ASYNC_GRP5_EVE_CODE);
	cmd->async_event_bitmap |= Bit(OCRDMA_ASYNC_RDMA_EVE_CODE);

	cmd->async_cqid_ringsize = cq->id;
	cmd->async_cqid_ringsize |= (ocrdma_encoded_q_len(mq->len) <<
				     OCRDMA_CREATE_MQ_RING_SIZE_SHIFT);
	cmd->valid = OCRDMA_CREATE_MQ_VALID;

	ocrdma_build_q_pages(pa, num_pages, mq->dma, PAGE_SIZE_4K);
	status = be_roce_mcc_cmd(dev->nic_info.netdev,
				 cmd, sizeof(*cmd), NULL, NULL);
static int ocrdma_create_mq(struct ocrdma_dev *dev)
	/* Alloc completion queue for Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.cq, OCRDMA_MQ_CQ_LEN,
				sizeof(struct ocrdma_mcqe));

	dev->eq_tbl[0].cq_cnt++;
	status = ocrdma_mbx_mq_cq_create(dev, &dev->mq.cq, &dev->eq_tbl[0].q);

	memset(&dev->mqe_ctx, 0, sizeof(dev->mqe_ctx));
	init_waitqueue_head(&dev->mqe_ctx.cmd_wait);
	mutex_init(&dev->mqe_ctx.lock);

	/* Alloc Mailbox queue */
	status = ocrdma_alloc_q(dev, &dev->mq.sq, OCRDMA_MQ_LEN,
				sizeof(struct ocrdma_mqe));
	status = ocrdma_mbx_create_mq(dev, &dev->mq.sq, &dev->mq.cq);
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, 0);

	ocrdma_free_q(dev, &dev->mq.sq);
	ocrdma_mbx_delete_q(dev, &dev->mq.cq, QTYPE_CQ);
	ocrdma_free_q(dev, &dev->mq.cq);
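/*
 * Setup order above: the MQ's completion queue is created first and bound to
 * EQ 0, the mailbox queue is then created against that CQ, and finally the
 * CQ doorbell is rung with armed=true so mailbox completions raise
 * interrupts.
 */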
static void ocrdma_destroy_mq(struct ocrdma_dev *dev)
	struct ocrdma_queue_info *mbxq, *cq;

	/* mqe_ctx lock synchronizes with any other pending cmds. */
	mutex_lock(&dev->mqe_ctx.lock);
		ocrdma_mbx_delete_q(dev, mbxq, QTYPE_MCCQ);
		ocrdma_free_q(dev, mbxq);
	mutex_unlock(&dev->mqe_ctx.lock);

		ocrdma_mbx_delete_q(dev, cq, QTYPE_CQ);
		ocrdma_free_q(dev, cq);

static void ocrdma_process_qpcat_error(struct ocrdma_dev *dev,
				       struct ocrdma_qp *qp)
	enum ib_qp_state new_ib_qps = IB_QPS_ERR;
	enum ib_qp_state old_ib_qps;

	ocrdma_qp_state_change(qp, new_ib_qps, &old_ib_qps);
static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
				    struct ocrdma_ae_mcqe *cqe)
	struct ocrdma_qp *qp = NULL;
	struct ocrdma_cq *cq = NULL;
	struct ib_event ib_evt;

	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
		OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	if (cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPVALID)
		qp = dev->qp_tbl[cqe->qpvalid_qpid & OCRDMA_AE_MCQE_QPID_MASK];
	if (cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQVALID)
		cq = dev->cq_tbl[cqe->cqvalid_cqid & OCRDMA_AE_MCQE_CQID_MASK];

	memset(&ib_evt, 0, sizeof(ib_evt));

	ib_evt.device = &dev->ibdev;

	case OCRDMA_CQ_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;

	case OCRDMA_CQ_OVERRUN_ERROR:
		ib_evt.element.cq = &cq->ibcq;
		ib_evt.event = IB_EVENT_CQ_ERR;

	case OCRDMA_CQ_QPCAT_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_FATAL;
		ocrdma_process_qpcat_error(dev, qp);
	case OCRDMA_QP_ACCESS_ERROR:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_ACCESS_ERR;
	case OCRDMA_QP_COMM_EST_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_COMM_EST;
	case OCRDMA_SQ_DRAINED_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_SQ_DRAINED;
	case OCRDMA_DEVICE_FATAL_EVENT:
		ib_evt.element.port_num = 1;
		ib_evt.event = IB_EVENT_DEVICE_FATAL;

	case OCRDMA_SRQCAT_ERROR:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_ERR;

	case OCRDMA_SRQ_LIMIT_EVENT:
		ib_evt.element.srq = &qp->srq->ibsrq;
		ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;

	case OCRDMA_QP_LAST_WQE_EVENT:
		ib_evt.element.qp = &qp->ibqp;
		ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED;

		pr_err("%s() unknown type=0x%x\n", __func__, type);

		if (qp->ibqp.event_handler)
			qp->ibqp.event_handler(&ib_evt, qp->ibqp.qp_context);
	} else if (cq_event) {
		if (cq->ibcq.event_handler)
			cq->ibcq.event_handler(&ib_evt, cq->ibcq.cq_context);
	} else if (srq_event) {
		if (qp->srq->ibsrq.event_handler)
			qp->srq->ibsrq.event_handler(&ib_evt,
	} else if (dev_event) {
		pr_err("%s: Fatal event received\n", dev->ibdev.name);
		ib_dispatch_event(&ib_evt);
static void ocrdma_process_grp5_aync(struct ocrdma_dev *dev,
				     struct ocrdma_ae_mcqe *cqe)
	struct ocrdma_ae_pvid_mcqe *evt;
	int type = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_TYPE_MASK) >>
		OCRDMA_AE_MCQE_EVENT_TYPE_SHIFT;

	case OCRDMA_ASYNC_EVENT_PVID_STATE:
		evt = (struct ocrdma_ae_pvid_mcqe *)cqe;
		if ((evt->tag_enabled & OCRDMA_AE_PVID_MCQE_ENABLED_MASK) >>
		    OCRDMA_AE_PVID_MCQE_ENABLED_SHIFT)
			dev->pvid = ((evt->tag_enabled &
				      OCRDMA_AE_PVID_MCQE_TAG_MASK) >>
				     OCRDMA_AE_PVID_MCQE_TAG_SHIFT);

	case OCRDMA_ASYNC_EVENT_COS_VALUE:
		atomic_set(&dev->update_sl, 1);

		/* Event types we are not interested in. */
static void ocrdma_process_acqe(struct ocrdma_dev *dev, void *ae_cqe)
	/* async CQE processing */
	struct ocrdma_ae_mcqe *cqe = ae_cqe;
	u32 evt_code = (cqe->valid_ae_event & OCRDMA_AE_MCQE_EVENT_CODE_MASK) >>
		OCRDMA_AE_MCQE_EVENT_CODE_SHIFT;

	if (evt_code == OCRDMA_ASYNC_RDMA_EVE_CODE)
		ocrdma_dispatch_ibevent(dev, cqe);
	else if (evt_code == OCRDMA_ASYNC_GRP5_EVE_CODE)
		ocrdma_process_grp5_aync(dev, cqe);
		pr_err("%s(%d) invalid evt code=0x%x\n", __func__,
static void ocrdma_process_mcqe(struct ocrdma_dev *dev, struct ocrdma_mcqe *cqe)
	if (dev->mqe_ctx.tag == cqe->tag_lo && dev->mqe_ctx.cmd_done == false) {
		dev->mqe_ctx.cqe_status = (cqe->status &
			OCRDMA_MCQE_STATUS_MASK) >> OCRDMA_MCQE_STATUS_SHIFT;
		dev->mqe_ctx.ext_status =
			(cqe->status & OCRDMA_MCQE_ESTATUS_MASK)
			>> OCRDMA_MCQE_ESTATUS_SHIFT;
		dev->mqe_ctx.cmd_done = true;
		wake_up(&dev->mqe_ctx.cmd_wait);
		pr_err("%s() cqe for invalid tag 0x%x, expected=0x%x\n",
		       __func__, cqe->tag_lo, dev->mqe_ctx.tag);
static int ocrdma_mq_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
	struct ocrdma_mcqe *cqe;

		cqe = ocrdma_get_mcqe(dev);
		ocrdma_le32_to_cpu(cqe, sizeof(*cqe));

		if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_AE_MASK)
			ocrdma_process_acqe(dev, cqe);
		else if (cqe->valid_ae_cmpl_cons & OCRDMA_MCQE_CMPL_MASK)
			ocrdma_process_mcqe(dev, cqe);
		memset(cqe, 0, sizeof(struct ocrdma_mcqe));
		ocrdma_mcq_inc_tail(dev);
	ocrdma_ring_cq_db(dev, dev->mq.cq.id, true, false, cqe_popped);
static void ocrdma_qp_buddy_cq_handler(struct ocrdma_dev *dev,
				       struct ocrdma_cq *cq)
	struct ocrdma_qp *qp;
	bool buddy_cq_found = false;
	/* Go through the list of QPs in error state that are using this CQ
	 * and invoke their callback handlers to trigger CQE processing for
	 * error/flushed CQEs. It is rare to find more than a few entries in
	 * this list, as most consumers stop after getting an error CQE.
	 * The list is traversed only once, when a matching buddy CQ is found
	 * for a QP.
	 */
	spin_lock_irqsave(&dev->flush_q_lock, flags);
	list_for_each_entry(qp, &cq->sq_head, sq_entry) {
		/* if the wq and rq share the same cq, then the comp_handler
		 * has already been invoked.
		 */
		if (qp->sq_cq == qp->rq_cq)
		/* if the completion came on the sq, the rq's cq is the buddy cq.
		 * if the completion came on the rq, the sq's cq is the buddy cq.
		 */
		buddy_cq_found = true;
	spin_unlock_irqrestore(&dev->flush_q_lock, flags);
	if (buddy_cq_found == false)
	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
static void ocrdma_qp_cq_handler(struct ocrdma_dev *dev, u16 cq_idx)
	struct ocrdma_cq *cq;

	if (cq_idx >= OCRDMA_MAX_CQ)

	cq = dev->cq_tbl[cq_idx];

	if (cq->ibcq.comp_handler) {
		spin_lock_irqsave(&cq->comp_handler_lock, flags);
		(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
		spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
	ocrdma_qp_buddy_cq_handler(dev, cq);

static void ocrdma_cq_handler(struct ocrdma_dev *dev, u16 cq_id)
	/* process the MQ-CQE. */
	if (cq_id == dev->mq.cq.id)
		ocrdma_mq_cq_handler(dev, cq_id);
		ocrdma_qp_cq_handler(dev, cq_id);
static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
	struct ocrdma_eq *eq = handle;
	struct ocrdma_dev *dev = eq->dev;
	struct ocrdma_eqe eqe;
	struct ocrdma_eqe *ptr;
	int budget = eq->cq_cnt;

		ptr = ocrdma_get_eqe(eq);
		ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
		if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)

		/* ring the EQ doorbell as soon as the EQE is consumed. */
		ocrdma_ring_eq_db(dev, eq->q.id, false, true, 1);
		/* check whether it is a CQE or not. */
		if ((eqe.id_valid & OCRDMA_EQE_FOR_CQE_MASK) == 0) {
			cq_id = eqe.id_valid >> OCRDMA_EQE_RESOURCE_ID_SHIFT;
			ocrdma_cq_handler(dev, cq_id);
		ocrdma_eq_inc_tail(eq);

		/* There can be a stale EQE after the last bound CQ is
		 * destroyed. EQE valid and budget == 0 implies this.
		 */

	ocrdma_ring_eq_db(dev, eq->q.id, true, true, 0);
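/*
 * Sketch (assumption; the registration itself is not shown in this excerpt):
 * each EQ's handler is requested against the vector returned by
 * ocrdma_get_irq(), e.g.
 *
 *	request_irq(ocrdma_get_irq(dev, eq), ocrdma_irq_handler, 0,
 *		    irq_name, eq);
 *
 * so the 'handle' argument above is the per-EQ ocrdma_eq pointer.
 */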
static void ocrdma_post_mqe(struct ocrdma_dev *dev, struct ocrdma_mqe *cmd)
	struct ocrdma_mqe *mqe;

	dev->mqe_ctx.tag = dev->mq.sq.head;
	dev->mqe_ctx.cmd_done = false;
	mqe = ocrdma_get_mqe(dev);
	cmd->hdr.tag_lo = dev->mq.sq.head;
	ocrdma_copy_cpu_to_le32(mqe, cmd, sizeof(*mqe));
	/* make sure descriptor is written before ringing doorbell */
	ocrdma_mq_inc_head(dev);
	ocrdma_ring_mq_db(dev);

static int ocrdma_wait_mqe_cmpl(struct ocrdma_dev *dev)
	status = wait_event_timeout(dev->mqe_ctx.cmd_wait,
				    (dev->mqe_ctx.cmd_done != false),
				    msecs_to_jiffies(30000));

		dev->mqe_ctx.fw_error_state = true;
		pr_err("%s(%d) mailbox timeout: fw not responding\n",
/* issue a mailbox command on the MQ */
static int ocrdma_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe)
	u16 cqe_status, ext_status;
	struct ocrdma_mqe *rsp_mqe;
	struct ocrdma_mbx_rsp *rsp = NULL;

	mutex_lock(&dev->mqe_ctx.lock);
	if (dev->mqe_ctx.fw_error_state)
	ocrdma_post_mqe(dev, mqe);
	status = ocrdma_wait_mqe_cmpl(dev);
	cqe_status = dev->mqe_ctx.cqe_status;
	ext_status = dev->mqe_ctx.ext_status;
	rsp_mqe = ocrdma_get_mqe_rsp(dev);
	ocrdma_copy_le32_to_cpu(mqe, rsp_mqe, (sizeof(*mqe)));
	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
	    OCRDMA_MQE_HDR_EMB_SHIFT)

	if (cqe_status || ext_status) {
		pr_err("%s() cqe_status=0x%x, ext_status=0x%x,",
		       __func__, cqe_status, ext_status);
			/* This is for embedded cmds. */
			pr_err("opcode=0x%x, subsystem=0x%x\n",
			       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
			       OCRDMA_MBX_RSP_OPCODE_SHIFT,
			       (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
			       OCRDMA_MBX_RSP_SUBSYS_SHIFT);
		status = ocrdma_get_mbx_cqe_errno(cqe_status);

	/* For non-embedded cmds, rsp errors are handled in ocrdma_nonemb_mbx_cmd */
	if (rsp && (mqe->u.rsp.status & OCRDMA_MBX_RSP_STATUS_MASK))
		status = ocrdma_get_mbx_errno(mqe->u.rsp.status);

	mutex_unlock(&dev->mqe_ctx.lock);
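/*
 * Typical embedded-command pattern built on the helpers above (mirrored by
 * ocrdma_mbx_query_fw_ver() and friends below; OCRDMA_CMD_FOO and
 * ocrdma_foo_rsp are placeholders, and the kfree() cleanup is assumed from
 * the kzalloc() in ocrdma_init_emb_mqe()):
 *
 *	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_FOO, sizeof(*cmd));
 *	if (!cmd)
 *		return -ENOMEM;
 *	... fill command-specific fields ...
 *	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
 *	if (status)
 *		goto mbx_err;
 *	rsp = (struct ocrdma_foo_rsp *)cmd;	// response overwrites cmd buffer
 *	... consume rsp ...
 * mbx_err:
 *	kfree(cmd);
 */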
static int ocrdma_nonemb_mbx_cmd(struct ocrdma_dev *dev, struct ocrdma_mqe *mqe,
	struct ocrdma_mbx_rsp *rsp = payload_va;

	if ((mqe->hdr.spcl_sge_cnt_emb & OCRDMA_MQE_HDR_EMB_MASK) >>
	    OCRDMA_MQE_HDR_EMB_SHIFT)

	status = ocrdma_mbx_cmd(dev, mqe);

	/* For non-embedded cmds, only CQE failures are handled in
	 * ocrdma_mbx_cmd. We need to check for RSP errors.
	 */
	if (rsp->status & OCRDMA_MBX_RSP_STATUS_MASK)
		status = ocrdma_get_mbx_errno(rsp->status);

		pr_err("opcode=0x%x, subsystem=0x%x\n",
		       (rsp->subsys_op & OCRDMA_MBX_RSP_OPCODE_MASK) >>
		       OCRDMA_MBX_RSP_OPCODE_SHIFT,
		       (rsp->subsys_op & OCRDMA_MBX_RSP_SUBSYS_MASK) >>
		       OCRDMA_MBX_RSP_SUBSYS_SHIFT);
static void ocrdma_get_attr(struct ocrdma_dev *dev,
			    struct ocrdma_dev_attr *attr,
			    struct ocrdma_mbx_query_config *rsp)
		(rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
		(rsp->qp_srq_cq_ird_ord & OCRDMA_MBX_QUERY_CFG_MAX_QP_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_QP_SHIFT;
		(rsp->max_srq_rpir_qps & OCRDMA_MBX_QUERY_CFG_MAX_SRQ_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_SRQ_OFFSET;
	attr->max_send_sge = ((rsp->max_write_send_sge &
			       OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT);
	attr->max_recv_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
	attr->max_srq_sge = (rsp->max_srq_rqe_sge &
			     OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
	attr->max_rdma_sge = (rsp->max_write_send_sge &
			      OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
	attr->max_ird_per_qp = (rsp->max_ird_ord_per_qp &
				OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_IRD_PER_QP_SHIFT;
	attr->cq_overflow_detect = (rsp->qp_srq_cq_ird_ord &
				    OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_MASK) >>
		OCRDMA_MBX_QUERY_CFG_CQ_OVERFLOW_SHIFT;
	attr->srq_supported = (rsp->qp_srq_cq_ird_ord &
			       OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_MASK) >>
		OCRDMA_MBX_QUERY_CFG_SRQ_SUPPORTED_SHIFT;
	attr->local_ca_ack_delay = (rsp->max_pd_ca_ack_delay &
				    OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_MASK) >>
		OCRDMA_MBX_QUERY_CFG_CA_ACK_DELAY_SHIFT;
	attr->max_mw = rsp->max_mw;
	attr->max_mr = rsp->max_mr;
	attr->max_mr_size = ((u64)rsp->max_mr_size_hi << 32) |
		rsp->max_mr_size_lo;
	attr->max_pages_per_frmr = rsp->max_pages_per_frmr;
	attr->max_num_mr_pbl = rsp->max_num_mr_pbl;
	attr->max_cqe = rsp->max_cq_cqes_per_cq &
		OCRDMA_MBX_QUERY_CFG_MAX_CQES_PER_CQ_MASK;
	attr->max_cq = (rsp->max_cq_cqes_per_cq &
			OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK) >>
		OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET;
	attr->wqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_WQE_SIZE_OFFSET) *
	attr->rqe_size = ((rsp->wqe_rqe_stride_max_dpp_cqs &
			   OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_MASK) >>
			  OCRDMA_MBX_QUERY_CFG_MAX_RQE_SIZE_OFFSET) *
	attr->max_inline_data =
		attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
				  sizeof(struct ocrdma_sge));
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
		attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
	dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
		OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
	dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
		OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;

static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
				  struct ocrdma_fw_conf_rsp *conf)
	fn_mode = conf->fn_mode & OCRDMA_FN_MODE_RDMA;
	if (fn_mode != OCRDMA_FN_MODE_RDMA)
	dev->base_eqid = conf->base_eqid;
	dev->max_eq = conf->max_eq;
/* can be issued only at init time. */
static int ocrdma_mbx_query_fw_ver(struct ocrdma_dev *dev)
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_ver_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_VER, sizeof(*cmd));
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_VER,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_fw_ver_rsp *)cmd;
	memset(&dev->attr.fw_ver[0], 0, sizeof(dev->attr.fw_ver));
	memcpy(&dev->attr.fw_ver[0], &rsp->running_ver[0],
	       sizeof(rsp->running_ver));
	ocrdma_le32_to_cpu(dev->attr.fw_ver, sizeof(rsp->running_ver));
/* can be issued only at init time. */
static int ocrdma_mbx_query_fw_config(struct ocrdma_dev *dev)
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_fw_conf_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_GET_FW_CONFIG, sizeof(*cmd));
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_GET_FW_CONFIG,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_fw_conf_rsp *)cmd;
	status = ocrdma_check_fw_config(dev, rsp);
int ocrdma_mbx_rdma_stats(struct ocrdma_dev *dev, bool reset)
	struct ocrdma_rdma_stats_req *req = dev->stats_mem.va;
	struct ocrdma_mqe *mqe = &dev->stats_mem.mqe;
	struct ocrdma_rdma_stats_resp *old_stats = NULL;

	old_stats = kzalloc(sizeof(*old_stats), GFP_KERNEL);
	if (old_stats == NULL)

	memset(mqe, 0, sizeof(*mqe));
	mqe->hdr.pyld_len = dev->stats_mem.size;
	mqe->hdr.spcl_sge_cnt_emb |=
		(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
		OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dev->stats_mem.pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dev->stats_mem.pa);
	mqe->u.nonemb_req.sge[0].len = dev->stats_mem.size;

	/* Cache the old stats */
	memcpy(old_stats, req, sizeof(struct ocrdma_rdma_stats_resp));
	memset(req, 0, dev->stats_mem.size);

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)req,
			OCRDMA_CMD_GET_RDMA_STATS,
			dev->stats_mem.size);
	req->reset_stats = reset;

	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dev->stats_mem.va);
		/* Copy from cache, if mbox fails */
		memcpy(req, old_stats, sizeof(struct ocrdma_rdma_stats_resp));
	ocrdma_le32_to_cpu(req, dev->stats_mem.size);
static int ocrdma_mbx_get_ctrl_attribs(struct ocrdma_dev *dev)
	int status = -ENOMEM;
	struct ocrdma_dma_mem dma;
	struct ocrdma_mqe *mqe;
	struct ocrdma_get_ctrl_attribs_rsp *ctrl_attr_rsp;
	struct mgmt_hba_attribs *hba_attribs;

	mqe = ocrdma_alloc_mqe();
	memset(mqe, 0, sizeof(*mqe));

	dma.size = sizeof(struct ocrdma_get_ctrl_attribs_rsp);
	dma.va = dma_alloc_coherent(&dev->nic_info.pdev->dev,
				    dma.size, &dma.pa, GFP_KERNEL);

	mqe->hdr.pyld_len = dma.size;
	mqe->hdr.spcl_sge_cnt_emb |=
		(1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
		OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe->u.nonemb_req.sge[0].pa_lo = (u32) (dma.pa & 0xffffffff);
	mqe->u.nonemb_req.sge[0].pa_hi = (u32) upper_32_bits(dma.pa);
	mqe->u.nonemb_req.sge[0].len = dma.size;

	memset(dma.va, 0, dma.size);
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)dma.va,
			OCRDMA_CMD_GET_CTRL_ATTRIBUTES,
			OCRDMA_SUBSYS_COMMON,

	status = ocrdma_nonemb_mbx_cmd(dev, mqe, dma.va);
		ctrl_attr_rsp = (struct ocrdma_get_ctrl_attribs_rsp *)dma.va;
		hba_attribs = &ctrl_attr_rsp->ctrl_attribs.hba_attribs;

		dev->hba_port_num = (hba_attribs->ptpnum_maxdoms_hbast_cv &
				     OCRDMA_HBA_ATTRB_PTNUM_MASK)
				>> OCRDMA_HBA_ATTRB_PTNUM_SHIFT;
		strncpy(dev->model_number,
			hba_attribs->controller_model_number, 31);
	dma_free_coherent(&dev->nic_info.pdev->dev, dma.size, dma.va, dma.pa);

static int ocrdma_mbx_query_dev(struct ocrdma_dev *dev)
	int status = -ENOMEM;
	struct ocrdma_mbx_query_config *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_CONFIG, sizeof(*cmd));
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_mbx_query_config *)cmd;
	ocrdma_get_attr(dev, &dev->attr, rsp);
int ocrdma_mbx_get_link_speed(struct ocrdma_dev *dev, u8 *lnk_speed)
	int status = -ENOMEM;
	struct ocrdma_get_link_speed_rsp *rsp;
	struct ocrdma_mqe *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_QUERY_NTWK_LINK_CONFIG_V1,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	((struct ocrdma_mbx_hdr *)cmd->u.cmd)->rsvd_version = 0x1;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);

	rsp = (struct ocrdma_get_link_speed_rsp *)cmd;
	*lnk_speed = (rsp->pflt_pps_ld_pnum & OCRDMA_PHY_PS_MASK)
			>> OCRDMA_PHY_PS_SHIFT;

static int ocrdma_mbx_get_phy_info(struct ocrdma_dev *dev)
	int status = -ENOMEM;
	struct ocrdma_mqe *cmd;
	struct ocrdma_get_phy_info_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_PHY_DETAILS, sizeof(*cmd));

	ocrdma_init_mch((struct ocrdma_mbx_hdr *)&cmd->u.cmd[0],
			OCRDMA_CMD_PHY_DETAILS, OCRDMA_SUBSYS_COMMON,

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);

	rsp = (struct ocrdma_get_phy_info_rsp *)cmd;
		(rsp->ityp_ptyp & OCRDMA_PHY_TYPE_MASK);
	dev->phy.interface_type =
		(rsp->ityp_ptyp & OCRDMA_IF_TYPE_MASK)
			>> OCRDMA_IF_TYPE_SHIFT;
	dev->phy.auto_speeds_supported =
		(rsp->fspeed_aspeed & OCRDMA_ASPEED_SUPP_MASK);
	dev->phy.fixed_speeds_supported =
		(rsp->fspeed_aspeed & OCRDMA_FSPEED_SUPP_MASK)
			>> OCRDMA_FSPEED_SUPP_SHIFT;
int ocrdma_mbx_alloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
	int status = -ENOMEM;
	struct ocrdma_alloc_pd *cmd;
	struct ocrdma_alloc_pd_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD, sizeof(*cmd));
	if (pd->dpp_enabled)
		cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_alloc_pd_rsp *)cmd;
	pd->id = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_PDID_MASK;
	if (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) {
		pd->dpp_enabled = true;
		pd->dpp_page = rsp->dpp_page_pdid >>
				OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
		pd->dpp_enabled = false;

int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd)
	int status = -ENOMEM;
	struct ocrdma_dealloc_pd *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_PD, sizeof(*cmd));

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
static int ocrdma_build_q_conf(u32 *num_entries, int entry_size,
			       int *num_pages, int *page_size)
	*num_entries = roundup_pow_of_two(*num_entries);
	mem_size = *num_entries * entry_size;
	/* find the lowest possible multiplier */
	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
		if (mem_size <= (OCRDMA_Q_PAGE_BASE_SIZE << i))
	if (i >= OCRDMA_MAX_Q_PAGE_SIZE_CNT)
	mem_size = roundup(mem_size,
		((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES));
		mem_size / ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
	*page_size = ((OCRDMA_Q_PAGE_BASE_SIZE << i) / OCRDMA_MAX_Q_PAGES);
	*num_entries = mem_size / entry_size;
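/*
 * Walk-through: *num_entries is first rounded up to a power of two, the
 * smallest (OCRDMA_Q_PAGE_BASE_SIZE << i) that can hold the queue is chosen,
 * and the entry count is then recomputed from the page-aligned memory size;
 * callers must therefore use the returned *num_entries, *num_pages and
 * *page_size rather than the values they started with.
 */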
static int ocrdma_mbx_create_ah_tbl(struct ocrdma_dev *dev)
	struct ocrdma_create_ah_tbl *cmd;
	struct ocrdma_create_ah_tbl_rsp *rsp;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_pbe *pbes;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_AH_TBL, sizeof(*cmd));

	max_ah = OCRDMA_MAX_AH;
	dev->av_tbl.size = sizeof(struct ocrdma_av) * max_ah;

	/* number of PBEs in PBL */
	cmd->ah_conf = (OCRDMA_AH_TBL_PAGES <<
			OCRDMA_CREATE_AH_NUM_PAGES_SHIFT) &
		OCRDMA_CREATE_AH_NUM_PAGES_MASK;

	for (i = 0; i < OCRDMA_MAX_Q_PAGE_SIZE_CNT; i++) {
		if (PAGE_SIZE == (OCRDMA_MIN_Q_PAGE_SIZE << i))
	cmd->ah_conf |= (i << OCRDMA_CREATE_AH_PAGE_SIZE_SHIFT) &
		OCRDMA_CREATE_AH_PAGE_SIZE_MASK;

	cmd->ah_conf |= (sizeof(struct ocrdma_av) <<
			 OCRDMA_CREATE_AH_ENTRY_SIZE_SHIFT) &
		OCRDMA_CREATE_AH_ENTRY_SIZE_MASK;

	dev->av_tbl.pbl.va = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
						&dev->av_tbl.pbl.pa,
	if (dev->av_tbl.pbl.va == NULL)

	dev->av_tbl.va = dma_alloc_coherent(&pdev->dev, dev->av_tbl.size,
	if (dev->av_tbl.va == NULL)
	dev->av_tbl.pa = pa;
	dev->av_tbl.num_ah = max_ah;
	memset(dev->av_tbl.va, 0, dev->av_tbl.size);

	pbes = (struct ocrdma_pbe *)dev->av_tbl.pbl.va;
	for (i = 0; i < dev->av_tbl.size / OCRDMA_MIN_Q_PAGE_SIZE; i++) {
		pbes[i].pa_lo = (u32)cpu_to_le32(pa & 0xffffffff);
		pbes[i].pa_hi = (u32)cpu_to_le32(upper_32_bits(pa));
	cmd->tbl_addr[0].lo = (u32)(dev->av_tbl.pbl.pa & 0xFFFFFFFF);
	cmd->tbl_addr[0].hi = (u32)upper_32_bits(dev->av_tbl.pbl.pa);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_create_ah_tbl_rsp *)cmd;
	dev->av_tbl.ahid = rsp->ahid & 0xFFFF;

	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
	dev->av_tbl.va = NULL;
	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
			  dev->av_tbl.pbl.pa);
	dev->av_tbl.pbl.va = NULL;
	dev->av_tbl.size = 0;

static void ocrdma_mbx_delete_ah_tbl(struct ocrdma_dev *dev)
	struct ocrdma_delete_ah_tbl *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	if (dev->av_tbl.va == NULL)

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_AH_TBL, sizeof(*cmd));
	cmd->ahid = dev->av_tbl.ahid;

	ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	dma_free_coherent(&pdev->dev, dev->av_tbl.size, dev->av_tbl.va,
	dev->av_tbl.va = NULL;
	dma_free_coherent(&pdev->dev, PAGE_SIZE, dev->av_tbl.pbl.va,
			  dev->av_tbl.pbl.pa);
/* Multiple CQs share an EQ. This routine returns the least used EQ to
 * associate with a CQ, which distributes interrupt processing and CPU load
 * across the EQs, their vectors, and thus across CPUs.
 */
static u16 ocrdma_bind_eq(struct ocrdma_dev *dev)
	int i, selected_eq = 0, cq_cnt = 0;

	mutex_lock(&dev->dev_lock);
	cq_cnt = dev->eq_tbl[0].cq_cnt;
	eq_id = dev->eq_tbl[0].q.id;
	/* find the EQ which has the least number of
	 * CQs associated with it.
	 */
	for (i = 0; i < dev->eq_cnt; i++) {
		if (dev->eq_tbl[i].cq_cnt < cq_cnt) {
			cq_cnt = dev->eq_tbl[i].cq_cnt;
			eq_id = dev->eq_tbl[i].q.id;

	dev->eq_tbl[selected_eq].cq_cnt += 1;
	mutex_unlock(&dev->dev_lock);

static void ocrdma_unbind_eq(struct ocrdma_dev *dev, u16 eq_id)
	mutex_lock(&dev->dev_lock);
	i = ocrdma_get_eq_table_index(dev, eq_id);
	dev->eq_tbl[i].cq_cnt -= 1;
	mutex_unlock(&dev->dev_lock);
int ocrdma_mbx_create_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq,
			 int entries, int dpp_cq, u16 pd_id)
	int status = -ENOMEM;
	int max_hw_cqe;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_create_cq *cmd;
	struct ocrdma_create_cq_rsp *rsp;
	u32 hw_pages, cqe_size, page_size, cqe_count;

	if (entries > dev->attr.max_cqe) {
		pr_err("%s(%d) max_cqe=0x%x, requester_cqe=0x%x\n",
		       __func__, dev->id, dev->attr.max_cqe, entries);
	if (dpp_cq && (ocrdma_get_asic_type(dev) != OCRDMA_ASIC_GEN_SKH_R))
		cqe_size = OCRDMA_DPP_CQE_SIZE;
		cq->max_hw_cqe = dev->attr.max_cqe;
		max_hw_cqe = dev->attr.max_cqe;
		cqe_size = sizeof(struct ocrdma_cqe);
		hw_pages = OCRDMA_CREATE_CQ_MAX_PAGES;

	cq->len = roundup(max_hw_cqe * cqe_size, OCRDMA_MIN_Q_PAGE_SIZE);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_CQ, sizeof(*cmd));
	ocrdma_init_mch(&cmd->cmd.req, OCRDMA_CMD_CREATE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));
	cq->va = dma_alloc_coherent(&pdev->dev, cq->len, &cq->pa, GFP_KERNEL);
	memset(cq->va, 0, cq->len);
	page_size = cq->len / hw_pages;
	cmd->cmd.pgsz_pgcnt = (page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
		OCRDMA_CREATE_CQ_PAGE_SIZE_SHIFT;
	cmd->cmd.pgsz_pgcnt |= hw_pages;
	cmd->cmd.ev_cnt_flags = OCRDMA_CREATE_CQ_DEF_FLAGS;

	cq->eqn = ocrdma_bind_eq(dev);
	cmd->cmd.req.rsvd_version = OCRDMA_CREATE_CQ_VER3;
	cqe_count = cq->len / cqe_size;
	cq->cqe_cnt = cqe_count;
	if (cqe_count > 1024) {
		/* Set cnt to 3 to indicate more than 1024 cq entries */
		cmd->cmd.ev_cnt_flags |= (0x3 << OCRDMA_CREATE_CQ_CNT_SHIFT);
		switch (cqe_count) {
		cmd->cmd.ev_cnt_flags |= (count << OCRDMA_CREATE_CQ_CNT_SHIFT);
	/* the EQ is shared between all the consumer CQs. */
	cmd->cmd.eqn = cq->eqn;
	if (ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R) {
		cmd->cmd.pgsz_pgcnt |= OCRDMA_CREATE_CQ_DPP <<
			OCRDMA_CREATE_CQ_TYPE_SHIFT;
		cq->phase_change = false;
		cmd->cmd.pdid_cqecnt = (cq->len / cqe_size);
		cmd->cmd.pdid_cqecnt = (cq->len / cqe_size) - 1;
		cmd->cmd.ev_cnt_flags |= OCRDMA_CREATE_CQ_FLAGS_AUTO_VALID;
		cq->phase_change = true;

	/* pd_id valid only for v3 */
	cmd->cmd.pdid_cqecnt |= (pd_id <<
				 OCRDMA_CREATE_CQ_CMD_PDID_SHIFT);
	ocrdma_build_q_pages(&cmd->cmd.pa[0], hw_pages, cq->pa, page_size);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);

	rsp = (struct ocrdma_create_cq_rsp *)cmd;
	cq->id = (u16) (rsp->rsp.cq_id & OCRDMA_CREATE_CQ_RSP_CQ_ID_MASK);

	ocrdma_unbind_eq(dev, cq->eqn);
	dma_free_coherent(&pdev->dev, cq->len, cq->va, cq->pa);
int ocrdma_mbx_destroy_cq(struct ocrdma_dev *dev, struct ocrdma_cq *cq)
	int status = -ENOMEM;
	struct ocrdma_destroy_cq *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_CQ, sizeof(*cmd));
	ocrdma_init_mch(&cmd->req, OCRDMA_CMD_DELETE_CQ,
			OCRDMA_SUBSYS_COMMON, sizeof(*cmd));

	cmd->bypass_flush_qid |=
		(cq->id << OCRDMA_DESTROY_CQ_QID_SHIFT) &
		OCRDMA_DESTROY_CQ_QID_MASK;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	ocrdma_unbind_eq(dev, cq->eqn);
	dma_free_coherent(&dev->nic_info.pdev->dev, cq->len, cq->va, cq->pa);

int ocrdma_mbx_alloc_lkey(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			  u32 pdid, int addr_check)
	int status = -ENOMEM;
	struct ocrdma_alloc_lkey *cmd;
	struct ocrdma_alloc_lkey_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_LKEY, sizeof(*cmd));

	cmd->pbl_sz_flags |= addr_check;
	cmd->pbl_sz_flags |= (hwmr->fr_mr << OCRDMA_ALLOC_LKEY_FMR_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->remote_wr << OCRDMA_ALLOC_LKEY_REMOTE_WR_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->remote_rd << OCRDMA_ALLOC_LKEY_REMOTE_RD_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->local_wr << OCRDMA_ALLOC_LKEY_LOCAL_WR_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->remote_atomic << OCRDMA_ALLOC_LKEY_REMOTE_ATOMIC_SHIFT);
	cmd->pbl_sz_flags |=
		(hwmr->num_pbls << OCRDMA_ALLOC_LKEY_PBL_SIZE_SHIFT);

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_alloc_lkey_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;

int ocrdma_mbx_dealloc_lkey(struct ocrdma_dev *dev, int fr_mr, u32 lkey)
	int status = -ENOMEM;
	struct ocrdma_dealloc_lkey *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DEALLOC_LKEY, sizeof(*cmd));

	cmd->rsvd_frmr = fr_mr ? 1 : 0;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
static int ocrdma_mbx_reg_mr(struct ocrdma_dev *dev, struct ocrdma_hw_mr *hwmr,
			     u32 pdid, u32 pbl_cnt, u32 pbe_size, u32 last)
	int status = -ENOMEM;
	struct ocrdma_reg_nsmr *cmd;
	struct ocrdma_reg_nsmr_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR, sizeof(*cmd));

		pdid | (hwmr->num_pbls << OCRDMA_REG_NSMR_NUM_PBL_SHIFT);
	cmd->fr_mr = hwmr->fr_mr;

	cmd->flags_hpage_pbe_sz |= (hwmr->remote_wr <<
				    OCRDMA_REG_NSMR_REMOTE_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_rd <<
				    OCRDMA_REG_NSMR_REMOTE_RD_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->local_wr <<
				    OCRDMA_REG_NSMR_LOCAL_WR_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->remote_atomic <<
				    OCRDMA_REG_NSMR_REMOTE_ATOMIC_SHIFT);
	cmd->flags_hpage_pbe_sz |= (hwmr->mw_bind <<
				    OCRDMA_REG_NSMR_BIND_MEMWIN_SHIFT);
	cmd->flags_hpage_pbe_sz |= (last << OCRDMA_REG_NSMR_LAST_SHIFT);

	cmd->flags_hpage_pbe_sz |= (hwmr->pbe_size / OCRDMA_MIN_HPAGE_SIZE);
	cmd->flags_hpage_pbe_sz |= (hwmr->pbl_size / OCRDMA_MIN_HPAGE_SIZE) <<
		OCRDMA_REG_NSMR_HPAGE_SIZE_SHIFT;
	cmd->totlen_low = hwmr->len;
	cmd->totlen_high = upper_32_bits(hwmr->len);
	cmd->fbo_low = (u32) (hwmr->fbo & 0xffffffff);
	cmd->fbo_high = (u32) upper_32_bits(hwmr->fbo);
	cmd->va_loaddr = (u32) hwmr->va;
	cmd->va_hiaddr = (u32) upper_32_bits(hwmr->va);

	for (i = 0; i < pbl_cnt; i++) {
		cmd->pbl[i].lo = (u32) (hwmr->pbl_table[i].pa & 0xffffffff);
		cmd->pbl[i].hi = upper_32_bits(hwmr->pbl_table[i].pa);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	rsp = (struct ocrdma_reg_nsmr_rsp *)cmd;
	hwmr->lkey = rsp->lrkey;

static int ocrdma_mbx_reg_mr_cont(struct ocrdma_dev *dev,
				  struct ocrdma_hw_mr *hwmr, u32 pbl_cnt,
				  u32 pbl_offset, u32 last)
	int status = -ENOMEM;
	struct ocrdma_reg_nsmr_cont *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_REGISTER_NSMR_CONT, sizeof(*cmd));
	cmd->lrkey = hwmr->lkey;
	cmd->num_pbl_offset = (pbl_cnt << OCRDMA_REG_NSMR_CONT_NUM_PBL_SHIFT) |
		(pbl_offset & OCRDMA_REG_NSMR_CONT_PBL_SHIFT_MASK);
	cmd->last = last << OCRDMA_REG_NSMR_CONT_LAST_SHIFT;

	for (i = 0; i < pbl_cnt; i++) {
			(u32) (hwmr->pbl_table[i + pbl_offset].pa & 0xffffffff);
			upper_32_bits(hwmr->pbl_table[i + pbl_offset].pa);
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
int ocrdma_reg_mr(struct ocrdma_dev *dev,
		  struct ocrdma_hw_mr *hwmr, u32 pdid, int acc)
	u32 cur_pbl_cnt, pbl_offset;
	u32 pending_pbl_cnt = hwmr->num_pbls;

	cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
	if (cur_pbl_cnt == pending_pbl_cnt)

	status = ocrdma_mbx_reg_mr(dev, hwmr, pdid,
				   cur_pbl_cnt, hwmr->pbe_size, last);
		pr_err("%s() status=%d\n", __func__, status);

	/* if there are no more PBLs to register, then exit. */

		pbl_offset += cur_pbl_cnt;
		pending_pbl_cnt -= cur_pbl_cnt;
		cur_pbl_cnt = min(pending_pbl_cnt, MAX_OCRDMA_NSMR_PBL);
		/* if we reach the end of the PBLs, we need to set the last
		 * bit, indicating no more PBLs to register for this memory key.
		 */
		if (cur_pbl_cnt == pending_pbl_cnt)

		status = ocrdma_mbx_reg_mr_cont(dev, hwmr, cur_pbl_cnt,
	pr_err("%s() err. status=%d\n", __func__, status);
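/*
 * Example: a region needing, say, 2 * MAX_OCRDMA_NSMR_PBL + 3 PBLs is
 * registered as one REGISTER_NSMR command followed by REGISTER_NSMR_CONT
 * commands at increasing pbl_offset, with 'last' set only on the final
 * chunk.
 */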
bool ocrdma_is_qp_in_sq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
	struct ocrdma_qp *tmp;

	list_for_each_entry(tmp, &cq->sq_head, sq_entry) {

bool ocrdma_is_qp_in_rq_flushlist(struct ocrdma_cq *cq, struct ocrdma_qp *qp)
	struct ocrdma_qp *tmp;

	list_for_each_entry(tmp, &cq->rq_head, rq_entry) {

void ocrdma_flush_qp(struct ocrdma_qp *qp)
	unsigned long flags;

	spin_lock_irqsave(&qp->dev->flush_q_lock, flags);
	found = ocrdma_is_qp_in_sq_flushlist(qp->sq_cq, qp);
		list_add_tail(&qp->sq_entry, &qp->sq_cq->sq_head);
	found = ocrdma_is_qp_in_rq_flushlist(qp->rq_cq, qp);
		list_add_tail(&qp->rq_entry, &qp->rq_cq->rq_head);
	spin_unlock_irqrestore(&qp->dev->flush_q_lock, flags);

static void ocrdma_init_hwq_ptr(struct ocrdma_qp *qp)

int ocrdma_qp_state_change(struct ocrdma_qp *qp, enum ib_qp_state new_ib_state,
			   enum ib_qp_state *old_ib_state)
	unsigned long flags;
	enum ocrdma_qp_state new_state;
	new_state = get_ocrdma_qp_state(new_ib_state);

	/* sync with wqe and rqe posting */
	spin_lock_irqsave(&qp->q_lock, flags);

		*old_ib_state = get_ibqp_state(qp->state);
	if (new_state == qp->state) {
		spin_unlock_irqrestore(&qp->q_lock, flags);

	if (new_state == OCRDMA_QPS_INIT) {
		ocrdma_init_hwq_ptr(qp);
		ocrdma_del_flush_qp(qp);
	} else if (new_state == OCRDMA_QPS_ERR) {
		ocrdma_flush_qp(qp);

	qp->state = new_state;

	spin_unlock_irqrestore(&qp->q_lock, flags);

static u32 ocrdma_set_create_qp_mbx_access_flags(struct ocrdma_qp *qp)
	if (qp->cap_flags & OCRDMA_QP_INB_RD)
		flags |= OCRDMA_CREATE_QP_REQ_INB_RDEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_INB_WR)
		flags |= OCRDMA_CREATE_QP_REQ_INB_WREN_MASK;
	if (qp->cap_flags & OCRDMA_QP_MW_BIND)
		flags |= OCRDMA_CREATE_QP_REQ_BIND_MEMWIN_MASK;
	if (qp->cap_flags & OCRDMA_QP_LKEY0)
		flags |= OCRDMA_CREATE_QP_REQ_ZERO_LKEYEN_MASK;
	if (qp->cap_flags & OCRDMA_QP_FAST_REG)
		flags |= OCRDMA_CREATE_QP_REQ_FMR_EN_MASK;
static int ocrdma_set_create_qp_sq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
	u32 len, hw_pages, hw_page_size;
	struct ocrdma_dev *dev = qp->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_wqe_allocated;
	u32 max_sges = attrs->cap.max_send_sge;

	/* QP1 may exceed 127 */
	max_wqe_allocated = min_t(u32, attrs->cap.max_send_wr + 1,

	status = ocrdma_build_q_conf(&max_wqe_allocated,
		dev->attr.wqe_size, &hw_pages, &hw_page_size);
		pr_err("%s() req. max_send_wr=0x%x\n", __func__,

	qp->sq.max_cnt = max_wqe_allocated;
	len = (hw_pages * hw_page_size);

	qp->sq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	memset(qp->sq.va, 0, len);

	qp->sq.entry_size = dev->attr.wqe_size;
	ocrdma_build_q_pages(&cmd->wq_addr[0], hw_pages, pa, hw_page_size);

	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
			       << OCRDMA_CREATE_QP_REQ_SQ_PAGE_SIZE_SHIFT);
	cmd->num_wq_rq_pages |= (hw_pages <<
				 OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_SHIFT) &
		OCRDMA_CREATE_QP_REQ_NUM_WQ_PAGES_MASK;
	cmd->max_sge_send_write |= (max_sges <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_SHIFT) &
		OCRDMA_CREATE_QP_REQ_MAX_SGE_SEND_MASK;
	cmd->max_sge_send_write |= (max_sges <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_SHIFT) &
		OCRDMA_CREATE_QP_REQ_MAX_SGE_WRITE_MASK;
	cmd->max_wqe_rqe |= (ilog2(qp->sq.max_cnt) <<
			     OCRDMA_CREATE_QP_REQ_MAX_WQE_SHIFT) &
		OCRDMA_CREATE_QP_REQ_MAX_WQE_MASK;
	cmd->wqe_rqe_size |= (dev->attr.wqe_size <<
			      OCRDMA_CREATE_QP_REQ_WQE_SIZE_SHIFT) &
		OCRDMA_CREATE_QP_REQ_WQE_SIZE_MASK;

static int ocrdma_set_create_qp_rq_cmd(struct ocrdma_create_qp_req *cmd,
				       struct ib_qp_init_attr *attrs,
				       struct ocrdma_qp *qp)
	u32 len, hw_pages, hw_page_size;
	struct ocrdma_dev *dev = qp->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated = attrs->cap.max_recv_wr + 1;

	status = ocrdma_build_q_conf(&max_rqe_allocated, dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
		pr_err("%s() req. max_recv_wr=0x%x\n", __func__,
		       attrs->cap.max_recv_wr + 1);

	qp->rq.max_cnt = max_rqe_allocated;
	len = (hw_pages * hw_page_size);

	qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	memset(qp->rq.va, 0, len);

	qp->rq.entry_size = dev->attr.rqe_size;

	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);
	cmd->type_pgsz_pdn |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE) <<
			       OCRDMA_CREATE_QP_REQ_RQ_PAGE_SIZE_SHIFT);
	cmd->num_wq_rq_pages |=
		(hw_pages << OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_SHIFT) &
		OCRDMA_CREATE_QP_REQ_NUM_RQ_PAGES_MASK;
	cmd->max_sge_recv_flags |= (attrs->cap.max_recv_sge <<
				    OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_SHIFT) &
		OCRDMA_CREATE_QP_REQ_MAX_SGE_RECV_MASK;
	cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) <<
			     OCRDMA_CREATE_QP_REQ_MAX_RQE_SHIFT) &
		OCRDMA_CREATE_QP_REQ_MAX_RQE_MASK;
	cmd->wqe_rqe_size |= (dev->attr.rqe_size <<
			      OCRDMA_CREATE_QP_REQ_RQE_SIZE_SHIFT) &
		OCRDMA_CREATE_QP_REQ_RQE_SIZE_MASK;

static void ocrdma_set_create_qp_dpp_cmd(struct ocrdma_create_qp_req *cmd,
					 struct ocrdma_pd *pd,
					 struct ocrdma_qp *qp,
					 u8 enable_dpp_cq, u16 dpp_cq_id)
		qp->dpp_enabled = true;
		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;

		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_ENABLE_DPP_MASK;
		cmd->dpp_credits_cqid = dpp_cq_id;
		cmd->dpp_credits_cqid |= OCRDMA_CREATE_QP_REQ_DPP_CREDIT_LIMIT <<
			OCRDMA_CREATE_QP_REQ_DPP_CREDIT_SHIFT;
static int ocrdma_set_create_qp_ird_cmd(struct ocrdma_create_qp_req *cmd,
					struct ocrdma_qp *qp)
	struct ocrdma_dev *dev = qp->dev;
	struct pci_dev *pdev = dev->nic_info.pdev;
	int ird_page_size = dev->attr.ird_page_size;
	int ird_q_len = dev->attr.num_ird_pages * ird_page_size;
	struct ocrdma_hdr_wqe *rqe;

	if (dev->attr.ird == 0)

	qp->ird_q_va = dma_alloc_coherent(&pdev->dev, ird_q_len,
	memset(qp->ird_q_va, 0, ird_q_len);
	ocrdma_build_q_pages(&cmd->ird_addr[0], dev->attr.num_ird_pages,
	for (; i < ird_q_len / dev->attr.rqe_size; i++) {
		rqe = (struct ocrdma_hdr_wqe *)(qp->ird_q_va +
			(i * dev->attr.rqe_size));
		rqe->cw |= (OCRDMA_TYPE_LKEY << OCRDMA_WQE_TYPE_SHIFT);
		rqe->cw |= (8 << OCRDMA_WQE_SIZE_SHIFT);
		rqe->cw |= (8 << OCRDMA_WQE_NXT_WQE_SIZE_SHIFT);
static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
				     struct ocrdma_qp *qp,
				     struct ib_qp_init_attr *attrs,
				     u16 *dpp_offset, u16 *dpp_credit_lmt)
	u32 max_wqe_allocated, max_rqe_allocated;
	qp->id = rsp->qp_id & OCRDMA_CREATE_QP_RSP_QP_ID_MASK;
	qp->rq.dbid = rsp->sq_rq_id & OCRDMA_CREATE_QP_RSP_RQ_ID_MASK;
	qp->sq.dbid = rsp->sq_rq_id >> OCRDMA_CREATE_QP_RSP_SQ_ID_SHIFT;
	qp->max_ird = rsp->max_ord_ird & OCRDMA_CREATE_QP_RSP_MAX_IRD_MASK;
	qp->max_ord = (rsp->max_ord_ird >> OCRDMA_CREATE_QP_RSP_MAX_ORD_SHIFT);
	qp->dpp_enabled = false;
	if (rsp->dpp_response & OCRDMA_CREATE_QP_RSP_DPP_ENABLED_MASK) {
		qp->dpp_enabled = true;
		*dpp_credit_lmt = (rsp->dpp_response &
				   OCRDMA_CREATE_QP_RSP_DPP_CREDITS_MASK) >>
			OCRDMA_CREATE_QP_RSP_DPP_CREDITS_SHIFT;
		*dpp_offset = (rsp->dpp_response &
			       OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_MASK) >>
			OCRDMA_CREATE_QP_RSP_DPP_PAGE_OFFSET_SHIFT;

		rsp->max_wqe_rqe >> OCRDMA_CREATE_QP_RSP_MAX_WQE_SHIFT;
	max_wqe_allocated = 1 << max_wqe_allocated;
	max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);

	qp->sq.max_cnt = max_wqe_allocated;
	qp->sq.max_wqe_idx = max_wqe_allocated - 1;

	qp->rq.max_cnt = max_rqe_allocated;
	qp->rq.max_wqe_idx = max_rqe_allocated - 1;
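/*
 * Note: the firmware reports the WQE/RQE ring depths as log2 values in
 * max_wqe_rqe, hence the 1 << conversions above; an encoded value of 10,
 * for example, means 1024 entries, and max_wqe_idx becomes the ring index
 * mask for that power-of-two depth.
 */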
int ocrdma_mbx_create_qp(struct ocrdma_qp *qp, struct ib_qp_init_attr *attrs,
			 u8 enable_dpp_cq, u16 dpp_cq_id, u16 *dpp_offset,
			 u16 *dpp_credit_lmt)
{
	int status = -ENOMEM;
	u32 flags = 0;
	struct ocrdma_dev *dev = qp->dev;
	struct ocrdma_pd *pd = qp->pd;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_cq *cq;
	struct ocrdma_create_qp_req *cmd;
	struct ocrdma_create_qp_rsp *rsp;
	int qptype;

	switch (attrs->qp_type) {
	case IB_QPT_GSI:
		qptype = OCRDMA_QPT_GSI;
		break;
	case IB_QPT_RC:
		qptype = OCRDMA_QPT_RC;
		break;
	case IB_QPT_UD:
		qptype = OCRDMA_QPT_UD;
		break;
	default:
		return -EINVAL;
	}

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->type_pgsz_pdn |= (qptype << OCRDMA_CREATE_QP_REQ_QPT_SHIFT) &
					OCRDMA_CREATE_QP_REQ_QPT_MASK;
	status = ocrdma_set_create_qp_sq_cmd(cmd, attrs, qp);
	if (status)
		goto sq_err;

	if (attrs->srq) {
		struct ocrdma_srq *srq = get_ocrdma_srq(attrs->srq);

		cmd->max_sge_recv_flags |= OCRDMA_CREATE_QP_REQ_USE_SRQ_MASK;
		cmd->rq_addr[0].lo = srq->id;
		qp->srq = srq;
	} else {
		status = ocrdma_set_create_qp_rq_cmd(cmd, attrs, qp);
		if (status)
			goto rq_err;
	}

	status = ocrdma_set_create_qp_ird_cmd(cmd, qp);
	if (status)
		goto mbx_err;

	cmd->type_pgsz_pdn |= (pd->id << OCRDMA_CREATE_QP_REQ_PD_ID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_PD_ID_MASK;

	flags = ocrdma_set_create_qp_mbx_access_flags(qp);

	cmd->max_sge_recv_flags |= flags;
	cmd->max_ord_ird |= (dev->attr.max_ord_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_ORD_SHIFT) &
				OCRDMA_CREATE_QP_REQ_MAX_ORD_MASK;
	cmd->max_ord_ird |= (dev->attr.max_ird_per_qp <<
			     OCRDMA_CREATE_QP_REQ_MAX_IRD_SHIFT) &
				OCRDMA_CREATE_QP_REQ_MAX_IRD_MASK;
	cq = get_ocrdma_cq(attrs->send_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_WQ_CQID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_WQ_CQID_MASK;
	qp->sq_cq = cq;
	cq = get_ocrdma_cq(attrs->recv_cq);
	cmd->wq_rq_cqid |= (cq->id << OCRDMA_CREATE_QP_REQ_RQ_CQID_SHIFT) &
				OCRDMA_CREATE_QP_REQ_RQ_CQID_MASK;
	qp->rq_cq = cq;

	if (pd->dpp_enabled && attrs->cap.max_inline_data && pd->num_dpp_qp &&
	    (attrs->cap.max_inline_data <= dev->attr.max_inline_data)) {
		ocrdma_set_create_qp_dpp_cmd(cmd, pd, qp, enable_dpp_cq,
					     dpp_cq_id);
	}

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_qp_rsp *)cmd;
	ocrdma_get_create_qp_rsp(rsp, qp, attrs, dpp_offset, dpp_credit_lmt);
	qp->state = OCRDMA_QPS_RST;
	kfree(cmd);
	return 0;
mbx_err:
	if (qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
rq_err:
	pr_err("%s(%d) rq_err\n", __func__, dev->id);
	dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
sq_err:
	pr_err("%s(%d) sq_err\n", __func__, dev->id);
	kfree(cmd);
	return status;
}
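
/* Issue QUERY_QP and copy the firmware QP parameters back to the caller. */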
int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			struct ocrdma_qp_params *param)
{
	int status = -ENOMEM;
	struct ocrdma_query_qp *cmd;
	struct ocrdma_query_qp_rsp *rsp;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_query_qp_rsp *)cmd;
	memcpy(param, &rsp->params, sizeof(struct ocrdma_qp_params));
mbx_err:
	kfree(cmd);
	return status;
}
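
/* Fill the address-vector fields of a MODIFY_QP command from ah_attr.
 * A GRH is mandatory; programs traffic class, flow label, SL, hop limit,
 * DGID/SGID, the resolved destination MAC and an optional VLAN tag.
 */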
static int ocrdma_set_av_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs)
{
	int status;
	struct ib_ah_attr *ah_attr = &attrs->ah_attr;
	union ib_gid sgid, zgid;
	u32 vlan_id;
	u8 mac_addr[6];

	if ((ah_attr->ah_flags & IB_AH_GRH) == 0)
		return -EINVAL;
	if (atomic_cmpxchg(&qp->dev->update_sl, 1, 0))
		ocrdma_init_service_level(qp->dev);
	cmd->params.tclass_sq_psn |=
	    (ah_attr->grh.traffic_class << OCRDMA_QP_PARAMS_TCLASS_SHIFT);
	cmd->params.rnt_rc_sl_fl |=
	    (ah_attr->grh.flow_label & OCRDMA_QP_PARAMS_FLOW_LABEL_MASK);
	cmd->params.rnt_rc_sl_fl |= (ah_attr->sl << OCRDMA_QP_PARAMS_SL_SHIFT);
	cmd->params.hop_lmt_rq_psn |=
	    (ah_attr->grh.hop_limit << OCRDMA_QP_PARAMS_HOP_LMT_SHIFT);
	cmd->flags |= OCRDMA_QP_PARA_FLOW_LBL_VALID;
	memcpy(&cmd->params.dgid[0], &ah_attr->grh.dgid.raw[0],
	       sizeof(cmd->params.dgid));
	status = ocrdma_query_gid(&qp->dev->ibdev, 1,
				  ah_attr->grh.sgid_index, &sgid);
	if (status)
		return status;

	memset(&zgid, 0, sizeof(zgid));
	if (!memcmp(&sgid, &zgid, sizeof(zgid)))
		return -EINVAL;

	qp->sgid_idx = ah_attr->grh.sgid_index;
	memcpy(&cmd->params.sgid[0], &sgid.raw[0], sizeof(cmd->params.sgid));
	ocrdma_resolve_dmac(qp->dev, ah_attr, &mac_addr[0]);
	cmd->params.dmac_b0_to_b3 = mac_addr[0] | (mac_addr[1] << 8) |
				    (mac_addr[2] << 16) | (mac_addr[3] << 24);
	/* convert them to LE format. */
	ocrdma_cpu_to_le32(&cmd->params.dgid[0], sizeof(cmd->params.dgid));
	ocrdma_cpu_to_le32(&cmd->params.sgid[0], sizeof(cmd->params.sgid));
	cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
	vlan_id = ah_attr->vlan_id;
	if (vlan_id && (vlan_id < 0x1000)) {
		cmd->params.vlan_dmac_b4_to_b5 |=
		    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
		cmd->params.rnt_rc_sl_fl |=
			(qp->dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
	}
	return 0;
}
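
/* Translate ib_qp_attr/attr_mask into MODIFY_QP command fields, setting
 * the matching OCRDMA_QP_PARA_*_VALID flag for every attribute programmed.
 */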
static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
				struct ocrdma_modify_qp *cmd,
				struct ib_qp_attr *attrs, int attr_mask)
{
	int status = 0;

	if (attr_mask & IB_QP_PKEY_INDEX) {
		cmd->params.path_mtu_pkey_indx |= (attrs->pkey_index &
					    OCRDMA_QP_PARAMS_PKEY_INDEX_MASK);
		cmd->flags |= OCRDMA_QP_PARA_PKEY_VALID;
	}
	if (attr_mask & IB_QP_QKEY) {
		qp->qkey = attrs->qkey;
		cmd->params.qkey = attrs->qkey;
		cmd->flags |= OCRDMA_QP_PARA_QKEY_VALID;
	}
	if (attr_mask & IB_QP_AV) {
		status = ocrdma_set_av_params(qp, cmd, attrs);
		if (status)
			return status;
	} else if (qp->qp_type == IB_QPT_GSI || qp->qp_type == IB_QPT_UD) {
		/* set the default mac address for UD, GSI QPs */
		cmd->params.dmac_b0_to_b3 = qp->dev->nic_info.mac_addr[0] |
			(qp->dev->nic_info.mac_addr[1] << 8) |
			(qp->dev->nic_info.mac_addr[2] << 16) |
			(qp->dev->nic_info.mac_addr[3] << 24);
		cmd->params.vlan_dmac_b4_to_b5 = qp->dev->nic_info.mac_addr[4] |
					(qp->dev->nic_info.mac_addr[5] << 8);
	}
	if ((attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY) &&
	    attrs->en_sqd_async_notify) {
		cmd->params.max_sge_recv_flags |=
				OCRDMA_QP_PARAMS_FLAGS_SQD_ASYNC;
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_DEST_QPN) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->dest_qp_num &
				OCRDMA_QP_PARAMS_DEST_QPN_MASK);
		cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
	}
	if (attr_mask & IB_QP_PATH_MTU) {
		if (attrs->path_mtu < IB_MTU_256 ||
		    attrs->path_mtu > IB_MTU_4096) {
			status = -EINVAL;
			goto pmtu_err;
		}
		cmd->params.path_mtu_pkey_indx |=
		    (ib_mtu_enum_to_int(attrs->path_mtu) <<
		     OCRDMA_QP_PARAMS_PATH_MTU_SHIFT) &
		    OCRDMA_QP_PARAMS_PATH_MTU_MASK;
		cmd->flags |= OCRDMA_QP_PARA_PMTU_VALID;
	}
	if (attr_mask & IB_QP_TIMEOUT) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= attrs->timeout <<
		    OCRDMA_QP_PARAMS_ACK_TIMEOUT_SHIFT;
		cmd->flags |= OCRDMA_QP_PARA_ACK_TO_VALID;
	}
	if (attr_mask & IB_QP_RETRY_CNT) {
		cmd->params.rnt_rc_sl_fl |= (attrs->retry_cnt <<
				      OCRDMA_QP_PARAMS_RETRY_CNT_SHIFT) &
		    OCRDMA_QP_PARAMS_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RETRY_CNT_VALID;
	}
	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		cmd->params.rnt_rc_sl_fl |= (attrs->min_rnr_timer <<
		    OCRDMA_QP_PARAMS_RNR_NAK_TIMER_SHIFT) &
		    OCRDMA_QP_PARAMS_RNR_NAK_TIMER_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RNT_VALID;
	}
	if (attr_mask & IB_QP_RNR_RETRY) {
		cmd->params.ack_to_rnr_rtc_dest_qpn |= (attrs->rnr_retry <<
			OCRDMA_QP_PARAMS_RNR_RETRY_CNT_SHIFT)
			& OCRDMA_QP_PARAMS_RNR_RETRY_CNT_MASK;
		cmd->flags |= OCRDMA_QP_PARA_RRC_VALID;
	}
	if (attr_mask & IB_QP_SQ_PSN) {
		cmd->params.tclass_sq_psn |= (attrs->sq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_SQPSN_VALID;
	}
	if (attr_mask & IB_QP_RQ_PSN) {
		cmd->params.hop_lmt_rq_psn |= (attrs->rq_psn & 0x00ffffff);
		cmd->flags |= OCRDMA_QP_PARA_RQPSN_VALID;
	}
	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attrs->max_rd_atomic > qp->dev->attr.max_ord_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ord = attrs->max_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_ORD_VALID;
	}
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attrs->max_dest_rd_atomic > qp->dev->attr.max_ird_per_qp) {
			status = -EINVAL;
			goto pmtu_err;
		}
		qp->max_ird = attrs->max_dest_rd_atomic;
		cmd->flags |= OCRDMA_QP_PARA_MAX_IRD_VALID;
	}
	cmd->params.max_ord_ird = (qp->max_ord <<
				OCRDMA_QP_PARAMS_MAX_ORD_SHIFT) |
				(qp->max_ird & OCRDMA_QP_PARAMS_MAX_IRD_MASK);
pmtu_err:
	return status;
}
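
/* Issue MODIFY_QP: program the requested (or current) QP state plus any
 * attributes selected in attr_mask.
 */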
int ocrdma_mbx_modify_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
			 struct ib_qp_attr *attrs, int attr_mask)
{
	int status = -ENOMEM;
	struct ocrdma_modify_qp *cmd;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_QP, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->params.id = qp->id;
	cmd->flags = 0;
	if (attr_mask & IB_QP_STATE) {
		cmd->params.max_sge_recv_flags |=
		    (get_ocrdma_qp_state(attrs->qp_state) <<
		     OCRDMA_QP_PARAMS_STATE_SHIFT) &
		    OCRDMA_QP_PARAMS_STATE_MASK;
		cmd->flags |= OCRDMA_QP_PARA_QPS_VALID;
	} else {
		cmd->params.max_sge_recv_flags |=
		    (qp->state << OCRDMA_QP_PARAMS_STATE_SHIFT) &
		    OCRDMA_QP_PARAMS_STATE_MASK;
	}

	status = ocrdma_set_qp_params(qp, cmd, attrs, attr_mask);
	if (status)
		goto mbx_err;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
mbx_err:
	kfree(cmd);
	return status;
}
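
/* Issue DELETE_QP and release the SQ/RQ buffers; a DPP slot, if one was
 * taken at create time, is returned to the PD.
 */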
int ocrdma_mbx_destroy_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_qp *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_QP, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->qp_id = qp->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	if (qp->sq.va)
		dma_free_coherent(&pdev->dev, qp->sq.len, qp->sq.va, qp->sq.pa);
	if (!qp->srq && qp->rq.va)
		dma_free_coherent(&pdev->dev, qp->rq.len, qp->rq.va, qp->rq.pa);
	if (qp->dpp_enabled)
		qp->pd->num_dpp_qp++;
	return status;
}
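
/* Build and fire CREATE_SRQ: size the RQ, allocate its pages, then decode
 * the granted RQE count (log2 encoded) and SGE limit from the response.
 */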
int ocrdma_mbx_create_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq,
			  struct ib_srq_init_attr *srq_attr,
			  struct ocrdma_pd *pd)
{
	int status = -ENOMEM;
	int hw_pages, hw_page_size;
	int len;
	struct ocrdma_create_srq_rsp *rsp;
	struct ocrdma_create_srq *cmd;
	dma_addr_t pa;
	struct pci_dev *pdev = dev->nic_info.pdev;
	u32 max_rqe_allocated;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_CREATE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;

	cmd->pgsz_pdid = pd->id & OCRDMA_CREATE_SRQ_PD_ID_MASK;
	max_rqe_allocated = srq_attr->attr.max_wr + 1;
	status = ocrdma_build_q_conf(&max_rqe_allocated,
				     dev->attr.rqe_size,
				     &hw_pages, &hw_page_size);
	if (status) {
		pr_err("%s() req. max_wr=0x%x\n", __func__,
		       srq_attr->attr.max_wr);
		status = -EINVAL;
		goto ret;
	}
	len = hw_pages * hw_page_size;
	srq->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL);
	if (!srq->rq.va) {
		status = -ENOMEM;
		goto ret;
	}
	ocrdma_build_q_pages(&cmd->rq_addr[0], hw_pages, pa, hw_page_size);

	srq->rq.entry_size = dev->attr.rqe_size;
	srq->rq.pa = pa;
	srq->rq.len = len;
	srq->rq.max_cnt = max_rqe_allocated;

	cmd->max_sge_rqe = ilog2(max_rqe_allocated);
	cmd->max_sge_rqe |= srq_attr->attr.max_sge <<
				OCRDMA_CREATE_SRQ_MAX_SGE_RECV_SHIFT;

	cmd->pgsz_pdid |= (ilog2(hw_page_size / OCRDMA_MIN_Q_PAGE_SIZE)
				<< OCRDMA_CREATE_SRQ_PG_SZ_SHIFT);
	cmd->pages_rqe_sz |= (dev->attr.rqe_size
				<< OCRDMA_CREATE_SRQ_RQE_SIZE_SHIFT)
				& OCRDMA_CREATE_SRQ_RQE_SIZE_MASK;
	cmd->pages_rqe_sz |= hw_pages << OCRDMA_CREATE_SRQ_NUM_RQ_PAGES_SHIFT;

	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status)
		goto mbx_err;
	rsp = (struct ocrdma_create_srq_rsp *)cmd;
	srq->id = rsp->id;
	srq->rq.dbid = rsp->id;
	max_rqe_allocated = ((rsp->max_sge_rqe_allocated &
			      OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_MASK) >>
			     OCRDMA_CREATE_SRQ_RSP_MAX_RQE_ALLOCATED_SHIFT);
	max_rqe_allocated = (1 << max_rqe_allocated);
	srq->rq.max_cnt = max_rqe_allocated;
	srq->rq.max_wqe_idx = max_rqe_allocated - 1;
	srq->rq.max_sges = (rsp->max_sge_rqe_allocated &
			    OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_MASK) >>
			    OCRDMA_CREATE_SRQ_RSP_MAX_SGE_RECV_ALLOCATED_SHIFT;
	goto ret;
mbx_err:
	dma_free_coherent(&pdev->dev, srq->rq.len, srq->rq.va, pa);
ret:
	kfree(cmd);
	return status;
}
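
/* Issue MODIFY_SRQ to update the SRQ limit watermark. */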
int ocrdma_mbx_modify_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_modify_srq *cmd;
	struct ocrdma_pd *pd = srq->pd;
	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_MODIFY_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->id;
	cmd->limit_max_rqe |= srq_attr->srq_limit <<
				OCRDMA_MODIFY_SRQ_LIMIT_SHIFT;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	kfree(cmd);
	return status;
}
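
/* Issue QUERY_SRQ and unpack max_sge, max_wr and srq_limit for the caller. */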
int ocrdma_mbx_query_srq(struct ocrdma_srq *srq, struct ib_srq_attr *srq_attr)
{
	int status = -ENOMEM;
	struct ocrdma_query_srq *cmd;
	struct ocrdma_dev *dev = get_ocrdma_dev(srq->ibsrq.device);

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->rq.dbid;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (status == 0) {
		struct ocrdma_query_srq_rsp *rsp =
		    (struct ocrdma_query_srq_rsp *)cmd;
		srq_attr->max_sge =
		    rsp->srq_lmt_max_sge &
		    OCRDMA_QUERY_SRQ_RSP_MAX_SGE_RECV_MASK;
		srq_attr->max_wr =
		    rsp->max_rqe_pdid >> OCRDMA_QUERY_SRQ_RSP_MAX_RQE_SHIFT;
		srq_attr->srq_limit = rsp->srq_lmt_max_sge >>
		    OCRDMA_QUERY_SRQ_RSP_SRQ_LIMIT_SHIFT;
	}
	kfree(cmd);
	return status;
}
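
/* Issue DELETE_SRQ and free the receive-queue buffer. */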
int ocrdma_mbx_destroy_srq(struct ocrdma_dev *dev, struct ocrdma_srq *srq)
{
	int status = -ENOMEM;
	struct ocrdma_destroy_srq *cmd;
	struct pci_dev *pdev = dev->nic_info.pdev;

	cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_DELETE_SRQ, sizeof(*cmd));
	if (!cmd)
		return status;
	cmd->id = srq->id;
	status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
	if (srq->rq.va)
		dma_free_coherent(&pdev->dev, srq->rq.len,
				  srq->rq.va, srq->rq.pa);
	kfree(cmd);
	return status;
}
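
/* Fetch the DCBX configuration (operational or admin parameters) from
 * firmware via a non-embedded mailbox request.
 */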
static int ocrdma_mbx_get_dcbx_config(struct ocrdma_dev *dev, u32 ptype,
				      struct ocrdma_dcbx_cfg *dcbxcfg)
{
	int status = 0;
	dma_addr_t pa;
	struct ocrdma_mqe cmd;

	struct ocrdma_get_dcbx_cfg_req *req = NULL;
	struct ocrdma_get_dcbx_cfg_rsp *rsp = NULL;
	struct pci_dev *pdev = dev->nic_info.pdev;
	struct ocrdma_mqe_sge *mqe_sge = cmd.u.nonemb_req.sge;

	memset(&cmd, 0, sizeof(struct ocrdma_mqe));
	cmd.hdr.pyld_len = max_t(u32, sizeof(struct ocrdma_get_dcbx_cfg_rsp),
				 sizeof(struct ocrdma_get_dcbx_cfg_req));
	req = dma_alloc_coherent(&pdev->dev, cmd.hdr.pyld_len, &pa, GFP_KERNEL);
	if (!req) {
		status = -ENOMEM;
		goto mem_err;
	}

	cmd.hdr.spcl_sge_cnt_emb |= (1 << OCRDMA_MQE_HDR_SGE_CNT_SHIFT) &
					OCRDMA_MQE_HDR_SGE_CNT_MASK;
	mqe_sge->pa_lo = (u32) (pa & 0xFFFFFFFFUL);
	mqe_sge->pa_hi = (u32) upper_32_bits(pa);
	mqe_sge->len = cmd.hdr.pyld_len;

	memset(req, 0, sizeof(struct ocrdma_get_dcbx_cfg_req));
	ocrdma_init_mch(&req->hdr, OCRDMA_CMD_GET_DCBX_CONFIG,
			OCRDMA_SUBSYS_DCBX, cmd.hdr.pyld_len);
	req->param_type = ptype;

	status = ocrdma_mbx_cmd(dev, &cmd);
	if (status)
		goto mbx_err;

	rsp = (struct ocrdma_get_dcbx_cfg_rsp *)req;
	ocrdma_le32_to_cpu(rsp, sizeof(struct ocrdma_get_dcbx_cfg_rsp));
	memcpy(dcbxcfg, &rsp->cfg, sizeof(struct ocrdma_dcbx_cfg));

mbx_err:
	dma_free_coherent(&pdev->dev, cmd.hdr.pyld_len, req, pa);
mem_err:
	return status;
}

#define OCRDMA_MAX_SERVICE_LEVEL_INDEX	0x08
#define OCRDMA_DEFAULT_SERVICE_LEVEL	0x05
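
/* Walk the DCBX application-parameter entries looking for a valid RoCE
 * protocol entry; the first priority that also has PFC enabled becomes
 * the service level.
 */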
static int ocrdma_parse_dcbxcfg_rsp(struct ocrdma_dev *dev, int ptype,
				    struct ocrdma_dcbx_cfg *dcbxcfg,
				    u8 *srvc_lvl)
{
	int status = -EINVAL, indx, slindx;
	int ventry_cnt;
	struct ocrdma_app_parameter *app_param;
	u8 valid, proto_sel;
	u8 app_prio, pfc_prio;
	u16 proto;

	if (!(dcbxcfg->tcv_aev_opv_st & OCRDMA_DCBX_STATE_MASK)) {
		pr_info("%s ocrdma%d DCBX is disabled\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
		goto out;
	}

	if (!ocrdma_is_enabled_and_synced(dcbxcfg->pfc_state)) {
		pr_info("%s ocrdma%d priority flow control(%s) is %s%s\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id,
			(ptype > 0 ? "operational" : "admin"),
			(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_ENABLED) ?
			"enabled" : "disabled",
			(dcbxcfg->pfc_state & OCRDMA_STATE_FLAG_SYNC) ?
			"" : ", not sync'ed");
		goto out;
	} else {
		pr_info("%s ocrdma%d priority flow control is enabled and sync'ed\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
	}

	ventry_cnt = (dcbxcfg->tcv_aev_opv_st >>
				OCRDMA_DCBX_APP_ENTRY_SHIFT)
				& OCRDMA_DCBX_STATE_MASK;

	for (indx = 0; indx < ventry_cnt; indx++) {
		app_param = &dcbxcfg->app_param[indx];
		valid = (app_param->valid_proto_app >>
				OCRDMA_APP_PARAM_VALID_SHIFT)
				& OCRDMA_APP_PARAM_VALID_MASK;
		proto_sel = (app_param->valid_proto_app
				>> OCRDMA_APP_PARAM_PROTO_SEL_SHIFT)
				& OCRDMA_APP_PARAM_PROTO_SEL_MASK;
		proto = app_param->valid_proto_app &
				OCRDMA_APP_PARAM_APP_PROTO_MASK;

		if (valid && proto == OCRDMA_APP_PROTO_ROCE &&
		    proto_sel == OCRDMA_PROTO_SELECT_L2) {
			for (slindx = 0; slindx <
			     OCRDMA_MAX_SERVICE_LEVEL_INDEX; slindx++) {
				app_prio = ocrdma_get_app_prio(
						(u8 *)app_param->app_prio,
						slindx);
				pfc_prio = ocrdma_get_pfc_prio(
						(u8 *)dcbxcfg->pfc_prio,
						slindx);

				if (app_prio && pfc_prio) {
					*srvc_lvl = slindx;
					status = 0;
					goto out;
				}
			}
			if (slindx == OCRDMA_MAX_SERVICE_LEVEL_INDEX) {
				pr_info("%s ocrdma%d application priority not set for 0x%x protocol\n",
					dev_name(&dev->nic_info.pdev->dev),
					dev->id, proto);
			}
		}
	}

out:
	return status;
}
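
/* Derive the RoCE service level from DCBX: try the operational parameters
 * first, then the admin parameters, and fall back to the default SL.
 */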
void ocrdma_init_service_level(struct ocrdma_dev *dev)
{
	int status = 0, indx;
	struct ocrdma_dcbx_cfg dcbxcfg;
	u8 srvc_lvl = OCRDMA_DEFAULT_SERVICE_LEVEL;
	int ptype = OCRDMA_PARAMETER_TYPE_OPER;

	for (indx = 0; indx < 2; indx++) {
		status = ocrdma_mbx_get_dcbx_config(dev, ptype, &dcbxcfg);
		if (status) {
			pr_err("%s(): status=%d\n", __func__, status);
			ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
			continue;
		}

		status = ocrdma_parse_dcbxcfg_rsp(dev, ptype,
						  &dcbxcfg, &srvc_lvl);
		if (status) {
			ptype = OCRDMA_PARAMETER_TYPE_ADMIN;
			continue;
		}
		break;
	}

	if (status)
		pr_info("%s ocrdma%d service level default\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id);
	else
		pr_info("%s ocrdma%d service level %d\n",
			dev_name(&dev->nic_info.pdev->dev), dev->id,
			srvc_lvl);

	dev->pfc_state = ocrdma_is_enabled_and_synced(dcbxcfg.pfc_state);
	dev->sl = srvc_lvl;
}
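
/* Grab a free address-vector slot from the device AV table under lock. */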
int ocrdma_alloc_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	int i;
	int status = -EINVAL;
	struct ocrdma_av *av;
	unsigned long flags;

	av = dev->av_tbl.va;
	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	for (i = 0; i < dev->av_tbl.num_ah; i++) {
		if (av->valid == 0) {
			av->valid = OCRDMA_AV_VALID;
			ah->av = av;
			ah->id = i;
			status = 0;
			break;
		}
		av++;
	}
	if (i == dev->av_tbl.num_ah)
		status = -EAGAIN;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return status;
}
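
/* Return an address-vector slot to the device AV table. */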
int ocrdma_free_av(struct ocrdma_dev *dev, struct ocrdma_ah *ah)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->av_tbl.lock, flags);
	ah->av->valid = 0;
	spin_unlock_irqrestore(&dev->av_tbl.lock, flags);
	return 0;
}
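
/* Create one EQ per MSI-X vector (capped at the number of online CPUs),
 * or a single shared-IRQ EQ when running in INTx mode.
 */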
static int ocrdma_create_eqs(struct ocrdma_dev *dev)
{
	int num_eq, i, status = 0;
	int irq;
	unsigned long flags = 0;

	num_eq = dev->nic_info.msix.num_vectors -
			dev->nic_info.msix.start_vector;
	if (dev->nic_info.intr_mode == BE_INTERRUPT_MODE_INTX) {
		num_eq = 1;
		flags = IRQF_SHARED;
	} else {
		num_eq = min_t(u32, num_eq, num_online_cpus());
	}

	if (!num_eq)
		return -EINVAL;

	dev->eq_tbl = kzalloc(sizeof(struct ocrdma_eq) * num_eq, GFP_KERNEL);
	if (!dev->eq_tbl)
		return -ENOMEM;

	for (i = 0; i < num_eq; i++) {
		status = ocrdma_create_eq(dev, &dev->eq_tbl[i],
					  OCRDMA_EQ_LEN);
		if (status) {
			status = -EINVAL;
			break;
		}
		sprintf(dev->eq_tbl[i].irq_name, "ocrdma%d-%d",
			dev->id, i);
		irq = ocrdma_get_irq(dev, &dev->eq_tbl[i]);
		status = request_irq(irq, ocrdma_irq_handler, flags,
				     dev->eq_tbl[i].irq_name,
				     &dev->eq_tbl[i]);
		if (status)
			goto done;
		dev->eq_cnt += 1;
	}
	/* one eq is sufficient for data path to work */
	return 0;
done:
	ocrdma_destroy_eqs(dev);
	return status;
}
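
/* Bring up the device: EQs, mailbox queue, firmware/device queries and
 * the AH table, unwinding in reverse order on any failure.
 */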
int ocrdma_init_hw(struct ocrdma_dev *dev)
{
	int status;

	/* create the eqs */
	status = ocrdma_create_eqs(dev);
	if (status)
		goto qpeq_err;
	status = ocrdma_create_mq(dev);
	if (status)
		goto mq_err;
	status = ocrdma_mbx_query_fw_config(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_dev(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_query_fw_ver(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_create_ah_tbl(dev);
	if (status)
		goto conf_err;
	status = ocrdma_mbx_get_phy_info(dev);
	if (status)
		goto info_attrb_err;
	status = ocrdma_mbx_get_ctrl_attribs(dev);
	if (status)
		goto info_attrb_err;

	return 0;

info_attrb_err:
	ocrdma_mbx_delete_ah_tbl(dev);
conf_err:
	ocrdma_destroy_mq(dev);
mq_err:
	ocrdma_destroy_eqs(dev);
qpeq_err:
	pr_err("%s() status=%d\n", __func__, status);
	return status;
}
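
/* Tear down in reverse order of ocrdma_init_hw(). */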
void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
{
	ocrdma_mbx_delete_ah_tbl(dev);

	/* cleanup the eqs */
	ocrdma_destroy_eqs(dev);

	/* cleanup the control path */
	ocrdma_destroy_mq(dev);
}