2 * Copyright (C) 2005 - 2010 ServerEngines
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@serverengines.com
14 * 209 N. Fair Oaks Ave
/*
 * be_mcc_notify - ring the MCC queue doorbell.
 * Tells the adapter that one new WRB has been posted on the MCC queue.
 */
void be_mcc_notify(struct beiscsi_hba *phba)
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
        /* doorbell payload: ring id in the low bits, posted count above */
        val |= mccq->id & DB_MCCQ_RING_ID_MASK;
        val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;   /* one WRB posted */
        iowrite32(val, phba->db_va + DB_MCCQ_OFFSET);
/*
 * alloc_mcc_tag - take an MCC command tag from the circular tag pool.
 * Clears the slot and any stale completion status for the tag, then
 * advances the allocation index (wrapping at MAX_MCC_CMD).
 * NOTE(review): the no-tags-available path is not visible in this view;
 * presumably returns 0 in that case -- confirm against full source.
 */
unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
        if (phba->ctrl.mcc_tag_available) {
                tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
                phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
                phba->ctrl.mcc_numtag[tag] = 0; /* clear stale completion word */
                phba->ctrl.mcc_tag_available--;
                /* advance allocation index with wrap-around */
                if (phba->ctrl.mcc_alloc_index == (MAX_MCC_CMD - 1))
                        phba->ctrl.mcc_alloc_index = 0;
                        phba->ctrl.mcc_alloc_index++;
/*
 * free_mcc_tag - return a tag to the pool under mbox_lock.
 * Only the low byte of @tag identifies the tag; the free index wraps
 * at MAX_MCC_CMD, mirroring alloc_mcc_tag().
 */
void free_mcc_tag(struct be_ctrl_info *ctrl, unsigned int tag)
        spin_lock(&ctrl->mbox_lock);
        tag = tag & 0x000000FF; /* strip status bits packed above the tag */
        ctrl->mcc_tag[ctrl->mcc_free_index] = tag;
        if (ctrl->mcc_free_index == (MAX_MCC_CMD - 1))
                ctrl->mcc_free_index = 0;
                ctrl->mcc_free_index++; /* else-branch: advance free index */
        ctrl->mcc_tag_available++;
        spin_unlock(&ctrl->mbox_lock);
/*
 * is_link_state_evt - check whether an async completion trailer carries
 * a link-state event code.
 */
bool is_link_state_evt(u32 trailer)
        return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
                  ASYNC_TRAILER_EVENT_CODE_MASK) ==
                  ASYNC_EVENT_CODE_LINK_STATE);
/*
 * be_mcc_compl_is_new - test whether a CQ entry holds a fresh completion.
 * A non-zero flags word marks a new entry; it is byte-swapped in place,
 * so this must be called only once per entry.
 */
static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
        if (compl->flags != 0) {
                compl->flags = le32_to_cpu(compl->flags);
                WARN_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
/* be_mcc_compl_use - mark a completion entry as consumed (body not visible here). */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
/*
 * be_mcc_compl_process - byte-swap an MCC completion and check its status.
 * Logs compl_status/extd_status via dev_err on non-success.
 */
static int be_mcc_compl_process(struct be_ctrl_info *ctrl,
                                struct be_mcc_compl *compl)
        u16 compl_status, extd_status;

        /* completion arrives little-endian; convert 4 dwords in place */
        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                        CQE_STATUS_COMPL_MASK;
        if (compl_status != MCC_STATUS_SUCCESS) {
                extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                dev_err(&ctrl->pdev->dev,
                        "error in cmd completion: status(compl/extd)=%d/%d\n",
                        compl_status, extd_status);
/*
 * be_mcc_compl_process_isr - record an MCC completion for its waiter.
 * Packs valid bit, wrb, extd_status and compl_status into
 * ctrl->mcc_numtag[tag] and wakes the sleeper on mcc_wait[tag].
 */
int be_mcc_compl_process_isr(struct be_ctrl_info *ctrl,
                             struct be_mcc_compl *compl)
        u16 compl_status, extd_status;

        be_dws_le_to_cpu(compl, 4);

        compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
                                        CQE_STATUS_COMPL_MASK;
        /* The ctrl.mcc_numtag[tag] is filled with
         * [31] = valid, [30:24] = Rsvd, [23:16] = wrb, [15:8] = extd_status,
         * [7:0] = compl_status
         */
        tag = (compl->tag0 & 0x000000FF); /* low byte of tag0 is the tag */
        extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
                                        CQE_STATUS_EXTD_MASK;

        ctrl->mcc_numtag[tag]  = 0x80000000;              /* valid bit */
        ctrl->mcc_numtag[tag] |= (compl->tag0 & 0x00FF0000); /* wrb */
        ctrl->mcc_numtag[tag] |= (extd_status & 0x000000FF) << 8;
        ctrl->mcc_numtag[tag] |= (compl_status & 0x000000FF);
        wake_up_interruptible(&ctrl->mcc_wait[tag]);
/*
 * be_mcc_compl_get - peek the MCC CQ tail and advance it when the entry
 * is new; otherwise the tail is left in place.
 */
static struct be_mcc_compl *be_mcc_compl_get(struct beiscsi_hba *phba)
        struct be_queue_info *mcc_cq = &phba->ctrl.mcc_obj.cq;
        struct be_mcc_compl *compl = queue_tail_node(mcc_cq);

        if (be_mcc_compl_is_new(compl)) {
                queue_tail_inc(mcc_cq);
/* be2iscsi_fail_session - fail one iSCSI session with ISCSI_ERR_CONN_FAILED
 * (callback for iscsi_host_for_each_session on link-down).
 */
static void be2iscsi_fail_session(struct iscsi_cls_session *cls_session)
        iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
/*
 * beiscsi_async_link_state_process - handle an async link-state event.
 * Link down: flag the adapter and fail every active session.
 * Link up: mark the adapter up. Anything else is logged as unexpected.
 */
void beiscsi_async_link_state_process(struct beiscsi_hba *phba,
                struct be_async_event_link_state *evt)
        switch (evt->port_link_status) {
        case ASYNC_EVENT_LINK_DOWN:
                SE_DEBUG(DBG_LVL_1, "Link Down on Physical Port %d \n",
                phba->state |= BE_ADAPTER_LINK_DOWN;
                /* tear down all sessions riding on this port */
                iscsi_host_for_each_session(phba->shost,
                                            be2iscsi_fail_session);
        case ASYNC_EVENT_LINK_UP:
                phba->state = BE_ADAPTER_UP;
                SE_DEBUG(DBG_LVL_1, "Link UP on Physical Port %d \n",
                SE_DEBUG(DBG_LVL_1, "Unexpected Async Notification %d on"
                                    "Physical Port %d \n",
                                    evt->port_link_status,
/*
 * beiscsi_cq_notify - ring the CQ doorbell: ack @num_popped entries on
 * queue @qid and optionally re-arm interrupts (@arm).
 */
static void beiscsi_cq_notify(struct beiscsi_hba *phba, u16 qid, bool arm,
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= 1 << DB_CQ_REARM_SHIFT;  /* re-arm so further events interrupt */
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, phba->db_va + DB_CQ_OFFSET);
/*
 * beiscsi_process_mcc - drain the MCC completion queue under mcc_cq_lock.
 * Async entries are dispatched to the link-state handler (or logged if
 * unsupported); command completions go through be_mcc_compl_process().
 * Finally the CQ doorbell is notified with the number of popped entries.
 */
int beiscsi_process_mcc(struct beiscsi_hba *phba)
        struct be_mcc_compl *compl;
        int num = 0, status = 0;
        struct be_ctrl_info *ctrl = &phba->ctrl;

        spin_lock_bh(&phba->ctrl.mcc_cq_lock);
        while ((compl = be_mcc_compl_get(phba))) {
                if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
                        /* Interpret flags as an async trailer */
                        if (is_link_state_evt(compl->flags))
                                /* Interpret compl as a async link evt */
                                beiscsi_async_link_state_process(phba,
                                   (struct be_async_event_link_state *) compl);
                                        " Unsupported Async Event, flags"
                                        " = 0x%08x \n", compl->flags);
                } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
                        status = be_mcc_compl_process(ctrl, compl);
                        atomic_dec(&phba->ctrl.mcc_obj.q.used);
                be_mcc_compl_use(compl);        /* hand the entry back */

        beiscsi_cq_notify(phba, phba->ctrl.mcc_obj.cq.id, true, num);

        spin_unlock_bh(&phba->ctrl.mcc_cq_lock);
/*
 * be_mcc_wait_compl - poll until no pending MCC requests remain.
 * Iterates up to mcc_timeout times, processing completions each pass;
 * logs via dev_err on timeout.
 */
static int be_mcc_wait_compl(struct beiscsi_hba *phba)
        for (i = 0; i < mcc_timeout; i++) {
                status = beiscsi_process_mcc(phba);

                /* done once every posted WRB has completed */
                if (atomic_read(&phba->ctrl.mcc_obj.q.used) == 0)

        if (i == mcc_timeout) {
                dev_err(&phba->pcidev->dev, "mccq poll timed out\n");
/* Notify MCC requests and wait for completion */
int be_mcc_notify_wait(struct beiscsi_hba *phba)
        /* doorbell ring happens before this (not visible here), then poll */
        return be_mcc_wait_compl(phba);
/*
 * be_mbox_db_ready_wait - poll the mailbox doorbell until the RDY bit is
 * set; logs and bails out on timeout. Starts with short 5us waits,
 * falling back to mdelay(long_delay / 1000) for later iterations.
 */
static int be_mbox_db_ready_wait(struct be_ctrl_info *ctrl)
#define long_delay 2000
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        int cnt = 0, wait = 5; /* in usecs */

        ready = ioread32(db) & MPU_MAILBOX_DB_RDY_MASK;

        dev_err(&ctrl->pdev->dev, "mbox_db poll timed out\n");

        mdelay(long_delay / 1000);
/*
 * be_mbox_notify - post the bootstrap mailbox to the adapter.
 * The mailbox DMA address is written to the doorbell in two steps
 * (high bits first, then low bits), waiting for RDY after each write,
 * then the resulting completion in the mailbox is processed.
 */
int be_mbox_notify(struct be_ctrl_info *ctrl)
        void __iomem *db = ctrl->db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &ctrl->mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;

        /* step 1: high half of the mailbox DMA address */
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val |= MPU_MAILBOX_DB_HI_MASK;
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;

        status = be_mbox_db_ready_wait(ctrl);

        SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 1\n");

        /* step 2: low half (bits 4..33) of the mailbox DMA address */
        val &= ~MPU_MAILBOX_DB_RDY_MASK;
        val &= ~MPU_MAILBOX_DB_HI_MASK;
        val |= (u32) (mbox_mem->dma >> 4) << 2;

        status = be_mbox_db_ready_wait(ctrl);

        SE_DEBUG(DBG_LVL_1, " be_mbox_db_ready_wait failed 2\n");

        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(ctrl, &mbox->compl);
                be_mcc_compl_use(compl);

                SE_DEBUG(DBG_LVL_1, "After be_mcc_compl_process \n");

        dev_err(&ctrl->pdev->dev, "invalid mailbox completion\n");
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 * (static variant used by beiscsi_cmd_mccq_create; same protocol as
 * be_mbox_notify() but takes the hba instead of the ctrl)
 */
static int be_mbox_notify_wait(struct beiscsi_hba *phba)
        void __iomem *db = phba->ctrl.db + MPU_MAILBOX_DB_OFFSET;
        struct be_dma_mem *mbox_mem = &phba->ctrl.mbox_mem;
        struct be_mcc_mailbox *mbox = mbox_mem->va;
        struct be_mcc_compl *compl = &mbox->compl;
        struct be_ctrl_info *ctrl = &phba->ctrl;

        val |= MPU_MAILBOX_DB_HI_MASK;
        /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
        val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;

        /* wait for ready to be set */
        status = be_mbox_db_ready_wait(ctrl);

        /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
        val |= (u32)(mbox_mem->dma >> 4) << 2;

        status = be_mbox_db_ready_wait(ctrl);

        /* A cq entry has been made now */
        if (be_mcc_compl_is_new(compl)) {
                status = be_mcc_compl_process(ctrl, &mbox->compl);
                be_mcc_compl_use(compl);

        dev_err(&phba->pcidev->dev, "invalid mailbox completion\n");
/*
 * be_wrb_hdr_prepare - fill a WRB header: embedded flag or SGE count,
 * payload length, then swap the first 8 dwords to little-endian.
 */
void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
                        bool embedded, u8 sge_cnt)
        wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
        wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
                                MCC_WRB_SGE_CNT_SHIFT;
        wrb->payload_length = payload_len;
        be_dws_cpu_to_le(wrb, 8);
/*
 * be_cmd_hdr_prepare - fill a command request header; request_length
 * excludes the header itself.
 */
void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
                        u8 subsystem, u8 opcode, int cmd_len)
        req_hdr->opcode = opcode;
        req_hdr->subsystem = subsystem;
        req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
/*
 * be_cmd_page_addrs_prepare - write the 4K-page physical addresses of a
 * DMA region into a phys_addr array, capped at @max_pages entries.
 */
static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
                                      struct be_dma_mem *mem)
        u64 dma = (u64) mem->dma;

        buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
        for (i = 0; i < buf_pages; i++) {
                pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
                pages[i].hi = cpu_to_le32(upper_32_bits(dma));
/*
 * eq_delay_to_mult - convert an EQ interrupt delay (usecs) into the
 * hardware delay multiplier, rounded to the nearest step and clamped
 * to the 10-bit maximum of 1023.
 */
static u32 eq_delay_to_mult(u32 usec_delay)
#define MAX_INTR_RATE 651042
        const u32 round = 10;

        u32 interrupt_rate = 1000000 / usec_delay;
        if (interrupt_rate == 0)

        multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
        multiplier /= interrupt_rate;
        multiplier = (multiplier + round / 2) / round;  /* round to nearest */
        multiplier = min(multiplier, (u32) 1023);       /* 10-bit field */
/* wrb_from_mbox - return the WRB embedded in the bootstrap mailbox. */
struct be_mcc_wrb *wrb_from_mbox(struct be_dma_mem *mbox_mem)
        return &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
/*
 * wrb_from_mccq - take the next free WRB from the MCC queue head.
 * Zeroes the WRB, stamps the queue head index into tag0 bits [23:16],
 * advances the head and bumps the used count. BUG()s if the queue
 * is already full.
 */
struct be_mcc_wrb *wrb_from_mccq(struct beiscsi_hba *phba)
        struct be_queue_info *mccq = &phba->ctrl.mcc_obj.q;
        struct be_mcc_wrb *wrb;

        BUG_ON(atomic_read(&mccq->used) >= mccq->len);
        wrb = queue_head_node(mccq);
        memset(wrb, 0, sizeof(*wrb));
        wrb->tag0 = (mccq->head & 0x000000FF) << 16; /* wrb index for ISR */
        queue_head_inc(mccq);
        atomic_inc(&mccq->used);
/*
 * beiscsi_cmd_eq_create - issue COMMON_EQ_CREATE through the bootstrap
 * mailbox (under mbox_lock). On success the firmware-assigned EQ id is
 * stored into eq->id.
 */
int beiscsi_cmd_eq_create(struct be_ctrl_info *ctrl,
                          struct be_queue_info *eq, int eq_delay)
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_eq_create *req = embedded_payload(wrb);
        struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &eq->dma_mem;

        SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_eq_create\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); /* embedded, no SGEs */

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_EQ_CREATE, sizeof(*req));

        req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

        /* program the EQ context: owning function, validity, entry size
         * code, encoded ring length and the interrupt delay multiplier */
        AMAP_SET_BITS(struct amap_eq_context, func, req->context,
                                                PCI_FUNC(ctrl->pdev->devfn));
        AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
        AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
        AMAP_SET_BITS(struct amap_eq_context, count, req->context,
                                                __ilog2_u32(eq->len / 256));
        AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
                                                eq_delay_to_mult(eq_delay));
        be_dws_cpu_to_le(req->context, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
                eq->id = le16_to_cpu(resp->eq_id);      /* on success */

        spin_unlock(&ctrl->mbox_lock);
/*
 * be_cmd_fw_initialize - send the firmware-init "endian check" pattern.
 * The magic byte sequence FF 12 34 FF FF 56 78 FF lets the firmware
 * detect host endianness before any structured command is issued.
 */
int be_cmd_fw_initialize(struct be_ctrl_info *ctrl)
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);

        SE_DEBUG(DBG_LVL_8, "In be_cmd_fw_initialize\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        /* write the endian-signature bytes directly over the WRB */
        endian_check = (u8 *) wrb;
        *endian_check++ = 0xFF;
        *endian_check++ = 0x12;
        *endian_check++ = 0x34;
        *endian_check++ = 0xFF;
        *endian_check++ = 0xFF;
        *endian_check++ = 0x56;
        *endian_check++ = 0x78;
        *endian_check++ = 0xFF;
        be_dws_cpu_to_le(wrb, sizeof(*wrb));

        status = be_mbox_notify(ctrl);
                SE_DEBUG(DBG_LVL_1, "be_cmd_fw_initialize Failed \n");

        spin_unlock(&ctrl->mbox_lock);
509 int beiscsi_cmd_cq_create(struct be_ctrl_info *ctrl,
510 struct be_queue_info *cq, struct be_queue_info *eq,
511 bool sol_evts, bool no_delay, int coalesce_wm)
513 struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
514 struct be_cmd_req_cq_create *req = embedded_payload(wrb);
515 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
516 struct be_dma_mem *q_mem = &cq->dma_mem;
517 void *ctxt = &req->context;
520 SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_cq_create \n");
521 spin_lock(&ctrl->mbox_lock);
522 memset(wrb, 0, sizeof(*wrb));
524 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
526 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
527 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
529 SE_DEBUG(DBG_LVL_1, "uninitialized q_mem->va\n");
531 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
533 AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
534 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
535 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
536 __ilog2_u32(cq->len / 256));
537 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
538 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
539 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
540 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
541 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
542 AMAP_SET_BITS(struct amap_cq_context, func, ctxt,
543 PCI_FUNC(ctrl->pdev->devfn));
544 be_dws_cpu_to_le(ctxt, sizeof(req->context));
546 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
548 status = be_mbox_notify(ctrl);
550 cq->id = le16_to_cpu(resp->cq_id);
553 SE_DEBUG(DBG_LVL_1, "In be_cmd_cq_create, status=ox%08x \n",
555 spin_unlock(&ctrl->mbox_lock);
/*
 * be_encoded_q_len - encode a queue length for the hardware context.
 * fls() yields log2(len) + 1; the special case 16 is adjusted
 * (adjustment line not visible in this view).
 */
static u32 be_encoded_q_len(int q_len)
        u32 len_encoded = fls(q_len); /* log2(len) + 1 */
        if (len_encoded == 16)
/*
 * beiscsi_cmd_mccq_create - issue COMMON_MCC_CREATE through the
 * bootstrap mailbox to create the MCC queue bound to completion
 * queue @cq. On success, the firmware-assigned id is stored in
 * mccq->id and mccq->created is set.
 * NOTE(review): ctrl is presumably assigned &phba->ctrl before use;
 * the assignment is not visible in this view -- confirm.
 */
int beiscsi_cmd_mccq_create(struct beiscsi_hba *phba,
                        struct be_queue_info *mccq,
                        struct be_queue_info *cq)
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcc_create *req;
        struct be_dma_mem *q_mem = &mccq->dma_mem;
        struct be_ctrl_info *ctrl;

        spin_lock(&phba->ctrl.mbox_lock);

        wrb = wrb_from_mbox(&ctrl->mbox_mem);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_MCC_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);

        /* MCC context: owning PCI function, validity, encoded ring size,
         * and the CQ that receives MCC completions */
        AMAP_SET_BITS(struct amap_mcc_context, fid, ctxt,
                      PCI_FUNC(phba->pcidev->devfn));
        AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
                be_encoded_q_len(mccq->len));
        AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(phba);
                struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
                mccq->id = le16_to_cpu(resp->id);
                mccq->created = true;

        spin_unlock(&phba->ctrl.mbox_lock);
/*
 * beiscsi_cmd_q_destroy - destroy a queue (EQ/CQ/MCCQ/WRBQ/DPDUQ) or
 * remove posted SGL pages, selecting subsystem/opcode by queue_type.
 * For QTYPE_SGL no queue id is sent (q may be NULL).
 */
int beiscsi_cmd_q_destroy(struct be_ctrl_info *ctrl, struct be_queue_info *q,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_cmd_req_q_destroy *req = embedded_payload(wrb);
        u8 subsys = 0, opcode = 0;

        SE_DEBUG(DBG_LVL_8, "In beiscsi_cmd_q_destroy \n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        /* map queue type to the firmware subsystem and destroy opcode */
        switch (queue_type) {
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_WRBQ_DESTROY;
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_DEFQ_DESTROY;
                subsys = CMD_SUBSYSTEM_ISCSI;
                opcode = OPCODE_COMMON_ISCSI_CFG_REMOVE_SGL_PAGES;
                /* unknown type: drop the lock and bail (default case) */
                spin_unlock(&ctrl->mbox_lock);

        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        if (queue_type != QTYPE_SGL)
                req->id = cpu_to_le16(q->id);

        status = be_mbox_notify(ctrl);

        spin_unlock(&ctrl->mbox_lock);
/*
 * be_cmd_create_default_pdu_queue - issue ISCSI_DEFQ_CREATE through the
 * bootstrap mailbox to create the default PDU receive queue @dq, bound
 * to completion queue @cq. On success dq->id is set from the response.
 */
int be_cmd_create_default_pdu_queue(struct be_ctrl_info *ctrl,
                                    struct be_queue_info *cq,
                                    struct be_queue_info *dq, int length,
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_defq_create_req *req = embedded_payload(wrb);
        struct be_dma_mem *q_mem = &dq->dma_mem;
        void *ctxt = &req->context;

        SE_DEBUG(DBG_LVL_8, "In be_cmd_create_default_pdu_queue\n");
        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req));

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        /* default-PDU ring context: pdid, owning function, encoded ring
         * size (in phys_addr entries), buffer size and receiving CQ */
        AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid, ctxt, 0);
        AMAP_SET_BITS(struct amap_be_default_pdu_context, rx_pdid_valid, ctxt,
        AMAP_SET_BITS(struct amap_be_default_pdu_context, pci_func_id, ctxt,
                      PCI_FUNC(ctrl->pdev->devfn));
        AMAP_SET_BITS(struct amap_be_default_pdu_context, ring_size, ctxt,
                      be_encoded_q_len(length / sizeof(struct phys_addr)));
        AMAP_SET_BITS(struct amap_be_default_pdu_context, default_buffer_size,
        AMAP_SET_BITS(struct amap_be_default_pdu_context, cq_id_recv, ctxt,

        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
                struct be_defq_create_resp *resp = embedded_payload(wrb);

                dq->id = le16_to_cpu(resp->id); /* on success */

        spin_unlock(&ctrl->mbox_lock);
/*
 * be_cmd_wrbq_create - issue ISCSI_WRBQ_CREATE through the bootstrap
 * mailbox. On success the connection id from the response is stored
 * into wrbq->id and wrbq->created is set.
 */
int be_cmd_wrbq_create(struct be_ctrl_info *ctrl, struct be_dma_mem *q_mem,
                       struct be_queue_info *wrbq)
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_wrbq_create_req *req = embedded_payload(wrb);
        struct be_wrbq_create_resp *resp = embedded_payload(wrb);

        spin_lock(&ctrl->mbox_lock);
        memset(wrb, 0, sizeof(*wrb));

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req));
        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify(ctrl);
                wrbq->id = le16_to_cpu(resp->cid);      /* on success */
                wrbq->created = true;

        spin_unlock(&ctrl->mbox_lock);
/*
 * be_cmd_iscsi_post_sgl_pages - post SGL pages to the firmware in
 * chunks, looping until all @num_pages are mapped. Each iteration sends
 * at most as many page entries as fit in one request, then advances the
 * DMA address and page offset accordingly. On failure the posted SGL
 * pages are torn down via beiscsi_cmd_q_destroy(..., QTYPE_SGL).
 * NOTE(review): the 0xff num_pages value is treated specially
 * (see temp_num_pages) -- the branch bodies are not fully visible here.
 */
int be_cmd_iscsi_post_sgl_pages(struct be_ctrl_info *ctrl,
                                struct be_dma_mem *q_mem,
                                u32 page_offset, u32 num_pages)
        struct be_mcc_wrb *wrb = wrb_from_mbox(&ctrl->mbox_mem);
        struct be_post_sgl_pages_req *req = embedded_payload(wrb);

        unsigned int curr_pages;
        u32 internal_page_offset = 0;
        u32 temp_num_pages = num_pages; /* remember sentinel value */

        if (num_pages == 0xff)

        spin_lock(&ctrl->mbox_lock);

        memset(wrb, 0, sizeof(*wrb));
        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
                           OPCODE_COMMON_ISCSI_CFG_POST_SGL_PAGES,
        /* how many page entries fit in one request */
        curr_pages = BE_NUMBER_OF_FIELD(struct be_post_sgl_pages_req,
        req->num_pages = min(num_pages, curr_pages);
        req->page_offset = page_offset;
        be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem);
        /* advance to the next chunk */
        q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE);
        internal_page_offset += req->num_pages;
        page_offset += req->num_pages;
        num_pages -= req->num_pages;

        if (temp_num_pages == 0xff)
                req->num_pages = temp_num_pages;

        status = be_mbox_notify(ctrl);
                        "FW CMD to map iscsi frags failed.\n");

        } while (num_pages > 0);

        spin_unlock(&ctrl->mbox_lock);
        /* error path: undo any pages already posted */
        beiscsi_cmd_q_destroy(ctrl, NULL, QTYPE_SGL);