2 * Copyright (C) 2005 - 2011 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static struct be_cmd_priv_map cmd_priv_map[] = {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
30 OPCODE_COMMON_GET_FLOW_CONTROL,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
36 OPCODE_COMMON_SET_FLOW_CONTROL,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
42 OPCODE_ETH_GET_PPORT_STATS,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
48 OPCODE_COMMON_GET_PHY_DETAILS,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
59	int num_entries = ARRAY_SIZE(cmd_priv_map);
60 u32 cmd_privileges = adapter->cmd_privileges;
62 for (i = 0; i < num_entries; i++)
63 if (opcode == cmd_priv_map[i].opcode &&
64 subsystem == cmd_priv_map[i].subsystem)
65 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
71 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
73 return wrb->payload.embedded_payload;
76 static void be_mcc_notify(struct be_adapter *adapter)
78 struct be_queue_info *mccq = &adapter->mcc_obj.q;
81 if (be_error(adapter))
84 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
85 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
88 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
91 /* To check if valid bit is set, check the entire word as we don't know
92 * the endianness of the data (old entry is host endian while a new entry is little endian) */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
98 if (compl->flags != 0) {
99 flags = le32_to_cpu(compl->flags);
100 if (flags & CQE_FLAGS_VALID_MASK) {
101 compl->flags = flags;
108 /* Need to reset the entire word that houses the valid bit */
109 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
114 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
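/* be_wrb_cmd_hdr_prepare() stashes the request header address in
 * wrb->tag0 (low 32 bits) and wrb->tag1 (high 32 bits); rebuild that
 * address here.  The double 16-bit shift avoids shifting a 32-bit
 * 'unsigned long' by 32 on 32-bit builds.
 */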
119 addr = ((addr << 16) << 16) | tag0;
123 static int be_mcc_compl_process(struct be_adapter *adapter,
124 struct be_mcc_compl *compl)
126 u16 compl_status, extd_status;
127 struct be_cmd_resp_hdr *resp_hdr;
128 u8 opcode = 0, subsystem = 0;
130 /* Just swap the status to host endian; mcc tag is opaquely copied from mcc_wrb */
132 be_dws_le_to_cpu(compl, 4);
134 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
135 CQE_STATUS_COMPL_MASK;
137 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
140 opcode = resp_hdr->opcode;
141 subsystem = resp_hdr->subsystem;
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl);
151 if (compl_status == MCC_STATUS_SUCCESS) {
152 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
153 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
154 (subsystem == CMD_SUBSYSTEM_ETH)) {
155 be_parse_stats(adapter);
156 adapter->stats_cmd_sent = false;
158 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
159 subsystem == CMD_SUBSYSTEM_COMMON) {
160 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
162 adapter->drv_stats.be_on_die_temperature =
163 resp->on_die_temperature;
166 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
167 adapter->be_get_temp_freq = 0;
169 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
170 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
173 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
174 dev_warn(&adapter->pdev->dev,
175 "VF is not privileged to issue opcode %d-%d\n",
178 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
179 CQE_STATUS_EXTD_MASK;
180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status);
189 /* Link state evt is a string of bytes; no need for endian swapping */
190 static void be_async_link_state_process(struct be_adapter *adapter,
191 struct be_async_event_link_state *evt)
193 /* When link status changes, link speed must be re-queried from FW */
194 adapter->phy.link_speed = -1;
196 /* Ignore physical link event */
197 if (lancer_chip(adapter) &&
198 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
201 /* For the initial link status do not rely on the ASYNC event as
202 * it may not be received in some cases.
204 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
205 be_link_status_update(adapter, evt->port_link_status);
208 /* Grp5 CoS Priority evt */
209 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
210 struct be_async_event_grp5_cos_priority *evt)
213 adapter->vlan_prio_bmap = evt->available_priority_bmap;
214 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
215 adapter->recommended_prio |=
216 evt->reco_default_priority << VLAN_PRIO_SHIFT;
220 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
221 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
222 struct be_async_event_grp5_qos_link_speed *evt)
224 if (adapter->phy.link_speed >= 0 &&
225 evt->physical_port == adapter->port_num)
226 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
230 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
231 struct be_async_event_grp5_pvid_state *evt)
234 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
239 static void be_async_grp5_evt_process(struct be_adapter *adapter,
240 u32 trailer, struct be_mcc_compl *evt)
244 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
245 ASYNC_TRAILER_EVENT_TYPE_MASK;
247 switch (event_type) {
248 case ASYNC_EVENT_COS_PRIORITY:
249 be_async_grp5_cos_priority_process(adapter,
250 (struct be_async_event_grp5_cos_priority *)evt);
252 case ASYNC_EVENT_QOS_SPEED:
253 be_async_grp5_qos_speed_process(adapter,
254 (struct be_async_event_grp5_qos_link_speed *)evt);
256 case ASYNC_EVENT_PVID_STATE:
257 be_async_grp5_pvid_state_process(adapter,
258 (struct be_async_event_grp5_pvid_state *)evt);
261 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
266 static inline bool is_link_state_evt(u32 trailer)
268 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
269 ASYNC_TRAILER_EVENT_CODE_MASK) ==
270 ASYNC_EVENT_CODE_LINK_STATE;
273 static inline bool is_grp5_evt(u32 trailer)
275 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
276 ASYNC_TRAILER_EVENT_CODE_MASK) ==
277 ASYNC_EVENT_CODE_GRP_5);
280 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
282 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
283 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
285 if (be_mcc_compl_is_new(compl)) {
286 queue_tail_inc(mcc_cq);
292 void be_async_mcc_enable(struct be_adapter *adapter)
294 spin_lock_bh(&adapter->mcc_cq_lock);
296 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
297 adapter->mcc_obj.rearm_cq = true;
299 spin_unlock_bh(&adapter->mcc_cq_lock);
302 void be_async_mcc_disable(struct be_adapter *adapter)
304 spin_lock_bh(&adapter->mcc_cq_lock);
306 adapter->mcc_obj.rearm_cq = false;
307 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
309 spin_unlock_bh(&adapter->mcc_cq_lock);
312 int be_process_mcc(struct be_adapter *adapter)
314 struct be_mcc_compl *compl;
315 int num = 0, status = 0;
316 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
318 spin_lock(&adapter->mcc_cq_lock);
319 while ((compl = be_mcc_compl_get(adapter))) {
320 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
321 /* Interpret flags as an async trailer */
322 if (is_link_state_evt(compl->flags))
323 be_async_link_state_process(adapter,
324 (struct be_async_event_link_state *) compl);
325 else if (is_grp5_evt(compl->flags))
326 be_async_grp5_evt_process(adapter,
327 compl->flags, compl);
328 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
329 status = be_mcc_compl_process(adapter, compl);
330 atomic_dec(&mcc_obj->q.used);
332 be_mcc_compl_use(compl);
337 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
339 spin_unlock(&adapter->mcc_cq_lock);
343 /* Wait till no more pending mcc requests are present */
344 static int be_mcc_wait_compl(struct be_adapter *adapter)
346 #define mcc_timeout 120000 /* 12s timeout */
348 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
350 for (i = 0; i < mcc_timeout; i++) {
351 if (be_error(adapter))
355 status = be_process_mcc(adapter);
358 if (atomic_read(&mcc_obj->q.used) == 0)
362 if (i == mcc_timeout) {
363 dev_err(&adapter->pdev->dev, "FW not responding\n");
364 adapter->fw_timeout = true;
370 /* Notify MCC requests and wait for completion */
371 static int be_mcc_notify_wait(struct be_adapter *adapter)
374 struct be_mcc_wrb *wrb;
375 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
376 u16 index = mcc_obj->q.head;
377 struct be_cmd_resp_hdr *resp;
379 index_dec(&index, mcc_obj->q.len);
380 wrb = queue_index_node(&mcc_obj->q, index);
382 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
384 be_mcc_notify(adapter);
386 status = be_mcc_wait_compl(adapter);
390 status = resp->status;
395 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
401 if (be_error(adapter))
404 ready = ioread32(db);
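/* An all-ones readback typically means the PCI function is no longer
 * responding (e.g. surprise removal); treat it as an error.
 */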
405 if (ready == 0xffffffff)
408 ready &= MPU_MAILBOX_DB_RDY_MASK;
413 dev_err(&adapter->pdev->dev, "FW not responding\n");
414 adapter->fw_timeout = true;
415 be_detect_error(adapter);
427 * Insert the mailbox address into the doorbell in two steps
428 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
430 static int be_mbox_notify_wait(struct be_adapter *adapter)
434 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
435 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
436 struct be_mcc_mailbox *mbox = mbox_mem->va;
437 struct be_mcc_compl *compl = &mbox->compl;
439 /* wait for ready to be set */
440 status = be_mbox_db_ready_wait(adapter, db);
444 val |= MPU_MAILBOX_DB_HI_MASK;
445 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
446 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
449 /* wait for ready to be set */
450 status = be_mbox_db_ready_wait(adapter, db);
455 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
456 val |= (u32)(mbox_mem->dma >> 4) << 2;
459 status = be_mbox_db_ready_wait(adapter, db);
463 /* A cq entry has been made now */
464 if (be_mcc_compl_is_new(compl)) {
465 status = be_mcc_compl_process(adapter, &mbox->compl);
466 be_mcc_compl_use(compl);
470 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
476 static void be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
479 u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
480 SLIPORT_SEMAPHORE_OFFSET_BE;
482 pci_read_config_dword(adapter->pdev, reg, &sem);
483 *stage = sem & POST_STAGE_MASK;
486 int lancer_wait_ready(struct be_adapter *adapter)
488 #define SLIPORT_READY_TIMEOUT 30
492 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
493 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
494 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
500 if (i == SLIPORT_READY_TIMEOUT)
506 static bool lancer_provisioning_error(struct be_adapter *adapter)
508 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
509 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
510 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
511 sliport_err1 = ioread32(adapter->db +
512 SLIPORT_ERROR1_OFFSET);
513 sliport_err2 = ioread32(adapter->db +
514 SLIPORT_ERROR2_OFFSET);
516 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
517 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
523 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
526 u32 sliport_status, err, reset_needed;
529 resource_error = lancer_provisioning_error(adapter);
533 status = lancer_wait_ready(adapter);
535 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
536 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
537 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
538 if (err && reset_needed) {
539 iowrite32(SLI_PORT_CONTROL_IP_MASK,
540 adapter->db + SLIPORT_CONTROL_OFFSET);
542 /* check adapter has corrected the error */
543 status = lancer_wait_ready(adapter);
544 sliport_status = ioread32(adapter->db +
545 SLIPORT_STATUS_OFFSET);
546 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
547 SLIPORT_STATUS_RN_MASK);
548 if (status || sliport_status)
550 } else if (err || reset_needed) {
554 /* Stop error recovery if error is not recoverable.
555 * A 'no resource' error is temporary and will go away
556 * when the PF provisions resources.
558 resource_error = lancer_provisioning_error(adapter);
559 if (status == -1 && !resource_error)
560 adapter->eeh_error = true;
565 int be_fw_wait_ready(struct be_adapter *adapter)
568 int status, timeout = 0;
569 struct device *dev = &adapter->pdev->dev;
571 if (lancer_chip(adapter)) {
572 status = lancer_wait_ready(adapter);
577 be_POST_stage_get(adapter, &stage);
578 if (stage == POST_STAGE_ARMFW_RDY)
581 dev_info(dev, "Waiting for POST, %ds elapsed\n",
583 if (msleep_interruptible(2000)) {
584 dev_err(dev, "Waiting for POST aborted\n");
588 } while (timeout < 60);
590 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
595 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
597 return &wrb->payload.sgl[0];
601 /* Don't touch the hdr after it's prepared */
602 /* mem will be NULL for embedded commands */
603 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
604 u8 subsystem, u8 opcode, int cmd_len,
605 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
608 unsigned long addr = (unsigned long)req_hdr;
611 req_hdr->opcode = opcode;
612 req_hdr->subsystem = subsystem;
613 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
614 req_hdr->version = 0;
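/* Stash the request header address in the wrb tags; be_decode_resp_hdr()
 * uses them to locate the response when the completion arrives.
 */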
616 wrb->tag0 = req_addr & 0xFFFFFFFF;
617 wrb->tag1 = upper_32_bits(req_addr);
619 wrb->payload_length = cmd_len;
621 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
622 MCC_WRB_SGE_CNT_SHIFT;
623 sge = nonembedded_sgl(wrb);
624 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
625 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
626 sge->len = cpu_to_le32(mem->size);
628 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
629 be_dws_cpu_to_le(wrb, 8);
632 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
633 struct be_dma_mem *mem)
635 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
636 u64 dma = (u64)mem->dma;
638 for (i = 0; i < buf_pages; i++) {
639 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
640 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
645 /* Converts interrupt delay in microseconds to multiplier value */
646 static u32 eq_delay_to_mult(u32 usec_delay)
648 #define MAX_INTR_RATE 651042
649 const u32 round = 10;
655 u32 interrupt_rate = 1000000 / usec_delay;
656 /* Max delay, corresponding to the lowest interrupt rate */
657 if (interrupt_rate == 0)
660 multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
661 multiplier /= interrupt_rate;
662 /* Round the multiplier to the closest value.*/
663 multiplier = (multiplier + round/2) / round;
664 multiplier = min(multiplier, (u32)1023);
670 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
672 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
673 struct be_mcc_wrb *wrb
674 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
675 memset(wrb, 0, sizeof(*wrb));
679 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
681 struct be_queue_info *mccq = &adapter->mcc_obj.q;
682 struct be_mcc_wrb *wrb;
687 if (atomic_read(&mccq->used) >= mccq->len) {
688 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
692 wrb = queue_head_node(mccq);
693 queue_head_inc(mccq);
694 atomic_inc(&mccq->used);
695 memset(wrb, 0, sizeof(*wrb));
699 /* Tell fw we're about to start firing cmds by writing a
700 * special pattern across the wrb hdr; uses mbox
702 int be_cmd_fw_init(struct be_adapter *adapter)
707 if (lancer_chip(adapter))
710 if (mutex_lock_interruptible(&adapter->mbox_lock))
713 wrb = (u8 *)wrb_from_mbox(adapter);
723 status = be_mbox_notify_wait(adapter);
725 mutex_unlock(&adapter->mbox_lock);
729 /* Tell fw we're done with firing cmds by writing a
730 * special pattern across the wrb hdr; uses mbox
732 int be_cmd_fw_clean(struct be_adapter *adapter)
737 if (lancer_chip(adapter))
740 if (mutex_lock_interruptible(&adapter->mbox_lock))
743 wrb = (u8 *)wrb_from_mbox(adapter);
753 status = be_mbox_notify_wait(adapter);
755 mutex_unlock(&adapter->mbox_lock);
759 int be_cmd_eq_create(struct be_adapter *adapter,
760 struct be_queue_info *eq, int eq_delay)
762 struct be_mcc_wrb *wrb;
763 struct be_cmd_req_eq_create *req;
764 struct be_dma_mem *q_mem = &eq->dma_mem;
767 if (mutex_lock_interruptible(&adapter->mbox_lock))
770 wrb = wrb_from_mbox(adapter);
771 req = embedded_payload(wrb);
773 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
774 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
776 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
778 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
780 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
781 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
782 __ilog2_u32(eq->len/256));
783 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
784 eq_delay_to_mult(eq_delay));
785 be_dws_cpu_to_le(req->context, sizeof(req->context));
787 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
789 status = be_mbox_notify_wait(adapter);
791 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
792 eq->id = le16_to_cpu(resp->eq_id);
796 mutex_unlock(&adapter->mbox_lock);
801 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
802 bool permanent, u32 if_handle, u32 pmac_id)
804 struct be_mcc_wrb *wrb;
805 struct be_cmd_req_mac_query *req;
808 spin_lock_bh(&adapter->mcc_lock);
810 wrb = wrb_from_mccq(adapter);
815 req = embedded_payload(wrb);
817 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
818 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
819 req->type = MAC_ADDRESS_TYPE_NETWORK;
823 req->if_id = cpu_to_le16((u16) if_handle);
824 req->pmac_id = cpu_to_le32(pmac_id);
828 status = be_mcc_notify_wait(adapter);
830 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
831 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
835 spin_unlock_bh(&adapter->mcc_lock);
839 /* Uses synchronous MCCQ */
840 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
841 u32 if_id, u32 *pmac_id, u32 domain)
843 struct be_mcc_wrb *wrb;
844 struct be_cmd_req_pmac_add *req;
847 spin_lock_bh(&adapter->mcc_lock);
849 wrb = wrb_from_mccq(adapter);
854 req = embedded_payload(wrb);
856 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
857 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
859 req->hdr.domain = domain;
860 req->if_id = cpu_to_le32(if_id);
861 memcpy(req->mac_address, mac_addr, ETH_ALEN);
863 status = be_mcc_notify_wait(adapter);
865 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
866 *pmac_id = le32_to_cpu(resp->pmac_id);
870 spin_unlock_bh(&adapter->mcc_lock);
872 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
878 /* Uses synchronous MCCQ */
879 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
881 struct be_mcc_wrb *wrb;
882 struct be_cmd_req_pmac_del *req;
888 spin_lock_bh(&adapter->mcc_lock);
890 wrb = wrb_from_mccq(adapter);
895 req = embedded_payload(wrb);
897 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
898 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
900 req->hdr.domain = dom;
901 req->if_id = cpu_to_le32(if_id);
902 req->pmac_id = cpu_to_le32(pmac_id);
904 status = be_mcc_notify_wait(adapter);
907 spin_unlock_bh(&adapter->mcc_lock);
912 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
913 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
915 struct be_mcc_wrb *wrb;
916 struct be_cmd_req_cq_create *req;
917 struct be_dma_mem *q_mem = &cq->dma_mem;
921 if (mutex_lock_interruptible(&adapter->mbox_lock))
924 wrb = wrb_from_mbox(adapter);
925 req = embedded_payload(wrb);
926 ctxt = &req->context;
928 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
929 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
931 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
932 if (lancer_chip(adapter)) {
933 req->hdr.version = 2;
934 req->page_size = 1; /* 1 for 4K */
935 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
937 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
938 __ilog2_u32(cq->len/256));
939 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
940 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
942 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
945 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
947 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
949 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
950 __ilog2_u32(cq->len/256));
951 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
952 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
953 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
956 be_dws_cpu_to_le(ctxt, sizeof(req->context));
958 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
960 status = be_mbox_notify_wait(adapter);
962 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
963 cq->id = le16_to_cpu(resp->cq_id);
967 mutex_unlock(&adapter->mbox_lock);
972 static u32 be_encoded_q_len(int q_len)
974 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
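/* A queue length of 32K entries (fls(q_len) == 16) is encoded as 0 */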
975 if (len_encoded == 16)
980 int be_cmd_mccq_ext_create(struct be_adapter *adapter,
981 struct be_queue_info *mccq,
982 struct be_queue_info *cq)
984 struct be_mcc_wrb *wrb;
985 struct be_cmd_req_mcc_ext_create *req;
986 struct be_dma_mem *q_mem = &mccq->dma_mem;
990 if (mutex_lock_interruptible(&adapter->mbox_lock))
993 wrb = wrb_from_mbox(adapter);
994 req = embedded_payload(wrb);
995 ctxt = &req->context;
997 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
998 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
1000 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1001 if (lancer_chip(adapter)) {
1002 req->hdr.version = 1;
1003 req->cq_id = cpu_to_le16(cq->id);
1005 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
1006 be_encoded_q_len(mccq->len));
1007 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
1008 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
1010 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
1014 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1015 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1016 be_encoded_q_len(mccq->len));
1017 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1020 /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
1021 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1022 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1024 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1026 status = be_mbox_notify_wait(adapter);
1028 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1029 mccq->id = le16_to_cpu(resp->id);
1030 mccq->created = true;
1032 mutex_unlock(&adapter->mbox_lock);
1037 int be_cmd_mccq_org_create(struct be_adapter *adapter,
1038 struct be_queue_info *mccq,
1039 struct be_queue_info *cq)
1041 struct be_mcc_wrb *wrb;
1042 struct be_cmd_req_mcc_create *req;
1043 struct be_dma_mem *q_mem = &mccq->dma_mem;
1047 if (mutex_lock_interruptible(&adapter->mbox_lock))
1050 wrb = wrb_from_mbox(adapter);
1051 req = embedded_payload(wrb);
1052 ctxt = &req->context;
1054 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1055 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
1057 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1059 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1060 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1061 be_encoded_q_len(mccq->len));
1062 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1064 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1066 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1068 status = be_mbox_notify_wait(adapter);
1070 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1071 mccq->id = le16_to_cpu(resp->id);
1072 mccq->created = true;
1075 mutex_unlock(&adapter->mbox_lock);
1079 int be_cmd_mccq_create(struct be_adapter *adapter,
1080 struct be_queue_info *mccq,
1081 struct be_queue_info *cq)
1085 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1086 if (status && !lancer_chip(adapter)) {
1087 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1088 "or newer to avoid conflicting priorities between NIC "
1089 "and FCoE traffic");
1090 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1095 int be_cmd_txq_create(struct be_adapter *adapter,
1096 struct be_queue_info *txq,
1097 struct be_queue_info *cq)
1099 struct be_mcc_wrb *wrb;
1100 struct be_cmd_req_eth_tx_create *req;
1101 struct be_dma_mem *q_mem = &txq->dma_mem;
1105 spin_lock_bh(&adapter->mcc_lock);
1107 wrb = wrb_from_mccq(adapter);
1113 req = embedded_payload(wrb);
1114 ctxt = &req->context;
1116 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1117 OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);
1119 if (lancer_chip(adapter)) {
1120 req->hdr.version = 1;
1121 AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
1122 adapter->if_handle);
1125 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1126 req->ulp_num = BE_ULP1_NUM;
1127 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1129 AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
1130 be_encoded_q_len(txq->len));
1131 AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
1132 AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
1134 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1136 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1138 status = be_mcc_notify_wait(adapter);
1140 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
1141 txq->id = le16_to_cpu(resp->cid);
1142 txq->created = true;
1146 spin_unlock_bh(&adapter->mcc_lock);
1152 int be_cmd_rxq_create(struct be_adapter *adapter,
1153 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1154 u32 if_id, u32 rss, u8 *rss_id)
1156 struct be_mcc_wrb *wrb;
1157 struct be_cmd_req_eth_rx_create *req;
1158 struct be_dma_mem *q_mem = &rxq->dma_mem;
1161 spin_lock_bh(&adapter->mcc_lock);
1163 wrb = wrb_from_mccq(adapter);
1168 req = embedded_payload(wrb);
1170 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1171 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1173 req->cq_id = cpu_to_le16(cq_id);
1174 req->frag_size = fls(frag_size) - 1;	/* encoded as log2 of the fragment size */
1176 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1177 req->interface_id = cpu_to_le32(if_id);
1178 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1179 req->rss_queue = cpu_to_le32(rss);
1181 status = be_mcc_notify_wait(adapter);
1183 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1184 rxq->id = le16_to_cpu(resp->id);
1185 rxq->created = true;
1186 *rss_id = resp->rss_id;
1190 spin_unlock_bh(&adapter->mcc_lock);
1194 /* Generic destroyer function for all types of queues
1197 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1200 struct be_mcc_wrb *wrb;
1201 struct be_cmd_req_q_destroy *req;
1202 u8 subsys = 0, opcode = 0;
1205 if (mutex_lock_interruptible(&adapter->mbox_lock))
1208 wrb = wrb_from_mbox(adapter);
1209 req = embedded_payload(wrb);
1211 switch (queue_type) {
1213 subsys = CMD_SUBSYSTEM_COMMON;
1214 opcode = OPCODE_COMMON_EQ_DESTROY;
1217 subsys = CMD_SUBSYSTEM_COMMON;
1218 opcode = OPCODE_COMMON_CQ_DESTROY;
1221 subsys = CMD_SUBSYSTEM_ETH;
1222 opcode = OPCODE_ETH_TX_DESTROY;
1225 subsys = CMD_SUBSYSTEM_ETH;
1226 opcode = OPCODE_ETH_RX_DESTROY;
1229 subsys = CMD_SUBSYSTEM_COMMON;
1230 opcode = OPCODE_COMMON_MCC_DESTROY;
1236 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1238 req->id = cpu_to_le16(q->id);
1240 status = be_mbox_notify_wait(adapter);
1243 mutex_unlock(&adapter->mbox_lock);
1248 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1250 struct be_mcc_wrb *wrb;
1251 struct be_cmd_req_q_destroy *req;
1254 spin_lock_bh(&adapter->mcc_lock);
1256 wrb = wrb_from_mccq(adapter);
1261 req = embedded_payload(wrb);
1263 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1264 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1265 req->id = cpu_to_le16(q->id);
1267 status = be_mcc_notify_wait(adapter);
1271 spin_unlock_bh(&adapter->mcc_lock);
1275 /* Create an rx filtering policy configuration on an i/f
1278 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1279 u32 *if_handle, u32 domain)
1281 struct be_mcc_wrb *wrb;
1282 struct be_cmd_req_if_create *req;
1285 spin_lock_bh(&adapter->mcc_lock);
1287 wrb = wrb_from_mccq(adapter);
1292 req = embedded_payload(wrb);
1294 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1295 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
1296 req->hdr.domain = domain;
1297 req->capability_flags = cpu_to_le32(cap_flags);
1298 req->enable_flags = cpu_to_le32(en_flags);
1300 req->pmac_invalid = true;
1302 status = be_mcc_notify_wait(adapter);
1304 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
1305 *if_handle = le32_to_cpu(resp->interface_id);
1309 spin_unlock_bh(&adapter->mcc_lock);
1314 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1316 struct be_mcc_wrb *wrb;
1317 struct be_cmd_req_if_destroy *req;
1320 if (interface_id == -1)
1323 spin_lock_bh(&adapter->mcc_lock);
1325 wrb = wrb_from_mccq(adapter);
1330 req = embedded_payload(wrb);
1332 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1333 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1334 req->hdr.domain = domain;
1335 req->interface_id = cpu_to_le32(interface_id);
1337 status = be_mcc_notify_wait(adapter);
1339 spin_unlock_bh(&adapter->mcc_lock);
1343 /* Get stats is a non embedded command: the request is not embedded inside
1344 * WRB but is a separate dma memory block
1345 * Uses asynchronous MCC
1347 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1349 struct be_mcc_wrb *wrb;
1350 struct be_cmd_req_hdr *hdr;
1353 spin_lock_bh(&adapter->mcc_lock);
1355 wrb = wrb_from_mccq(adapter);
1360 hdr = nonemb_cmd->va;
1362 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1363 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1365 /* version 1 of the cmd is supported on all chips except BE2 */
1366 if (!BE2_chip(adapter))
1369 be_mcc_notify(adapter);
1370 adapter->stats_cmd_sent = true;
1373 spin_unlock_bh(&adapter->mcc_lock);
1378 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1379 struct be_dma_mem *nonemb_cmd)
1382 struct be_mcc_wrb *wrb;
1383 struct lancer_cmd_req_pport_stats *req;
1386 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1390 spin_lock_bh(&adapter->mcc_lock);
1392 wrb = wrb_from_mccq(adapter);
1397 req = nonemb_cmd->va;
1399 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1400 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1403 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1404 req->cmd_params.params.reset_stats = 0;
1406 be_mcc_notify(adapter);
1407 adapter->stats_cmd_sent = true;
1410 spin_unlock_bh(&adapter->mcc_lock);
1414 static int be_mac_to_link_speed(int mac_speed)
1416 switch (mac_speed) {
1417 case PHY_LINK_SPEED_ZERO:
1419 case PHY_LINK_SPEED_10MBPS:
1421 case PHY_LINK_SPEED_100MBPS:
1423 case PHY_LINK_SPEED_1GBPS:
1425 case PHY_LINK_SPEED_10GBPS:
1431 /* Uses synchronous mcc
1432 * Returns link_speed in Mbps
1434 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1435 u8 *link_status, u32 dom)
1437 struct be_mcc_wrb *wrb;
1438 struct be_cmd_req_link_status *req;
1441 spin_lock_bh(&adapter->mcc_lock);
1444 *link_status = LINK_DOWN;
1446 wrb = wrb_from_mccq(adapter);
1451 req = embedded_payload(wrb);
1453 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1454 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1456 /* version 1 of the cmd is supported on all chips except BE2 */
1457 if (!BE2_chip(adapter))
1458 req->hdr.version = 1;
1460 req->hdr.domain = dom;
1462 status = be_mcc_notify_wait(adapter);
1464 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
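/* FW reports link_speed in units of 10 Mbps; when it is zero, derive
 * the speed from the reported mac_speed instead.
 */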
1466 *link_speed = resp->link_speed ?
1467 le16_to_cpu(resp->link_speed) * 10 :
1468 be_mac_to_link_speed(resp->mac_speed);
1470 if (!resp->logical_link_status)
1474 *link_status = resp->logical_link_status;
1478 spin_unlock_bh(&adapter->mcc_lock);
1482 /* Uses synchronous mcc */
1483 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1485 struct be_mcc_wrb *wrb;
1486 struct be_cmd_req_get_cntl_addnl_attribs *req;
1489 spin_lock_bh(&adapter->mcc_lock);
1491 wrb = wrb_from_mccq(adapter);
1496 req = embedded_payload(wrb);
1498 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1499 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1502 be_mcc_notify(adapter);
1505 spin_unlock_bh(&adapter->mcc_lock);
1509 /* Uses synchronous mcc */
1510 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1512 struct be_mcc_wrb *wrb;
1513 struct be_cmd_req_get_fat *req;
1516 spin_lock_bh(&adapter->mcc_lock);
1518 wrb = wrb_from_mccq(adapter);
1523 req = embedded_payload(wrb);
1525 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1526 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1527 req->fat_operation = cpu_to_le32(QUERY_FAT);
1528 status = be_mcc_notify_wait(adapter);
1530 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1531 if (log_size && resp->log_size)
1532 *log_size = le32_to_cpu(resp->log_size) -
1536 spin_unlock_bh(&adapter->mcc_lock);
1540 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1542 struct be_dma_mem get_fat_cmd;
1543 struct be_mcc_wrb *wrb;
1544 struct be_cmd_req_get_fat *req;
1545 u32 offset = 0, total_size, buf_size,
1546 log_offset = sizeof(u32), payload_len;
1552 total_size = buf_len;
1554 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1555 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1558 if (!get_fat_cmd.va) {
1560 dev_err(&adapter->pdev->dev,
1561 "Memory allocation failure while retrieving FAT data\n");
1565 spin_lock_bh(&adapter->mcc_lock);
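/* Retrieve the FAT log in chunks of at most 60KB per MCC command */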
1567 while (total_size) {
1568 buf_size = min(total_size, (u32)60*1024);
1569 total_size -= buf_size;
1571 wrb = wrb_from_mccq(adapter);
1576 req = get_fat_cmd.va;
1578 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1579 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1580 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1583 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1584 req->read_log_offset = cpu_to_le32(log_offset);
1585 req->read_log_length = cpu_to_le32(buf_size);
1586 req->data_buffer_size = cpu_to_le32(buf_size);
1588 status = be_mcc_notify_wait(adapter);
1590 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1591 memcpy(buf + offset,
1593 le32_to_cpu(resp->read_log_length));
1595 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1599 log_offset += buf_size;
1602 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1605 spin_unlock_bh(&adapter->mcc_lock);
1608 /* Uses synchronous mcc */
1609 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1612 struct be_mcc_wrb *wrb;
1613 struct be_cmd_req_get_fw_version *req;
1616 spin_lock_bh(&adapter->mcc_lock);
1618 wrb = wrb_from_mccq(adapter);
1624 req = embedded_payload(wrb);
1626 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1627 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1628 status = be_mcc_notify_wait(adapter);
1630 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1631 strcpy(fw_ver, resp->firmware_version_string);
1633 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1636 spin_unlock_bh(&adapter->mcc_lock);
1640 /* set the EQ delay interval of an EQ to specified value
1643 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1645 struct be_mcc_wrb *wrb;
1646 struct be_cmd_req_modify_eq_delay *req;
1649 spin_lock_bh(&adapter->mcc_lock);
1651 wrb = wrb_from_mccq(adapter);
1656 req = embedded_payload(wrb);
1658 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1659 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1661 req->num_eq = cpu_to_le32(1);
1662 req->delay[0].eq_id = cpu_to_le32(eq_id);
1663 req->delay[0].phase = 0;
1664 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1666 be_mcc_notify(adapter);
1669 spin_unlock_bh(&adapter->mcc_lock);
1673 /* Uses synchronous mcc */
1674 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1675 u32 num, bool untagged, bool promiscuous)
1677 struct be_mcc_wrb *wrb;
1678 struct be_cmd_req_vlan_config *req;
1681 spin_lock_bh(&adapter->mcc_lock);
1683 wrb = wrb_from_mccq(adapter);
1688 req = embedded_payload(wrb);
1690 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1691 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1693 req->interface_id = if_id;
1694 req->promiscuous = promiscuous;
1695 req->untagged = untagged;
1696 req->num_vlan = num;
1698 memcpy(req->normal_vlan, vtag_array,
1699 req->num_vlan * sizeof(vtag_array[0]));
1702 status = be_mcc_notify_wait(adapter);
1705 spin_unlock_bh(&adapter->mcc_lock);
1709 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1711 struct be_mcc_wrb *wrb;
1712 struct be_dma_mem *mem = &adapter->rx_filter;
1713 struct be_cmd_req_rx_filter *req = mem->va;
1716 spin_lock_bh(&adapter->mcc_lock);
1718 wrb = wrb_from_mccq(adapter);
1723 memset(req, 0, sizeof(*req));
1724 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1725 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1728 req->if_id = cpu_to_le32(adapter->if_handle);
1729 if (flags & IFF_PROMISC) {
1730 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1731 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1733 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1734 BE_IF_FLAGS_VLAN_PROMISCUOUS);
1735 } else if (flags & IFF_ALLMULTI) {
1736 req->if_flags_mask = req->if_flags =
1737 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1739 struct netdev_hw_addr *ha;
1742 req->if_flags_mask = req->if_flags =
1743 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1745 /* Reset mcast promisc mode if already set by setting mask
1746 * and not setting flags field
1748 req->if_flags_mask |=
1749 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1750 adapter->if_cap_flags);
1752 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1753 netdev_for_each_mc_addr(ha, adapter->netdev)
1754 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1757 status = be_mcc_notify_wait(adapter);
1759 spin_unlock_bh(&adapter->mcc_lock);
1763 /* Uses synchronous mcc */
1764 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1766 struct be_mcc_wrb *wrb;
1767 struct be_cmd_req_set_flow_control *req;
1770 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1771 CMD_SUBSYSTEM_COMMON))
1774 spin_lock_bh(&adapter->mcc_lock);
1776 wrb = wrb_from_mccq(adapter);
1781 req = embedded_payload(wrb);
1783 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1784 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1786 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1787 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1789 status = be_mcc_notify_wait(adapter);
1792 spin_unlock_bh(&adapter->mcc_lock);
1797 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1799 struct be_mcc_wrb *wrb;
1800 struct be_cmd_req_get_flow_control *req;
1803 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1804 CMD_SUBSYSTEM_COMMON))
1807 spin_lock_bh(&adapter->mcc_lock);
1809 wrb = wrb_from_mccq(adapter);
1814 req = embedded_payload(wrb);
1816 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1817 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1819 status = be_mcc_notify_wait(adapter);
1821 struct be_cmd_resp_get_flow_control *resp =
1822 embedded_payload(wrb);
1823 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1824 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1828 spin_unlock_bh(&adapter->mcc_lock);
1833 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1834 u32 *mode, u32 *caps)
1836 struct be_mcc_wrb *wrb;
1837 struct be_cmd_req_query_fw_cfg *req;
1840 if (mutex_lock_interruptible(&adapter->mbox_lock))
1843 wrb = wrb_from_mbox(adapter);
1844 req = embedded_payload(wrb);
1846 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1847 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1849 status = be_mbox_notify_wait(adapter);
1851 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1852 *port_num = le32_to_cpu(resp->phys_port);
1853 *mode = le32_to_cpu(resp->function_mode);
1854 *caps = le32_to_cpu(resp->function_caps);
1857 mutex_unlock(&adapter->mbox_lock);
1862 int be_cmd_reset_function(struct be_adapter *adapter)
1864 struct be_mcc_wrb *wrb;
1865 struct be_cmd_req_hdr *req;
1868 if (lancer_chip(adapter)) {
1869 status = lancer_wait_ready(adapter);
1871 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1872 adapter->db + SLIPORT_CONTROL_OFFSET);
1873 status = lancer_test_and_set_rdy_state(adapter);
1876 dev_err(&adapter->pdev->dev,
1877 "Adapter in non recoverable error\n");
1882 if (mutex_lock_interruptible(&adapter->mbox_lock))
1885 wrb = wrb_from_mbox(adapter);
1886 req = embedded_payload(wrb);
1888 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1889 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1891 status = be_mbox_notify_wait(adapter);
1893 mutex_unlock(&adapter->mbox_lock);
1897 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1899 struct be_mcc_wrb *wrb;
1900 struct be_cmd_req_rss_config *req;
1901 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1902 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1903 0x3ea83c02, 0x4a110304};
1906 if (mutex_lock_interruptible(&adapter->mbox_lock))
1909 wrb = wrb_from_mbox(adapter);
1910 req = embedded_payload(wrb);
1912 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1913 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
1915 req->if_id = cpu_to_le32(adapter->if_handle);
1916 req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
1917 RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);
1919 if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
1920 req->hdr.version = 1;
1921 req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
1922 RSS_ENABLE_UDP_IPV6);
1925 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);	/* log2 of the RSS table size */
1926 memcpy(req->cpu_table, rsstable, table_size);
1927 memcpy(req->hash, myhash, sizeof(myhash));
1928 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1930 status = be_mbox_notify_wait(adapter);
1932 mutex_unlock(&adapter->mbox_lock);
1937 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1938 u8 bcn, u8 sts, u8 state)
1940 struct be_mcc_wrb *wrb;
1941 struct be_cmd_req_enable_disable_beacon *req;
1944 spin_lock_bh(&adapter->mcc_lock);
1946 wrb = wrb_from_mccq(adapter);
1951 req = embedded_payload(wrb);
1953 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1954 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1956 req->port_num = port_num;
1957 req->beacon_state = state;
1958 req->beacon_duration = bcn;
1959 req->status_duration = sts;
1961 status = be_mcc_notify_wait(adapter);
1964 spin_unlock_bh(&adapter->mcc_lock);
1969 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1971 struct be_mcc_wrb *wrb;
1972 struct be_cmd_req_get_beacon_state *req;
1975 spin_lock_bh(&adapter->mcc_lock);
1977 wrb = wrb_from_mccq(adapter);
1982 req = embedded_payload(wrb);
1984 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1985 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
1987 req->port_num = port_num;
1989 status = be_mcc_notify_wait(adapter);
1991 struct be_cmd_resp_get_beacon_state *resp =
1992 embedded_payload(wrb);
1993 *state = resp->beacon_state;
1997 spin_unlock_bh(&adapter->mcc_lock);
2001 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2002 u32 data_size, u32 data_offset,
2003 const char *obj_name, u32 *data_written,
2004 u8 *change_status, u8 *addn_status)
2006 struct be_mcc_wrb *wrb;
2007 struct lancer_cmd_req_write_object *req;
2008 struct lancer_cmd_resp_write_object *resp;
2012 spin_lock_bh(&adapter->mcc_lock);
2013 adapter->flash_status = 0;
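/* Flash writes complete asynchronously: be_mcc_compl_process() records
 * the status in adapter->flash_status and wakes adapter->flash_compl.
 */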
2015 wrb = wrb_from_mccq(adapter);
2021 req = embedded_payload(wrb);
2023 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2024 OPCODE_COMMON_WRITE_OBJECT,
2025 sizeof(struct lancer_cmd_req_write_object), wrb,
2028 ctxt = &req->context;
2029 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2030 write_length, ctxt, data_size);
2033 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2036 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2039 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2040 req->write_offset = cpu_to_le32(data_offset);
2041 strcpy(req->object_name, obj_name);
2042 req->descriptor_count = cpu_to_le32(1);
2043 req->buf_len = cpu_to_le32(data_size);
2044 req->addr_low = cpu_to_le32((cmd->dma +
2045 sizeof(struct lancer_cmd_req_write_object))
2047 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2048 sizeof(struct lancer_cmd_req_write_object)));
2050 be_mcc_notify(adapter);
2051 spin_unlock_bh(&adapter->mcc_lock);
2053 if (!wait_for_completion_timeout(&adapter->flash_compl,
2054 msecs_to_jiffies(30000)))
2057 status = adapter->flash_status;
2059 resp = embedded_payload(wrb);
2061 *data_written = le32_to_cpu(resp->actual_write_len);
2062 *change_status = resp->change_status;
2064 *addn_status = resp->additional_status;
2070 spin_unlock_bh(&adapter->mcc_lock);
2074 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2075 u32 data_size, u32 data_offset, const char *obj_name,
2076 u32 *data_read, u32 *eof, u8 *addn_status)
2078 struct be_mcc_wrb *wrb;
2079 struct lancer_cmd_req_read_object *req;
2080 struct lancer_cmd_resp_read_object *resp;
2083 spin_lock_bh(&adapter->mcc_lock);
2085 wrb = wrb_from_mccq(adapter);
2091 req = embedded_payload(wrb);
2093 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2094 OPCODE_COMMON_READ_OBJECT,
2095 sizeof(struct lancer_cmd_req_read_object), wrb,
2098 req->desired_read_len = cpu_to_le32(data_size);
2099 req->read_offset = cpu_to_le32(data_offset);
2100 strcpy(req->object_name, obj_name);
2101 req->descriptor_count = cpu_to_le32(1);
2102 req->buf_len = cpu_to_le32(data_size);
2103 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2104 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2106 status = be_mcc_notify_wait(adapter);
2108 resp = embedded_payload(wrb);
2110 *data_read = le32_to_cpu(resp->actual_read_len);
2111 *eof = le32_to_cpu(resp->eof);
2113 *addn_status = resp->additional_status;
2117 spin_unlock_bh(&adapter->mcc_lock);
2121 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2122 u32 flash_type, u32 flash_opcode, u32 buf_size)
2124 struct be_mcc_wrb *wrb;
2125 struct be_cmd_write_flashrom *req;
2128 spin_lock_bh(&adapter->mcc_lock);
2129 adapter->flash_status = 0;
2131 wrb = wrb_from_mccq(adapter);
2138 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2139 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2141 req->params.op_type = cpu_to_le32(flash_type);
2142 req->params.op_code = cpu_to_le32(flash_opcode);
2143 req->params.data_buf_size = cpu_to_le32(buf_size);
2145 be_mcc_notify(adapter);
2146 spin_unlock_bh(&adapter->mcc_lock);
2148 if (!wait_for_completion_timeout(&adapter->flash_compl,
2149 msecs_to_jiffies(40000)))
2152 status = adapter->flash_status;
2157 spin_unlock_bh(&adapter->mcc_lock);
2161 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2164 struct be_mcc_wrb *wrb;
2165 struct be_cmd_read_flash_crc *req;
2168 spin_lock_bh(&adapter->mcc_lock);
2170 wrb = wrb_from_mccq(adapter);
2175 req = embedded_payload(wrb);
2177 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2178 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2181 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2182 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2183 req->params.offset = cpu_to_le32(offset);
2184 req->params.data_buf_size = cpu_to_le32(0x4);
2186 status = be_mcc_notify_wait(adapter);
2188 memcpy(flashed_crc, req->crc, 4);
2191 spin_unlock_bh(&adapter->mcc_lock);
2195 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2196 struct be_dma_mem *nonemb_cmd)
2198 struct be_mcc_wrb *wrb;
2199 struct be_cmd_req_acpi_wol_magic_config *req;
2202 spin_lock_bh(&adapter->mcc_lock);
2204 wrb = wrb_from_mccq(adapter);
2209 req = nonemb_cmd->va;
2211 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2212 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2214 memcpy(req->magic_mac, mac, ETH_ALEN);
2216 status = be_mcc_notify_wait(adapter);
2219 spin_unlock_bh(&adapter->mcc_lock);
2223 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2224 u8 loopback_type, u8 enable)
2226 struct be_mcc_wrb *wrb;
2227 struct be_cmd_req_set_lmode *req;
2230 spin_lock_bh(&adapter->mcc_lock);
2232 wrb = wrb_from_mccq(adapter);
2238 req = embedded_payload(wrb);
2240 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2241 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2244 req->src_port = port_num;
2245 req->dest_port = port_num;
2246 req->loopback_type = loopback_type;
2247 req->loopback_state = enable;
2249 status = be_mcc_notify_wait(adapter);
2251 spin_unlock_bh(&adapter->mcc_lock);
2255 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2256 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2258 struct be_mcc_wrb *wrb;
2259 struct be_cmd_req_loopback_test *req;
2262 spin_lock_bh(&adapter->mcc_lock);
2264 wrb = wrb_from_mccq(adapter);
2270 req = embedded_payload(wrb);
2272 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2273 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2274 req->hdr.timeout = cpu_to_le32(4);
2276 req->pattern = cpu_to_le64(pattern);
2277 req->src_port = cpu_to_le32(port_num);
2278 req->dest_port = cpu_to_le32(port_num);
2279 req->pkt_size = cpu_to_le32(pkt_size);
2280 req->num_pkts = cpu_to_le32(num_pkts);
2281 req->loopback_type = cpu_to_le32(loopback_type);
2283 status = be_mcc_notify_wait(adapter);
2285 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2286 status = le32_to_cpu(resp->status);
2290 spin_unlock_bh(&adapter->mcc_lock);
2294 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2295 u32 byte_cnt, struct be_dma_mem *cmd)
2297 struct be_mcc_wrb *wrb;
2298 struct be_cmd_req_ddrdma_test *req;
2302 spin_lock_bh(&adapter->mcc_lock);
2304 wrb = wrb_from_mccq(adapter);
2310 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2311 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2313 req->pattern = cpu_to_le64(pattern);
2314 req->byte_count = cpu_to_le32(byte_cnt);
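/* Fill the send buffer by replicating the 64-bit pattern byte by byte */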
2315 for (i = 0; i < byte_cnt; i++) {
2316 req->snd_buff[i] = (u8)(pattern >> (j*8));
2322 status = be_mcc_notify_wait(adapter);
2325 struct be_cmd_resp_ddrdma_test *resp;
2327 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2334 spin_unlock_bh(&adapter->mcc_lock);
2338 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2339 struct be_dma_mem *nonemb_cmd)
2341 struct be_mcc_wrb *wrb;
2342 struct be_cmd_req_seeprom_read *req;
2346 spin_lock_bh(&adapter->mcc_lock);
2348 wrb = wrb_from_mccq(adapter);
2353 req = nonemb_cmd->va;
2354 sge = nonembedded_sgl(wrb);
2356 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2357 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2360 status = be_mcc_notify_wait(adapter);
2363 spin_unlock_bh(&adapter->mcc_lock);
2367 int be_cmd_get_phy_info(struct be_adapter *adapter)
2369 struct be_mcc_wrb *wrb;
2370 struct be_cmd_req_get_phy_info *req;
2371 struct be_dma_mem cmd;
2374 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2375 CMD_SUBSYSTEM_COMMON))
2378 spin_lock_bh(&adapter->mcc_lock);
2380 wrb = wrb_from_mccq(adapter);
2385 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2386 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2389 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2396 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2397 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2400 status = be_mcc_notify_wait(adapter);
2402 struct be_phy_info *resp_phy_info =
2403 cmd.va + sizeof(struct be_cmd_req_hdr);
2404 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2405 adapter->phy.interface_type =
2406 le16_to_cpu(resp_phy_info->interface_type);
2407 adapter->phy.auto_speeds_supported =
2408 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2409 adapter->phy.fixed_speeds_supported =
2410 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2411 adapter->phy.misc_params =
2412 le32_to_cpu(resp_phy_info->misc_params);
2414 pci_free_consistent(adapter->pdev, cmd.size,
2417 spin_unlock_bh(&adapter->mcc_lock);
2421 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2423 struct be_mcc_wrb *wrb;
2424 struct be_cmd_req_set_qos *req;
2427 spin_lock_bh(&adapter->mcc_lock);
2429 wrb = wrb_from_mccq(adapter);
2435 req = embedded_payload(wrb);
2437 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2438 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2440 req->hdr.domain = domain;
2441 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2442 req->max_bps_nic = cpu_to_le32(bps);
2444 status = be_mcc_notify_wait(adapter);
2447 spin_unlock_bh(&adapter->mcc_lock);
2451 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2453 struct be_mcc_wrb *wrb;
2454 struct be_cmd_req_cntl_attribs *req;
2455 struct be_cmd_resp_cntl_attribs *resp;
2457 int payload_len = max(sizeof(*req), sizeof(*resp));
2458 struct mgmt_controller_attrib *attribs;
2459 struct be_dma_mem attribs_cmd;
2461 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2462 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2463 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2465 if (!attribs_cmd.va) {
2466 dev_err(&adapter->pdev->dev,
2467 "Memory allocation failure\n");
2471 if (mutex_lock_interruptible(&adapter->mbox_lock))
2474 wrb = wrb_from_mbox(adapter);
2479 req = attribs_cmd.va;
2481 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2482 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2485 status = be_mbox_notify_wait(adapter);
2487 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2488 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2492 mutex_unlock(&adapter->mbox_lock);
2493 pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2499 int be_cmd_req_native_mode(struct be_adapter *adapter)
2501 struct be_mcc_wrb *wrb;
2502 struct be_cmd_req_set_func_cap *req;
2505 if (mutex_lock_interruptible(&adapter->mbox_lock))
2508 wrb = wrb_from_mbox(adapter);
2514 req = embedded_payload(wrb);
2516 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2517 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2519 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2520 CAPABILITY_BE3_NATIVE_ERX_API);
2521 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2523 status = be_mbox_notify_wait(adapter);
2525 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2526 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2527 CAPABILITY_BE3_NATIVE_ERX_API;
2528 if (!adapter->be3_native)
2529 dev_warn(&adapter->pdev->dev,
2530 "adapter not in advanced mode\n");
2533 mutex_unlock(&adapter->mbox_lock);
2537 /* Get privilege(s) for a function */
2538 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2541 struct be_mcc_wrb *wrb;
2542 struct be_cmd_req_get_fn_privileges *req;
2545 spin_lock_bh(&adapter->mcc_lock);
2547 wrb = wrb_from_mccq(adapter);
2553 req = embedded_payload(wrb);
2555 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2556 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2559 req->hdr.domain = domain;
2561 status = be_mcc_notify_wait(adapter);
2563 struct be_cmd_resp_get_fn_privileges *resp =
2564 embedded_payload(wrb);
2565 *privilege = le32_to_cpu(resp->privilege_mask);
2569 spin_unlock_bh(&adapter->mcc_lock);
2573 /* Uses synchronous MCCQ */
2574 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2575 bool *pmac_id_active, u32 *pmac_id, u8 domain)
2577 struct be_mcc_wrb *wrb;
2578 struct be_cmd_req_get_mac_list *req;
2581 struct be_dma_mem get_mac_list_cmd;
2584 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2585 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2586 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2587 get_mac_list_cmd.size,
2588 &get_mac_list_cmd.dma);
2590 if (!get_mac_list_cmd.va) {
2591 dev_err(&adapter->pdev->dev,
2592 "Memory allocation failure during GET_MAC_LIST\n");
2596 spin_lock_bh(&adapter->mcc_lock);
2598 wrb = wrb_from_mccq(adapter);
2604 req = get_mac_list_cmd.va;
2606 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2607 OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
2608 wrb, &get_mac_list_cmd);
2610 req->hdr.domain = domain;
2611 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2612 req->perm_override = 1;
2614 status = be_mcc_notify_wait(adapter);
2616 struct be_cmd_resp_get_mac_list *resp =
2617 get_mac_list_cmd.va;
2618 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2619 /* Mac list returned could contain one or more active mac_ids
2620 * or one or more true or pseudo permanent mac addresses.
2621 * If an active mac_id is present, return the first active mac_id found. */
2624 for (i = 0; i < mac_count; i++) {
2625 struct get_list_macaddr *mac_entry;
2629 mac_entry = &resp->macaddr_list[i];
2630 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2631 /* mac_id is a 32 bit value and mac_addr size is 6 bytes */
2634 if (mac_addr_size == sizeof(u32)) {
2635 *pmac_id_active = true;
2636 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2637 *pmac_id = le32_to_cpu(mac_id);
2641 /* If no active mac_id found, return first mac addr */
2642 *pmac_id_active = false;
2643 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2648 spin_unlock_bh(&adapter->mcc_lock);
2649 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2650 get_mac_list_cmd.va, get_mac_list_cmd.dma);
2654 /* Uses synchronous MCCQ */
2655 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2656 u8 mac_count, u32 domain)
2658 struct be_mcc_wrb *wrb;
2659 struct be_cmd_req_set_mac_list *req;
2661 struct be_dma_mem cmd;
2663 memset(&cmd, 0, sizeof(struct be_dma_mem));
2664 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2665 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2666 &cmd.dma, GFP_KERNEL);
2668 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2672 spin_lock_bh(&adapter->mcc_lock);
2674 wrb = wrb_from_mccq(adapter);
2681 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2682 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2685 req->hdr.domain = domain;
2686 req->mac_count = mac_count;
2688 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2690 status = be_mcc_notify_wait(adapter);
2693 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2695 spin_unlock_bh(&adapter->mcc_lock);
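/* Uses synchronous MCCQ: set hyper switch config (port PVID) for the given interface */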
2699 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2700 u32 domain, u16 intf_id)
2702 struct be_mcc_wrb *wrb;
2703 struct be_cmd_req_set_hsw_config *req;
2707 spin_lock_bh(&adapter->mcc_lock);
2709 wrb = wrb_from_mccq(adapter);
2715 req = embedded_payload(wrb);
2716 ctxt = &req->context;
2718 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2719 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2721 req->hdr.domain = domain;
2722 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2724 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2725 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2728 be_dws_cpu_to_le(req->context, sizeof(req->context));
2729 status = be_mcc_notify_wait(adapter);
2732 spin_unlock_bh(&adapter->mcc_lock);
2736 /* Get Hyper switch config */
2737 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2738 u32 domain, u16 intf_id)
2740 struct be_mcc_wrb *wrb;
2741 struct be_cmd_req_get_hsw_config *req;
2746 spin_lock_bh(&adapter->mcc_lock);
2748 wrb = wrb_from_mccq(adapter);
2754 req = embedded_payload(wrb);
2755 ctxt = &req->context;
2757 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2758 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2760 req->hdr.domain = domain;
2761 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2763 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2764 be_dws_cpu_to_le(req->context, sizeof(req->context));
2766 status = be_mcc_notify_wait(adapter);
2768 struct be_cmd_resp_get_hsw_config *resp =
2769 embedded_payload(wrb);
2770 be_dws_le_to_cpu(&resp->context,
2771 sizeof(resp->context));
2772 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2773 pvid, &resp->context);
2774 *pvid = le16_to_cpu(vid);
2778 spin_unlock_bh(&adapter->mcc_lock);
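/* Uses synchronous MBOX: query the ACPI wake-on-LAN (magic packet) capability and cache it in adapter->wol_cap */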
2782 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2784 struct be_mcc_wrb *wrb;
2785 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2787 int payload_len = sizeof(*req);
2788 struct be_dma_mem cmd;
2790 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2794 memset(&cmd, 0, sizeof(struct be_dma_mem));
2795 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2796 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2799 dev_err(&adapter->pdev->dev,
2800 "Memory allocation failure\n");
2804 if (mutex_lock_interruptible(&adapter->mbox_lock))
2807 wrb = wrb_from_mbox(adapter);
2815 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2816 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2817 payload_len, wrb, &cmd);
2819 req->hdr.version = 1;
2820 req->query_options = BE_GET_WOL_CAP;
2822 status = be_mbox_notify_wait(adapter);
2824 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2825 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2827 /* the command could succeed misleadingly on older f/w that is not
2828 * aware of the V1 version of this command; fake an error in that case. */
2829 if (resp->hdr.response_length < payload_len) {
2833 adapter->wol_cap = resp->wol_settings;
2836 mutex_unlock(&adapter->mbox_lock);
2837 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
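/* Uses synchronous MBOX: read the extended FAT capabilities into the caller-provided DMA buffer */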
2841 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2842 struct be_dma_mem *cmd)
2844 struct be_mcc_wrb *wrb;
2845 struct be_cmd_req_get_ext_fat_caps *req;
2848 if (mutex_lock_interruptible(&adapter->mbox_lock))
2851 wrb = wrb_from_mbox(adapter);
2858 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2859 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2860 cmd->size, wrb, cmd);
2861 req->parameter_type = cpu_to_le32(1);
2863 status = be_mbox_notify_wait(adapter);
2865 mutex_unlock(&adapter->mbox_lock);
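/* Uses synchronous MCCQ: write the extended FAT configuration parameters supplied in 'configs' */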
2869 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2870 struct be_dma_mem *cmd,
2871 struct be_fat_conf_params *configs)
2873 struct be_mcc_wrb *wrb;
2874 struct be_cmd_req_set_ext_fat_caps *req;
2877 spin_lock_bh(&adapter->mcc_lock);
2879 wrb = wrb_from_mccq(adapter);
2886 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2887 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2888 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2889 cmd->size, wrb, cmd);
2891 status = be_mcc_notify_wait(adapter);
2893 spin_unlock_bh(&adapter->mcc_lock);
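/* Uses synchronous MCCQ on Lancer; other chips derive the port name directly from the HBA port number */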
2897 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2899 struct be_mcc_wrb *wrb;
2900 struct be_cmd_req_get_port_name *req;
2903 if (!lancer_chip(adapter)) {
2904 *port_name = adapter->hba_port_num + '0';
2908 spin_lock_bh(&adapter->mcc_lock);
2910 wrb = wrb_from_mccq(adapter);
2916 req = embedded_payload(wrb);
2918 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2919 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2921 req->hdr.version = 1;
2923 status = be_mcc_notify_wait(adapter);
2925 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2926 *port_name = resp->port_name[adapter->hba_port_num];
2928 *port_name = adapter->hba_port_num + '0';
2931 spin_unlock_bh(&adapter->mcc_lock);
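/* Scan the resource descriptor list for the NIC descriptor; returns NULL
 * if it is not found or the buffer bound is exceeded */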
2935 static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2938 struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
2941 for (i = 0; i < desc_count; i++) {
2942 desc->desc_len = RESOURCE_DESC_SIZE;
2943 if (((void *)desc + desc->desc_len) >
2944 (void *)(buf + max_buf_size)) {
2949 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
2952 desc = (void *)desc + desc->desc_len;
2955 if (!desc || i == MAX_RESOURCE_DESC)
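/* Uses synchronous MBOX: read per-function resource limits (MAC/VLAN counts,
 * queue counts and capability flags) into the adapter */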
2962 int be_cmd_get_func_config(struct be_adapter *adapter)
2964 struct be_mcc_wrb *wrb;
2965 struct be_cmd_req_get_func_config *req;
2967 struct be_dma_mem cmd;
2969 memset(&cmd, 0, sizeof(struct be_dma_mem));
2970 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2971 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2974 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2977 if (mutex_lock_interruptible(&adapter->mbox_lock))
2980 wrb = wrb_from_mbox(adapter);
2988 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2989 OPCODE_COMMON_GET_FUNC_CONFIG,
2990 cmd.size, wrb, &cmd);
2992 status = be_mbox_notify_wait(adapter);
2994 struct be_cmd_resp_get_func_config *resp = cmd.va;
2995 u32 desc_count = le32_to_cpu(resp->desc_count);
2996 struct be_nic_resource_desc *desc;
2998 desc = be_get_nic_desc(resp->func_param, desc_count,
2999 sizeof(resp->func_param));
3005 adapter->pf_number = desc->pf_num;
3006 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3007 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3008 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3009 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3010 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3011 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3013 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3014 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3017 mutex_unlock(&adapter->mbox_lock);
3018 pci_free_consistent(adapter->pdev, cmd.size,
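/* Uses synchronous MCCQ: query the active profile and return its NIC capability flags for the given domain */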
3024 int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3027 struct be_mcc_wrb *wrb;
3028 struct be_cmd_req_get_profile_config *req;
3030 struct be_dma_mem cmd;
3032 memset(&cmd, 0, sizeof(struct be_dma_mem));
3033 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3034 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3037 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3041 spin_lock_bh(&adapter->mcc_lock);
3043 wrb = wrb_from_mccq(adapter);
3051 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3052 OPCODE_COMMON_GET_PROFILE_CONFIG,
3053 cmd.size, wrb, &cmd);
3055 req->type = ACTIVE_PROFILE_TYPE;
3056 req->hdr.domain = domain;
3058 status = be_mcc_notify_wait(adapter);
3060 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3061 u32 desc_count = le32_to_cpu(resp->desc_count);
3062 struct be_nic_resource_desc *desc;
3064 desc = be_get_nic_desc(resp->func_param, desc_count,
3065 sizeof(resp->func_param));
3071 *cap_flags = le32_to_cpu(desc->cap_flags);
3074 spin_unlock_bh(&adapter->mcc_lock);
3075 pci_free_consistent(adapter->pdev, cmd.size,
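/* Uses synchronous MCCQ: program min/max bandwidth (bps) in a VF's NIC resource
 * descriptor; all other descriptor fields are marked invalid */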
3081 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3084 struct be_mcc_wrb *wrb;
3085 struct be_cmd_req_set_profile_config *req;
3088 spin_lock_bh(&adapter->mcc_lock);
3090 wrb = wrb_from_mccq(adapter);
3096 req = embedded_payload(wrb);
3098 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3099 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3102 req->hdr.domain = domain;
3103 req->desc_count = cpu_to_le32(1);
3105 req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
3106 req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3107 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3108 req->nic_desc.pf_num = adapter->pf_number;
3109 req->nic_desc.vf_num = domain;
3111 /* Mark fields invalid */
3112 req->nic_desc.unicast_mac_count = 0xFFFF;
3113 req->nic_desc.mcc_count = 0xFFFF;
3114 req->nic_desc.vlan_count = 0xFFFF;
3115 req->nic_desc.mcast_mac_count = 0xFFFF;
3116 req->nic_desc.txq_count = 0xFFFF;
3117 req->nic_desc.rq_count = 0xFFFF;
3118 req->nic_desc.rssq_count = 0xFFFF;
3119 req->nic_desc.lro_count = 0xFFFF;
3120 req->nic_desc.cq_count = 0xFFFF;
3121 req->nic_desc.toe_conn_count = 0xFFFF;
3122 req->nic_desc.eq_count = 0xFFFF;
3123 req->nic_desc.link_param = 0xFF;
3124 req->nic_desc.bw_min = 0xFFFFFFFF;
3125 req->nic_desc.acpi_params = 0xFF;
3126 req->nic_desc.wol_param = 0x0F;
3129 req->nic_desc.bw_min = cpu_to_le32(bps);
3130 req->nic_desc.bw_max = cpu_to_le32(bps);
3131 status = be_mcc_notify_wait(adapter);
3133 spin_unlock_bh(&adapter->mcc_lock);
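/* Uses synchronous MCCQ: retrieve the interface handle (if_id) for the given VF */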
3137 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3140 struct be_mcc_wrb *wrb;
3141 struct be_cmd_req_get_iface_list *req;
3142 struct be_cmd_resp_get_iface_list *resp;
3145 spin_lock_bh(&adapter->mcc_lock);
3147 wrb = wrb_from_mccq(adapter);
3152 req = embedded_payload(wrb);
3154 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3155 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3157 req->hdr.domain = vf_num + 1;
3159 status = be_mcc_notify_wait(adapter);
3161 resp = (struct be_cmd_resp_get_iface_list *)req;
3162 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3166 spin_unlock_bh(&adapter->mcc_lock);
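/* Uses synchronous MCCQ: enable the VF in the given domain (Lancer only; a no-op on other chips) */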
3171 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3173 struct be_mcc_wrb *wrb;
3174 struct be_cmd_enable_disable_vf *req;
3177 if (!lancer_chip(adapter))
3180 spin_lock_bh(&adapter->mcc_lock);
3182 wrb = wrb_from_mccq(adapter);
3188 req = embedded_payload(wrb);
3190 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3191 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3194 req->hdr.domain = domain;
3196 status = be_mcc_notify_wait(adapter);
3198 spin_unlock_bh(&adapter->mcc_lock);
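/* Issue an MCC command on behalf of the RoCE driver: copy the caller's
 * payload into a WRB, wait for completion and copy the response and
 * status back. Uses synchronous MCCQ. */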
3202 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3203 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3205 struct be_adapter *adapter = netdev_priv(netdev_handle);
3206 struct be_mcc_wrb *wrb;
3207 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3208 struct be_cmd_req_hdr *req;
3209 struct be_cmd_resp_hdr *resp;
3212 spin_lock_bh(&adapter->mcc_lock);
3214 wrb = wrb_from_mccq(adapter);
3219 req = embedded_payload(wrb);
3220 resp = embedded_payload(wrb);
3222 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3223 hdr->opcode, wrb_payload_size, wrb, NULL);
3224 memcpy(req, wrb_payload, wrb_payload_size);
3225 be_dws_cpu_to_le(req, wrb_payload_size);
3227 status = be_mcc_notify_wait(adapter);
3229 *cmd_status = (status & 0xffff);
3232 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3233 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3235 spin_unlock_bh(&adapter->mcc_lock);
3238 EXPORT_SYMBOL(be_roce_mcc_cmd);