2 * Copyright (C) 2005 - 2013 Emulex
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License version 2
7 * as published by the Free Software Foundation. The full GNU General
8 * Public License is included in this distribution in the file called COPYING.
10 * Contact Information:
11 * linux-drivers@emulex.com
15 * Costa Mesa, CA 92626
18 #include <linux/module.h>
22 static struct be_cmd_priv_map cmd_priv_map[] = {
24 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
26 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
27 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
30 OPCODE_COMMON_GET_FLOW_CONTROL,
32 BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
33 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
36 OPCODE_COMMON_SET_FLOW_CONTROL,
38 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
39 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
42 OPCODE_ETH_GET_PPORT_STATS,
44 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
45 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
48 OPCODE_COMMON_GET_PHY_DETAILS,
50 BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
51 BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
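/* Check whether this function holds the privilege(s) required to issue the
 * given opcode/subsystem pair; commands not listed in cmd_priv_map are
 * always allowed.
 */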
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
59 int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
60 u32 cmd_privileges = adapter->cmd_privileges;
62 for (i = 0; i < num_entries; i++)
63 if (opcode == cmd_priv_map[i].opcode &&
64 subsystem == cmd_priv_map[i].subsystem)
65 if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
71 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
73 return wrb->payload.embedded_payload;
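/* Ring the MCC doorbell to tell the FW that one new WRB has been posted on
 * the MCC queue; nothing is posted if the adapter is in an error state.
 */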
76 static void be_mcc_notify(struct be_adapter *adapter)
78 struct be_queue_info *mccq = &adapter->mcc_obj.q;
81 if (be_error(adapter))
84 val |= mccq->id & DB_MCCQ_RING_ID_MASK;
85 val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
88 iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
91 /* To check if valid bit is set, check the entire word as we don't know
92 * the endianness of the data (an old entry is host endian while a new entry is big endian). */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
98 if (compl->flags != 0) {
99 flags = le32_to_cpu(compl->flags);
100 if (flags & CQE_FLAGS_VALID_MASK) {
101 compl->flags = flags;
108 /* Need to reset the entire word that houses the valid bit */
109 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
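/* Rebuild the response header address from the two 32-bit WRB tags
 * (tag1 holds the upper 32 bits, tag0 the lower 32 bits).
 */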
114 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
119 addr = ((addr << 16) << 16) | tag0;
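/* Process one MCC completion: record flash status, parse stats/temperature
 * responses on success, and log unsupported, unauthorized or failed commands.
 */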
123 static int be_mcc_compl_process(struct be_adapter *adapter,
124 struct be_mcc_compl *compl)
126 u16 compl_status, extd_status;
127 struct be_cmd_resp_hdr *resp_hdr;
128 u8 opcode = 0, subsystem = 0;
130 /* Just swap the status to host endian; the mcc tag is opaquely copied from the WRB. */
132 be_dws_le_to_cpu(compl, 4);
134 compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
135 CQE_STATUS_COMPL_MASK;
137 resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);
140 opcode = resp_hdr->opcode;
141 subsystem = resp_hdr->subsystem;
144 if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
145 (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
146 (subsystem == CMD_SUBSYSTEM_COMMON)) {
147 adapter->flash_status = compl_status;
148 complete(&adapter->flash_compl);
151 if (compl_status == MCC_STATUS_SUCCESS) {
152 if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
153 (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
154 (subsystem == CMD_SUBSYSTEM_ETH)) {
155 be_parse_stats(adapter);
156 adapter->stats_cmd_sent = false;
158 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
159 subsystem == CMD_SUBSYSTEM_COMMON) {
160 struct be_cmd_resp_get_cntl_addnl_attribs *resp =
162 adapter->drv_stats.be_on_die_temperature =
163 resp->on_die_temperature;
166 if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
167 adapter->be_get_temp_freq = 0;
169 if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
170 compl_status == MCC_STATUS_ILLEGAL_REQUEST)
173 if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
174 dev_warn(&adapter->pdev->dev,
175 "VF is not privileged to issue opcode %d-%d\n",
178 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
179 CQE_STATUS_EXTD_MASK;
180 dev_err(&adapter->pdev->dev,
181 "opcode %d-%d failed:status %d-%d\n",
182 opcode, subsystem, compl_status, extd_status);
184 if (extd_status == MCC_ADDL_STS_INSUFFICIENT_RESOURCES)
192 /* Link state evt is a string of bytes; no need for endian swapping */
193 static void be_async_link_state_process(struct be_adapter *adapter,
194 struct be_async_event_link_state *evt)
196 /* When link status changes, link speed must be re-queried from FW */
197 adapter->phy.link_speed = -1;
199 /* Ignore physical link event */
200 if (lancer_chip(adapter) &&
201 !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
204 /* For the initial link status do not rely on the ASYNC event as
205 * it may not be received in some cases.
207 if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
208 be_link_status_update(adapter, evt->port_link_status);
211 /* Grp5 CoS Priority evt */
212 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
213 struct be_async_event_grp5_cos_priority *evt)
216 adapter->vlan_prio_bmap = evt->available_priority_bmap;
217 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
218 adapter->recommended_prio =
219 evt->reco_default_priority << VLAN_PRIO_SHIFT;
223 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
224 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
225 struct be_async_event_grp5_qos_link_speed *evt)
227 if (adapter->phy.link_speed >= 0 &&
228 evt->physical_port == adapter->port_num)
229 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
233 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
234 struct be_async_event_grp5_pvid_state *evt)
237 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
242 static void be_async_grp5_evt_process(struct be_adapter *adapter,
243 u32 trailer, struct be_mcc_compl *evt)
247 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
248 ASYNC_TRAILER_EVENT_TYPE_MASK;
250 switch (event_type) {
251 case ASYNC_EVENT_COS_PRIORITY:
252 be_async_grp5_cos_priority_process(adapter,
253 (struct be_async_event_grp5_cos_priority *)evt);
255 case ASYNC_EVENT_QOS_SPEED:
256 be_async_grp5_qos_speed_process(adapter,
257 (struct be_async_event_grp5_qos_link_speed *)evt);
259 case ASYNC_EVENT_PVID_STATE:
260 be_async_grp5_pvid_state_process(adapter,
261 (struct be_async_event_grp5_pvid_state *)evt);
264 dev_warn(&adapter->pdev->dev, "Unknown grp5 event 0x%x!\n",
270 static void be_async_dbg_evt_process(struct be_adapter *adapter,
271 u32 trailer, struct be_mcc_compl *cmp)
274 struct be_async_event_qnq *evt = (struct be_async_event_qnq *) cmp;
276 event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
277 ASYNC_TRAILER_EVENT_TYPE_MASK;
279 switch (event_type) {
280 case ASYNC_DEBUG_EVENT_TYPE_QNQ:
282 adapter->qnq_vid = le16_to_cpu(evt->vlan_tag);
283 adapter->flags |= BE_FLAGS_QNQ_ASYNC_EVT_RCVD;
286 dev_warn(&adapter->pdev->dev, "Unknown debug event 0x%x!\n",
292 static inline bool is_link_state_evt(u32 trailer)
294 return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
295 ASYNC_TRAILER_EVENT_CODE_MASK) ==
296 ASYNC_EVENT_CODE_LINK_STATE;
299 static inline bool is_grp5_evt(u32 trailer)
301 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
302 ASYNC_TRAILER_EVENT_CODE_MASK) ==
303 ASYNC_EVENT_CODE_GRP_5);
306 static inline bool is_dbg_evt(u32 trailer)
308 return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
309 ASYNC_TRAILER_EVENT_CODE_MASK) ==
310 ASYNC_EVENT_CODE_QNQ);
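/* Return the next unprocessed completion on the MCC CQ, or NULL if there is
 * none; the CQ tail is advanced when a new entry is found.
 */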
313 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
315 struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
316 struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
318 if (be_mcc_compl_is_new(compl)) {
319 queue_tail_inc(mcc_cq);
325 void be_async_mcc_enable(struct be_adapter *adapter)
327 spin_lock_bh(&adapter->mcc_cq_lock);
329 be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
330 adapter->mcc_obj.rearm_cq = true;
332 spin_unlock_bh(&adapter->mcc_cq_lock);
335 void be_async_mcc_disable(struct be_adapter *adapter)
337 spin_lock_bh(&adapter->mcc_cq_lock);
339 adapter->mcc_obj.rearm_cq = false;
340 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
342 spin_unlock_bh(&adapter->mcc_cq_lock);
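/* Drain the MCC CQ: dispatch async events, process command completions and
 * notify the CQ with the number of entries consumed (re-arming it if enabled).
 * Returns the status of the last command completion processed.
 */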
345 int be_process_mcc(struct be_adapter *adapter)
347 struct be_mcc_compl *compl;
348 int num = 0, status = 0;
349 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
351 spin_lock(&adapter->mcc_cq_lock);
352 while ((compl = be_mcc_compl_get(adapter))) {
353 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
354 /* Interpret flags as an async trailer */
355 if (is_link_state_evt(compl->flags))
356 be_async_link_state_process(adapter,
357 (struct be_async_event_link_state *) compl);
358 else if (is_grp5_evt(compl->flags))
359 be_async_grp5_evt_process(adapter,
360 compl->flags, compl);
361 else if (is_dbg_evt(compl->flags))
362 be_async_dbg_evt_process(adapter,
363 compl->flags, compl);
364 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
365 status = be_mcc_compl_process(adapter, compl);
366 atomic_dec(&mcc_obj->q.used);
368 be_mcc_compl_use(compl);
373 be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);
375 spin_unlock(&adapter->mcc_cq_lock);
379 /* Wait till no more pending mcc requests are present */
380 static int be_mcc_wait_compl(struct be_adapter *adapter)
382 #define mcc_timeout 120000 /* 12s timeout */
384 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
386 for (i = 0; i < mcc_timeout; i++) {
387 if (be_error(adapter))
391 status = be_process_mcc(adapter);
394 if (atomic_read(&mcc_obj->q.used) == 0)
398 if (i == mcc_timeout) {
399 dev_err(&adapter->pdev->dev, "FW not responding\n");
400 adapter->fw_timeout = true;
406 /* Notify MCC requests and wait for completion */
407 static int be_mcc_notify_wait(struct be_adapter *adapter)
410 struct be_mcc_wrb *wrb;
411 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
412 u16 index = mcc_obj->q.head;
413 struct be_cmd_resp_hdr *resp;
415 index_dec(&index, mcc_obj->q.len);
416 wrb = queue_index_node(&mcc_obj->q, index);
418 resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);
420 be_mcc_notify(adapter);
422 status = be_mcc_wait_compl(adapter);
426 status = resp->status;
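/* Poll the mailbox doorbell until the FW sets the ready bit; a timeout marks
 * the FW as unresponsive and triggers error detection.
 */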
431 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
437 if (be_error(adapter))
440 ready = ioread32(db);
441 if (ready == 0xffffffff)
444 ready &= MPU_MAILBOX_DB_RDY_MASK;
449 dev_err(&adapter->pdev->dev, "FW not responding\n");
450 adapter->fw_timeout = true;
451 be_detect_error(adapter);
463 * Insert the mailbox address into the doorbell in two steps
464 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
466 static int be_mbox_notify_wait(struct be_adapter *adapter)
470 void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
471 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
472 struct be_mcc_mailbox *mbox = mbox_mem->va;
473 struct be_mcc_compl *compl = &mbox->compl;
475 /* wait for ready to be set */
476 status = be_mbox_db_ready_wait(adapter, db);
480 val |= MPU_MAILBOX_DB_HI_MASK;
481 /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
482 val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
485 /* wait for ready to be set */
486 status = be_mbox_db_ready_wait(adapter, db);
491 /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
492 val |= (u32)(mbox_mem->dma >> 4) << 2;
495 status = be_mbox_db_ready_wait(adapter, db);
499 /* A cq entry has been made now */
500 if (be_mcc_compl_is_new(compl)) {
501 status = be_mcc_compl_process(adapter, &mbox->compl);
502 be_mcc_compl_use(compl);
506 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
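/* Read the current POST stage from the SLIPORT semaphore register: CSR space
 * on BE-x chips, PCI config space on Lancer/Skyhawk.
 */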
512 static u16 be_POST_stage_get(struct be_adapter *adapter)
516 if (BEx_chip(adapter))
517 sem = ioread32(adapter->csr + SLIPORT_SEMAPHORE_OFFSET_BEx);
519 pci_read_config_dword(adapter->pdev,
520 SLIPORT_SEMAPHORE_OFFSET_SH, &sem);
522 return sem & POST_STAGE_MASK;
525 int lancer_wait_ready(struct be_adapter *adapter)
527 #define SLIPORT_READY_TIMEOUT 30
531 for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
532 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
533 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
539 if (i == SLIPORT_READY_TIMEOUT)
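/* Returns true if the SLIPORT status registers report the "no resources"
 * provisioning error.
 */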
545 static bool lancer_provisioning_error(struct be_adapter *adapter)
547 u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
548 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
549 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
550 sliport_err1 = ioread32(adapter->db +
551 SLIPORT_ERROR1_OFFSET);
552 sliport_err2 = ioread32(adapter->db +
553 SLIPORT_ERROR2_OFFSET);
555 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
556 sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
562 int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
565 u32 sliport_status, err, reset_needed;
568 resource_error = lancer_provisioning_error(adapter);
572 status = lancer_wait_ready(adapter);
574 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
575 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
576 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
577 if (err && reset_needed) {
578 iowrite32(SLI_PORT_CONTROL_IP_MASK,
579 adapter->db + SLIPORT_CONTROL_OFFSET);
581 /* check adapter has corrected the error */
582 status = lancer_wait_ready(adapter);
583 sliport_status = ioread32(adapter->db +
584 SLIPORT_STATUS_OFFSET);
585 sliport_status &= (SLIPORT_STATUS_ERR_MASK |
586 SLIPORT_STATUS_RN_MASK);
587 if (status || sliport_status)
589 } else if (err || reset_needed) {
593 /* Stop error recovery if error is not recoverable.
594 * A no-resource error is temporary and will go away
595 * once the PF provisions resources. */
597 resource_error = lancer_provisioning_error(adapter);
604 int be_fw_wait_ready(struct be_adapter *adapter)
607 int status, timeout = 0;
608 struct device *dev = &adapter->pdev->dev;
610 if (lancer_chip(adapter)) {
611 status = lancer_wait_ready(adapter);
616 stage = be_POST_stage_get(adapter);
617 if (stage == POST_STAGE_ARMFW_RDY)
620 dev_info(dev, "Waiting for POST, %ds elapsed\n",
622 if (msleep_interruptible(2000)) {
623 dev_err(dev, "Waiting for POST aborted\n");
627 } while (timeout < 60);
629 dev_err(dev, "POST timeout; stage=0x%x\n", stage);
634 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
636 return &wrb->payload.sgl[0];
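/* Stash the request header address in the WRB tags so the completion handler
 * can later locate the response (tag0 = low 32 bits, tag1 = high 32 bits).
 */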
639 static inline void fill_wrb_tags(struct be_mcc_wrb *wrb,
642 wrb->tag0 = addr & 0xFFFFFFFF;
643 wrb->tag1 = upper_32_bits(addr);
646 /* Don't touch the hdr after it's prepared */
647 /* mem will be NULL for embedded commands */
648 static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
649 u8 subsystem, u8 opcode, int cmd_len,
650 struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
654 req_hdr->opcode = opcode;
655 req_hdr->subsystem = subsystem;
656 req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
657 req_hdr->version = 0;
658 fill_wrb_tags(wrb, (ulong) req_hdr);
659 wrb->payload_length = cmd_len;
661 wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
662 MCC_WRB_SGE_CNT_SHIFT;
663 sge = nonembedded_sgl(wrb);
664 sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
665 sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
666 sge->len = cpu_to_le32(mem->size);
668 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
669 be_dws_cpu_to_le(wrb, 8);
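/* Fill the command's page-address array with the 4K pages spanned by the DMA
 * buffer, limited to max_pages entries.
 */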
672 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
673 struct be_dma_mem *mem)
675 int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
676 u64 dma = (u64)mem->dma;
678 for (i = 0; i < buf_pages; i++) {
679 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
680 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
685 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
687 struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
688 struct be_mcc_wrb *wrb
689 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
690 memset(wrb, 0, sizeof(*wrb));
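/* Allocate the next free WRB slot on the MCC queue; returns NULL when the
 * queue is full. Callers hold mcc_lock.
 */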
694 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
696 struct be_queue_info *mccq = &adapter->mcc_obj.q;
697 struct be_mcc_wrb *wrb;
702 if (atomic_read(&mccq->used) >= mccq->len)
705 wrb = queue_head_node(mccq);
706 queue_head_inc(mccq);
707 atomic_inc(&mccq->used);
708 memset(wrb, 0, sizeof(*wrb));
712 static bool use_mcc(struct be_adapter *adapter)
714 return adapter->mcc_obj.q.created;
717 /* Must be used only in process context */
718 static int be_cmd_lock(struct be_adapter *adapter)
720 if (use_mcc(adapter)) {
721 spin_lock_bh(&adapter->mcc_lock);
724 return mutex_lock_interruptible(&adapter->mbox_lock);
728 /* Must be used only in process context */
729 static void be_cmd_unlock(struct be_adapter *adapter)
731 if (use_mcc(adapter))
732 spin_unlock_bh(&adapter->mcc_lock);
734 return mutex_unlock(&adapter->mbox_lock);
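/* Copy a locally built WRB into a WRB slot on the MCC queue, or into the
 * mailbox when the MCC queue has not been created yet.
 */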
737 static struct be_mcc_wrb *be_cmd_copy(struct be_adapter *adapter,
738 struct be_mcc_wrb *wrb)
740 struct be_mcc_wrb *dest_wrb;
742 if (use_mcc(adapter)) {
743 dest_wrb = wrb_from_mccq(adapter);
747 dest_wrb = wrb_from_mbox(adapter);
750 memcpy(dest_wrb, wrb, sizeof(*wrb));
751 if (wrb->embedded & cpu_to_le32(MCC_WRB_EMBEDDED_MASK))
752 fill_wrb_tags(dest_wrb, (ulong) embedded_payload(wrb));
757 /* Must be used only in process context */
758 static int be_cmd_notify_wait(struct be_adapter *adapter,
759 struct be_mcc_wrb *wrb)
761 struct be_mcc_wrb *dest_wrb;
764 status = be_cmd_lock(adapter);
768 dest_wrb = be_cmd_copy(adapter, wrb);
772 if (use_mcc(adapter))
773 status = be_mcc_notify_wait(adapter);
775 status = be_mbox_notify_wait(adapter);
778 memcpy(wrb, dest_wrb, sizeof(*wrb));
780 be_cmd_unlock(adapter);
784 /* Tell fw we're about to start firing cmds by writing a
785 * special pattern across the wrb hdr; uses mbox
787 int be_cmd_fw_init(struct be_adapter *adapter)
792 if (lancer_chip(adapter))
795 if (mutex_lock_interruptible(&adapter->mbox_lock))
798 wrb = (u8 *)wrb_from_mbox(adapter);
808 status = be_mbox_notify_wait(adapter);
810 mutex_unlock(&adapter->mbox_lock);
814 /* Tell fw we're done with firing cmds by writing a
815 * special pattern across the wrb hdr; uses mbox
817 int be_cmd_fw_clean(struct be_adapter *adapter)
822 if (lancer_chip(adapter))
825 if (mutex_lock_interruptible(&adapter->mbox_lock))
828 wrb = (u8 *)wrb_from_mbox(adapter);
838 status = be_mbox_notify_wait(adapter);
840 mutex_unlock(&adapter->mbox_lock);
844 int be_cmd_eq_create(struct be_adapter *adapter, struct be_eq_obj *eqo)
846 struct be_mcc_wrb *wrb;
847 struct be_cmd_req_eq_create *req;
848 struct be_dma_mem *q_mem = &eqo->q.dma_mem;
851 if (mutex_lock_interruptible(&adapter->mbox_lock))
854 wrb = wrb_from_mbox(adapter);
855 req = embedded_payload(wrb);
857 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
858 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);
860 /* Support for EQ_CREATEv2 is available only from SH-R onwards */
861 if (!(BEx_chip(adapter) || lancer_chip(adapter)))
864 req->hdr.version = ver;
865 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
867 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
869 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
870 AMAP_SET_BITS(struct amap_eq_context, count, req->context,
871 __ilog2_u32(eqo->q.len / 256));
872 be_dws_cpu_to_le(req->context, sizeof(req->context));
874 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
876 status = be_mbox_notify_wait(adapter);
878 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
879 eqo->q.id = le16_to_cpu(resp->eq_id);
881 (ver == 2) ? le16_to_cpu(resp->msix_idx) : eqo->idx;
882 eqo->q.created = true;
885 mutex_unlock(&adapter->mbox_lock);
890 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
891 bool permanent, u32 if_handle, u32 pmac_id)
893 struct be_mcc_wrb *wrb;
894 struct be_cmd_req_mac_query *req;
897 spin_lock_bh(&adapter->mcc_lock);
899 wrb = wrb_from_mccq(adapter);
904 req = embedded_payload(wrb);
906 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
907 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
908 req->type = MAC_ADDRESS_TYPE_NETWORK;
912 req->if_id = cpu_to_le16((u16) if_handle);
913 req->pmac_id = cpu_to_le32(pmac_id);
917 status = be_mcc_notify_wait(adapter);
919 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
920 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
924 spin_unlock_bh(&adapter->mcc_lock);
928 /* Uses synchronous MCCQ */
929 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
930 u32 if_id, u32 *pmac_id, u32 domain)
932 struct be_mcc_wrb *wrb;
933 struct be_cmd_req_pmac_add *req;
936 spin_lock_bh(&adapter->mcc_lock);
938 wrb = wrb_from_mccq(adapter);
943 req = embedded_payload(wrb);
945 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
946 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);
948 req->hdr.domain = domain;
949 req->if_id = cpu_to_le32(if_id);
950 memcpy(req->mac_address, mac_addr, ETH_ALEN);
952 status = be_mcc_notify_wait(adapter);
954 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
955 *pmac_id = le32_to_cpu(resp->pmac_id);
959 spin_unlock_bh(&adapter->mcc_lock);
961 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
967 /* Uses synchronous MCCQ */
968 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
970 struct be_mcc_wrb *wrb;
971 struct be_cmd_req_pmac_del *req;
977 spin_lock_bh(&adapter->mcc_lock);
979 wrb = wrb_from_mccq(adapter);
984 req = embedded_payload(wrb);
986 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
987 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);
989 req->hdr.domain = dom;
990 req->if_id = cpu_to_le32(if_id);
991 req->pmac_id = cpu_to_le32(pmac_id);
993 status = be_mcc_notify_wait(adapter);
996 spin_unlock_bh(&adapter->mcc_lock);
1001 int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
1002 struct be_queue_info *eq, bool no_delay, int coalesce_wm)
1004 struct be_mcc_wrb *wrb;
1005 struct be_cmd_req_cq_create *req;
1006 struct be_dma_mem *q_mem = &cq->dma_mem;
1010 if (mutex_lock_interruptible(&adapter->mbox_lock))
1013 wrb = wrb_from_mbox(adapter);
1014 req = embedded_payload(wrb);
1015 ctxt = &req->context;
1017 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1018 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);
1020 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1022 if (BEx_chip(adapter)) {
1023 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
1025 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
1027 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
1028 __ilog2_u32(cq->len/256));
1029 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
1030 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
1031 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
1033 req->hdr.version = 2;
1034 req->page_size = 1; /* 1 for 4K */
1035 AMAP_SET_BITS(struct amap_cq_context_v2, nodelay, ctxt,
1037 AMAP_SET_BITS(struct amap_cq_context_v2, count, ctxt,
1038 __ilog2_u32(cq->len/256));
1039 AMAP_SET_BITS(struct amap_cq_context_v2, valid, ctxt, 1);
1040 AMAP_SET_BITS(struct amap_cq_context_v2, eventable,
1042 AMAP_SET_BITS(struct amap_cq_context_v2, eqid,
1046 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1048 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1050 status = be_mbox_notify_wait(adapter);
1052 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
1053 cq->id = le16_to_cpu(resp->cq_id);
1057 mutex_unlock(&adapter->mbox_lock);
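/* Encode a queue length as log2(len) + 1, as expected by the FW; the largest
 * size (encoding 16) is passed as 0.
 */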
1062 static u32 be_encoded_q_len(int q_len)
1064 u32 len_encoded = fls(q_len); /* log2(len) + 1 */
1065 if (len_encoded == 16)
1070 static int be_cmd_mccq_ext_create(struct be_adapter *adapter,
1071 struct be_queue_info *mccq,
1072 struct be_queue_info *cq)
1074 struct be_mcc_wrb *wrb;
1075 struct be_cmd_req_mcc_ext_create *req;
1076 struct be_dma_mem *q_mem = &mccq->dma_mem;
1080 if (mutex_lock_interruptible(&adapter->mbox_lock))
1083 wrb = wrb_from_mbox(adapter);
1084 req = embedded_payload(wrb);
1085 ctxt = &req->context;
1087 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1088 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);
1090 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1091 if (lancer_chip(adapter)) {
1092 req->hdr.version = 1;
1093 req->cq_id = cpu_to_le16(cq->id);
1095 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
1096 be_encoded_q_len(mccq->len));
1097 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
1098 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
1100 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
1104 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1105 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1106 be_encoded_q_len(mccq->len));
1107 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1110 /* Subscribe to Link State and Group 5 events (bits 1 and 5 set) */
1111 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
1112 req->async_event_bitmap[0] |= cpu_to_le32(1 << ASYNC_EVENT_CODE_QNQ);
1113 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1115 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1117 status = be_mbox_notify_wait(adapter);
1119 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1120 mccq->id = le16_to_cpu(resp->id);
1121 mccq->created = true;
1123 mutex_unlock(&adapter->mbox_lock);
1128 static int be_cmd_mccq_org_create(struct be_adapter *adapter,
1129 struct be_queue_info *mccq,
1130 struct be_queue_info *cq)
1132 struct be_mcc_wrb *wrb;
1133 struct be_cmd_req_mcc_create *req;
1134 struct be_dma_mem *q_mem = &mccq->dma_mem;
1138 if (mutex_lock_interruptible(&adapter->mbox_lock))
1141 wrb = wrb_from_mbox(adapter);
1142 req = embedded_payload(wrb);
1143 ctxt = &req->context;
1145 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1146 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);
1148 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
1150 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
1151 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
1152 be_encoded_q_len(mccq->len));
1153 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
1155 be_dws_cpu_to_le(ctxt, sizeof(req->context));
1157 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1159 status = be_mbox_notify_wait(adapter);
1161 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
1162 mccq->id = le16_to_cpu(resp->id);
1163 mccq->created = true;
1166 mutex_unlock(&adapter->mbox_lock);
1170 int be_cmd_mccq_create(struct be_adapter *adapter,
1171 struct be_queue_info *mccq,
1172 struct be_queue_info *cq)
1176 status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1177 if (status && !lancer_chip(adapter)) {
1178 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1179 "or newer to avoid conflicting priorities between NIC "
1180 "and FCoE traffic");
1181 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1186 int be_cmd_txq_create(struct be_adapter *adapter, struct be_tx_obj *txo)
1188 struct be_mcc_wrb wrb = {0};
1189 struct be_cmd_req_eth_tx_create *req;
1190 struct be_queue_info *txq = &txo->q;
1191 struct be_queue_info *cq = &txo->cq;
1192 struct be_dma_mem *q_mem = &txq->dma_mem;
1193 int status, ver = 0;
1195 req = embedded_payload(&wrb);
1196 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1197 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL);
1199 if (lancer_chip(adapter)) {
1200 req->hdr.version = 1;
1201 } else if (BEx_chip(adapter)) {
1202 if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC)
1203 req->hdr.version = 2;
1204 } else { /* For SH */
1205 req->hdr.version = 2;
1208 if (req->hdr.version > 0)
1209 req->if_id = cpu_to_le16(adapter->if_handle);
1210 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
1211 req->ulp_num = BE_ULP1_NUM;
1212 req->type = BE_ETH_TX_RING_TYPE_STANDARD;
1213 req->cq_id = cpu_to_le16(cq->id);
1214 req->queue_size = be_encoded_q_len(txq->len);
1215 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1216 ver = req->hdr.version;
1218 status = be_cmd_notify_wait(adapter, &wrb);
1220 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(&wrb);
1221 txq->id = le16_to_cpu(resp->cid);
1223 txo->db_offset = le32_to_cpu(resp->db_offset);
1225 txo->db_offset = DB_TXULP1_OFFSET;
1226 txq->created = true;
1233 int be_cmd_rxq_create(struct be_adapter *adapter,
1234 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
1235 u32 if_id, u32 rss, u8 *rss_id)
1237 struct be_mcc_wrb *wrb;
1238 struct be_cmd_req_eth_rx_create *req;
1239 struct be_dma_mem *q_mem = &rxq->dma_mem;
1242 spin_lock_bh(&adapter->mcc_lock);
1244 wrb = wrb_from_mccq(adapter);
1249 req = embedded_payload(wrb);
1251 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1252 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);
1254 req->cq_id = cpu_to_le16(cq_id);
1255 req->frag_size = fls(frag_size) - 1;
1257 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
1258 req->interface_id = cpu_to_le32(if_id);
1259 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
1260 req->rss_queue = cpu_to_le32(rss);
1262 status = be_mcc_notify_wait(adapter);
1264 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
1265 rxq->id = le16_to_cpu(resp->id);
1266 rxq->created = true;
1267 *rss_id = resp->rss_id;
1271 spin_unlock_bh(&adapter->mcc_lock);
1275 /* Generic destroyer function for all types of queues; uses the mbox */
1278 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
1281 struct be_mcc_wrb *wrb;
1282 struct be_cmd_req_q_destroy *req;
1283 u8 subsys = 0, opcode = 0;
1286 if (mutex_lock_interruptible(&adapter->mbox_lock))
1289 wrb = wrb_from_mbox(adapter);
1290 req = embedded_payload(wrb);
1292 switch (queue_type) {
1294 subsys = CMD_SUBSYSTEM_COMMON;
1295 opcode = OPCODE_COMMON_EQ_DESTROY;
1298 subsys = CMD_SUBSYSTEM_COMMON;
1299 opcode = OPCODE_COMMON_CQ_DESTROY;
1302 subsys = CMD_SUBSYSTEM_ETH;
1303 opcode = OPCODE_ETH_TX_DESTROY;
1306 subsys = CMD_SUBSYSTEM_ETH;
1307 opcode = OPCODE_ETH_RX_DESTROY;
1310 subsys = CMD_SUBSYSTEM_COMMON;
1311 opcode = OPCODE_COMMON_MCC_DESTROY;
1317 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
1319 req->id = cpu_to_le16(q->id);
1321 status = be_mbox_notify_wait(adapter);
1324 mutex_unlock(&adapter->mbox_lock);
1329 int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
1331 struct be_mcc_wrb *wrb;
1332 struct be_cmd_req_q_destroy *req;
1335 spin_lock_bh(&adapter->mcc_lock);
1337 wrb = wrb_from_mccq(adapter);
1342 req = embedded_payload(wrb);
1344 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1345 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
1346 req->id = cpu_to_le16(q->id);
1348 status = be_mcc_notify_wait(adapter);
1352 spin_unlock_bh(&adapter->mcc_lock);
1356 /* Create an rx filtering policy configuration on an i/f
1357 * Will use MBOX only if MCCQ has not been created.
1359 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
1360 u32 *if_handle, u32 domain)
1362 struct be_mcc_wrb wrb = {0};
1363 struct be_cmd_req_if_create *req;
1366 req = embedded_payload(&wrb);
1367 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1368 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), &wrb, NULL);
1369 req->hdr.domain = domain;
1370 req->capability_flags = cpu_to_le32(cap_flags);
1371 req->enable_flags = cpu_to_le32(en_flags);
1372 req->pmac_invalid = true;
1374 status = be_cmd_notify_wait(adapter, &wrb);
1376 struct be_cmd_resp_if_create *resp = embedded_payload(&wrb);
1377 *if_handle = le32_to_cpu(resp->interface_id);
1379 /* Hack to retrieve VF's pmac-id on BE3 */
1380 if (BE3_chip(adapter) && !be_physfn(adapter))
1381 adapter->pmac_id[0] = le32_to_cpu(resp->pmac_id);
1387 int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
1389 struct be_mcc_wrb *wrb;
1390 struct be_cmd_req_if_destroy *req;
1393 if (interface_id == -1)
1396 spin_lock_bh(&adapter->mcc_lock);
1398 wrb = wrb_from_mccq(adapter);
1403 req = embedded_payload(wrb);
1405 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1406 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
1407 req->hdr.domain = domain;
1408 req->interface_id = cpu_to_le32(interface_id);
1410 status = be_mcc_notify_wait(adapter);
1412 spin_unlock_bh(&adapter->mcc_lock);
1416 /* Get stats is a non-embedded command: the request is not embedded inside
1417 * WRB but is a separate dma memory block
1418 * Uses asynchronous MCC
1420 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1422 struct be_mcc_wrb *wrb;
1423 struct be_cmd_req_hdr *hdr;
1426 spin_lock_bh(&adapter->mcc_lock);
1428 wrb = wrb_from_mccq(adapter);
1433 hdr = nonemb_cmd->va;
1435 be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
1436 OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);
1438 /* version 1 of the cmd is supported on all chips except BE2 */
1439 if (!BE2_chip(adapter))
1442 be_mcc_notify(adapter);
1443 adapter->stats_cmd_sent = true;
1446 spin_unlock_bh(&adapter->mcc_lock);
1451 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
1452 struct be_dma_mem *nonemb_cmd)
1455 struct be_mcc_wrb *wrb;
1456 struct lancer_cmd_req_pport_stats *req;
1459 if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
1463 spin_lock_bh(&adapter->mcc_lock);
1465 wrb = wrb_from_mccq(adapter);
1470 req = nonemb_cmd->va;
1472 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1473 OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
1476 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
1477 req->cmd_params.params.reset_stats = 0;
1479 be_mcc_notify(adapter);
1480 adapter->stats_cmd_sent = true;
1483 spin_unlock_bh(&adapter->mcc_lock);
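/* Convert the FW's PHY_LINK_SPEED_* encoding into a link speed in Mbps. */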
1487 static int be_mac_to_link_speed(int mac_speed)
1489 switch (mac_speed) {
1490 case PHY_LINK_SPEED_ZERO:
1492 case PHY_LINK_SPEED_10MBPS:
1494 case PHY_LINK_SPEED_100MBPS:
1496 case PHY_LINK_SPEED_1GBPS:
1498 case PHY_LINK_SPEED_10GBPS:
1500 case PHY_LINK_SPEED_20GBPS:
1502 case PHY_LINK_SPEED_25GBPS:
1504 case PHY_LINK_SPEED_40GBPS:
1510 /* Uses synchronous mcc
1511 * Returns link_speed in Mbps
1513 int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
1514 u8 *link_status, u32 dom)
1516 struct be_mcc_wrb *wrb;
1517 struct be_cmd_req_link_status *req;
1520 spin_lock_bh(&adapter->mcc_lock);
1523 *link_status = LINK_DOWN;
1525 wrb = wrb_from_mccq(adapter);
1530 req = embedded_payload(wrb);
1532 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1533 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);
1535 /* version 1 of the cmd is supported on all chips except BE2 */
1536 if (!BE2_chip(adapter))
1537 req->hdr.version = 1;
1539 req->hdr.domain = dom;
1541 status = be_mcc_notify_wait(adapter);
1543 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1545 *link_speed = resp->link_speed ?
1546 le16_to_cpu(resp->link_speed) * 10 :
1547 be_mac_to_link_speed(resp->mac_speed);
1549 if (!resp->logical_link_status)
1553 *link_status = resp->logical_link_status;
1557 spin_unlock_bh(&adapter->mcc_lock);
1561 /* Uses synchronous mcc */
1562 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1564 struct be_mcc_wrb *wrb;
1565 struct be_cmd_req_get_cntl_addnl_attribs *req;
1568 spin_lock_bh(&adapter->mcc_lock);
1570 wrb = wrb_from_mccq(adapter);
1575 req = embedded_payload(wrb);
1577 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1578 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1581 be_mcc_notify(adapter);
1584 spin_unlock_bh(&adapter->mcc_lock);
1588 /* Uses synchronous mcc */
1589 int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
1591 struct be_mcc_wrb *wrb;
1592 struct be_cmd_req_get_fat *req;
1595 spin_lock_bh(&adapter->mcc_lock);
1597 wrb = wrb_from_mccq(adapter);
1602 req = embedded_payload(wrb);
1604 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1605 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
1606 req->fat_operation = cpu_to_le32(QUERY_FAT);
1607 status = be_mcc_notify_wait(adapter);
1609 struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
1610 if (log_size && resp->log_size)
1611 *log_size = le32_to_cpu(resp->log_size) -
1615 spin_unlock_bh(&adapter->mcc_lock);
1619 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1621 struct be_dma_mem get_fat_cmd;
1622 struct be_mcc_wrb *wrb;
1623 struct be_cmd_req_get_fat *req;
1624 u32 offset = 0, total_size, buf_size,
1625 log_offset = sizeof(u32), payload_len;
1631 total_size = buf_len;
1633 get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1634 get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1637 if (!get_fat_cmd.va) {
1639 dev_err(&adapter->pdev->dev,
1640 "Memory allocation failure while retrieving FAT data\n");
1644 spin_lock_bh(&adapter->mcc_lock);
1646 while (total_size) {
1647 buf_size = min(total_size, (u32)60*1024);
1648 total_size -= buf_size;
1650 wrb = wrb_from_mccq(adapter);
1655 req = get_fat_cmd.va;
1657 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1658 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1659 OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
1662 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1663 req->read_log_offset = cpu_to_le32(log_offset);
1664 req->read_log_length = cpu_to_le32(buf_size);
1665 req->data_buffer_size = cpu_to_le32(buf_size);
1667 status = be_mcc_notify_wait(adapter);
1669 struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1670 memcpy(buf + offset,
1672 le32_to_cpu(resp->read_log_length));
1674 dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1678 log_offset += buf_size;
1681 pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1684 spin_unlock_bh(&adapter->mcc_lock);
1687 /* Uses synchronous mcc */
1688 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
1691 struct be_mcc_wrb *wrb;
1692 struct be_cmd_req_get_fw_version *req;
1695 spin_lock_bh(&adapter->mcc_lock);
1697 wrb = wrb_from_mccq(adapter);
1703 req = embedded_payload(wrb);
1705 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1706 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
1707 status = be_mcc_notify_wait(adapter);
1709 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1710 strcpy(fw_ver, resp->firmware_version_string);
1712 strcpy(fw_on_flash, resp->fw_on_flash_version_string);
1715 spin_unlock_bh(&adapter->mcc_lock);
1719 /* Set the EQ delay interval of an EQ to the specified value; uses async mcc */
1722 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1724 struct be_mcc_wrb *wrb;
1725 struct be_cmd_req_modify_eq_delay *req;
1728 spin_lock_bh(&adapter->mcc_lock);
1730 wrb = wrb_from_mccq(adapter);
1735 req = embedded_payload(wrb);
1737 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1738 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);
1740 req->num_eq = cpu_to_le32(1);
1741 req->delay[0].eq_id = cpu_to_le32(eq_id);
1742 req->delay[0].phase = 0;
1743 req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1745 be_mcc_notify(adapter);
1748 spin_unlock_bh(&adapter->mcc_lock);
1752 /* Uses synchronous mcc */
1753 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1754 u32 num, bool untagged, bool promiscuous)
1756 struct be_mcc_wrb *wrb;
1757 struct be_cmd_req_vlan_config *req;
1760 spin_lock_bh(&adapter->mcc_lock);
1762 wrb = wrb_from_mccq(adapter);
1767 req = embedded_payload(wrb);
1769 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1770 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);
1772 req->interface_id = if_id;
1773 req->promiscuous = promiscuous;
1774 req->untagged = untagged;
1775 req->num_vlan = num;
1777 memcpy(req->normal_vlan, vtag_array,
1778 req->num_vlan * sizeof(vtag_array[0]));
1781 status = be_mcc_notify_wait(adapter);
1784 spin_unlock_bh(&adapter->mcc_lock);
1788 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
1790 struct be_mcc_wrb *wrb;
1791 struct be_dma_mem *mem = &adapter->rx_filter;
1792 struct be_cmd_req_rx_filter *req = mem->va;
1795 spin_lock_bh(&adapter->mcc_lock);
1797 wrb = wrb_from_mccq(adapter);
1802 memset(req, 0, sizeof(*req));
1803 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1804 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
1807 req->if_id = cpu_to_le32(adapter->if_handle);
1808 if (flags & IFF_PROMISC) {
1809 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1810 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1811 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1813 req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
1814 BE_IF_FLAGS_VLAN_PROMISCUOUS |
1815 BE_IF_FLAGS_MCAST_PROMISCUOUS);
1816 } else if (flags & IFF_ALLMULTI) {
1817 req->if_flags_mask = req->if_flags =
1818 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
1819 } else if (flags & BE_FLAGS_VLAN_PROMISC) {
1820 req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1824 cpu_to_le32(BE_IF_FLAGS_VLAN_PROMISCUOUS);
1826 struct netdev_hw_addr *ha;
1829 req->if_flags_mask = req->if_flags =
1830 cpu_to_le32(BE_IF_FLAGS_MULTICAST);
1832 /* Reset mcast promisc mode if it was already set, by setting the mask
1833 * bit without setting the corresponding flags bit. */
1835 req->if_flags_mask |=
1836 cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
1837 be_if_cap_flags(adapter));
1838 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
1839 netdev_for_each_mc_addr(ha, adapter->netdev)
1840 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
1843 status = be_mcc_notify_wait(adapter);
1845 spin_unlock_bh(&adapter->mcc_lock);
1849 /* Uses synchronous mcc */
1850 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1852 struct be_mcc_wrb *wrb;
1853 struct be_cmd_req_set_flow_control *req;
1856 if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1857 CMD_SUBSYSTEM_COMMON))
1860 spin_lock_bh(&adapter->mcc_lock);
1862 wrb = wrb_from_mccq(adapter);
1867 req = embedded_payload(wrb);
1869 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1870 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1872 req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1873 req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1875 status = be_mcc_notify_wait(adapter);
1878 spin_unlock_bh(&adapter->mcc_lock);
1883 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1885 struct be_mcc_wrb *wrb;
1886 struct be_cmd_req_get_flow_control *req;
1889 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
1890 CMD_SUBSYSTEM_COMMON))
1893 spin_lock_bh(&adapter->mcc_lock);
1895 wrb = wrb_from_mccq(adapter);
1900 req = embedded_payload(wrb);
1902 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1903 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1905 status = be_mcc_notify_wait(adapter);
1907 struct be_cmd_resp_get_flow_control *resp =
1908 embedded_payload(wrb);
1909 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1910 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1914 spin_unlock_bh(&adapter->mcc_lock);
1919 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1920 u32 *mode, u32 *caps, u16 *asic_rev)
1922 struct be_mcc_wrb *wrb;
1923 struct be_cmd_req_query_fw_cfg *req;
1926 if (mutex_lock_interruptible(&adapter->mbox_lock))
1929 wrb = wrb_from_mbox(adapter);
1930 req = embedded_payload(wrb);
1932 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1933 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);
1935 status = be_mbox_notify_wait(adapter);
1937 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1938 *port_num = le32_to_cpu(resp->phys_port);
1939 *mode = le32_to_cpu(resp->function_mode);
1940 *caps = le32_to_cpu(resp->function_caps);
1941 *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
1944 mutex_unlock(&adapter->mbox_lock);
1949 int be_cmd_reset_function(struct be_adapter *adapter)
1951 struct be_mcc_wrb *wrb;
1952 struct be_cmd_req_hdr *req;
1955 if (lancer_chip(adapter)) {
1956 status = lancer_wait_ready(adapter);
1958 iowrite32(SLI_PORT_CONTROL_IP_MASK,
1959 adapter->db + SLIPORT_CONTROL_OFFSET);
1960 status = lancer_test_and_set_rdy_state(adapter);
1963 dev_err(&adapter->pdev->dev,
1964 "Adapter in non recoverable error\n");
1969 if (mutex_lock_interruptible(&adapter->mbox_lock))
1972 wrb = wrb_from_mbox(adapter);
1973 req = embedded_payload(wrb);
1975 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1976 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);
1978 status = be_mbox_notify_wait(adapter);
1980 mutex_unlock(&adapter->mbox_lock);
1984 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
1985 u32 rss_hash_opts, u16 table_size)
1987 struct be_mcc_wrb *wrb;
1988 struct be_cmd_req_rss_config *req;
1989 u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
1990 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
1991 0x3ea83c02, 0x4a110304};
1994 if (mutex_lock_interruptible(&adapter->mbox_lock))
1997 wrb = wrb_from_mbox(adapter);
1998 req = embedded_payload(wrb);
2000 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2001 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);
2003 req->if_id = cpu_to_le32(adapter->if_handle);
2004 req->enable_rss = cpu_to_le16(rss_hash_opts);
2005 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
2007 if (lancer_chip(adapter) || skyhawk_chip(adapter))
2008 req->hdr.version = 1;
2010 memcpy(req->cpu_table, rsstable, table_size);
2011 memcpy(req->hash, myhash, sizeof(myhash));
2012 be_dws_cpu_to_le(req->hash, sizeof(req->hash));
2014 status = be_mbox_notify_wait(adapter);
2016 mutex_unlock(&adapter->mbox_lock);
2021 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
2022 u8 bcn, u8 sts, u8 state)
2024 struct be_mcc_wrb *wrb;
2025 struct be_cmd_req_enable_disable_beacon *req;
2028 spin_lock_bh(&adapter->mcc_lock);
2030 wrb = wrb_from_mccq(adapter);
2035 req = embedded_payload(wrb);
2037 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2038 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
2040 req->port_num = port_num;
2041 req->beacon_state = state;
2042 req->beacon_duration = bcn;
2043 req->status_duration = sts;
2045 status = be_mcc_notify_wait(adapter);
2048 spin_unlock_bh(&adapter->mcc_lock);
2053 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
2055 struct be_mcc_wrb *wrb;
2056 struct be_cmd_req_get_beacon_state *req;
2059 spin_lock_bh(&adapter->mcc_lock);
2061 wrb = wrb_from_mccq(adapter);
2066 req = embedded_payload(wrb);
2068 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2069 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);
2071 req->port_num = port_num;
2073 status = be_mcc_notify_wait(adapter);
2075 struct be_cmd_resp_get_beacon_state *resp =
2076 embedded_payload(wrb);
2077 *state = resp->beacon_state;
2081 spin_unlock_bh(&adapter->mcc_lock);
2085 int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2086 u32 data_size, u32 data_offset,
2087 const char *obj_name, u32 *data_written,
2088 u8 *change_status, u8 *addn_status)
2090 struct be_mcc_wrb *wrb;
2091 struct lancer_cmd_req_write_object *req;
2092 struct lancer_cmd_resp_write_object *resp;
2096 spin_lock_bh(&adapter->mcc_lock);
2097 adapter->flash_status = 0;
2099 wrb = wrb_from_mccq(adapter);
2105 req = embedded_payload(wrb);
2107 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2108 OPCODE_COMMON_WRITE_OBJECT,
2109 sizeof(struct lancer_cmd_req_write_object), wrb,
2112 ctxt = &req->context;
2113 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2114 write_length, ctxt, data_size);
2117 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2120 AMAP_SET_BITS(struct amap_lancer_write_obj_context,
2123 be_dws_cpu_to_le(ctxt, sizeof(req->context));
2124 req->write_offset = cpu_to_le32(data_offset);
2125 strcpy(req->object_name, obj_name);
2126 req->descriptor_count = cpu_to_le32(1);
2127 req->buf_len = cpu_to_le32(data_size);
2128 req->addr_low = cpu_to_le32((cmd->dma +
2129 sizeof(struct lancer_cmd_req_write_object))
2131 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
2132 sizeof(struct lancer_cmd_req_write_object)));
2134 be_mcc_notify(adapter);
2135 spin_unlock_bh(&adapter->mcc_lock);
2137 if (!wait_for_completion_timeout(&adapter->flash_compl,
2138 msecs_to_jiffies(60000)))
2141 status = adapter->flash_status;
2143 resp = embedded_payload(wrb);
2145 *data_written = le32_to_cpu(resp->actual_write_len);
2146 *change_status = resp->change_status;
2148 *addn_status = resp->additional_status;
2154 spin_unlock_bh(&adapter->mcc_lock);
2158 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
2159 u32 data_size, u32 data_offset, const char *obj_name,
2160 u32 *data_read, u32 *eof, u8 *addn_status)
2162 struct be_mcc_wrb *wrb;
2163 struct lancer_cmd_req_read_object *req;
2164 struct lancer_cmd_resp_read_object *resp;
2167 spin_lock_bh(&adapter->mcc_lock);
2169 wrb = wrb_from_mccq(adapter);
2175 req = embedded_payload(wrb);
2177 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2178 OPCODE_COMMON_READ_OBJECT,
2179 sizeof(struct lancer_cmd_req_read_object), wrb,
2182 req->desired_read_len = cpu_to_le32(data_size);
2183 req->read_offset = cpu_to_le32(data_offset);
2184 strcpy(req->object_name, obj_name);
2185 req->descriptor_count = cpu_to_le32(1);
2186 req->buf_len = cpu_to_le32(data_size);
2187 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
2188 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));
2190 status = be_mcc_notify_wait(adapter);
2192 resp = embedded_payload(wrb);
2194 *data_read = le32_to_cpu(resp->actual_read_len);
2195 *eof = le32_to_cpu(resp->eof);
2197 *addn_status = resp->additional_status;
2201 spin_unlock_bh(&adapter->mcc_lock);
2205 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
2206 u32 flash_type, u32 flash_opcode, u32 buf_size)
2208 struct be_mcc_wrb *wrb;
2209 struct be_cmd_write_flashrom *req;
2212 spin_lock_bh(&adapter->mcc_lock);
2213 adapter->flash_status = 0;
2215 wrb = wrb_from_mccq(adapter);
2222 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2223 OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);
2225 req->params.op_type = cpu_to_le32(flash_type);
2226 req->params.op_code = cpu_to_le32(flash_opcode);
2227 req->params.data_buf_size = cpu_to_le32(buf_size);
2229 be_mcc_notify(adapter);
2230 spin_unlock_bh(&adapter->mcc_lock);
2232 if (!wait_for_completion_timeout(&adapter->flash_compl,
2233 msecs_to_jiffies(40000)))
2236 status = adapter->flash_status;
2241 spin_unlock_bh(&adapter->mcc_lock);
2245 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
2248 struct be_mcc_wrb *wrb;
2249 struct be_cmd_read_flash_crc *req;
2252 spin_lock_bh(&adapter->mcc_lock);
2254 wrb = wrb_from_mccq(adapter);
2259 req = embedded_payload(wrb);
2261 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2262 OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
2265 req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
2266 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
2267 req->params.offset = cpu_to_le32(offset);
2268 req->params.data_buf_size = cpu_to_le32(0x4);
2270 status = be_mcc_notify_wait(adapter);
2272 memcpy(flashed_crc, req->crc, 4);
2275 spin_unlock_bh(&adapter->mcc_lock);
2279 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2280 struct be_dma_mem *nonemb_cmd)
2282 struct be_mcc_wrb *wrb;
2283 struct be_cmd_req_acpi_wol_magic_config *req;
2286 spin_lock_bh(&adapter->mcc_lock);
2288 wrb = wrb_from_mccq(adapter);
2293 req = nonemb_cmd->va;
2295 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2296 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2298 memcpy(req->magic_mac, mac, ETH_ALEN);
2300 status = be_mcc_notify_wait(adapter);
2303 spin_unlock_bh(&adapter->mcc_lock);
2307 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2308 u8 loopback_type, u8 enable)
2310 struct be_mcc_wrb *wrb;
2311 struct be_cmd_req_set_lmode *req;
2314 spin_lock_bh(&adapter->mcc_lock);
2316 wrb = wrb_from_mccq(adapter);
2322 req = embedded_payload(wrb);
2324 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2325 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2328 req->src_port = port_num;
2329 req->dest_port = port_num;
2330 req->loopback_type = loopback_type;
2331 req->loopback_state = enable;
2333 status = be_mcc_notify_wait(adapter);
2335 spin_unlock_bh(&adapter->mcc_lock);
2339 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
2340 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
2342 struct be_mcc_wrb *wrb;
2343 struct be_cmd_req_loopback_test *req;
2346 spin_lock_bh(&adapter->mcc_lock);
2348 wrb = wrb_from_mccq(adapter);
2354 req = embedded_payload(wrb);
2356 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2357 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
2358 req->hdr.timeout = cpu_to_le32(4);
2360 req->pattern = cpu_to_le64(pattern);
2361 req->src_port = cpu_to_le32(port_num);
2362 req->dest_port = cpu_to_le32(port_num);
2363 req->pkt_size = cpu_to_le32(pkt_size);
2364 req->num_pkts = cpu_to_le32(num_pkts);
2365 req->loopback_type = cpu_to_le32(loopback_type);
2367 status = be_mcc_notify_wait(adapter);
2369 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
2370 status = le32_to_cpu(resp->status);
2374 spin_unlock_bh(&adapter->mcc_lock);
2378 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2379 u32 byte_cnt, struct be_dma_mem *cmd)
2381 struct be_mcc_wrb *wrb;
2382 struct be_cmd_req_ddrdma_test *req;
2386 spin_lock_bh(&adapter->mcc_lock);
2388 wrb = wrb_from_mccq(adapter);
2394 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2395 OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);
2397 req->pattern = cpu_to_le64(pattern);
2398 req->byte_count = cpu_to_le32(byte_cnt);
2399 for (i = 0; i < byte_cnt; i++) {
2400 req->snd_buff[i] = (u8)(pattern >> (j*8));
2406 status = be_mcc_notify_wait(adapter);
2409 struct be_cmd_resp_ddrdma_test *resp;
2411 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2418 spin_unlock_bh(&adapter->mcc_lock);
2422 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2423 struct be_dma_mem *nonemb_cmd)
2425 struct be_mcc_wrb *wrb;
2426 struct be_cmd_req_seeprom_read *req;
2429 spin_lock_bh(&adapter->mcc_lock);
2431 wrb = wrb_from_mccq(adapter);
2436 req = nonemb_cmd->va;
2438 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2439 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2442 status = be_mcc_notify_wait(adapter);
2445 spin_unlock_bh(&adapter->mcc_lock);
2449 int be_cmd_get_phy_info(struct be_adapter *adapter)
2451 struct be_mcc_wrb *wrb;
2452 struct be_cmd_req_get_phy_info *req;
2453 struct be_dma_mem cmd;
2456 if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2457 CMD_SUBSYSTEM_COMMON))
2460 spin_lock_bh(&adapter->mcc_lock);
2462 wrb = wrb_from_mccq(adapter);
2467 cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2468 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2471 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2478 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2479 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2482 status = be_mcc_notify_wait(adapter);
2484 struct be_phy_info *resp_phy_info =
2485 cmd.va + sizeof(struct be_cmd_req_hdr);
2486 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2487 adapter->phy.interface_type =
2488 le16_to_cpu(resp_phy_info->interface_type);
2489 adapter->phy.auto_speeds_supported =
2490 le16_to_cpu(resp_phy_info->auto_speeds_supported);
2491 adapter->phy.fixed_speeds_supported =
2492 le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2493 adapter->phy.misc_params =
2494 le32_to_cpu(resp_phy_info->misc_params);
2496 if (BE2_chip(adapter)) {
2497 adapter->phy.fixed_speeds_supported =
2498 BE_SUPPORTED_SPEED_10GBPS |
2499 BE_SUPPORTED_SPEED_1GBPS;
2502 pci_free_consistent(adapter->pdev, cmd.size,
2505 spin_unlock_bh(&adapter->mcc_lock);
2509 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2511 struct be_mcc_wrb *wrb;
2512 struct be_cmd_req_set_qos *req;
2515 spin_lock_bh(&adapter->mcc_lock);
2517 wrb = wrb_from_mccq(adapter);
2523 req = embedded_payload(wrb);
2525 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2526 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2528 req->hdr.domain = domain;
2529 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2530 req->max_bps_nic = cpu_to_le32(bps);
2532 status = be_mcc_notify_wait(adapter);
2535 spin_unlock_bh(&adapter->mcc_lock);
2539 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2541 struct be_mcc_wrb *wrb;
2542 struct be_cmd_req_cntl_attribs *req;
2543 struct be_cmd_resp_cntl_attribs *resp;
2545 int payload_len = max(sizeof(*req), sizeof(*resp));
2546 struct mgmt_controller_attrib *attribs;
2547 struct be_dma_mem attribs_cmd;
2549 if (mutex_lock_interruptible(&adapter->mbox_lock))
2552 memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2553 attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2554 attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2556 if (!attribs_cmd.va) {
2557 dev_err(&adapter->pdev->dev,
2558 "Memory allocation failure\n");
2563 wrb = wrb_from_mbox(adapter);
2568 req = attribs_cmd.va;
2570 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2571 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2574 status = be_mbox_notify_wait(adapter);
2576 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2577 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2581 mutex_unlock(&adapter->mbox_lock);
2583 pci_free_consistent(adapter->pdev, attribs_cmd.size,
2584 attribs_cmd.va, attribs_cmd.dma);
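/* Requests the BE3 native ERX API capability from the firmware over the
 * mailbox. If the capability is not granted, the adapter runs in legacy
 * mode and a warning is logged.
 */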
2589 int be_cmd_req_native_mode(struct be_adapter *adapter)
2591 struct be_mcc_wrb *wrb;
2592 struct be_cmd_req_set_func_cap *req;
2595 if (mutex_lock_interruptible(&adapter->mbox_lock))
2598 wrb = wrb_from_mbox(adapter);
2604 req = embedded_payload(wrb);
2606 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2607 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);
2609 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2610 CAPABILITY_BE3_NATIVE_ERX_API);
2611 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2613 status = be_mbox_notify_wait(adapter);
2615 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2616 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2617 CAPABILITY_BE3_NATIVE_ERX_API;
2618 if (!adapter->be3_native)
2619 dev_warn(&adapter->pdev->dev,
2620 "adapter not in advanced mode\n");
2623 mutex_unlock(&adapter->mbox_lock);
2627 /* Get privilege(s) for a function */
2628 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2631 struct be_mcc_wrb *wrb;
2632 struct be_cmd_req_get_fn_privileges *req;
2635 spin_lock_bh(&adapter->mcc_lock);
2637 wrb = wrb_from_mccq(adapter);
2643 req = embedded_payload(wrb);
2645 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2646 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2649 req->hdr.domain = domain;
2651 status = be_mcc_notify_wait(adapter);
2653 struct be_cmd_resp_get_fn_privileges *resp =
2654 embedded_payload(wrb);
2655 *privilege = le32_to_cpu(resp->privilege_mask);
2659 spin_unlock_bh(&adapter->mcc_lock);
2663 /* Set privilege(s) for a function */
2664 int be_cmd_set_fn_privileges(struct be_adapter *adapter, u32 privileges,
2667 struct be_mcc_wrb *wrb;
2668 struct be_cmd_req_set_fn_privileges *req;
2671 spin_lock_bh(&adapter->mcc_lock);
2673 wrb = wrb_from_mccq(adapter);
2679 req = embedded_payload(wrb);
2680 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2681 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req),
2683 req->hdr.domain = domain;
2684 if (lancer_chip(adapter))
2685 req->privileges_lancer = cpu_to_le32(privileges);
2687 req->privileges = cpu_to_le32(privileges);
2689 status = be_mcc_notify_wait(adapter);
2691 spin_unlock_bh(&adapter->mcc_lock);
2695 /* pmac_id_valid: true => pmac_id is supplied and the matching MAC address is returned.
2696 * pmac_id_valid: false => the first active pmac_id is returned and pmac_id_valid is set to true;
2697 * if no active mac_id exists, the first permanent MAC address is returned instead.
2699 int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
2700 bool *pmac_id_valid, u32 *pmac_id, u8 domain)
2702 struct be_mcc_wrb *wrb;
2703 struct be_cmd_req_get_mac_list *req;
2706 struct be_dma_mem get_mac_list_cmd;
2709 memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
2710 get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
2711 get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
2712 get_mac_list_cmd.size,
2713 &get_mac_list_cmd.dma);
2715 if (!get_mac_list_cmd.va) {
2716 dev_err(&adapter->pdev->dev,
2717 "Memory allocation failure during GET_MAC_LIST\n");
2721 spin_lock_bh(&adapter->mcc_lock);
2723 wrb = wrb_from_mccq(adapter);
2729 req = get_mac_list_cmd.va;
2731 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2732 OPCODE_COMMON_GET_MAC_LIST,
2733 get_mac_list_cmd.size, wrb, &get_mac_list_cmd);
2734 req->hdr.domain = domain;
2735 req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
2736 if (*pmac_id_valid) {
2737 req->mac_id = cpu_to_le32(*pmac_id);
2738 req->iface_id = cpu_to_le16(adapter->if_handle);
2739 req->perm_override = 0;
2741 req->perm_override = 1;
2744 status = be_mcc_notify_wait(adapter);
2746 struct be_cmd_resp_get_mac_list *resp =
2747 get_mac_list_cmd.va;
2749 if (*pmac_id_valid) {
2750 memcpy(mac, resp->macid_macaddr.mac_addr_id.macaddr,
2755 mac_count = resp->true_mac_count + resp->pseudo_mac_count;
2756 /* The MAC list returned could contain one or more active mac_ids
2757 * or one or more true or pseudo permanent MAC addresses.
2758 * If an active mac_id is present, return the first active mac_id
2761 for (i = 0; i < mac_count; i++) {
2762 struct get_list_macaddr *mac_entry;
2766 mac_entry = &resp->macaddr_list[i];
2767 mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
2768 /* mac_id is a 32-bit value while a MAC address is 6 bytes */
2771 if (mac_addr_size == sizeof(u32)) {
2772 *pmac_id_valid = true;
2773 mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
2774 *pmac_id = le32_to_cpu(mac_id);
2778 /* If no active mac_id is found, return the first MAC address */
2779 *pmac_id_valid = false;
2780 memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
2785 spin_unlock_bh(&adapter->mcc_lock);
2786 pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
2787 get_mac_list_cmd.va, get_mac_list_cmd.dma);
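/* Returns the MAC address currently in use by this function. A minimal
 * usage sketch (cur_pmac_id and netdev are illustrative, not taken from
 * this file):
 *
 *	u8 mac[ETH_ALEN];
 *
 *	if (!be_cmd_get_active_mac(adapter, cur_pmac_id, mac))
 *		memcpy(netdev->dev_addr, mac, ETH_ALEN);
 */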
2791 int be_cmd_get_active_mac(struct be_adapter *adapter, u32 curr_pmac_id, u8 *mac)
2795 if (BEx_chip(adapter))
2796 return be_cmd_mac_addr_query(adapter, mac, false,
2797 adapter->if_handle, curr_pmac_id);
2799 /* Fetch the MAC address using pmac_id */
2800 return be_cmd_get_mac_from_list(adapter, mac, &active,
2804 int be_cmd_get_perm_mac(struct be_adapter *adapter, u8 *mac)
2807 bool pmac_valid = false;
2809 memset(mac, 0, ETH_ALEN);
2811 if (BEx_chip(adapter)) {
2812 if (be_physfn(adapter))
2813 status = be_cmd_mac_addr_query(adapter, mac, true, 0,
2816 status = be_cmd_mac_addr_query(adapter, mac, false,
2817 adapter->if_handle, 0);
2819 status = be_cmd_get_mac_from_list(adapter, mac, &pmac_valid,
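/* Replaces the function's provisioned MAC address list with the supplied
 * array (mac_count entries of ETH_ALEN bytes each) for the given domain.
 */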
2826 /* Uses synchronous MCCQ */
2827 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2828 u8 mac_count, u32 domain)
2830 struct be_mcc_wrb *wrb;
2831 struct be_cmd_req_set_mac_list *req;
2833 struct be_dma_mem cmd;
2835 memset(&cmd, 0, sizeof(struct be_dma_mem));
2836 cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2837 cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2838 &cmd.dma, GFP_KERNEL);
2842 spin_lock_bh(&adapter->mcc_lock);
2844 wrb = wrb_from_mccq(adapter);
2851 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2852 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2855 req->hdr.domain = domain;
2856 req->mac_count = mac_count;
2858 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2860 status = be_mcc_notify_wait(adapter);
2863 dma_free_coherent(&adapter->pdev->dev, cmd.size,
2865 spin_unlock_bh(&adapter->mcc_lock);
2869 /* Wrapper to delete any active MACs and provision the new MAC.
2870 * Changes to MAC_LIST are allowed iff none of the MAC addresses in the
2871 * current list are active.
2873 int be_cmd_set_mac(struct be_adapter *adapter, u8 *mac, int if_id, u32 dom)
2875 bool active_mac = false;
2876 u8 old_mac[ETH_ALEN];
2880 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
2882 if (!status && active_mac)
2883 be_cmd_pmac_del(adapter, if_id, pmac_id, dom);
2885 return be_cmd_set_mac_list(adapter, mac, mac ? 1 : 0, dom);
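/* Programs the hyper-switch configuration for an interface: optionally a
 * port VLAN (pvid) and, on non-BEx chips, the physical-port forwarding
 * mode (hsw_mode). Uses the MCC queue.
 */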
2888 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2889 u32 domain, u16 intf_id, u16 hsw_mode)
2891 struct be_mcc_wrb *wrb;
2892 struct be_cmd_req_set_hsw_config *req;
2896 spin_lock_bh(&adapter->mcc_lock);
2898 wrb = wrb_from_mccq(adapter);
2904 req = embedded_payload(wrb);
2905 ctxt = &req->context;
2907 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2908 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2910 req->hdr.domain = domain;
2911 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2913 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2914 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2916 if (!BEx_chip(adapter) && hsw_mode) {
2917 AMAP_SET_BITS(struct amap_set_hsw_context, interface_id,
2918 ctxt, adapter->hba_port_num);
2919 AMAP_SET_BITS(struct amap_set_hsw_context, pport, ctxt, 1);
2920 AMAP_SET_BITS(struct amap_set_hsw_context, port_fwd_type,
2924 be_dws_cpu_to_le(req->context, sizeof(req->context));
2925 status = be_mcc_notify_wait(adapter);
2928 spin_unlock_bh(&adapter->mcc_lock);
2932 /* Get Hyper switch config */
2933 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2934 u32 domain, u16 intf_id, u8 *mode)
2936 struct be_mcc_wrb *wrb;
2937 struct be_cmd_req_get_hsw_config *req;
2942 spin_lock_bh(&adapter->mcc_lock);
2944 wrb = wrb_from_mccq(adapter);
2950 req = embedded_payload(wrb);
2951 ctxt = &req->context;
2953 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2954 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2956 req->hdr.domain = domain;
2957 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2959 AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2961 if (!BEx_chip(adapter)) {
2962 AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id,
2963 ctxt, adapter->hba_port_num);
2964 AMAP_SET_BITS(struct amap_get_hsw_req_context, pport, ctxt, 1);
2966 be_dws_cpu_to_le(req->context, sizeof(req->context));
2968 status = be_mcc_notify_wait(adapter);
2970 struct be_cmd_resp_get_hsw_config *resp =
2971 embedded_payload(wrb);
2972 be_dws_le_to_cpu(&resp->context,
2973 sizeof(resp->context));
2974 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2975 pvid, &resp->context);
2977 *pvid = le16_to_cpu(vid);
2979 *mode = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2980 port_fwd_type, &resp->context);
2984 spin_unlock_bh(&adapter->mcc_lock);
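/* Queries the adapter's ACPI Wake-on-LAN capability (version 1 of the
 * command) over the mailbox and caches the result in adapter->wol_cap.
 */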
2988 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2990 struct be_mcc_wrb *wrb;
2991 struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2993 int payload_len = sizeof(*req);
2994 struct be_dma_mem cmd;
2996 if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3000 if (mutex_lock_interruptible(&adapter->mbox_lock))
3003 memset(&cmd, 0, sizeof(struct be_dma_mem));
3004 cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
3005 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3008 dev_err(&adapter->pdev->dev,
3009 "Memory allocation failure\n");
3014 wrb = wrb_from_mbox(adapter);
3022 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
3023 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
3024 payload_len, wrb, &cmd);
3026 req->hdr.version = 1;
3027 req->query_options = BE_GET_WOL_CAP;
3029 status = be_mbox_notify_wait(adapter);
3031 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
3032 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
3034 /* The command could succeed misleadingly on old f/w
3035 * that is not aware of the V1 version; fake an error. */
3036 if (resp->hdr.response_length < payload_len) {
3040 adapter->wol_cap = resp->wol_settings;
3043 mutex_unlock(&adapter->mbox_lock);
3045 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3049 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
3050 struct be_dma_mem *cmd)
3052 struct be_mcc_wrb *wrb;
3053 struct be_cmd_req_get_ext_fat_caps *req;
3056 if (mutex_lock_interruptible(&adapter->mbox_lock))
3059 wrb = wrb_from_mbox(adapter);
3066 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3067 OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
3068 cmd->size, wrb, cmd);
3069 req->parameter_type = cpu_to_le32(1);
3071 status = be_mbox_notify_wait(adapter);
3073 mutex_unlock(&adapter->mbox_lock);
3077 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
3078 struct be_dma_mem *cmd,
3079 struct be_fat_conf_params *configs)
3081 struct be_mcc_wrb *wrb;
3082 struct be_cmd_req_set_ext_fat_caps *req;
3085 spin_lock_bh(&adapter->mcc_lock);
3087 wrb = wrb_from_mccq(adapter);
3094 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
3095 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3096 OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
3097 cmd->size, wrb, cmd);
3099 status = be_mcc_notify_wait(adapter);
3101 spin_unlock_bh(&adapter->mcc_lock);
3105 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
3107 struct be_mcc_wrb *wrb;
3108 struct be_cmd_req_get_port_name *req;
3111 if (!lancer_chip(adapter)) {
3112 *port_name = adapter->hba_port_num + '0';
3116 spin_lock_bh(&adapter->mcc_lock);
3118 wrb = wrb_from_mccq(adapter);
3124 req = embedded_payload(wrb);
3126 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3127 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
3129 req->hdr.version = 1;
3131 status = be_mcc_notify_wait(adapter);
3133 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
3134 *port_name = resp->port_name[adapter->hba_port_num];
3136 *port_name = adapter->hba_port_num + '0';
3139 spin_unlock_bh(&adapter->mcc_lock);
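/* The GET_FUNC_CONFIG/GET_PROFILE_CONFIG responses carry a list of
 * variable-length resource descriptors. The helpers below walk that list
 * by advancing the header pointer by desc_len (falling back to the v0
 * size when the length field is zero) and return the first NIC descriptor
 * or the PCIe descriptor matching the given PCI function number.
 */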
3143 static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
3145 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3148 for (i = 0; i < desc_count; i++) {
3149 if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
3150 hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
3151 return (struct be_nic_res_desc *)hdr;
3153 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3154 hdr = (void *)hdr + hdr->desc_len;
3159 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
3162 struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
3163 struct be_pcie_res_desc *pcie;
3166 for (i = 0; i < desc_count; i++) {
3167 if ((hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V0 ||
3168 hdr->desc_type == PCIE_RESOURCE_DESC_TYPE_V1)) {
3169 pcie = (struct be_pcie_res_desc *)hdr;
3170 if (pcie->pf_num == devfn)
3174 hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
3175 hdr = (void *)hdr + hdr->desc_len;
3180 static void be_copy_nic_desc(struct be_resources *res,
3181 struct be_nic_res_desc *desc)
3183 res->max_uc_mac = le16_to_cpu(desc->unicast_mac_count);
3184 res->max_vlans = le16_to_cpu(desc->vlan_count);
3185 res->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3186 res->max_tx_qs = le16_to_cpu(desc->txq_count);
3187 res->max_rss_qs = le16_to_cpu(desc->rssq_count);
3188 res->max_rx_qs = le16_to_cpu(desc->rq_count);
3189 res->max_evt_qs = le16_to_cpu(desc->eq_count);
3190 /* Clear flags that the driver is not interested in */
3191 res->if_cap_flags = le32_to_cpu(desc->cap_flags) &
3192 BE_IF_CAP_FLAGS_WANT;
3193 /* Need 1 RXQ as the default RXQ */
3194 if (res->max_rss_qs && res->max_rss_qs == res->max_rx_qs)
3195 res->max_rss_qs -= 1;
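/* Reads this function's resource configuration over the mailbox, locates
 * the NIC resource descriptor in the response and copies its limits into
 * *res via be_copy_nic_desc(); also records the PF number.
 */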
3199 int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
3201 struct be_mcc_wrb *wrb;
3202 struct be_cmd_req_get_func_config *req;
3204 struct be_dma_mem cmd;
3206 if (mutex_lock_interruptible(&adapter->mbox_lock))
3209 memset(&cmd, 0, sizeof(struct be_dma_mem));
3210 cmd.size = sizeof(struct be_cmd_resp_get_func_config);
3211 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3214 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3219 wrb = wrb_from_mbox(adapter);
3227 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3228 OPCODE_COMMON_GET_FUNC_CONFIG,
3229 cmd.size, wrb, &cmd);
3231 if (skyhawk_chip(adapter))
3232 req->hdr.version = 1;
3234 status = be_mbox_notify_wait(adapter);
3236 struct be_cmd_resp_get_func_config *resp = cmd.va;
3237 u32 desc_count = le32_to_cpu(resp->desc_count);
3238 struct be_nic_res_desc *desc;
3240 desc = be_get_nic_desc(resp->func_param, desc_count);
3246 adapter->pf_number = desc->pf_num;
3247 be_copy_nic_desc(res, desc);
3250 mutex_unlock(&adapter->mbox_lock);
3252 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3257 static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
3258 u8 domain, struct be_dma_mem *cmd)
3260 struct be_mcc_wrb *wrb;
3261 struct be_cmd_req_get_profile_config *req;
3264 if (mutex_lock_interruptible(&adapter->mbox_lock))
3266 wrb = wrb_from_mbox(adapter);
3269 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3270 OPCODE_COMMON_GET_PROFILE_CONFIG,
3271 cmd->size, wrb, cmd);
3273 req->type = ACTIVE_PROFILE_TYPE;
3274 req->hdr.domain = domain;
3275 if (!lancer_chip(adapter))
3276 req->hdr.version = 1;
3278 status = be_mbox_notify_wait(adapter);
3280 mutex_unlock(&adapter->mbox_lock);
3285 static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
3286 u8 domain, struct be_dma_mem *cmd)
3288 struct be_mcc_wrb *wrb;
3289 struct be_cmd_req_get_profile_config *req;
3292 spin_lock_bh(&adapter->mcc_lock);
3294 wrb = wrb_from_mccq(adapter);
3301 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3302 OPCODE_COMMON_GET_PROFILE_CONFIG,
3303 cmd->size, wrb, cmd);
3305 req->type = ACTIVE_PROFILE_TYPE;
3306 req->hdr.domain = domain;
3307 if (!lancer_chip(adapter))
3308 req->hdr.version = 1;
3310 status = be_mcc_notify_wait(adapter);
3313 spin_unlock_bh(&adapter->mcc_lock);
3317 /* Uses synchronous MCC if the MCCQ has already been created; otherwise uses the mailbox */
3318 int be_cmd_get_profile_config(struct be_adapter *adapter,
3319 struct be_resources *res, u8 domain)
3321 struct be_cmd_resp_get_profile_config *resp;
3322 struct be_pcie_res_desc *pcie;
3323 struct be_nic_res_desc *nic;
3324 struct be_queue_info *mccq = &adapter->mcc_obj.q;
3325 struct be_dma_mem cmd;
3329 memset(&cmd, 0, sizeof(struct be_dma_mem));
3330 cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3331 cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
3336 status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
3338 status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
3343 desc_count = le32_to_cpu(resp->desc_count);
3345 pcie = be_get_pcie_desc(adapter->pdev->devfn, resp->func_param,
3348 res->max_vfs = le16_to_cpu(pcie->num_vfs);
3350 nic = be_get_nic_desc(resp->func_param, desc_count);
3352 be_copy_nic_desc(res, nic);
3356 pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
3360 /* Currently only Lancer uses this command and it supports version 0 only
3363 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3366 struct be_mcc_wrb *wrb;
3367 struct be_cmd_req_set_profile_config *req;
3370 spin_lock_bh(&adapter->mcc_lock);
3372 wrb = wrb_from_mccq(adapter);
3378 req = embedded_payload(wrb);
3380 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3381 OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3383 req->hdr.domain = domain;
3384 req->desc_count = cpu_to_le32(1);
3385 req->nic_desc.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V0;
3386 req->nic_desc.hdr.desc_len = RESOURCE_DESC_SIZE_V0;
3387 req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3388 req->nic_desc.pf_num = adapter->pf_number;
3389 req->nic_desc.vf_num = domain;
3391 /* Mark fields invalid */
3392 req->nic_desc.unicast_mac_count = 0xFFFF;
3393 req->nic_desc.mcc_count = 0xFFFF;
3394 req->nic_desc.vlan_count = 0xFFFF;
3395 req->nic_desc.mcast_mac_count = 0xFFFF;
3396 req->nic_desc.txq_count = 0xFFFF;
3397 req->nic_desc.rq_count = 0xFFFF;
3398 req->nic_desc.rssq_count = 0xFFFF;
3399 req->nic_desc.lro_count = 0xFFFF;
3400 req->nic_desc.cq_count = 0xFFFF;
3401 req->nic_desc.toe_conn_count = 0xFFFF;
3402 req->nic_desc.eq_count = 0xFFFF;
3403 req->nic_desc.link_param = 0xFF;
3404 req->nic_desc.bw_min = 0xFFFFFFFF;
3405 req->nic_desc.acpi_params = 0xFF;
3406 req->nic_desc.wol_param = 0x0F;
3409 req->nic_desc.bw_min = cpu_to_le32(bps);
3410 req->nic_desc.bw_max = cpu_to_le32(bps);
3411 status = be_mcc_notify_wait(adapter);
3413 spin_unlock_bh(&adapter->mcc_lock);
3417 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3420 struct be_mcc_wrb *wrb;
3421 struct be_cmd_req_get_iface_list *req;
3422 struct be_cmd_resp_get_iface_list *resp;
3425 spin_lock_bh(&adapter->mcc_lock);
3427 wrb = wrb_from_mccq(adapter);
3432 req = embedded_payload(wrb);
3434 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3435 OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3437 req->hdr.domain = vf_num + 1;
3439 status = be_mcc_notify_wait(adapter);
3441 resp = (struct be_cmd_resp_get_iface_list *)req;
3442 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3446 spin_unlock_bh(&adapter->mcc_lock);
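/* Polls the Lancer PHYSDEV control register until the in-progress bit
 * clears, giving up after SLIPORT_IDLE_TIMEOUT attempts.
 */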
3450 static int lancer_wait_idle(struct be_adapter *adapter)
3452 #define SLIPORT_IDLE_TIMEOUT 30
3456 for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3457 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3458 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3464 if (i == SLIPORT_IDLE_TIMEOUT)
3470 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask)
3474 status = lancer_wait_idle(adapter);
3478 iowrite32(mask, adapter->db + PHYSDEV_CONTROL_OFFSET);
3484 /* Check whether a firmware dump image is present */
3484 bool dump_present(struct be_adapter *adapter)
3486 u32 sliport_status = 0;
3488 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3489 return !!(sliport_status & SLIPORT_STATUS_DIP_MASK);
3492 int lancer_initiate_dump(struct be_adapter *adapter)
3496 /* Trigger a firmware reset and request a diagnostic dump */
3497 status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
3498 PHYSDEV_CONTROL_DD_MASK);
3500 dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
3504 status = lancer_wait_idle(adapter);
3508 if (!dump_present(adapter)) {
3509 dev_err(&adapter->pdev->dev, "Dump image not present\n");
3517 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3519 struct be_mcc_wrb *wrb;
3520 struct be_cmd_enable_disable_vf *req;
3523 if (!lancer_chip(adapter))
3526 spin_lock_bh(&adapter->mcc_lock);
3528 wrb = wrb_from_mccq(adapter);
3534 req = embedded_payload(wrb);
3536 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3537 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3540 req->hdr.domain = domain;
3542 status = be_mcc_notify_wait(adapter);
3544 spin_unlock_bh(&adapter->mcc_lock);
3548 int be_cmd_intr_set(struct be_adapter *adapter, bool intr_enable)
3550 struct be_mcc_wrb *wrb;
3551 struct be_cmd_req_intr_set *req;
3554 if (mutex_lock_interruptible(&adapter->mbox_lock))
3557 wrb = wrb_from_mbox(adapter);
3559 req = embedded_payload(wrb);
3561 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3562 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req),
3565 req->intr_enabled = intr_enable;
3567 status = be_mbox_notify_wait(adapter);
3569 mutex_unlock(&adapter->mbox_lock);
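/* Pass-through MCC command issued on behalf of the RoCE driver: the
 * caller-supplied WRB payload is copied into an embedded WRB, executed
 * synchronously on the MCC queue, and the response (including the
 * extended status) is copied back to the caller. Exported, presumably for
 * the Emulex RoCE (ocrdma) driver.
 */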
3573 int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
3574 int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
3576 struct be_adapter *adapter = netdev_priv(netdev_handle);
3577 struct be_mcc_wrb *wrb;
3578 struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
3579 struct be_cmd_req_hdr *req;
3580 struct be_cmd_resp_hdr *resp;
3583 spin_lock_bh(&adapter->mcc_lock);
3585 wrb = wrb_from_mccq(adapter);
3590 req = embedded_payload(wrb);
3591 resp = embedded_payload(wrb);
3593 be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
3594 hdr->opcode, wrb_payload_size, wrb, NULL);
3595 memcpy(req, wrb_payload, wrb_payload_size);
3596 be_dws_cpu_to_le(req, wrb_payload_size);
3598 status = be_mcc_notify_wait(adapter);
3600 *cmd_status = (status & 0xffff);
3603 memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
3604 be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
3606 spin_unlock_bh(&adapter->mcc_lock);
3609 EXPORT_SYMBOL(be_roce_mcc_cmd);