813407f66c7c8dd579588c9afdab2b5d1a28d3a7
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / emulex / benet / be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/module.h>
19 #include "be.h"
20 #include "be_cmds.h"
21
/* Table of MCC opcodes that need extra function privileges.
 * A command listed here is permitted only if the function owns at least
 * one of the privileges in priv_mask; consulted by be_cmd_allowed().
 */
static struct be_cmd_priv_map cmd_priv_map[] = {
	{
		OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKQUERY | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_SET_FLOW_CONTROL,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_ETH_GET_PPORT_STATS,
		CMD_SUBSYSTEM_ETH,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	},
	{
		OPCODE_COMMON_GET_PHY_DETAILS,
		CMD_SUBSYSTEM_COMMON,
		BE_PRIV_LNKMGMT | BE_PRIV_VHADM |
		BE_PRIV_DEVCFG | BE_PRIV_DEVSEC
	}
};
54
55 static bool be_cmd_allowed(struct be_adapter *adapter, u8 opcode,
56                            u8 subsystem)
57 {
58         int i;
59         int num_entries = sizeof(cmd_priv_map)/sizeof(struct be_cmd_priv_map);
60         u32 cmd_privileges = adapter->cmd_privileges;
61
62         for (i = 0; i < num_entries; i++)
63                 if (opcode == cmd_priv_map[i].opcode &&
64                     subsystem == cmd_priv_map[i].subsystem)
65                         if (!(cmd_privileges & cmd_priv_map[i].priv_mask))
66                                 return false;
67
68         return true;
69 }
70
/* Payload area of a wrb when the command is embedded (no external sgl) */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
75
/* Ring the MCC doorbell to post one wrb on the MCC queue.
 * Silently does nothing once the adapter is in an error state.
 */
static void be_mcc_notify(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	u32 val = 0;

	if (be_error(adapter))
		return;

	val |= mccq->id & DB_MCCQ_RING_ID_MASK;
	val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;

	/* make the wrb writes visible before ringing the doorbell */
	wmb();
	iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
}
90
91 /* To check if valid bit is set, check the entire word as we don't know
92  * the endianness of the data (old entry is host endian while a new entry is
93  * little endian) */
94 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
95 {
96         u32 flags;
97
98         if (compl->flags != 0) {
99                 flags = le32_to_cpu(compl->flags);
100                 if (flags & CQE_FLAGS_VALID_MASK) {
101                         compl->flags = flags;
102                         return true;
103                 }
104         }
105         return false;
106 }
107
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	/* marks the entry consumed so be_mcc_compl_is_new() ignores it */
	compl->flags = 0;
}
113
114 static struct be_cmd_resp_hdr *be_decode_resp_hdr(u32 tag0, u32 tag1)
115 {
116         unsigned long addr;
117
118         addr = tag1;
119         addr = ((addr << 16) << 16) | tag0;
120         return (void *)addr;
121 }
122
/* Process one MCC completion entry.
 * Converts the completion to host endianness, recovers the originating
 * request header (address stashed in tag0/tag1) to learn the
 * opcode/subsystem, and performs command-specific post-processing.
 * Returns the base completion status reported by FW.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
				struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;
	struct be_cmd_resp_hdr *resp_hdr;
	u8 opcode = 0, subsystem = 0;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	resp_hdr = be_decode_resp_hdr(compl->tag0, compl->tag1);

	if (resp_hdr) {
		opcode = resp_hdr->opcode;
		subsystem = resp_hdr->subsystem;
	}

	/* Flash cmds complete out-of-band; wake up the waiter */
	if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
	     (opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
	    (subsystem == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((opcode == OPCODE_ETH_GET_STATISTICS) ||
		     (opcode == OPCODE_ETH_GET_PPORT_STATS)) &&
		    (subsystem == CMD_SUBSYSTEM_ETH)) {
			be_parse_stats(adapter);
			adapter->stats_cmd_sent = false;
		}
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES &&
		    subsystem == CMD_SUBSYSTEM_COMMON) {
			struct be_cmd_resp_get_cntl_addnl_attribs *resp =
				(void *)resp_hdr;
			adapter->drv_stats.be_on_die_temperature =
				resp->on_die_temperature;
		}
	} else {
		/* stop periodic temperature queries if FW rejects them */
		if (opcode == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES)
			adapter->be_get_temp_freq = 0;

		/* benign/expected failures: not worth logging */
		if (compl_status == MCC_STATUS_NOT_SUPPORTED ||
			compl_status == MCC_STATUS_ILLEGAL_REQUEST)
			goto done;

		if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) {
			dev_warn(&adapter->pdev->dev,
				 "VF is not privileged to issue opcode %d-%d\n",
				 opcode, subsystem);
		} else {
			extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
					CQE_STATUS_EXTD_MASK;
			dev_err(&adapter->pdev->dev,
				"opcode %d-%d failed:status %d-%d\n",
				opcode, subsystem, compl_status, extd_status);
		}
	}
done:
	return compl_status;
}
188
/* Link state evt is a string of bytes; no need for endian swapping.
 * Handles an async link-state event: invalidates the cached link speed
 * and propagates the new state to the netdev layer.
 */
static void be_async_link_state_process(struct be_adapter *adapter,
		struct be_async_event_link_state *evt)
{
	/* When link status changes, link speed must be re-queried from FW */
	adapter->phy.link_speed = -1;

	/* Ignore physical link event */
	if (lancer_chip(adapter) &&
	    !(evt->port_link_status & LOGICAL_LINK_STATUS_MASK))
		return;

	/* For the initial link status do not rely on the ASYNC event as
	 * it may not be received in some cases.
	 */
	if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT)
		be_link_status_update(adapter, evt->port_link_status);
}
207
208 /* Grp5 CoS Priority evt */
209 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
210                 struct be_async_event_grp5_cos_priority *evt)
211 {
212         if (evt->valid) {
213                 adapter->vlan_prio_bmap = evt->available_priority_bmap;
214                 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
215                 adapter->recommended_prio =
216                         evt->reco_default_priority << VLAN_PRIO_SHIFT;
217         }
218 }
219
220 /* Grp5 QOS Speed evt: qos_link_speed is in units of 10 Mbps */
221 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
222                 struct be_async_event_grp5_qos_link_speed *evt)
223 {
224         if (adapter->phy.link_speed >= 0 &&
225             evt->physical_port == adapter->port_num)
226                 adapter->phy.link_speed = le16_to_cpu(evt->qos_link_speed) * 10;
227 }
228
229 /*Grp5 PVID evt*/
230 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
231                 struct be_async_event_grp5_pvid_state *evt)
232 {
233         if (evt->enabled)
234                 adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK;
235         else
236                 adapter->pvid = 0;
237 }
238
239 static void be_async_grp5_evt_process(struct be_adapter *adapter,
240                 u32 trailer, struct be_mcc_compl *evt)
241 {
242         u8 event_type = 0;
243
244         event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
245                 ASYNC_TRAILER_EVENT_TYPE_MASK;
246
247         switch (event_type) {
248         case ASYNC_EVENT_COS_PRIORITY:
249                 be_async_grp5_cos_priority_process(adapter,
250                 (struct be_async_event_grp5_cos_priority *)evt);
251         break;
252         case ASYNC_EVENT_QOS_SPEED:
253                 be_async_grp5_qos_speed_process(adapter,
254                 (struct be_async_event_grp5_qos_link_speed *)evt);
255         break;
256         case ASYNC_EVENT_PVID_STATE:
257                 be_async_grp5_pvid_state_process(adapter,
258                 (struct be_async_event_grp5_pvid_state *)evt);
259         break;
260         default:
261                 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
262                 break;
263         }
264 }
265
266 static inline bool is_link_state_evt(u32 trailer)
267 {
268         return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
269                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
270                                 ASYNC_EVENT_CODE_LINK_STATE;
271 }
272
273 static inline bool is_grp5_evt(u32 trailer)
274 {
275         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
276                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
277                                 ASYNC_EVENT_CODE_GRP_5);
278 }
279
280 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
281 {
282         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
283         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
284
285         if (be_mcc_compl_is_new(compl)) {
286                 queue_tail_inc(mcc_cq);
287                 return compl;
288         }
289         return NULL;
290 }
291
/* Arm the MCC CQ and remember the arm state so be_process_mcc()
 * keeps re-arming it after draining.
 */
void be_async_mcc_enable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
	adapter->mcc_obj.rearm_cq = true;

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
301
/* Disarm the MCC CQ and stop be_process_mcc() from re-arming it */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	spin_lock_bh(&adapter->mcc_cq_lock);

	adapter->mcc_obj.rearm_cq = false;
	be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);

	spin_unlock_bh(&adapter->mcc_cq_lock);
}
311
/* Drain the MCC completion queue: dispatch async events (link state,
 * GRP5) and process command completions, then notify/re-arm the CQ.
 * Returns the status of the last command completion processed (0 if
 * none). Callers disable BHs around this (see be_mcc_wait_compl()).
 */
int be_process_mcc(struct be_adapter *adapter)
{
	struct be_mcc_compl *compl;
	int num = 0, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&mcc_obj->q.used);
		}
		/* mark the CQ entry consumed so it is not seen as new again */
		be_mcc_compl_use(compl);
		num++;
	}

	if (num)
		be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num);

	spin_unlock(&adapter->mcc_cq_lock);
	return status;
}
342
/* Wait till no more pending mcc requests are present.
 * Polls be_process_mcc() until the MCC queue drains, up to ~12s.
 * Returns -EIO on adapter error or timeout (also sets fw_timeout),
 * otherwise the status of the last completion processed.
 */
static int be_mcc_wait_compl(struct be_adapter *adapter)
{
#define mcc_timeout		120000 /* 12s timeout */
	int i, status = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	for (i = 0; i < mcc_timeout; i++) {
		if (be_error(adapter))
			return -EIO;

		/* be_process_mcc() takes mcc_cq_lock with plain spin_lock,
		 * so BHs must be disabled around the call */
		local_bh_disable();
		status = be_process_mcc(adapter);
		local_bh_enable();

		if (atomic_read(&mcc_obj->q.used) == 0)
			break;
		udelay(100);
	}
	if (i == mcc_timeout) {
		dev_err(&adapter->pdev->dev, "FW not responding\n");
		adapter->fw_timeout = true;
		return -EIO;
	}
	return status;
}
369
/* Notify MCC requests and wait for completion.
 * Returns -EIO on adapter error/timeout, else the response status of
 * the wrb most recently posted on the MCC queue.
 */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	int status;
	struct be_mcc_wrb *wrb;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
	u16 index = mcc_obj->q.head;
	struct be_cmd_resp_hdr *resp;

	/* head was advanced when the wrb was allocated; step back to the
	 * wrb being notified */
	index_dec(&index, mcc_obj->q.len);
	wrb = queue_index_node(&mcc_obj->q, index);

	/* tag0/tag1 hold the request-header address; the response is
	 * written over the request, so this points at the response too */
	resp = be_decode_resp_hdr(wrb->tag0, wrb->tag1);

	be_mcc_notify(adapter);

	status = be_mcc_wait_compl(adapter);
	if (status == -EIO)
		goto out;

	status = resp->status;
out:
	return status;
}
394
/* Poll the mailbox doorbell until FW sets the ready bit.
 * Returns 0 when ready, -EIO when an adapter error is detected, -1 when
 * the register reads back all-ones (likely a PCI read failure) or after
 * a ~4s timeout (also sets fw_timeout and runs error detection).
 */
static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
{
	int msecs = 0;
	u32 ready;

	do {
		if (be_error(adapter))
			return -EIO;

		ready = ioread32(db);
		if (ready == 0xffffffff)
			return -1;

		ready &= MPU_MAILBOX_DB_RDY_MASK;
		if (ready)
			break;

		if (msecs > 4000) {
			dev_err(&adapter->pdev->dev, "FW not responding\n");
			adapter->fw_timeout = true;
			be_detect_error(adapter);
			return -1;
		}

		msleep(1);
		msecs++;
	} while (true);

	return 0;
}
425
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* HI bit selects the upper-half write of the mailbox address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	/* the second address write kicks off the command */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
475
476 static void be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
477 {
478         u32 sem;
479         u32 reg = skyhawk_chip(adapter) ? SLIPORT_SEMAPHORE_OFFSET_SH :
480                                           SLIPORT_SEMAPHORE_OFFSET_BE;
481
482         pci_read_config_dword(adapter->pdev, reg, &sem);
483         *stage = sem & POST_STAGE_MASK;
484 }
485
486 int lancer_wait_ready(struct be_adapter *adapter)
487 {
488 #define SLIPORT_READY_TIMEOUT 30
489         u32 sliport_status;
490         int status = 0, i;
491
492         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
493                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
494                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
495                         break;
496
497                 msleep(1000);
498         }
499
500         if (i == SLIPORT_READY_TIMEOUT)
501                 status = -1;
502
503         return status;
504 }
505
506 static bool lancer_provisioning_error(struct be_adapter *adapter)
507 {
508         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
509         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
510         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
511                 sliport_err1 = ioread32(adapter->db +
512                                         SLIPORT_ERROR1_OFFSET);
513                 sliport_err2 = ioread32(adapter->db +
514                                         SLIPORT_ERROR2_OFFSET);
515
516                 if (sliport_err1 == SLIPORT_ERROR_NO_RESOURCE1 &&
517                     sliport_err2 == SLIPORT_ERROR_NO_RESOURCE2)
518                         return true;
519         }
520         return false;
521 }
522
/* Check (and if FW requests it, reset) the Lancer SLI port so that it
 * reaches the ready state. Returns 0 when ready, -1 otherwise; on a
 * failure that is not a transient resource-provisioning error,
 * eeh_error is set so that error recovery is stopped.
 */
int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
	int status;
	u32 sliport_status, err, reset_needed;
	bool resource_error;

	resource_error = lancer_provisioning_error(adapter);
	if (resource_error)
		return -1;

	status = lancer_wait_ready(adapter);
	if (!status) {
		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
		if (err && reset_needed) {
			/* FW asks for a physical-function reset: trigger it */
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);

			/* check adapter has corrected the error */
			status = lancer_wait_ready(adapter);
			sliport_status = ioread32(adapter->db +
						  SLIPORT_STATUS_OFFSET);
			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
						SLIPORT_STATUS_RN_MASK);
			if (status || sliport_status)
				status = -1;
		} else if (err || reset_needed) {
			status = -1;
		}
	}
	/* Stop error recovery only if the error is not recoverable.
	 * A "no resource" error is temporary and will go away once the PF
	 * provisions resources.
	 */
	resource_error = lancer_provisioning_error(adapter);
	if (status == -1 && !resource_error)
		adapter->eeh_error = true;

	return status;
}
564
/* Wait for the adapter firmware to finish POST.
 * Lancer chips are handled via the SLI port status register; other
 * chips poll the POST-stage semaphore for up to 60 seconds.
 * Returns 0 when ready, -EINTR if the sleep was interrupted, negative
 * on timeout.
 */
int be_fw_wait_ready(struct be_adapter *adapter)
{
	u16 stage;
	int status, timeout = 0;
	struct device *dev = &adapter->pdev->dev;

	if (lancer_chip(adapter)) {
		status = lancer_wait_ready(adapter);
		return status;
	}

	do {
		be_POST_stage_get(adapter, &stage);
		if (stage == POST_STAGE_ARMFW_RDY)
			return 0;

		dev_info(dev, "Waiting for POST, %ds elapsed\n",
			 timeout);
		if (msleep_interruptible(2000)) {
			dev_err(dev, "Waiting for POST aborted\n");
			return -EINTR;
		}
		timeout += 2;
	} while (timeout < 60);

	dev_err(dev, "POST timeout; stage=0x%x\n", stage);
	return -1;
}
593
594
/* First scatter-gather entry of a wrb carrying a non-embedded payload */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
599
600
/* Don't touch the hdr after it's prepared */
/* mem will be NULL for embedded commands */
static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
				u8 subsystem, u8 opcode, int cmd_len,
				struct be_mcc_wrb *wrb, struct be_dma_mem *mem)
{
	struct be_sge *sge;
	unsigned long addr = (unsigned long)req_hdr;
	u64 req_addr = addr;

	req_hdr->opcode = opcode;
	req_hdr->subsystem = subsystem;
	req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
	req_hdr->version = 0;

	/* stash the request-header address in the tags so the completion
	 * path can recover it (see be_decode_resp_hdr()) */
	wrb->tag0 = req_addr & 0xFFFFFFFF;
	wrb->tag1 = upper_32_bits(req_addr);

	wrb->payload_length = cmd_len;
	if (mem) {
		/* non-embedded: a single external sge describes the payload */
		wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) <<
			MCC_WRB_SGE_CNT_SHIFT;
		sge = nonembedded_sgl(wrb);
		sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
		sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
		sge->len = cpu_to_le32(mem->size);
	} else
		wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
	be_dws_cpu_to_le(wrb, 8);
}
631
632 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
633                         struct be_dma_mem *mem)
634 {
635         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
636         u64 dma = (u64)mem->dma;
637
638         for (i = 0; i < buf_pages; i++) {
639                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
640                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
641                 dma += PAGE_SIZE_4K;
642         }
643 }
644
645 /* Converts interrupt delay in microseconds to multiplier value */
646 static u32 eq_delay_to_mult(u32 usec_delay)
647 {
648 #define MAX_INTR_RATE                   651042
649         const u32 round = 10;
650         u32 multiplier;
651
652         if (usec_delay == 0)
653                 multiplier = 0;
654         else {
655                 u32 interrupt_rate = 1000000 / usec_delay;
656                 /* Max delay, corresponding to the lowest interrupt rate */
657                 if (interrupt_rate == 0)
658                         multiplier = 1023;
659                 else {
660                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
661                         multiplier /= interrupt_rate;
662                         /* Round the multiplier to the closest value.*/
663                         multiplier = (multiplier + round/2) / round;
664                         multiplier = min(multiplier, (u32)1023);
665                 }
666         }
667         return multiplier;
668 }
669
/* Return the (zeroed) wrb embedded in the bootstrap mailbox; used for
 * commands issued before the MCC queue exists. Callers in this file
 * hold adapter->mbox_lock while the wrb is in use.
 */
static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
{
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_wrb *wrb
		= &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
678
/* Allocate and zero the next free wrb from the MCC queue.
 * Returns NULL if the queue is not created yet or has no free entries.
 * Callers in this file hold adapter->mcc_lock while using the wrb.
 */
static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
{
	struct be_queue_info *mccq = &adapter->mcc_obj.q;
	struct be_mcc_wrb *wrb;

	if (!mccq->created)
		return NULL;

	if (atomic_read(&mccq->used) >= mccq->len) {
		dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
		return NULL;
	}

	wrb = queue_head_node(mccq);
	queue_head_inc(mccq);
	atomic_inc(&mccq->used);
	memset(wrb, 0, sizeof(*wrb));
	return wrb;
}
698
699 /* Tell fw we're about to start firing cmds by writing a
700  * special pattern across the wrb hdr; uses mbox
701  */
702 int be_cmd_fw_init(struct be_adapter *adapter)
703 {
704         u8 *wrb;
705         int status;
706
707         if (lancer_chip(adapter))
708                 return 0;
709
710         if (mutex_lock_interruptible(&adapter->mbox_lock))
711                 return -1;
712
713         wrb = (u8 *)wrb_from_mbox(adapter);
714         *wrb++ = 0xFF;
715         *wrb++ = 0x12;
716         *wrb++ = 0x34;
717         *wrb++ = 0xFF;
718         *wrb++ = 0xFF;
719         *wrb++ = 0x56;
720         *wrb++ = 0x78;
721         *wrb = 0xFF;
722
723         status = be_mbox_notify_wait(adapter);
724
725         mutex_unlock(&adapter->mbox_lock);
726         return status;
727 }
728
729 /* Tell fw we're done with firing cmds by writing a
730  * special pattern across the wrb hdr; uses mbox
731  */
732 int be_cmd_fw_clean(struct be_adapter *adapter)
733 {
734         u8 *wrb;
735         int status;
736
737         if (lancer_chip(adapter))
738                 return 0;
739
740         if (mutex_lock_interruptible(&adapter->mbox_lock))
741                 return -1;
742
743         wrb = (u8 *)wrb_from_mbox(adapter);
744         *wrb++ = 0xFF;
745         *wrb++ = 0xAA;
746         *wrb++ = 0xBB;
747         *wrb++ = 0xFF;
748         *wrb++ = 0xFF;
749         *wrb++ = 0xCC;
750         *wrb++ = 0xDD;
751         *wrb = 0xFF;
752
753         status = be_mbox_notify_wait(adapter);
754
755         mutex_unlock(&adapter->mbox_lock);
756         return status;
757 }
758
/* Create an event queue via the bootstrap mailbox (runs before the MCC
 * queue exists). On success fills in eq->id and marks the eq created.
 * Returns 0 on success, -1 if the mbox lock wait was interrupted, or
 * the FW completion status.
 */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	/* ring size encoded as log2 of the number of 256-entry chunks */
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
799
/* Use MCC */
/* Query a MAC address from FW: the permanent (factory) MAC when
 * @permanent is true, else the MAC bound to @if_handle/@pmac_id.
 * On success copies ETH_ALEN bytes into @mac_addr.
 * Returns 0 on success, -EBUSY when no MCC wrb is free, or the FW
 * completion status.
 */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			  bool permanent, u32 if_handle, u32 pmac_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL);
	req->type = MAC_ADDRESS_TYPE_NETWORK;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->pmac_id = cpu_to_le32(pmac_id);
		req->permanent = 0;
	}

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
838
/* Uses synchronous MCCQ */
/* Attach an additional MAC address to the interface @if_id; on success
 * the FW-assigned id is returned through @pmac_id.
 * Returns 0 on success, -EBUSY when no MCC wrb is free, -EPERM when FW
 * rejects the request as unprivileged, or another FW status.
 */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL);

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	/* translate the FW privilege error into a kernel errno */
	 if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
		status = -EPERM;

	return status;
}
877
/* Uses synchronous MCCQ */
/* Detach the MAC identified by @pmac_id from interface @if_id.
 * A pmac_id of -1 means nothing was ever attached; treated as success.
 * Returns 0 on success, -EBUSY when no MCC wrb is free, or the FW
 * completion status.
 */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	if (pmac_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL);

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
910
/* Create a completion queue (CQ) bound to event queue @eq.
 * @no_delay/@coalesce_wm tune interrupt moderation for the CQ.
 * Uses the bootstrap Mbox (the MCC queue may not exist yet).
 */
int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq,
		struct be_queue_info *eq, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer uses a v2 request and its own context layout */
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		/* ring size encoded as log2(len / 256) */
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
	}

	/* context was built in CPU byte order; convert for the h/w */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
971
972 static u32 be_encoded_q_len(int q_len)
973 {
974         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
975         if (len_encoded == 16)
976                 len_encoded = 0;
977         return len_encoded;
978 }
979
/* Create the MCC queue using the extended MCC_CREATE_EXT command, which
 * additionally lets the driver subscribe to async event groups.
 * Completions for the MCCQ land on @cq. Uses the bootstrap Mbox.
 */
int be_cmd_mccq_ext_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_ext_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		/* Lancer uses a v1 request and its own context layout */
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								 ctxt, 1);

	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
1036
/* Create the MCC queue using the original (pre-ext) MCC_CREATE command.
 * Fallback for older BEx firmware; no async event subscription.
 * Uses the bootstrap Mbox.
 */
int be_cmd_mccq_org_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL);

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
			be_encoded_q_len(mccq->len));
	AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1078
1079 int be_cmd_mccq_create(struct be_adapter *adapter,
1080                         struct be_queue_info *mccq,
1081                         struct be_queue_info *cq)
1082 {
1083         int status;
1084
1085         status = be_cmd_mccq_ext_create(adapter, mccq, cq);
1086         if (status && !lancer_chip(adapter)) {
1087                 dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 "
1088                         "or newer to avoid conflicting priorities between NIC "
1089                         "and FCoE traffic");
1090                 status = be_cmd_mccq_org_create(adapter, mccq, cq);
1091         }
1092         return status;
1093 }
1094
/* Create a TX queue whose completions are posted to @cq.
 * Uses the MCC queue (synchronous wait).
 */
int be_cmd_txq_create(struct be_adapter *adapter,
			struct be_queue_info *txq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_tx_create *req;
	struct be_dma_mem *q_mem = &txq->dma_mem;
	void *ctxt;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL);

	if (lancer_chip(adapter)) {
		/* Lancer's v1 request additionally carries the i/f handle */
		req->hdr.version = 1;
		AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
					adapter->if_handle);
	}

	req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
	req->ulp_num = BE_ULP1_NUM;
	req->type = BE_ETH_TX_RING_TYPE_STANDARD;

	AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
		be_encoded_q_len(txq->len));
	AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
	AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

	/* context was built in CPU byte order; convert for the h/w */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
		txq->id = le16_to_cpu(resp->cid);
		txq->created = true;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);

	return status;
}
1150
/* Create an RX queue on interface @if_id with completions on @cq_id.
 * On success the f/w-assigned RSS id is returned through @rss_id.
 * Uses MCC (synchronous wait).
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
		struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
		u32 if_id, u32 rss, u8 *rss_id)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eth_rx_create *req;
	struct be_dma_mem *q_mem = &rxq->dma_mem;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
				OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL);

	req->cq_id = cpu_to_le16(cq_id);
	req->frag_size = fls(frag_size) - 1;	/* f/w wants log2(frag_size) */
	req->num_pages = 2;
	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
	req->interface_id = cpu_to_le32(if_id);
	req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE);
	req->rss_queue = cpu_to_le32(rss);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
		rxq->id = le16_to_cpu(resp->id);
		rxq->created = true;
		*rss_id = resp->rss_id;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1193
/* Generic destroyer function for all types of queues
 * (@queue_type selects the subsystem/opcode pair; an unknown type is a
 * driver bug and triggers BUG()).
 * Uses Mbox
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
		int queue_type)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	u8 subsys = 0, opcode = 0;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	switch (queue_type) {
	case QTYPE_EQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_EQ_DESTROY;
		break;
	case QTYPE_CQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_CQ_DESTROY;
		break;
	case QTYPE_TXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_TX_DESTROY;
		break;
	case QTYPE_RXQ:
		subsys = CMD_SUBSYSTEM_ETH;
		opcode = OPCODE_ETH_RX_DESTROY;
		break;
	case QTYPE_MCCQ:
		subsys = CMD_SUBSYSTEM_COMMON;
		opcode = OPCODE_COMMON_MCC_DESTROY;
		break;
	default:
		BUG();
	}

	be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb,
				NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mbox_notify_wait(adapter);
	/* marked destroyed even on failure so teardown doesn't retry */
	q->created = false;

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1246
/* Destroy an RX queue. Unlike be_cmd_q_destroy() this goes through the
 * MCC queue rather than the Mbox.
 * Uses MCC (synchronous wait).
 */
int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_q_destroy *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL);
	req->id = cpu_to_le16(q->id);

	status = be_mcc_notify_wait(adapter);
	/* marked destroyed even on failure so teardown doesn't retry */
	q->created = false;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1274
/* Create an rx filtering policy configuration on an i/f.
 * @cap_flags/@en_flags are the BE_IF_FLAGS_* capability/enable masks; the
 * f/w-assigned interface handle is returned through @if_handle.
 * Uses MCCQ
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
		     u32 *if_handle, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_create *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->capability_flags = cpu_to_le32(cap_flags);
	req->enable_flags = cpu_to_le32(en_flags);

	/* no initial MAC is programmed with the interface */
	req->pmac_invalid = true;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
		*if_handle = le32_to_cpu(resp->interface_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1312
/* Destroy the interface with handle @interface_id.
 * An id of -1 means "never created" and is treated as success.
 * Uses MCCQ
 */
int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_if_destroy *req;
	int status;

	if (interface_id == -1)
		return 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL);
	req->hdr.domain = domain;
	req->interface_id = cpu_to_le32(interface_id);

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1342
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block.
 * The completion is consumed elsewhere (stats_cmd_sent is cleared there).
 * Uses asynchronous MCC
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *hdr;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	hdr = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd);

	/* all chips except BE2 support v1 of the cmd */
	if (!BE2_chip(adapter))
		hdr->version = 1;

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1376
/* Lancer Stats: fetch physical-port statistics into the caller-provided
 * DMA buffer. Gated by the function's privilege level via be_cmd_allowed().
 * Uses asynchronous MCC (completion consumed elsewhere).
 */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
				struct be_dma_mem *nonemb_cmd)
{

	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_pport_stats *req;
	int status = 0;

	if (!be_cmd_allowed(adapter, OPCODE_ETH_GET_PPORT_STATS,
			    CMD_SUBSYSTEM_ETH))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = nonemb_cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
			OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb,
			nonemb_cmd);

	req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num);
	req->cmd_params.params.reset_stats = 0;	/* read without clearing */

	be_mcc_notify(adapter);
	adapter->stats_cmd_sent = true;

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1413
1414 static int be_mac_to_link_speed(int mac_speed)
1415 {
1416         switch (mac_speed) {
1417         case PHY_LINK_SPEED_ZERO:
1418                 return 0;
1419         case PHY_LINK_SPEED_10MBPS:
1420                 return 10;
1421         case PHY_LINK_SPEED_100MBPS:
1422                 return 100;
1423         case PHY_LINK_SPEED_1GBPS:
1424                 return 1000;
1425         case PHY_LINK_SPEED_10GBPS:
1426                 return 10000;
1427         }
1428         return 0;
1429 }
1430
/* Uses synchronous mcc
 * Returns link_speed in Mbps (via @link_speed) and the logical link state
 * (via @link_status); either pointer may be NULL. @link_status is
 * initialized to LINK_DOWN so it is well-defined even on command failure.
 */
int be_cmd_link_status_query(struct be_adapter *adapter, u16 *link_speed,
			     u8 *link_status, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_link_status *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	if (link_status)
		*link_status = LINK_DOWN;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL);

	/* all chips except BE2 support v1 of the cmd */
	if (!BE2_chip(adapter))
		req->hdr.version = 1;

	req->hdr.domain = dom;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
		if (link_speed) {
			/* prefer the 10Mbps-units link_speed field; fall
			 * back to decoding the mac_speed enum
			 */
			*link_speed = resp->link_speed ?
				      le16_to_cpu(resp->link_speed) * 10 :
				      be_mac_to_link_speed(resp->mac_speed);

			if (!resp->logical_link_status)
				*link_speed = 0;
		}
		if (link_status)
			*link_status = resp->logical_link_status;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1481
1482 /* Uses synchronous mcc */
1483 int be_cmd_get_die_temperature(struct be_adapter *adapter)
1484 {
1485         struct be_mcc_wrb *wrb;
1486         struct be_cmd_req_get_cntl_addnl_attribs *req;
1487         int status;
1488
1489         spin_lock_bh(&adapter->mcc_lock);
1490
1491         wrb = wrb_from_mccq(adapter);
1492         if (!wrb) {
1493                 status = -EBUSY;
1494                 goto err;
1495         }
1496         req = embedded_payload(wrb);
1497
1498         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1499                 OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req),
1500                 wrb, NULL);
1501
1502         be_mcc_notify(adapter);
1503
1504 err:
1505         spin_unlock_bh(&adapter->mcc_lock);
1506         return status;
1507 }
1508
/* Query the size of the FAT (flash-resident) log via MANAGE_FAT/QUERY_FAT;
 * the usable size (minus the leading u32 header) is returned in @log_size.
 * Uses synchronous mcc
 */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL);
	req->fat_operation = cpu_to_le32(QUERY_FAT);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
		if (log_size && resp->log_size)
			/* exclude the u32 log header from the reported size */
			*log_size = le32_to_cpu(resp->log_size) -
					sizeof(u32);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1539
/* Retrieve up to @buf_len bytes of the FAT log into @buf, in 60KB chunks
 * through a single reusable DMA buffer. The mcc_lock is held across the
 * whole loop so the chunk reads are not interleaved with other commands.
 * Errors are logged but not reported to the caller (void return).
 */
void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
{
	struct be_dma_mem get_fat_cmd;
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fat *req;
	u32 offset = 0, total_size, buf_size,
				log_offset = sizeof(u32), payload_len;
	int status;

	if (buf_len == 0)
		return;

	total_size = buf_len;

	/* one DMA buffer sized for the largest (60KB) chunk */
	get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
	get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_fat_cmd.size,
			&get_fat_cmd.dma);
	if (!get_fat_cmd.va) {
		status = -ENOMEM;
		dev_err(&adapter->pdev->dev,
		"Memory allocation failure while retrieving FAT data\n");
		return;
	}

	spin_lock_bh(&adapter->mcc_lock);

	while (total_size) {
		buf_size = min(total_size, (u32)60*1024);
		total_size -= buf_size;

		wrb = wrb_from_mccq(adapter);
		if (!wrb) {
			status = -EBUSY;
			goto err;
		}
		req = get_fat_cmd.va;

		payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
		be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_MANAGE_FAT, payload_len, wrb,
				&get_fat_cmd);

		req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
		req->read_log_offset = cpu_to_le32(log_offset);
		req->read_log_length = cpu_to_le32(buf_size);
		req->data_buffer_size = cpu_to_le32(buf_size);

		status = be_mcc_notify_wait(adapter);
		if (!status) {
			struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
			/* copy only what the f/w actually returned */
			memcpy(buf + offset,
				resp->data_buffer,
				le32_to_cpu(resp->read_log_length));
		} else {
			dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
			goto err;
		}
		offset += buf_size;
		log_offset += buf_size;
	}
err:
	pci_free_consistent(adapter->pdev, get_fat_cmd.size,
			get_fat_cmd.va,
			get_fat_cmd.dma);
	spin_unlock_bh(&adapter->mcc_lock);
}
1607
/* Fetch the running f/w version into @fw_ver and, if @fw_on_flash is
 * non-NULL, the flashed version into it.
 * NOTE(review): the strcpy calls assume both destination buffers are at
 * least as large as the f/w's fixed-size version strings — confirm the
 * callers pass FW_VER_LEN-sized buffers.
 * Uses synchronous mcc
 */
int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
			char *fw_on_flash)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_fw_version *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL);
	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
		strcpy(fw_ver, resp->firmware_version_string);
		if (fw_on_flash)
			strcpy(fw_on_flash, resp->fw_on_flash_version_string);
	}
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1639
/* set the EQ delay interval of an EQ to specified value
 * (@eqd is the delay multiplier programmed for the single EQ @eq_id).
 * Uses async mcc
 */
int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_modify_eq_delay *req;
	int status = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL);

	/* the command can batch several EQs; we program exactly one */
	req->num_eq = cpu_to_le32(1);
	req->delay[0].eq_id = cpu_to_le32(eq_id);
	req->delay[0].phase = 0;
	req->delay[0].delay_multiplier = cpu_to_le32(eqd);

	be_mcc_notify(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1672
/* Configure the VLAN filter table of interface @if_id with the @num tags
 * in @vtag_array; in promiscuous mode the tag list is ignored.
 * Uses synchronous mcc
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
			u32 num, bool untagged, bool promiscuous)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_vlan_config *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL);

	req->interface_id = if_id;
	req->promiscuous = promiscuous;
	req->untagged = untagged;
	req->num_vlan = num;
	if (!promiscuous) {
		memcpy(req->normal_vlan, vtag_array,
			req->num_vlan * sizeof(vtag_array[0]));
	}

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1708
/* Program the i/f RX filter according to the netdev @flags:
 * IFF_PROMISC toggles (per @value ON/OFF) full + VLAN promiscuous mode,
 * IFF_ALLMULTI enables multicast-promiscuous mode, and otherwise the
 * netdev's multicast list is downloaded. Uses the preallocated
 * adapter->rx_filter DMA buffer (non-embedded command); synchronous mcc.
 */
int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value)
{
	struct be_mcc_wrb *wrb;
	struct be_dma_mem *mem = &adapter->rx_filter;
	struct be_cmd_req_rx_filter *req = mem->va;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* the DMA buffer is reused; clear stale contents first */
	memset(req, 0, sizeof(*req));
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req),
				wrb, mem);

	req->if_id = cpu_to_le32(adapter->if_handle);
	if (flags & IFF_PROMISC) {
		req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
					BE_IF_FLAGS_VLAN_PROMISCUOUS);
		if (value == ON)
			req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS |
						BE_IF_FLAGS_VLAN_PROMISCUOUS);
	} else if (flags & IFF_ALLMULTI) {
		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS);
	} else {
		struct netdev_hw_addr *ha;
		int i = 0;

		req->if_flags_mask = req->if_flags =
				cpu_to_le32(BE_IF_FLAGS_MULTICAST);

		/* Reset mcast promisc mode if already set by setting mask
		 * and not setting flags field
		 */
		req->if_flags_mask |=
			cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS &
				    adapter->if_cap_flags);

		req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev));
		netdev_for_each_mc_addr(ha, adapter->netdev)
			memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN);
	}

	status = be_mcc_notify_wait(adapter);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1762
1763 /* Uses synchrounous mcc */
1764 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1765 {
1766         struct be_mcc_wrb *wrb;
1767         struct be_cmd_req_set_flow_control *req;
1768         int status;
1769
1770         if (!be_cmd_allowed(adapter, OPCODE_COMMON_SET_FLOW_CONTROL,
1771                             CMD_SUBSYSTEM_COMMON))
1772                 return -EPERM;
1773
1774         spin_lock_bh(&adapter->mcc_lock);
1775
1776         wrb = wrb_from_mccq(adapter);
1777         if (!wrb) {
1778                 status = -EBUSY;
1779                 goto err;
1780         }
1781         req = embedded_payload(wrb);
1782
1783         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1784                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL);
1785
1786         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1787         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1788
1789         status = be_mcc_notify_wait(adapter);
1790
1791 err:
1792         spin_unlock_bh(&adapter->mcc_lock);
1793         return status;
1794 }
1795
/* Uses sync mcc.
 * Reads the current tx/rx flow control settings into *tx_fc and *rx_fc.
 * Returns 0 on success, -EPERM if the function lacks the required
 * privilege, -EBUSY if no MCC WRB is available, or an MCC error status.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_flow_control *req;
	int status;

	if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_FLOW_CONTROL,
			    CMD_SUBSYSTEM_COMMON))
		return -EPERM;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* The response overlays the request in the embedded payload */
		struct be_cmd_resp_get_flow_control *resp =
						embedded_payload(wrb);
		*tx_fc = le16_to_cpu(resp->tx_flow_control);
		*rx_fc = le16_to_cpu(resp->rx_flow_control);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1831
/* Uses mbox.
 * Queries the firmware configuration: physical port number, function
 * mode and function capability flags.
 * Returns 0 on success, -1 if the mailbox lock acquisition was
 * interrupted, or a mailbox error status.
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
		u32 *mode, u32 *caps)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_query_fw_cfg *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		/* Response overlays the request in the mailbox payload */
		struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
		*port_num = le32_to_cpu(resp->phys_port);
		*mode = le32_to_cpu(resp->function_mode);
		*caps = le32_to_cpu(resp->function_caps);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1860
/* Uses mbox.
 * Resets this PCI function. On Lancer chips the reset is performed
 * through the SLIPORT control register instead of the FUNCTION_RESET
 * mailbox command.
 */
int be_cmd_reset_function(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_hdr *req;
	int status;

	if (lancer_chip(adapter)) {
		/* Wait for firmware readiness, trigger an IP (initialize
		 * port) request via SLIPORT_CONTROL, then wait for the
		 * ready state to be re-asserted.
		 */
		status = lancer_wait_ready(adapter);
		if (!status) {
			iowrite32(SLI_PORT_CONTROL_IP_MASK,
				  adapter->db + SLIPORT_CONTROL_OFFSET);
			status = lancer_test_and_set_rdy_state(adapter);
		}
		if (status) {
			dev_err(&adapter->pdev->dev,
				"Adapter in non recoverable error\n");
		}
		return status;
	}

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL);

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1896
/* Uses mbox.
 * Enables RSS (TCP/IP v4 and v6, plus UDP on Lancer/Skyhawk) on the
 * interface and programs the CPU indirection table and hash key.
 * Returns 0 on success, -1 if the mailbox lock acquisition was
 * interrupted, or a mailbox error status.
 */
int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_rss_config *req;
	/* Fixed driver-chosen 40-byte RSS hash key */
	u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e,
			0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2,
			0x3ea83c02, 0x4a110304};
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
		OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL);

	req->if_id = cpu_to_le32(adapter->if_handle);
	req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
				      RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6);

	if (lancer_chip(adapter) || skyhawk_chip(adapter)) {
		/* Version 1 of the command adds UDP RSS support */
		req->hdr.version = 1;
		req->enable_rss |= cpu_to_le16(RSS_ENABLE_UDP_IPV4 |
					       RSS_ENABLE_UDP_IPV6);
	}

	/* Firmware takes log2 of the table size; assumes table_size is a
	 * power of two — TODO confirm at call sites.
	 */
	req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
	memcpy(req->cpu_table, rsstable, table_size);
	memcpy(req->hash, myhash, sizeof(myhash));
	be_dws_cpu_to_le(req->hash, sizeof(req->hash));

	status = be_mbox_notify_wait(adapter);

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
1935
1936 /* Uses sync mcc */
1937 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1938                         u8 bcn, u8 sts, u8 state)
1939 {
1940         struct be_mcc_wrb *wrb;
1941         struct be_cmd_req_enable_disable_beacon *req;
1942         int status;
1943
1944         spin_lock_bh(&adapter->mcc_lock);
1945
1946         wrb = wrb_from_mccq(adapter);
1947         if (!wrb) {
1948                 status = -EBUSY;
1949                 goto err;
1950         }
1951         req = embedded_payload(wrb);
1952
1953         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1954                 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL);
1955
1956         req->port_num = port_num;
1957         req->beacon_state = state;
1958         req->beacon_duration = bcn;
1959         req->status_duration = sts;
1960
1961         status = be_mcc_notify_wait(adapter);
1962
1963 err:
1964         spin_unlock_bh(&adapter->mcc_lock);
1965         return status;
1966 }
1967
/* Uses sync mcc.
 * Reads the current beacon LED state of the given port into *state.
 * Returns 0 on success, -EBUSY if no MCC WRB is available, or an MCC
 * error status.
 */
int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_beacon_state *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, NULL);

	req->port_num = port_num;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Response overlays the request in the embedded payload */
		struct be_cmd_resp_get_beacon_state *resp =
						embedded_payload(wrb);
		*state = resp->beacon_state;
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2000
/* Writes one chunk of a flash object on Lancer adapters.
 * @cmd:           DMA buffer holding the request header followed by the data
 * @data_size:     bytes to write in this chunk; 0 marks end-of-file
 * @data_offset:   byte offset within the object to write at
 * @obj_name:      firmware object name to write
 * @data_written:  out - bytes actually written (valid on success)
 * @change_status: out - whether the flash contents changed (on success)
 * @addn_status:   out - additional firmware status (on failure)
 * The command completes asynchronously: be_mcc_notify() is issued with
 * the lock held, then the lock is dropped and completion is awaited on
 * adapter->flash_compl with a 30 second timeout (-1 on timeout).
 */
int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
			    u32 data_size, u32 data_offset,
			    const char *obj_name, u32 *data_written,
			    u8 *change_status, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_write_object *req;
	struct lancer_cmd_resp_write_object *resp;
	void *ctxt = NULL;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_WRITE_OBJECT,
				sizeof(struct lancer_cmd_req_write_object), wrb,
				NULL);

	ctxt = &req->context;
	AMAP_SET_BITS(struct amap_lancer_write_obj_context,
			write_length, ctxt, data_size);

	/* A zero-length write signals end-of-file to the firmware */
	if (data_size == 0)
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 1);
	else
		AMAP_SET_BITS(struct amap_lancer_write_obj_context,
				eof, ctxt, 0);

	be_dws_cpu_to_le(ctxt, sizeof(req->context));
	req->write_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	/* The payload data follows the request header in the DMA buffer;
	 * pass its bus address split into low/high 32-bit halves.
	 */
	req->addr_low = cpu_to_le32((cmd->dma +
				sizeof(struct lancer_cmd_req_write_object))
				& 0xFFFFFFFF);
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma +
				sizeof(struct lancer_cmd_req_write_object)));

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
					 msecs_to_jiffies(30000)))
		status = -1;
	else
		status = adapter->flash_status;

	/* Response overlays the request in the embedded payload */
	resp = embedded_payload(wrb);
	if (!status) {
		*data_written = le32_to_cpu(resp->actual_write_len);
		*change_status = resp->change_status;
	} else {
		*addn_status = resp->additional_status;
	}

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2073
/* Reads one chunk of a flash object on Lancer adapters into cmd's DMA
 * buffer.
 * @data_size:   bytes requested in this chunk
 * @data_offset: byte offset within the object to read from
 * @obj_name:    firmware object name to read
 * @data_read:   out - bytes actually returned (valid on success)
 * @eof:         out - non-zero when the object end was reached (on success)
 * @addn_status: out - additional firmware status (on failure)
 * Uses a synchronous MCC command.
 */
int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
		u32 data_size, u32 data_offset, const char *obj_name,
		u32 *data_read, u32 *eof, u8 *addn_status)
{
	struct be_mcc_wrb *wrb;
	struct lancer_cmd_req_read_object *req;
	struct lancer_cmd_resp_read_object *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_READ_OBJECT,
			sizeof(struct lancer_cmd_req_read_object), wrb,
			NULL);

	req->desired_read_len = cpu_to_le32(data_size);
	req->read_offset = cpu_to_le32(data_offset);
	strcpy(req->object_name, obj_name);
	req->descriptor_count = cpu_to_le32(1);
	req->buf_len = cpu_to_le32(data_size);
	/* Destination bus address, split into low/high 32-bit halves */
	req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF));
	req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma));

	status = be_mcc_notify_wait(adapter);

	/* Response overlays the request in the embedded payload */
	resp = embedded_payload(wrb);
	if (!status) {
		*data_read = le32_to_cpu(resp->actual_read_len);
		*eof = le32_to_cpu(resp->eof);
	} else {
		*addn_status = resp->additional_status;
	}

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2120
/* Issues a flashrom write operation (BE2/BE3 style) using the
 * non-embedded DMA buffer cmd, which holds the request and payload.
 * @flash_type:   flash region/operation type
 * @flash_opcode: flashrom operation code
 * @buf_size:     size of the data payload in cmd
 * The command completes asynchronously: be_mcc_notify() is issued with
 * the lock held, then the lock is dropped and completion is awaited on
 * adapter->flash_compl with a 40 second timeout (-1 on timeout).
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	req = cmd->va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(40000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2160
/* Reads the 4-byte CRC of the redboot flash section at the given offset.
 * On success the CRC is copied into flashed_crc.
 * Returns 0 on success, -EBUSY if no MCC WRB is available, or an MCC
 * error status.
 */
int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
			 int offset)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_read_flash_crc *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			       OPCODE_COMMON_READ_FLASHROM, sizeof(*req),
			       wrb, NULL);

	req->params.op_type = cpu_to_le32(OPTYPE_REDBOOT);
	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
	req->params.offset = cpu_to_le32(offset);
	req->params.data_buf_size = cpu_to_le32(0x4);

	/* On completion the firmware writes the CRC back into the same
	 * embedded payload, so req->crc holds the response data.
	 */
	status = be_mcc_notify_wait(adapter);
	if (!status)
		memcpy(flashed_crc, req->crc, 4);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2194
2195 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
2196                                 struct be_dma_mem *nonemb_cmd)
2197 {
2198         struct be_mcc_wrb *wrb;
2199         struct be_cmd_req_acpi_wol_magic_config *req;
2200         int status;
2201
2202         spin_lock_bh(&adapter->mcc_lock);
2203
2204         wrb = wrb_from_mccq(adapter);
2205         if (!wrb) {
2206                 status = -EBUSY;
2207                 goto err;
2208         }
2209         req = nonemb_cmd->va;
2210
2211         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2212                 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb,
2213                 nonemb_cmd);
2214         memcpy(req->magic_mac, mac, ETH_ALEN);
2215
2216         status = be_mcc_notify_wait(adapter);
2217
2218 err:
2219         spin_unlock_bh(&adapter->mcc_lock);
2220         return status;
2221 }
2222
2223 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
2224                         u8 loopback_type, u8 enable)
2225 {
2226         struct be_mcc_wrb *wrb;
2227         struct be_cmd_req_set_lmode *req;
2228         int status;
2229
2230         spin_lock_bh(&adapter->mcc_lock);
2231
2232         wrb = wrb_from_mccq(adapter);
2233         if (!wrb) {
2234                 status = -EBUSY;
2235                 goto err;
2236         }
2237
2238         req = embedded_payload(wrb);
2239
2240         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2241                         OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb,
2242                         NULL);
2243
2244         req->src_port = port_num;
2245         req->dest_port = port_num;
2246         req->loopback_type = loopback_type;
2247         req->loopback_state = enable;
2248
2249         status = be_mcc_notify_wait(adapter);
2250 err:
2251         spin_unlock_bh(&adapter->mcc_lock);
2252         return status;
2253 }
2254
/* Runs a firmware loopback test on the given port: num_pkts packets of
 * pkt_size bytes carrying the given pattern are sent through the chosen
 * loopback path.
 * Returns the firmware-reported test status on success, -EBUSY if no
 * MCC WRB is available, or an MCC error status.
 */
int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
		u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_loopback_test *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
	/* Allow the firmware up to 4 seconds to complete the test */
	req->hdr.timeout = cpu_to_le32(4);

	req->pattern = cpu_to_le64(pattern);
	req->src_port = cpu_to_le32(port_num);
	req->dest_port = cpu_to_le32(port_num);
	req->pkt_size = cpu_to_le32(pkt_size);
	req->num_pkts = cpu_to_le32(num_pkts);
	req->loopback_type = cpu_to_le32(loopback_type);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		/* Propagate the test result reported by firmware */
		struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
		status = le32_to_cpu(resp->status);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2293
/* Runs a DDR DMA loopback test: fills the command buffer with a
 * repeating 8-byte pattern, asks the firmware to DMA it to DDR and
 * back, and compares the received buffer against what was sent.
 * Returns 0 on success, -EBUSY if no MCC WRB is available, -1 on a
 * data mismatch or send error, or an MCC error status.
 */
int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
				u32 byte_cnt, struct be_dma_mem *cmd)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_ddrdma_test *req;
	int status;
	int i, j = 0;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = cmd->va;
	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
			OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd);

	req->pattern = cpu_to_le64(pattern);
	req->byte_count = cpu_to_le32(byte_cnt);
	/* Replicate the 64-bit pattern byte-by-byte across the send
	 * buffer; j cycles 0..7 selecting which byte of the pattern
	 * to emit next.
	 */
	for (i = 0; i < byte_cnt; i++) {
		req->snd_buff[i] = (u8)(pattern >> (j*8));
		j++;
		if (j > 7)
			j = 0;
	}

	status = be_mcc_notify_wait(adapter);

	if (!status) {
		/* Response shares the DMA buffer with the request */
		struct be_cmd_resp_ddrdma_test *resp;
		resp = cmd->va;
		if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
				resp->snd_err) {
			status = -1;
		}
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
2337
2338 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2339                                 struct be_dma_mem *nonemb_cmd)
2340 {
2341         struct be_mcc_wrb *wrb;
2342         struct be_cmd_req_seeprom_read *req;
2343         struct be_sge *sge;
2344         int status;
2345
2346         spin_lock_bh(&adapter->mcc_lock);
2347
2348         wrb = wrb_from_mccq(adapter);
2349         if (!wrb) {
2350                 status = -EBUSY;
2351                 goto err;
2352         }
2353         req = nonemb_cmd->va;
2354         sge = nonembedded_sgl(wrb);
2355
2356         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2357                         OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb,
2358                         nonemb_cmd);
2359
2360         status = be_mcc_notify_wait(adapter);
2361
2362 err:
2363         spin_unlock_bh(&adapter->mcc_lock);
2364         return status;
2365 }
2366
2367 int be_cmd_get_phy_info(struct be_adapter *adapter)
2368 {
2369         struct be_mcc_wrb *wrb;
2370         struct be_cmd_req_get_phy_info *req;
2371         struct be_dma_mem cmd;
2372         int status;
2373
2374         if (!be_cmd_allowed(adapter, OPCODE_COMMON_GET_PHY_DETAILS,
2375                             CMD_SUBSYSTEM_COMMON))
2376                 return -EPERM;
2377
2378         spin_lock_bh(&adapter->mcc_lock);
2379
2380         wrb = wrb_from_mccq(adapter);
2381         if (!wrb) {
2382                 status = -EBUSY;
2383                 goto err;
2384         }
2385         cmd.size = sizeof(struct be_cmd_req_get_phy_info);
2386         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2387                                         &cmd.dma);
2388         if (!cmd.va) {
2389                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2390                 status = -ENOMEM;
2391                 goto err;
2392         }
2393
2394         req = cmd.va;
2395
2396         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2397                         OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req),
2398                         wrb, &cmd);
2399
2400         status = be_mcc_notify_wait(adapter);
2401         if (!status) {
2402                 struct be_phy_info *resp_phy_info =
2403                                 cmd.va + sizeof(struct be_cmd_req_hdr);
2404                 adapter->phy.phy_type = le16_to_cpu(resp_phy_info->phy_type);
2405                 adapter->phy.interface_type =
2406                         le16_to_cpu(resp_phy_info->interface_type);
2407                 adapter->phy.auto_speeds_supported =
2408                         le16_to_cpu(resp_phy_info->auto_speeds_supported);
2409                 adapter->phy.fixed_speeds_supported =
2410                         le16_to_cpu(resp_phy_info->fixed_speeds_supported);
2411                 adapter->phy.misc_params =
2412                         le32_to_cpu(resp_phy_info->misc_params);
2413         }
2414         pci_free_consistent(adapter->pdev, cmd.size,
2415                                 cmd.va, cmd.dma);
2416 err:
2417         spin_unlock_bh(&adapter->mcc_lock);
2418         return status;
2419 }
2420
2421 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2422 {
2423         struct be_mcc_wrb *wrb;
2424         struct be_cmd_req_set_qos *req;
2425         int status;
2426
2427         spin_lock_bh(&adapter->mcc_lock);
2428
2429         wrb = wrb_from_mccq(adapter);
2430         if (!wrb) {
2431                 status = -EBUSY;
2432                 goto err;
2433         }
2434
2435         req = embedded_payload(wrb);
2436
2437         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2438                         OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL);
2439
2440         req->hdr.domain = domain;
2441         req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2442         req->max_bps_nic = cpu_to_le32(bps);
2443
2444         status = be_mcc_notify_wait(adapter);
2445
2446 err:
2447         spin_unlock_bh(&adapter->mcc_lock);
2448         return status;
2449 }
2450
2451 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2452 {
2453         struct be_mcc_wrb *wrb;
2454         struct be_cmd_req_cntl_attribs *req;
2455         struct be_cmd_resp_cntl_attribs *resp;
2456         int status;
2457         int payload_len = max(sizeof(*req), sizeof(*resp));
2458         struct mgmt_controller_attrib *attribs;
2459         struct be_dma_mem attribs_cmd;
2460
2461         memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2462         attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2463         attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2464                                                 &attribs_cmd.dma);
2465         if (!attribs_cmd.va) {
2466                 dev_err(&adapter->pdev->dev,
2467                                 "Memory allocation failure\n");
2468                 return -ENOMEM;
2469         }
2470
2471         if (mutex_lock_interruptible(&adapter->mbox_lock))
2472                 return -1;
2473
2474         wrb = wrb_from_mbox(adapter);
2475         if (!wrb) {
2476                 status = -EBUSY;
2477                 goto err;
2478         }
2479         req = attribs_cmd.va;
2480
2481         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2482                          OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb,
2483                         &attribs_cmd);
2484
2485         status = be_mbox_notify_wait(adapter);
2486         if (!status) {
2487                 attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr);
2488                 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2489         }
2490
2491 err:
2492         mutex_unlock(&adapter->mbox_lock);
2493         pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2494                                         attribs_cmd.dma);
2495         return status;
2496 }
2497
/* Uses mbox.
 * Requests the BE3 native ERX ("advanced") receive mode from firmware
 * and caches whether it was granted in adapter->be3_native. Warns if
 * the adapter stays in legacy mode.
 */
int be_cmd_req_native_mode(struct be_adapter *adapter)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_set_func_cap *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}

	req = embedded_payload(wrb);

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL);

	req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
				CAPABILITY_BE3_NATIVE_ERX_API);
	req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		/* Firmware echoes back the capabilities actually granted */
		struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
		adapter->be3_native = le32_to_cpu(resp->cap_flags) &
					CAPABILITY_BE3_NATIVE_ERX_API;
		if (!adapter->be3_native)
			dev_warn(&adapter->pdev->dev,
				 "adapter not in advanced mode\n");
	}
err:
	mutex_unlock(&adapter->mbox_lock);
	return status;
}
2536
2537 /* Get privilege(s) for a function */
2538 int be_cmd_get_fn_privileges(struct be_adapter *adapter, u32 *privilege,
2539                              u32 domain)
2540 {
2541         struct be_mcc_wrb *wrb;
2542         struct be_cmd_req_get_fn_privileges *req;
2543         int status;
2544
2545         spin_lock_bh(&adapter->mcc_lock);
2546
2547         wrb = wrb_from_mccq(adapter);
2548         if (!wrb) {
2549                 status = -EBUSY;
2550                 goto err;
2551         }
2552
2553         req = embedded_payload(wrb);
2554
2555         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2556                                OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req),
2557                                wrb, NULL);
2558
2559         req->hdr.domain = domain;
2560
2561         status = be_mcc_notify_wait(adapter);
2562         if (!status) {
2563                 struct be_cmd_resp_get_fn_privileges *resp =
2564                                                 embedded_payload(wrb);
2565                 *privilege = le32_to_cpu(resp->privilege_mask);
2566         }
2567
2568 err:
2569         spin_unlock_bh(&adapter->mcc_lock);
2570         return status;
2571 }
2572
/* Uses synchronous MCCQ.
 * Retrieves the MAC list for a domain. If an active mac_id entry is
 * present in the returned list, *pmac_id_active is set to true and the
 * id is returned via *pmac_id; otherwise *pmac_id_active is false and
 * the first permanent MAC address is copied into mac[].
 * Returns 0 on success, -ENOMEM on allocation failure, -EBUSY if no
 * MCC WRB is available, or an MCC error status.
 */
int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
			     bool *pmac_id_active, u32 *pmac_id, u8 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_get_mac_list *req;
	int status;
	int mac_count;
	struct be_dma_mem get_mac_list_cmd;
	int i;

	memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
	get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
	get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
			get_mac_list_cmd.size,
			&get_mac_list_cmd.dma);

	if (!get_mac_list_cmd.va) {
		dev_err(&adapter->pdev->dev,
				"Memory allocation failure during GET_MAC_LIST\n");
		return -ENOMEM;
	}

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto out;
	}

	req = get_mac_list_cmd.va;

	be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
				OPCODE_COMMON_GET_MAC_LIST, sizeof(*req),
				wrb, &get_mac_list_cmd);

	req->hdr.domain = domain;
	req->mac_type = MAC_ADDRESS_TYPE_NETWORK;
	/* Ask firmware to include permanent (true/pseudo) MAC entries */
	req->perm_override = 1;

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_get_mac_list *resp =
						get_mac_list_cmd.va;
		mac_count = resp->true_mac_count + resp->pseudo_mac_count;
		/* Mac list returned could contain one or more active mac_ids
		 * or one or more true or pseudo permanant mac addresses.
		 * If an active mac_id is present, return first active mac_id
		 * found.
		 */
		for (i = 0; i < mac_count; i++) {
			struct get_list_macaddr *mac_entry;
			u16 mac_addr_size;
			u32 mac_id;

			mac_entry = &resp->macaddr_list[i];
			mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size);
			/* mac_id is a 32 bit value and mac_addr size
			 * is 6 bytes
			 */
			if (mac_addr_size == sizeof(u32)) {
				*pmac_id_active = true;
				mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id;
				*pmac_id = le32_to_cpu(mac_id);
				goto out;
			}
		}
		/* If no active mac_id found, return first mac addr */
		*pmac_id_active = false;
		memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr,
								ETH_ALEN);
	}

out:
	spin_unlock_bh(&adapter->mcc_lock);
	pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
			get_mac_list_cmd.va, get_mac_list_cmd.dma);
	return status;
}
2653
2654 /* Uses synchronous MCCQ */
2655 int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
2656                         u8 mac_count, u32 domain)
2657 {
2658         struct be_mcc_wrb *wrb;
2659         struct be_cmd_req_set_mac_list *req;
2660         int status;
2661         struct be_dma_mem cmd;
2662
2663         memset(&cmd, 0, sizeof(struct be_dma_mem));
2664         cmd.size = sizeof(struct be_cmd_req_set_mac_list);
2665         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
2666                         &cmd.dma, GFP_KERNEL);
2667         if (!cmd.va) {
2668                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2669                 return -ENOMEM;
2670         }
2671
2672         spin_lock_bh(&adapter->mcc_lock);
2673
2674         wrb = wrb_from_mccq(adapter);
2675         if (!wrb) {
2676                 status = -EBUSY;
2677                 goto err;
2678         }
2679
2680         req = cmd.va;
2681         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2682                                 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req),
2683                                 wrb, &cmd);
2684
2685         req->hdr.domain = domain;
2686         req->mac_count = mac_count;
2687         if (mac_count)
2688                 memcpy(req->mac, mac_array, ETH_ALEN*mac_count);
2689
2690         status = be_mcc_notify_wait(adapter);
2691
2692 err:
2693         dma_free_coherent(&adapter->pdev->dev, cmd.size,
2694                                 cmd.va, cmd.dma);
2695         spin_unlock_bh(&adapter->mcc_lock);
2696         return status;
2697 }
2698
2699 int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid,
2700                         u32 domain, u16 intf_id)
2701 {
2702         struct be_mcc_wrb *wrb;
2703         struct be_cmd_req_set_hsw_config *req;
2704         void *ctxt;
2705         int status;
2706
2707         spin_lock_bh(&adapter->mcc_lock);
2708
2709         wrb = wrb_from_mccq(adapter);
2710         if (!wrb) {
2711                 status = -EBUSY;
2712                 goto err;
2713         }
2714
2715         req = embedded_payload(wrb);
2716         ctxt = &req->context;
2717
2718         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2719                         OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2720
2721         req->hdr.domain = domain;
2722         AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id);
2723         if (pvid) {
2724                 AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1);
2725                 AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid);
2726         }
2727
2728         be_dws_cpu_to_le(req->context, sizeof(req->context));
2729         status = be_mcc_notify_wait(adapter);
2730
2731 err:
2732         spin_unlock_bh(&adapter->mcc_lock);
2733         return status;
2734 }
2735
2736 /* Get Hyper switch config */
2737 int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid,
2738                         u32 domain, u16 intf_id)
2739 {
2740         struct be_mcc_wrb *wrb;
2741         struct be_cmd_req_get_hsw_config *req;
2742         void *ctxt;
2743         int status;
2744         u16 vid;
2745
2746         spin_lock_bh(&adapter->mcc_lock);
2747
2748         wrb = wrb_from_mccq(adapter);
2749         if (!wrb) {
2750                 status = -EBUSY;
2751                 goto err;
2752         }
2753
2754         req = embedded_payload(wrb);
2755         ctxt = &req->context;
2756
2757         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2758                         OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL);
2759
2760         req->hdr.domain = domain;
2761         AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt,
2762                                                                 intf_id);
2763         AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1);
2764         be_dws_cpu_to_le(req->context, sizeof(req->context));
2765
2766         status = be_mcc_notify_wait(adapter);
2767         if (!status) {
2768                 struct be_cmd_resp_get_hsw_config *resp =
2769                                                 embedded_payload(wrb);
2770                 be_dws_le_to_cpu(&resp->context,
2771                                                 sizeof(resp->context));
2772                 vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context,
2773                                                         pvid, &resp->context);
2774                 *pvid = le16_to_cpu(vid);
2775         }
2776
2777 err:
2778         spin_unlock_bh(&adapter->mcc_lock);
2779         return status;
2780 }
2781
2782 int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
2783 {
2784         struct be_mcc_wrb *wrb;
2785         struct be_cmd_req_acpi_wol_magic_config_v1 *req;
2786         int status;
2787         int payload_len = sizeof(*req);
2788         struct be_dma_mem cmd;
2789
2790         if (!be_cmd_allowed(adapter, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2791                             CMD_SUBSYSTEM_ETH))
2792                 return -EPERM;
2793
2794         memset(&cmd, 0, sizeof(struct be_dma_mem));
2795         cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
2796         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2797                                                &cmd.dma);
2798         if (!cmd.va) {
2799                 dev_err(&adapter->pdev->dev,
2800                                 "Memory allocation failure\n");
2801                 return -ENOMEM;
2802         }
2803
2804         if (mutex_lock_interruptible(&adapter->mbox_lock))
2805                 return -1;
2806
2807         wrb = wrb_from_mbox(adapter);
2808         if (!wrb) {
2809                 status = -EBUSY;
2810                 goto err;
2811         }
2812
2813         req = cmd.va;
2814
2815         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
2816                                OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG,
2817                                payload_len, wrb, &cmd);
2818
2819         req->hdr.version = 1;
2820         req->query_options = BE_GET_WOL_CAP;
2821
2822         status = be_mbox_notify_wait(adapter);
2823         if (!status) {
2824                 struct be_cmd_resp_acpi_wol_magic_config_v1 *resp;
2825                 resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va;
2826
2827                 /* the command could succeed misleadingly on old f/w
2828                  * which is not aware of the V1 version. fake an error. */
2829                 if (resp->hdr.response_length < payload_len) {
2830                         status = -1;
2831                         goto err;
2832                 }
2833                 adapter->wol_cap = resp->wol_settings;
2834         }
2835 err:
2836         mutex_unlock(&adapter->mbox_lock);
2837         pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
2838         return status;
2839
2840 }
2841 int be_cmd_get_ext_fat_capabilites(struct be_adapter *adapter,
2842                                    struct be_dma_mem *cmd)
2843 {
2844         struct be_mcc_wrb *wrb;
2845         struct be_cmd_req_get_ext_fat_caps *req;
2846         int status;
2847
2848         if (mutex_lock_interruptible(&adapter->mbox_lock))
2849                 return -1;
2850
2851         wrb = wrb_from_mbox(adapter);
2852         if (!wrb) {
2853                 status = -EBUSY;
2854                 goto err;
2855         }
2856
2857         req = cmd->va;
2858         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2859                                OPCODE_COMMON_GET_EXT_FAT_CAPABILITES,
2860                                cmd->size, wrb, cmd);
2861         req->parameter_type = cpu_to_le32(1);
2862
2863         status = be_mbox_notify_wait(adapter);
2864 err:
2865         mutex_unlock(&adapter->mbox_lock);
2866         return status;
2867 }
2868
2869 int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
2870                                    struct be_dma_mem *cmd,
2871                                    struct be_fat_conf_params *configs)
2872 {
2873         struct be_mcc_wrb *wrb;
2874         struct be_cmd_req_set_ext_fat_caps *req;
2875         int status;
2876
2877         spin_lock_bh(&adapter->mcc_lock);
2878
2879         wrb = wrb_from_mccq(adapter);
2880         if (!wrb) {
2881                 status = -EBUSY;
2882                 goto err;
2883         }
2884
2885         req = cmd->va;
2886         memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params));
2887         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2888                                OPCODE_COMMON_SET_EXT_FAT_CAPABILITES,
2889                                cmd->size, wrb, cmd);
2890
2891         status = be_mcc_notify_wait(adapter);
2892 err:
2893         spin_unlock_bh(&adapter->mcc_lock);
2894         return status;
2895 }
2896
2897 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name)
2898 {
2899         struct be_mcc_wrb *wrb;
2900         struct be_cmd_req_get_port_name *req;
2901         int status;
2902
2903         if (!lancer_chip(adapter)) {
2904                 *port_name = adapter->hba_port_num + '0';
2905                 return 0;
2906         }
2907
2908         spin_lock_bh(&adapter->mcc_lock);
2909
2910         wrb = wrb_from_mccq(adapter);
2911         if (!wrb) {
2912                 status = -EBUSY;
2913                 goto err;
2914         }
2915
2916         req = embedded_payload(wrb);
2917
2918         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2919                                OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb,
2920                                NULL);
2921         req->hdr.version = 1;
2922
2923         status = be_mcc_notify_wait(adapter);
2924         if (!status) {
2925                 struct be_cmd_resp_get_port_name *resp = embedded_payload(wrb);
2926                 *port_name = resp->port_name[adapter->hba_port_num];
2927         } else {
2928                 *port_name = adapter->hba_port_num + '0';
2929         }
2930 err:
2931         spin_unlock_bh(&adapter->mcc_lock);
2932         return status;
2933 }
2934
2935 static struct be_nic_resource_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
2936                                                     u32 max_buf_size)
2937 {
2938         struct be_nic_resource_desc *desc = (struct be_nic_resource_desc *)buf;
2939         int i;
2940
2941         for (i = 0; i < desc_count; i++) {
2942                 desc->desc_len = RESOURCE_DESC_SIZE;
2943                 if (((void *)desc + desc->desc_len) >
2944                     (void *)(buf + max_buf_size)) {
2945                         desc = NULL;
2946                         break;
2947                 }
2948
2949                 if (desc->desc_type == NIC_RESOURCE_DESC_TYPE_ID)
2950                         break;
2951
2952                 desc = (void *)desc + desc->desc_len;
2953         }
2954
2955         if (!desc || i == MAX_RESOURCE_DESC)
2956                 return NULL;
2957
2958         return desc;
2959 }
2960
2961 /* Uses Mbox */
2962 int be_cmd_get_func_config(struct be_adapter *adapter)
2963 {
2964         struct be_mcc_wrb *wrb;
2965         struct be_cmd_req_get_func_config *req;
2966         int status;
2967         struct be_dma_mem cmd;
2968
2969         memset(&cmd, 0, sizeof(struct be_dma_mem));
2970         cmd.size = sizeof(struct be_cmd_resp_get_func_config);
2971         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
2972                                       &cmd.dma);
2973         if (!cmd.va) {
2974                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
2975                 return -ENOMEM;
2976         }
2977         if (mutex_lock_interruptible(&adapter->mbox_lock))
2978                 return -1;
2979
2980         wrb = wrb_from_mbox(adapter);
2981         if (!wrb) {
2982                 status = -EBUSY;
2983                 goto err;
2984         }
2985
2986         req = cmd.va;
2987
2988         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2989                                OPCODE_COMMON_GET_FUNC_CONFIG,
2990                                cmd.size, wrb, &cmd);
2991
2992         status = be_mbox_notify_wait(adapter);
2993         if (!status) {
2994                 struct be_cmd_resp_get_func_config *resp = cmd.va;
2995                 u32 desc_count = le32_to_cpu(resp->desc_count);
2996                 struct be_nic_resource_desc *desc;
2997
2998                 desc = be_get_nic_desc(resp->func_param, desc_count,
2999                                        sizeof(resp->func_param));
3000                 if (!desc) {
3001                         status = -EINVAL;
3002                         goto err;
3003                 }
3004
3005                 adapter->pf_number = desc->pf_num;
3006                 adapter->max_pmac_cnt = le16_to_cpu(desc->unicast_mac_count);
3007                 adapter->max_vlans = le16_to_cpu(desc->vlan_count);
3008                 adapter->max_mcast_mac = le16_to_cpu(desc->mcast_mac_count);
3009                 adapter->max_tx_queues = le16_to_cpu(desc->txq_count);
3010                 adapter->max_rss_queues = le16_to_cpu(desc->rssq_count);
3011                 adapter->max_rx_queues = le16_to_cpu(desc->rq_count);
3012
3013                 adapter->max_event_queues = le16_to_cpu(desc->eq_count);
3014                 adapter->if_cap_flags = le32_to_cpu(desc->cap_flags);
3015         }
3016 err:
3017         mutex_unlock(&adapter->mbox_lock);
3018         pci_free_consistent(adapter->pdev, cmd.size,
3019                             cmd.va, cmd.dma);
3020         return status;
3021 }
3022
3023  /* Uses sync mcc */
3024 int be_cmd_get_profile_config(struct be_adapter *adapter, u32 *cap_flags,
3025                               u8 domain)
3026 {
3027         struct be_mcc_wrb *wrb;
3028         struct be_cmd_req_get_profile_config *req;
3029         int status;
3030         struct be_dma_mem cmd;
3031
3032         memset(&cmd, 0, sizeof(struct be_dma_mem));
3033         cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
3034         cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size,
3035                                       &cmd.dma);
3036         if (!cmd.va) {
3037                 dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
3038                 return -ENOMEM;
3039         }
3040
3041         spin_lock_bh(&adapter->mcc_lock);
3042
3043         wrb = wrb_from_mccq(adapter);
3044         if (!wrb) {
3045                 status = -EBUSY;
3046                 goto err;
3047         }
3048
3049         req = cmd.va;
3050
3051         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3052                                OPCODE_COMMON_GET_PROFILE_CONFIG,
3053                                cmd.size, wrb, &cmd);
3054
3055         req->type = ACTIVE_PROFILE_TYPE;
3056         req->hdr.domain = domain;
3057
3058         status = be_mcc_notify_wait(adapter);
3059         if (!status) {
3060                 struct be_cmd_resp_get_profile_config *resp = cmd.va;
3061                 u32 desc_count = le32_to_cpu(resp->desc_count);
3062                 struct be_nic_resource_desc *desc;
3063
3064                 desc = be_get_nic_desc(resp->func_param, desc_count,
3065                                        sizeof(resp->func_param));
3066
3067                 if (!desc) {
3068                         status = -EINVAL;
3069                         goto err;
3070                 }
3071                 *cap_flags = le32_to_cpu(desc->cap_flags);
3072         }
3073 err:
3074         spin_unlock_bh(&adapter->mcc_lock);
3075         pci_free_consistent(adapter->pdev, cmd.size,
3076                             cmd.va, cmd.dma);
3077         return status;
3078 }
3079
3080 /* Uses sync mcc */
3081 int be_cmd_set_profile_config(struct be_adapter *adapter, u32 bps,
3082                               u8 domain)
3083 {
3084         struct be_mcc_wrb *wrb;
3085         struct be_cmd_req_set_profile_config *req;
3086         int status;
3087
3088         spin_lock_bh(&adapter->mcc_lock);
3089
3090         wrb = wrb_from_mccq(adapter);
3091         if (!wrb) {
3092                 status = -EBUSY;
3093                 goto err;
3094         }
3095
3096         req = embedded_payload(wrb);
3097
3098         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3099                                OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
3100                                wrb, NULL);
3101
3102         req->hdr.domain = domain;
3103         req->desc_count = cpu_to_le32(1);
3104
3105         req->nic_desc.desc_type = NIC_RESOURCE_DESC_TYPE_ID;
3106         req->nic_desc.desc_len = RESOURCE_DESC_SIZE;
3107         req->nic_desc.flags = (1 << QUN) | (1 << IMM) | (1 << NOSV);
3108         req->nic_desc.pf_num = adapter->pf_number;
3109         req->nic_desc.vf_num = domain;
3110
3111         /* Mark fields invalid */
3112         req->nic_desc.unicast_mac_count = 0xFFFF;
3113         req->nic_desc.mcc_count = 0xFFFF;
3114         req->nic_desc.vlan_count = 0xFFFF;
3115         req->nic_desc.mcast_mac_count = 0xFFFF;
3116         req->nic_desc.txq_count = 0xFFFF;
3117         req->nic_desc.rq_count = 0xFFFF;
3118         req->nic_desc.rssq_count = 0xFFFF;
3119         req->nic_desc.lro_count = 0xFFFF;
3120         req->nic_desc.cq_count = 0xFFFF;
3121         req->nic_desc.toe_conn_count = 0xFFFF;
3122         req->nic_desc.eq_count = 0xFFFF;
3123         req->nic_desc.link_param = 0xFF;
3124         req->nic_desc.bw_min = 0xFFFFFFFF;
3125         req->nic_desc.acpi_params = 0xFF;
3126         req->nic_desc.wol_param = 0x0F;
3127
3128         /* Change BW */
3129         req->nic_desc.bw_min = cpu_to_le32(bps);
3130         req->nic_desc.bw_max = cpu_to_le32(bps);
3131         status = be_mcc_notify_wait(adapter);
3132 err:
3133         spin_unlock_bh(&adapter->mcc_lock);
3134         return status;
3135 }
3136
3137 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
3138                      int vf_num)
3139 {
3140         struct be_mcc_wrb *wrb;
3141         struct be_cmd_req_get_iface_list *req;
3142         struct be_cmd_resp_get_iface_list *resp;
3143         int status;
3144
3145         spin_lock_bh(&adapter->mcc_lock);
3146
3147         wrb = wrb_from_mccq(adapter);
3148         if (!wrb) {
3149                 status = -EBUSY;
3150                 goto err;
3151         }
3152         req = embedded_payload(wrb);
3153
3154         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3155                                OPCODE_COMMON_GET_IFACE_LIST, sizeof(*resp),
3156                                wrb, NULL);
3157         req->hdr.domain = vf_num + 1;
3158
3159         status = be_mcc_notify_wait(adapter);
3160         if (!status) {
3161                 resp = (struct be_cmd_resp_get_iface_list *)req;
3162                 vf_cfg->if_handle = le32_to_cpu(resp->if_desc.if_id);
3163         }
3164
3165 err:
3166         spin_unlock_bh(&adapter->mcc_lock);
3167         return status;
3168 }
3169
3170 /* Uses sync mcc */
3171 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
3172 {
3173         struct be_mcc_wrb *wrb;
3174         struct be_cmd_enable_disable_vf *req;
3175         int status;
3176
3177         if (!lancer_chip(adapter))
3178                 return 0;
3179
3180         spin_lock_bh(&adapter->mcc_lock);
3181
3182         wrb = wrb_from_mccq(adapter);
3183         if (!wrb) {
3184                 status = -EBUSY;
3185                 goto err;
3186         }
3187
3188         req = embedded_payload(wrb);
3189
3190         be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
3191                                OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req),
3192                                wrb, NULL);
3193
3194         req->hdr.domain = domain;
3195         req->enable = 1;
3196         status = be_mcc_notify_wait(adapter);
3197 err:
3198         spin_unlock_bh(&adapter->mcc_lock);
3199         return status;
3200 }
3201
/* Passthrough for the RoCE driver: sends a caller-built MCC command
 * (@wrb_payload, @wrb_payload_size bytes, starting with a be_cmd_req_hdr)
 * through this function's MCC queue and copies the response back into
 * the same buffer.  @cmd_status/@ext_status, when non-NULL, receive the
 * (masked) completion status and 0 respectively.
 */
int be_roce_mcc_cmd(void *netdev_handle, void *wrb_payload,
			int wrb_payload_size, u16 *cmd_status, u16 *ext_status)
{
	struct be_adapter *adapter = netdev_priv(netdev_handle);
	struct be_mcc_wrb *wrb;
	/* Caller's payload begins with a request header we re-use below */
	struct be_cmd_req_hdr *hdr = (struct be_cmd_req_hdr *) wrb_payload;
	struct be_cmd_req_hdr *req;
	struct be_cmd_resp_hdr *resp;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	/* Request and response share the same embedded payload area */
	req = embedded_payload(wrb);
	resp = embedded_payload(wrb);

	/* Prepare the header with the opcode/subsystem the caller chose,
	 * then overwrite the whole payload with the caller's bytes and
	 * convert them to the f/w's little-endian layout.
	 */
	be_wrb_cmd_hdr_prepare(req, hdr->subsystem,
			       hdr->opcode, wrb_payload_size, wrb, NULL);
	memcpy(req, wrb_payload, wrb_payload_size);
	be_dws_cpu_to_le(req, wrb_payload_size);

	status = be_mcc_notify_wait(adapter);
	if (cmd_status)
		*cmd_status = (status & 0xffff);
	if (ext_status)
		*ext_status = 0;
	/* NOTE(review): the response is copied back even when
	 * be_mcc_notify_wait() returned a non-zero status — callers
	 * appear to rely on cmd_status rather than the return value.
	 */
	memcpy(wrb_payload, resp, sizeof(*resp) + resp->response_length);
	be_dws_le_to_cpu(wrb_payload, sizeof(*resp) + resp->response_length);
err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
EXPORT_SYMBOL(be_roce_mcc_cmd);