be2net: Fix crash on 2nd invocation of PCI AER/EEH error_detected hook
firefly-linux-kernel-4.4.55.git: drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("Emulex Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
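
/* Usage sketch (hypothetical values): the module parameters above can be
 * supplied at load time, e.g.:
 *   modprobe be2net num_vfs=4 rx_frag_size=4096
 */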
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter) {
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

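/* Allocate a DMA-coherent ring for a queue; __GFP_ZERO hands back a
 * zero-filled ring, so no separate memset of the descriptors is needed.
 */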
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL | __GFP_ZERO);
        if (!mem->va)
                return -ENOMEM;
        return 0;
}

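/* Toggle host interrupts via the HOSTINTR bit in the PCI config-space
 * MEMBAR control register; be_intr_set() falls back to this path when the
 * mailbox command be_cmd_intr_set() fails.
 */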
static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        int status = 0;

        /* On lancer interrupts can't be controlled via this register */
        if (lancer_chip(adapter))
                return;

        if (adapter->eeh_error)
                return;

        status = be_cmd_intr_set(adapter, enable);
        if (status)
                be_reg_intr_set(adapter, enable);
}

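/* Doorbell helpers: each write tells the HW how many entries were posted to
 * or consumed from a ring. The wmb() ensures the ring-memory updates are
 * visible to the device before the doorbell write is issued.
 */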
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
                          u16 posted)
{
        u32 val = 0;
        val |= txo->q.id & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + txo->db_offset);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

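/* ndo_set_mac_address handler: programs a new primary MAC. On a BE VF the
 * PF has already activated the MAC, so only netdev->dev_addr is updated.
 */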
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user passes the same MAC that was used
         * when configuring the VF MAC from the PF (hypervisor).
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_filtered =
                                        port_stats->rx_address_filtered +
                                        port_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_filtered = port_stats->rx_address_filtered;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_filtered =
                                        pport_stats->rx_address_filtered +
                                        pport_stats->rx_vlan_filtered;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

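/* Fold a 16-bit HW counter into a 32-bit accumulator: if the new reading is
 * smaller than the low half of the accumulator, the HW counter wrapped, so
 * add one full 2^16 period before storing the result.
 */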
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void populate_erx_stats(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        u32 erx_stat)
{
        if (!BEx_chip(adapter))
                rx_stats(rxo)->rx_drops_no_frags = erx_stat;
        else
                /* The erx HW counter below wraps around after 65535; the
                 * driver accumulates it into a 32-bit value.
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                     (u16)erx_stat);
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;
        u32 erx_stat;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
                        populate_erx_stats(adapter, rxo, erx_stat);
                }
        }
}

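/* ndo_get_stats64: per-queue totals are read inside the u64_stats retry
 * loop so a 64-bit counter update in progress on another CPU is never seen
 * torn.
 */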
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* rx_drops_no_pbuf is not per i/f; it is per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

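/* Populate the header WRB that precedes the data WRBs of a Tx request:
 * checksum/LSO hints, VLAN tag, WRB count and total frame length. With
 * skip_hw_vlan, event is set but completion is cleared to suppress HW VLAN
 * tagging for this frame.
 */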
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        /* To skip HW VLAN tagging: evt = 1, compl = 0 */
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

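/* Map the skb head and frags for DMA and fill one WRB per mapping. On a
 * mapping failure all WRBs filled so far are unwound and unmapped, and 0
 * is returned so the caller can drop the skb.
 */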
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
                bool skip_hw_vlan)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

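/* Software VLAN insertion: works around HW csum/lockup issues by placing
 * the tag (and the QnQ outer tag, if configured) in the packet itself and
 * asking the caller to skip HW VLAN tagging for this frame.
 */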
static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb,
                                             bool *skip_hw_vlan)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb))
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
        else if (qnq_async_evt_rcvd(adapter) && adapter->pvid)
                vlan_tag = adapter->pvid;

        if (vlan_tag) {
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                skb->vlan_tci = 0;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        /* Insert the outer VLAN, if any */
        if (adapter->qnq_vid) {
                vlan_tag = adapter->qnq_vid;
                skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
                if (unlikely(!skb))
                        return skb;
                if (skip_hw_vlan)
                        *skip_hw_vlan = true;
        }

        return skb;
}

static bool be_ipv6_exthdr_check(struct sk_buff *skb)
{
        struct ethhdr *eh = (struct ethhdr *)skb->data;
        u16 offset = ETH_HLEN;

        if (eh->h_proto == htons(ETH_P_IPV6)) {
                struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);

                offset += sizeof(struct ipv6hdr);
                if (ip6h->nexthdr != NEXTHDR_TCP &&
                    ip6h->nexthdr != NEXTHDR_UDP) {
                        struct ipv6_opt_hdr *ehdr =
                                (struct ipv6_opt_hdr *) (skb->data + offset);

                        /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
                        if (ehdr->hdrlen == 0xff)
                                return true;
                }
        }
        return false;
}

static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
}

static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return BE3_chip(adapter) &&
                be_ipv6_exthdr_check(skb);
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;
        bool skip_hw_vlan = false;
        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* For padded packets, BE HW modifies the tot_len field in the IP
         * header incorrectly when a VLAN tag is inserted by HW.
         */
        if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* If vlan tag is already inlined in the packet, skip HW VLAN
         * tagging in UMC mode
         */
        if ((adapter->function_mode & UMC_ENABLED) &&
            veh->h_vlan_proto == htons(ETH_P_8021Q))
                skip_hw_vlan = true;

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        vlan_tx_tag_present(skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        /* HW may lock up when VLAN HW tagging is requested on
         * certain ipv6 packets. Drop such pkts if the HW workaround to
         * skip HW tagging is not enabled by FW.
         */
        if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
                     (adapter->pvid || adapter->qnq_vid) &&
                     !qnq_async_evt_rcvd(adapter)))
                goto tx_drop;

        /* Manual VLAN tag insertion to prevent:
         * ASIC lockup when the ASIC inserts VLAN tag into
         * certain ipv6 packets. Insert VLAN tags in driver,
         * and set event, completion, vlan bits accordingly
         * in the Tx WRB.
         */
        if (be_ipv6_tx_stall_chk(adapter, skb) &&
            be_vlan_tag_tx_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
                              skip_hw_vlan);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit, which will wake up the
                 * queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txo, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

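/* ndo_set_rx_mode: ordering matters here; full promiscuous mode
 * short-circuits all other filtering, and multicast promiscuous mode is
 * used once the configured multicast or unicast addresses exceed what the
 * HW filter tables can hold.
 */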
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

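/* Walk the PCI bus and count this PF's virtfns; with vf_state == ASSIGNED,
 * only VFs currently assigned to a guest are counted.
 */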
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

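/* Adaptive EQ-delay update: derive a packets-per-second rate for the RX
 * queue behind this EQ (sampled at most once per second) and scale the
 * interrupt delay between min_eqd and max_eqd accordingly.
 */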
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

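/* Look up the page_info for an RX frag index, unmapping the backing page
 * from DMA when the last user of that page is being consumed.
 */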
1336 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1337                                                 u16 frag_idx)
1338 {
1339         struct be_adapter *adapter = rxo->adapter;
1340         struct be_rx_page_info *rx_page_info;
1341         struct be_queue_info *rxq = &rxo->q;
1342
1343         rx_page_info = &rxo->page_info_tbl[frag_idx];
1344         BUG_ON(!rx_page_info->page);
1345
1346         if (rx_page_info->last_page_user) {
1347                 dma_unmap_page(&adapter->pdev->dev,
1348                                dma_unmap_addr(rx_page_info, bus),
1349                                adapter->big_page_size, DMA_FROM_DEVICE);
1350                 rx_page_info->last_page_user = false;
1351         }
1352
1353         atomic_dec(&rxq->used);
1354         return rx_page_info;
1355 }
1356
1357 /* Throwaway the data in the Rx completion */
1358 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1359                                 struct be_rx_compl_info *rxcp)
1360 {
1361         struct be_queue_info *rxq = &rxo->q;
1362         struct be_rx_page_info *page_info;
1363         u16 i, num_rcvd = rxcp->num_rcvd;
1364
1365         for (i = 0; i < num_rcvd; i++) {
1366                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1367                 put_page(page_info->page);
1368                 memset(page_info, 0, sizeof(*page_info));
1369                 index_inc(&rxcp->rxq_idx, rxq->len);
1370         }
1371 }
1372
1373 /*
1374  * skb_fill_rx_data forms a complete skb for an ether frame
1375  * indicated by rxcp.
1376  */
1377 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1378                              struct be_rx_compl_info *rxcp)
1379 {
1380         struct be_queue_info *rxq = &rxo->q;
1381         struct be_rx_page_info *page_info;
1382         u16 i, j;
1383         u16 hdr_len, curr_frag_len, remaining;
1384         u8 *start;
1385
1386         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1387         start = page_address(page_info->page) + page_info->page_offset;
1388         prefetch(start);
1389
1390         /* Copy data in the first descriptor of this completion */
1391         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1392
1393         skb->len = curr_frag_len;
1394         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1395                 memcpy(skb->data, start, curr_frag_len);
1396                 /* Complete packet has now been moved to data */
1397                 put_page(page_info->page);
1398                 skb->data_len = 0;
1399                 skb->tail += curr_frag_len;
1400         } else {
1401                 hdr_len = ETH_HLEN;
1402                 memcpy(skb->data, start, hdr_len);
1403                 skb_shinfo(skb)->nr_frags = 1;
1404                 skb_frag_set_page(skb, 0, page_info->page);
1405                 skb_shinfo(skb)->frags[0].page_offset =
1406                                         page_info->page_offset + hdr_len;
1407                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1408                 skb->data_len = curr_frag_len - hdr_len;
1409                 skb->truesize += rx_frag_size;
1410                 skb->tail += hdr_len;
1411         }
1412         page_info->page = NULL;
1413
1414         if (rxcp->pkt_size <= rx_frag_size) {
1415                 BUG_ON(rxcp->num_rcvd != 1);
1416                 return;
1417         }
1418
1419         /* More frags present for this completion */
1420         index_inc(&rxcp->rxq_idx, rxq->len);
1421         remaining = rxcp->pkt_size - curr_frag_len;
1422         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1423                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1424                 curr_frag_len = min(remaining, rx_frag_size);
1425
1426                 /* Coalesce all frags from the same physical page in one slot */
1427                 if (page_info->page_offset == 0) {
1428                         /* Fresh page */
1429                         j++;
1430                         skb_frag_set_page(skb, j, page_info->page);
1431                         skb_shinfo(skb)->frags[j].page_offset =
1432                                                         page_info->page_offset;
1433                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1434                         skb_shinfo(skb)->nr_frags++;
1435                 } else {
1436                         put_page(page_info->page);
1437                 }
1438
1439                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1440                 skb->len += curr_frag_len;
1441                 skb->data_len += curr_frag_len;
1442                 skb->truesize += rx_frag_size;
1443                 remaining -= curr_frag_len;
1444                 index_inc(&rxcp->rxq_idx, rxq->len);
1445                 page_info->page = NULL;
1446         }
1447         BUG_ON(j > MAX_SKB_FRAGS);
1448 }
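/* Worked example for the routine above (illustrative only; assumes 4K
 * pages, so big_page_size = 4096 and two 2048-byte frags per page): a
 * 6000-byte frame arrives in three RX descriptors. The 14-byte Ethernet
 * header is copied into the skb's linear area and the rest of descriptor
 * 0 (2034 bytes) becomes page frag[0]. Descriptor 1 sits at offset 2048
 * of the same page, so it is coalesced into frag[0] (now 4082 bytes) and
 * its extra page reference is dropped. Descriptor 2 starts a fresh page
 * and becomes frag[1] (1904 bytes): 14 + 4082 + 1904 = 6000 bytes.
 */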
1449
1450 /* Process the RX completion indicated by rxcp when GRO is disabled */
1451 static void be_rx_compl_process(struct be_rx_obj *rxo,
1452                                 struct be_rx_compl_info *rxcp)
1453 {
1454         struct be_adapter *adapter = rxo->adapter;
1455         struct net_device *netdev = adapter->netdev;
1456         struct sk_buff *skb;
1457
1458         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1459         if (unlikely(!skb)) {
1460                 rx_stats(rxo)->rx_drops_no_skbs++;
1461                 be_rx_compl_discard(rxo, rxcp);
1462                 return;
1463         }
1464
1465         skb_fill_rx_data(rxo, skb, rxcp);
1466
1467         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1468                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1469         else
1470                 skb_checksum_none_assert(skb);
1471
1472         skb->protocol = eth_type_trans(skb, netdev);
1473         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1474         if (netdev->features & NETIF_F_RXHASH)
1475                 skb->rxhash = rxcp->rss_hash;
1476
1478         if (rxcp->vlanf)
1479                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1480
1481         netif_receive_skb(skb);
1482 }
1483
1484 /* Process the RX completion indicated by rxcp when GRO is enabled */
1485 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1486                              struct be_rx_compl_info *rxcp)
1487 {
1488         struct be_adapter *adapter = rxo->adapter;
1489         struct be_rx_page_info *page_info;
1490         struct sk_buff *skb = NULL;
1491         struct be_queue_info *rxq = &rxo->q;
1492         u16 remaining, curr_frag_len;
1493         u16 i, j;
1494
1495         skb = napi_get_frags(napi);
1496         if (!skb) {
1497                 be_rx_compl_discard(rxo, rxcp);
1498                 return;
1499         }
1500
1501         remaining = rxcp->pkt_size;
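        /* Note: j is u16, so seeding it with -1 relies on unsigned
         * wraparound; the i == 0 case below increments it to 0 before
         * it is first used as a frag index.
         */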
1502         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1503                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1504
1505                 curr_frag_len = min(remaining, rx_frag_size);
1506
1507                 /* Coalesce all frags from the same physical page in one slot */
1508                 if (i == 0 || page_info->page_offset == 0) {
1509                         /* First frag or Fresh page */
1510                         j++;
1511                         skb_frag_set_page(skb, j, page_info->page);
1512                         skb_shinfo(skb)->frags[j].page_offset =
1513                                                         page_info->page_offset;
1514                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1515                 } else {
1516                         put_page(page_info->page);
1517                 }
1518                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1519                 skb->truesize += rx_frag_size;
1520                 remaining -= curr_frag_len;
1521                 index_inc(&rxcp->rxq_idx, rxq->len);
1522                 memset(page_info, 0, sizeof(*page_info));
1523         }
1524         BUG_ON(j > MAX_SKB_FRAGS);
1525
1526         skb_shinfo(skb)->nr_frags = j + 1;
1527         skb->len = rxcp->pkt_size;
1528         skb->data_len = rxcp->pkt_size;
1529         skb->ip_summed = CHECKSUM_UNNECESSARY;
1530         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1531         if (adapter->netdev->features & NETIF_F_RXHASH)
1532                 skb->rxhash = rxcp->rss_hash;
1533
1534         if (rxcp->vlanf)
1535                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1536
1537         napi_gro_frags(napi);
1538 }
1539
1540 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1541                                  struct be_rx_compl_info *rxcp)
1542 {
1543         rxcp->pkt_size =
1544                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1545         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1546         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1547         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1548         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1549         rxcp->ip_csum =
1550                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1551         rxcp->l4_csum =
1552                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1553         rxcp->ipv6 =
1554                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1555         rxcp->rxq_idx =
1556                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1557         rxcp->num_rcvd =
1558                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1559         rxcp->pkt_type =
1560                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1561         rxcp->rss_hash =
1562                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1563         if (rxcp->vlanf) {
1564                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1565                                           compl);
1566                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1567                                                compl);
1568         }
1569         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1570 }
1571
1572 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1573                                  struct be_rx_compl_info *rxcp)
1574 {
1575         rxcp->pkt_size =
1576                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1577         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1578         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1579         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1580         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1581         rxcp->ip_csum =
1582                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1583         rxcp->l4_csum =
1584                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1585         rxcp->ipv6 =
1586                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1587         rxcp->rxq_idx =
1588                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1589         rxcp->num_rcvd =
1590                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1591         rxcp->pkt_type =
1592                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1593         rxcp->rss_hash =
1594                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1595         if (rxcp->vlanf) {
1596                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1597                                           compl);
1598                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1599                                                compl);
1600         }
1601         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1602         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1603                                       ip_frag, compl);
1604 }
1605
1606 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1607 {
1608         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1609         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1610         struct be_adapter *adapter = rxo->adapter;
1611
1612         /* For checking the valid bit it is OK to use either definition, as the
1613          * valid bit is at the same position in both v0 and v1 Rx compls */
1614         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1615                 return NULL;
1616
1617         rmb();
1618         be_dws_le_to_cpu(compl, sizeof(*compl));
1619
1620         if (adapter->be3_native)
1621                 be_parse_rx_compl_v1(compl, rxcp);
1622         else
1623                 be_parse_rx_compl_v0(compl, rxcp);
1624
1625         if (rxcp->ip_frag)
1626                 rxcp->l4_csum = 0;
1627
1628         if (rxcp->vlanf) {
1629                 /* vlanf could be wrongly set in some cards.
1630                  * Ignore it if vtm is not set */
1631                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1632                         rxcp->vlanf = 0;
1633
1634                 if (!lancer_chip(adapter))
1635                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1636
1637                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1638                     !adapter->vlan_tag[rxcp->vlan_tag])
1639                         rxcp->vlanf = 0;
1640         }
1641
1642         /* As the compl has been parsed, reset it; we won't touch it again */
1643         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1644
1645         queue_tail_inc(&rxo->cq);
1646         return rxcp;
1647 }
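/* Sketch of the completion-ring consumer protocol used above
 * (illustrative summary, not additional driver logic):
 *
 *	HW sets 'valid' last -> rmb() -> parse payload -> clear 'valid'
 *	-> queue_tail_inc()
 *
 * The rmb() keeps the payload reads from being speculated ahead of the
 * valid-bit check, and clearing 'valid' after parsing lets the same slot
 * be recognized as unconsumed on the next wrap of the ring.
 */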
1648
1649 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1650 {
1651         u32 order = get_order(size);
1652
1653         if (order > 0)
1654                 gfp |= __GFP_COMP;
1655         return  alloc_pages(gfp, order);
1656 }
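/* __GFP_COMP makes an order > 0 allocation a compound page, so the whole
 * multi-fragment buffer is reference-counted on its head page; each rx
 * frag carved out of it below takes and drops references via get_page()/
 * put_page().
 */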
1657
1658 /*
1659  * Allocate a page, split it to fragments of size rx_frag_size and post as
1660  * receive buffers to BE
1661  */
1662 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1663 {
1664         struct be_adapter *adapter = rxo->adapter;
1665         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1666         struct be_queue_info *rxq = &rxo->q;
1667         struct page *pagep = NULL;
1668         struct be_eth_rx_d *rxd;
1669         u64 page_dmaaddr = 0, frag_dmaaddr;
1670         u32 posted, page_offset = 0;
1671
1672         page_info = &rxo->page_info_tbl[rxq->head];
1673         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1674                 if (!pagep) {
1675                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1676                         if (unlikely(!pagep)) {
1677                                 rx_stats(rxo)->rx_post_fail++;
1678                                 break;
1679                         }
1680                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1681                                                     0, adapter->big_page_size,
1682                                                     DMA_FROM_DEVICE);
1683                         page_info->page_offset = 0;
1684                 } else {
1685                         get_page(pagep);
1686                         page_info->page_offset = page_offset + rx_frag_size;
1687                 }
1688                 page_offset = page_info->page_offset;
1689                 page_info->page = pagep;
1690                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1691                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1692
1693                 rxd = queue_head_node(rxq);
1694                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1695                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1696
1697                 /* Any space left in the current big page for another frag? */
1698                 if ((page_offset + rx_frag_size + rx_frag_size) >
1699                                         adapter->big_page_size) {
1700                         pagep = NULL;
1701                         page_info->last_page_user = true;
1702                 }
1703
1704                 prev_page_info = page_info;
1705                 queue_head_inc(rxq);
1706                 page_info = &rxo->page_info_tbl[rxq->head];
1707         }
1708         if (pagep)
1709                 prev_page_info->last_page_user = true;
1710
1711         if (posted) {
1712                 atomic_add(posted, &rxq->used);
1713                 be_rxq_notify(adapter, rxq->id, posted);
1714         } else if (atomic_read(&rxq->used) == 0) {
1715                 /* Let be_worker replenish when memory is available */
1716                 rxo->rx_post_starved = true;
1717         }
1718 }
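/* Layout sketch for the routine above: with 4K pages and the default
 * rx_frag_size of 2048, big_page_size is 4096 and each posted page is
 * carved into two descriptors:
 *
 *	| frag 0 (offset 0) | frag 1 (offset 2048) |   <- one "big" page
 *
 * Only the page_info of the last fragment carved from a page has
 * last_page_user set; get_rx_page_info() uses that marker to DMA-unmap
 * the page exactly once, when its final fragment is consumed.
 */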
1719
1720 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1721 {
1722         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1723
1724         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1725                 return NULL;
1726
1727         rmb();
1728         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1729
1730         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1731
1732         queue_tail_inc(tx_cq);
1733         return txcp;
1734 }
1735
1736 static u16 be_tx_compl_process(struct be_adapter *adapter,
1737                 struct be_tx_obj *txo, u16 last_index)
1738 {
1739         struct be_queue_info *txq = &txo->q;
1740         struct be_eth_wrb *wrb;
1741         struct sk_buff **sent_skbs = txo->sent_skb_list;
1742         struct sk_buff *sent_skb;
1743         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1744         bool unmap_skb_hdr = true;
1745
1746         sent_skb = sent_skbs[txq->tail];
1747         BUG_ON(!sent_skb);
1748         sent_skbs[txq->tail] = NULL;
1749
1750         /* skip header wrb */
1751         queue_tail_inc(txq);
1752
1753         do {
1754                 cur_index = txq->tail;
1755                 wrb = queue_tail_node(txq);
1756                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1757                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1758                 unmap_skb_hdr = false;
1759
1760                 num_wrbs++;
1761                 queue_tail_inc(txq);
1762         } while (cur_index != last_index);
1763
1764         kfree_skb(sent_skb);
1765         return num_wrbs;
1766 }
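/* Accounting note for the routine above: a completed skb occupies one
 * header wrb plus one wrb per DMA-mapped piece (the linear header, if
 * any, each page frag, and on some paths a padding "dummy" wrb).
 * num_wrbs starts at 1 for the header wrb and the loop walks the data
 * wrbs up to last_index, unmapping each; the first data wrb is treated
 * as the skb-header mapping whenever skb_headlen() is non-zero.
 */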
1767
1768 /* Return the number of events in the event queue */
1769 static inline int events_get(struct be_eq_obj *eqo)
1770 {
1771         struct be_eq_entry *eqe;
1772         int num = 0;
1773
1774         do {
1775                 eqe = queue_tail_node(&eqo->q);
1776                 if (eqe->evt == 0)
1777                         break;
1778
1779                 rmb();
1780                 eqe->evt = 0;
1781                 num++;
1782                 queue_tail_inc(&eqo->q);
1783         } while (true);
1784
1785         return num;
1786 }
1787
1788 /* Leaves the EQ in a disarmed state */
1789 static void be_eq_clean(struct be_eq_obj *eqo)
1790 {
1791         int num = events_get(eqo);
1792
1793         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1794 }
1795
1796 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1797 {
1798         struct be_rx_page_info *page_info;
1799         struct be_queue_info *rxq = &rxo->q;
1800         struct be_queue_info *rx_cq = &rxo->cq;
1801         struct be_rx_compl_info *rxcp;
1802         struct be_adapter *adapter = rxo->adapter;
1803         int flush_wait = 0;
1804         u16 tail;
1805
1806         /* Consume pending rx completions.
1807          * Wait for the flush completion (identified by zero num_rcvd)
1808          * to arrive. Notify CQ even when there are no more CQ entries
1809          * for HW to flush partially coalesced CQ entries.
1810          * In Lancer, there is no need to wait for flush compl.
1811          */
1812         for (;;) {
1813                 rxcp = be_rx_compl_get(rxo);
1814                 if (rxcp == NULL) {
1815                         if (lancer_chip(adapter))
1816                                 break;
1817
1818                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1819                                 dev_warn(&adapter->pdev->dev,
1820                                          "did not receive flush compl\n");
1821                                 break;
1822                         }
1823                         be_cq_notify(adapter, rx_cq->id, true, 0);
1824                         mdelay(1);
1825                 } else {
1826                         be_rx_compl_discard(rxo, rxcp);
1827                         be_cq_notify(adapter, rx_cq->id, false, 1);
1828                         if (rxcp->num_rcvd == 0)
1829                                 break;
1830                 }
1831         }
1832
1833         /* After cleanup, leave the CQ in unarmed state */
1834         be_cq_notify(adapter, rx_cq->id, false, 0);
1835
1836         /* Then free posted rx buffers that were not used */
1837         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1838         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1839                 page_info = get_rx_page_info(rxo, tail);
1840                 put_page(page_info->page);
1841                 memset(page_info, 0, sizeof(*page_info));
1842         }
1843         BUG_ON(atomic_read(&rxq->used));
1844         rxq->tail = rxq->head = 0;
1845 }
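/* The tail computation above recovers the index of the oldest posted
 * buffer. For example, with rxq->len = 1024, head = 10 and 16 buffers
 * still posted, tail = (10 + 1024 - 16) % 1024 = 1018, and the loop
 * frees entries 1018..1023 and then 0..9, wrapping around the ring.
 */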
1846
1847 static void be_tx_compl_clean(struct be_adapter *adapter)
1848 {
1849         struct be_tx_obj *txo;
1850         struct be_queue_info *txq;
1851         struct be_eth_tx_compl *txcp;
1852         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1853         struct sk_buff *sent_skb;
1854         bool dummy_wrb;
1855         int i, pending_txqs;
1856
1857         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1858         do {
1859                 pending_txqs = adapter->num_tx_qs;
1860
1861                 for_all_tx_queues(adapter, txo, i) {
1862                         txq = &txo->q;
1863                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1864                                 end_idx =
1865                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1866                                                       wrb_index, txcp);
1867                                 num_wrbs += be_tx_compl_process(adapter, txo,
1868                                                                 end_idx);
1869                                 cmpl++;
1870                         }
1871                         if (cmpl) {
1872                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1873                                 atomic_sub(num_wrbs, &txq->used);
1874                                 cmpl = 0;
1875                                 num_wrbs = 0;
1876                         }
1877                         if (atomic_read(&txq->used) == 0)
1878                                 pending_txqs--;
1879                 }
1880
1881                 if (pending_txqs == 0 || ++timeo > 200)
1882                         break;
1883
1884                 mdelay(1);
1885         } while (true);
1886
1887         for_all_tx_queues(adapter, txo, i) {
1888                 txq = &txo->q;
1889                 if (atomic_read(&txq->used))
1890                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1891                                 atomic_read(&txq->used));
1892
1893                 /* free posted tx for which compls will never arrive */
1894                 while (atomic_read(&txq->used)) {
1895                         sent_skb = txo->sent_skb_list[txq->tail];
1896                         end_idx = txq->tail;
1897                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1898                                                    &dummy_wrb);
1899                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1900                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1901                         atomic_sub(num_wrbs, &txq->used);
1902                 }
1903         }
1904 }
1905
1906 static void be_evt_queues_destroy(struct be_adapter *adapter)
1907 {
1908         struct be_eq_obj *eqo;
1909         int i;
1910
1911         for_all_evt_queues(adapter, eqo, i) {
1912                 if (eqo->q.created) {
1913                         be_eq_clean(eqo);
1914                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1915                 }
1916                 be_queue_free(adapter, &eqo->q);
1917         }
1918 }
1919
1920 static int be_evt_queues_create(struct be_adapter *adapter)
1921 {
1922         struct be_queue_info *eq;
1923         struct be_eq_obj *eqo;
1924         int i, rc;
1925
1926         adapter->num_evt_qs = num_irqs(adapter);
1927
1928         for_all_evt_queues(adapter, eqo, i) {
1929                 eqo->adapter = adapter;
1930                 eqo->tx_budget = BE_TX_BUDGET;
1931                 eqo->idx = i;
1932                 eqo->max_eqd = BE_MAX_EQD;
1933                 eqo->enable_aic = true;
1934
1935                 eq = &eqo->q;
1936                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1937                                         sizeof(struct be_eq_entry));
1938                 if (rc)
1939                         return rc;
1940
1941                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1942                 if (rc)
1943                         return rc;
1944         }
1945         return 0;
1946 }
1947
1948 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1949 {
1950         struct be_queue_info *q;
1951
1952         q = &adapter->mcc_obj.q;
1953         if (q->created)
1954                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1955         be_queue_free(adapter, q);
1956
1957         q = &adapter->mcc_obj.cq;
1958         if (q->created)
1959                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1960         be_queue_free(adapter, q);
1961 }
1962
1963 /* Must be called only after TX qs are created as MCC shares TX EQ */
1964 static int be_mcc_queues_create(struct be_adapter *adapter)
1965 {
1966         struct be_queue_info *q, *cq;
1967
1968         cq = &adapter->mcc_obj.cq;
1969         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1970                         sizeof(struct be_mcc_compl)))
1971                 goto err;
1972
1973         /* Use the default EQ for MCC completions */
1974         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1975                 goto mcc_cq_free;
1976
1977         q = &adapter->mcc_obj.q;
1978         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1979                 goto mcc_cq_destroy;
1980
1981         if (be_cmd_mccq_create(adapter, q, cq))
1982                 goto mcc_q_free;
1983
1984         return 0;
1985
1986 mcc_q_free:
1987         be_queue_free(adapter, q);
1988 mcc_cq_destroy:
1989         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1990 mcc_cq_free:
1991         be_queue_free(adapter, cq);
1992 err:
1993         return -1;
1994 }
1995
1996 static void be_tx_queues_destroy(struct be_adapter *adapter)
1997 {
1998         struct be_queue_info *q;
1999         struct be_tx_obj *txo;
2000         u8 i;
2001
2002         for_all_tx_queues(adapter, txo, i) {
2003                 q = &txo->q;
2004                 if (q->created)
2005                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2006                 be_queue_free(adapter, q);
2007
2008                 q = &txo->cq;
2009                 if (q->created)
2010                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2011                 be_queue_free(adapter, q);
2012         }
2013 }
2014
2015 static int be_num_txqs_want(struct be_adapter *adapter)
2016 {
2017         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2018             be_is_mc(adapter) ||
2019             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2020             BE2_chip(adapter))
2021                 return 1;
2022         else
2023                 return adapter->max_tx_queues;
2024 }
2025
2026 static int be_tx_cqs_create(struct be_adapter *adapter)
2027 {
2028         struct be_queue_info *cq, *eq;
2029         int status;
2030         struct be_tx_obj *txo;
2031         u8 i;
2032
2033         adapter->num_tx_qs = be_num_txqs_want(adapter);
2034         if (adapter->num_tx_qs != MAX_TX_QS) {
2035                 rtnl_lock();
2036                 netif_set_real_num_tx_queues(adapter->netdev,
2037                         adapter->num_tx_qs);
2038                 rtnl_unlock();
2039         }
2040
2041         for_all_tx_queues(adapter, txo, i) {
2042                 cq = &txo->cq;
2043                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2044                                         sizeof(struct be_eth_tx_compl));
2045                 if (status)
2046                         return status;
2047
2048                 /* If num_evt_qs is less than num_tx_qs, then more than
2049                  * one txq shares an eq
2050                  */
2051                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2052                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2053                 if (status)
2054                         return status;
2055         }
2056         return 0;
2057 }
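/* EQ-sharing example: with 8 TX queues and 4 event queues, the
 * "i % adapter->num_evt_qs" mapping above places txq0/txq4 on eq0,
 * txq1/txq5 on eq1, and so on. be_poll() later walks the same stride
 * (i += num_evt_qs starting from eqo->idx), so each EQ services exactly
 * the TXQs hashed onto it.
 */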
2058
2059 static int be_tx_qs_create(struct be_adapter *adapter)
2060 {
2061         struct be_tx_obj *txo;
2062         int i, status;
2063
2064         for_all_tx_queues(adapter, txo, i) {
2065                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2066                                         sizeof(struct be_eth_wrb));
2067                 if (status)
2068                         return status;
2069
2070                 status = be_cmd_txq_create(adapter, txo);
2071                 if (status)
2072                         return status;
2073         }
2074
2075         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2076                  adapter->num_tx_qs);
2077         return 0;
2078 }
2079
2080 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2081 {
2082         struct be_queue_info *q;
2083         struct be_rx_obj *rxo;
2084         int i;
2085
2086         for_all_rx_queues(adapter, rxo, i) {
2087                 q = &rxo->cq;
2088                 if (q->created)
2089                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2090                 be_queue_free(adapter, q);
2091         }
2092 }
2093
2094 static int be_rx_cqs_create(struct be_adapter *adapter)
2095 {
2096         struct be_queue_info *eq, *cq;
2097         struct be_rx_obj *rxo;
2098         int rc, i;
2099
2100         /* We'll create as many RSS rings as there are irqs.
2101          * But when there's only one irq there's no use creating RSS rings
2102          */
2103         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2104                                 num_irqs(adapter) + 1 : 1;
2105         if (adapter->num_rx_qs != MAX_RX_QS) {
2106                 rtnl_lock();
2107                 netif_set_real_num_rx_queues(adapter->netdev,
2108                                              adapter->num_rx_qs);
2109                 rtnl_unlock();
2110         }
2111
2112         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2113         for_all_rx_queues(adapter, rxo, i) {
2114                 rxo->adapter = adapter;
2115                 cq = &rxo->cq;
2116                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2117                                 sizeof(struct be_eth_rx_compl));
2118                 if (rc)
2119                         return rc;
2120
2121                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2122                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2123                 if (rc)
2124                         return rc;
2125         }
2126
2127         dev_info(&adapter->pdev->dev,
2128                  "created %d RSS queue(s) and 1 default RX queue\n",
2129                  adapter->num_rx_qs - 1);
2130         return 0;
2131 }
2132
2133 static irqreturn_t be_intx(int irq, void *dev)
2134 {
2135         struct be_eq_obj *eqo = dev;
2136         struct be_adapter *adapter = eqo->adapter;
2137         int num_evts = 0;
2138
2139         /* IRQ is not expected when NAPI is scheduled as the EQ
2140          * will not be armed.
2141          * But, this can happen on Lancer INTx where it takes
2142          * a while to de-assert INTx or in BE2 where occasionally
2143          * an interrupt may be raised even when EQ is unarmed.
2144          * If NAPI is already scheduled, then counting & notifying
2145          * events will orphan them.
2146          */
2147         if (napi_schedule_prep(&eqo->napi)) {
2148                 num_evts = events_get(eqo);
2149                 __napi_schedule(&eqo->napi);
2150                 if (num_evts)
2151                         eqo->spurious_intr = 0;
2152         }
2153         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2154
2155         /* Return IRQ_HANDLED only for the first spurious intr
2156          * after a valid intr to stop the kernel from branding
2157          * this irq as a bad one!
2158          */
2159         if (num_evts || eqo->spurious_intr++ == 0)
2160                 return IRQ_HANDLED;
2161         else
2162                 return IRQ_NONE;
2163 }
2164
2165 static irqreturn_t be_msix(int irq, void *dev)
2166 {
2167         struct be_eq_obj *eqo = dev;
2168
2169         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2170         napi_schedule(&eqo->napi);
2171         return IRQ_HANDLED;
2172 }
2173
2174 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2175 {
2176         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2177 }
2178
2179 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2180                         int budget)
2181 {
2182         struct be_adapter *adapter = rxo->adapter;
2183         struct be_queue_info *rx_cq = &rxo->cq;
2184         struct be_rx_compl_info *rxcp;
2185         u32 work_done;
2186
2187         for (work_done = 0; work_done < budget; work_done++) {
2188                 rxcp = be_rx_compl_get(rxo);
2189                 if (!rxcp)
2190                         break;
2191
2192                 /* Is it a flush compl that has no data? */
2193                 if (unlikely(rxcp->num_rcvd == 0))
2194                         goto loop_continue;
2195
2196                 /* Discard compl with partial DMA Lancer B0 */
2197                 if (unlikely(!rxcp->pkt_size)) {
2198                         be_rx_compl_discard(rxo, rxcp);
2199                         goto loop_continue;
2200                 }
2201
2202                 /* On BE drop pkts that arrive due to imperfect filtering in
2203                  * promiscuous mode on some SKUs
2204                  */
2205                 if (unlikely(rxcp->port != adapter->port_num &&
2206                                 !lancer_chip(adapter))) {
2207                         be_rx_compl_discard(rxo, rxcp);
2208                         goto loop_continue;
2209                 }
2210
2211                 if (do_gro(rxcp))
2212                         be_rx_compl_process_gro(rxo, napi, rxcp);
2213                 else
2214                         be_rx_compl_process(rxo, rxcp);
2215 loop_continue:
2216                 be_rx_stats_update(rxo, rxcp);
2217         }
2218
2219         if (work_done) {
2220                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2221
2222                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2223                         be_post_rx_frags(rxo, GFP_ATOMIC);
2224         }
2225
2226         return work_done;
2227 }
2228
2229 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2230                           int budget, int idx)
2231 {
2232         struct be_eth_tx_compl *txcp;
2233         int num_wrbs = 0, work_done;
2234
2235         for (work_done = 0; work_done < budget; work_done++) {
2236                 txcp = be_tx_compl_get(&txo->cq);
2237                 if (!txcp)
2238                         break;
2239                 num_wrbs += be_tx_compl_process(adapter, txo,
2240                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2241                                         wrb_index, txcp));
2242         }
2243
2244         if (work_done) {
2245                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2246                 atomic_sub(num_wrbs, &txo->q.used);
2247
2248                 /* As Tx wrbs have been freed up, wake up netdev queue
2249                  * if it was stopped due to lack of tx wrbs.  */
2250                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2251                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2252                         netif_wake_subqueue(adapter->netdev, idx);
2253                 }
2254
2255                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2256                 tx_stats(txo)->tx_compl += work_done;
2257                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2258         }
2259         return (work_done < budget); /* Done */
2260 }
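/* Note on the wake-up threshold above: a subqueue stopped when the TX
 * ring filled up is restarted only once at least half the ring is free
 * again. The gap between the stop and wake conditions keeps the queue
 * from flapping between stopped and started on every reclaimed wrb.
 */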
2261
2262 int be_poll(struct napi_struct *napi, int budget)
2263 {
2264         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2265         struct be_adapter *adapter = eqo->adapter;
2266         int max_work = 0, work, i, num_evts;
2267         bool tx_done;
2268
2269         num_evts = events_get(eqo);
2270
2271         /* Process all TXQs serviced by this EQ */
2272         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2273                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2274                                         eqo->tx_budget, i);
2275                 if (!tx_done)
2276                         max_work = budget;
2277         }
2278
2279         /* This loop will iterate twice for EQ0 in which
2280          * completions of the last RXQ (default one) are also processed.
2281          * For other EQs the loop iterates only once
2282          */
2283         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2284                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2285                 max_work = max(work, max_work);
2286         }
2287
2288         if (is_mcc_eqo(eqo))
2289                 be_process_mcc(adapter);
2290
2291         if (max_work < budget) {
2292                 napi_complete(napi);
2293                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2294         } else {
2295                 /* As we'll continue in polling mode, count and clear events */
2296                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2297         }
2298         return max_work;
2299 }
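/* NAPI contract recap: returning less than budget together with
 * napi_complete() tells the core this EQ is idle, so the EQ is re-armed
 * (arm == true) to raise an interrupt on the next event. When the budget
 * is exhausted the EQ stays unarmed and events are only counted and
 * cleared, since the core will call be_poll() again without an interrupt.
 */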
2300
2301 void be_detect_error(struct be_adapter *adapter)
2302 {
2303         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2304         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2305         u32 i;
2306
2307         if (be_hw_error(adapter))
2308                 return;
2309
2310         if (lancer_chip(adapter)) {
2311                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2312                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2313                         sliport_err1 = ioread32(adapter->db +
2314                                         SLIPORT_ERROR1_OFFSET);
2315                         sliport_err2 = ioread32(adapter->db +
2316                                         SLIPORT_ERROR2_OFFSET);
2317                 }
2318         } else {
2319                 pci_read_config_dword(adapter->pdev,
2320                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2321                 pci_read_config_dword(adapter->pdev,
2322                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2323                 pci_read_config_dword(adapter->pdev,
2324                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2325                 pci_read_config_dword(adapter->pdev,
2326                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2327
2328                 ue_lo = (ue_lo & ~ue_lo_mask);
2329                 ue_hi = (ue_hi & ~ue_hi_mask);
2330         }
2331
2332         /* On certain platforms BE hardware can indicate spurious UEs.
2333          * Allow the h/w to stop working completely in case of a real UE;
2334          * hence hw_error is not set for UE detection.
2335          */
2336         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2337                 adapter->hw_error = true;
2338                 dev_err(&adapter->pdev->dev,
2339                         "Error detected in the card\n");
2340                 dev_err(&adapter->pdev->dev,
2341                         "ERR: sliport status 0x%x\n", sliport_status);
2342                 dev_err(&adapter->pdev->dev,
2343                         "ERR: sliport error1 0x%x\n", sliport_err1);
2344                 dev_err(&adapter->pdev->dev,
2345                         "ERR: sliport error2 0x%x\n", sliport_err2);
2346         }
2350
2351         if (ue_lo) {
2352                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2353                         if (ue_lo & 1)
2354                                 dev_err(&adapter->pdev->dev,
2355                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2356                 }
2357         }
2358
2359         if (ue_hi) {
2360                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2361                         if (ue_hi & 1)
2362                                 dev_err(&adapter->pdev->dev,
2363                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2364                 }
2365         }
2366
2367 }
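/* Masking example for the UE path above: a bit set in a mask register
 * marks the matching status bit as expected noise. E.g. ue_lo = 0x5 with
 * ue_lo_mask = 0x4 leaves 0x1 after the "& ~mask", so only "CEV" (bit 0
 * of ue_status_low_desc) is reported.
 */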
2368
2369 static void be_msix_disable(struct be_adapter *adapter)
2370 {
2371         if (msix_enabled(adapter)) {
2372                 pci_disable_msix(adapter->pdev);
2373                 adapter->num_msix_vec = 0;
2374         }
2375 }
2376
2377 static uint be_num_rss_want(struct be_adapter *adapter)
2378 {
2379         u32 num = 0;
2380
2381         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2382             (lancer_chip(adapter) ||
2383              (!sriov_want(adapter) && be_physfn(adapter)))) {
2384                 num = adapter->max_rss_queues;
2385                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2386         }
2387         return num;
2388 }
2389
2390 static int be_msix_enable(struct be_adapter *adapter)
2391 {
2392 #define BE_MIN_MSIX_VECTORS             1
2393         int i, status, num_vec, num_roce_vec = 0;
2394         struct device *dev = &adapter->pdev->dev;
2395
2396         /* If RSS queues are not used, need a vec for default RX Q */
2397         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2398         if (be_roce_supported(adapter)) {
2399                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2400                                         (num_online_cpus() + 1));
2401                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2402                 num_vec += num_roce_vec;
2403                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2404         }
2405         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2406
2407         for (i = 0; i < num_vec; i++)
2408                 adapter->msix_entries[i].entry = i;
2409
2410         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2411         if (status == 0) {
2412                 goto done;
2413         } else if (status >= BE_MIN_MSIX_VECTORS) {
2414                 num_vec = status;
2415                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2416                                          num_vec);
2417                 if (!status)
2418                         goto done;
2419         }
2420
2421         dev_warn(dev, "MSIx enable failed\n");
2422         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2423         if (!be_physfn(adapter))
2424                 return status;
2425         return 0;
2426 done:
2427         if (be_roce_supported(adapter)) {
2428                 if (num_vec > num_roce_vec) {
2429                         adapter->num_msix_vec = num_vec - num_roce_vec;
2430                         adapter->num_msix_roce_vec =
2431                                 num_vec - adapter->num_msix_vec;
2432                 } else {
2433                         adapter->num_msix_vec = num_vec;
2434                         adapter->num_msix_roce_vec = 0;
2435                 }
2436         } else
2437                 adapter->num_msix_vec = num_vec;
2438         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2439         return 0;
2440 }
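/* Note on the retry above: the legacy pci_enable_msix() returns 0 on
 * success, a negative errno on failure, or a positive count of the
 * vectors actually available when fewer than requested exist. The second
 * call retries with that reduced count before giving up.
 */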
2441
2442 static inline int be_msix_vec_get(struct be_adapter *adapter,
2443                                 struct be_eq_obj *eqo)
2444 {
2445         return adapter->msix_entries[eqo->idx].vector;
2446 }
2447
2448 static int be_msix_register(struct be_adapter *adapter)
2449 {
2450         struct net_device *netdev = adapter->netdev;
2451         struct be_eq_obj *eqo;
2452         int status, i, vec;
2453
2454         for_all_evt_queues(adapter, eqo, i) {
2455                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2456                 vec = be_msix_vec_get(adapter, eqo);
2457                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2458                 if (status)
2459                         goto err_msix;
2460         }
2461
2462         return 0;
2463 err_msix:
2464         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2465                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2466         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2467                 status);
2468         be_msix_disable(adapter);
2469         return status;
2470 }
2471
2472 static int be_irq_register(struct be_adapter *adapter)
2473 {
2474         struct net_device *netdev = adapter->netdev;
2475         int status;
2476
2477         if (msix_enabled(adapter)) {
2478                 status = be_msix_register(adapter);
2479                 if (status == 0)
2480                         goto done;
2481                 /* INTx is not supported for VF */
2482                 if (!be_physfn(adapter))
2483                         return status;
2484         }
2485
2486         /* INTx: only the first EQ is used */
2487         netdev->irq = adapter->pdev->irq;
2488         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2489                              &adapter->eq_obj[0]);
2490         if (status) {
2491                 dev_err(&adapter->pdev->dev,
2492                         "INTx request IRQ failed - err %d\n", status);
2493                 return status;
2494         }
2495 done:
2496         adapter->isr_registered = true;
2497         return 0;
2498 }
2499
2500 static void be_irq_unregister(struct be_adapter *adapter)
2501 {
2502         struct net_device *netdev = adapter->netdev;
2503         struct be_eq_obj *eqo;
2504         int i;
2505
2506         if (!adapter->isr_registered)
2507                 return;
2508
2509         /* INTx */
2510         if (!msix_enabled(adapter)) {
2511                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2512                 goto done;
2513         }
2514
2515         /* MSIx */
2516         for_all_evt_queues(adapter, eqo, i)
2517                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2518
2519 done:
2520         adapter->isr_registered = false;
2521 }
2522
2523 static void be_rx_qs_destroy(struct be_adapter *adapter)
2524 {
2525         struct be_queue_info *q;
2526         struct be_rx_obj *rxo;
2527         int i;
2528
2529         for_all_rx_queues(adapter, rxo, i) {
2530                 q = &rxo->q;
2531                 if (q->created) {
2532                         be_cmd_rxq_destroy(adapter, q);
2533                         be_rx_cq_clean(rxo);
2534                 }
2535                 be_queue_free(adapter, q);
2536         }
2537 }
2538
2539 static int be_close(struct net_device *netdev)
2540 {
2541         struct be_adapter *adapter = netdev_priv(netdev);
2542         struct be_eq_obj *eqo;
2543         int i;
2544
2545         be_roce_dev_close(adapter);
2546
2547         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2548                 for_all_evt_queues(adapter, eqo, i)
2549                         napi_disable(&eqo->napi);
2550                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2551         }
2552
2553         be_async_mcc_disable(adapter);
2554
2555         /* Wait for all pending tx completions to arrive so that
2556          * all tx skbs are freed.
2557          */
2558         be_tx_compl_clean(adapter);
2559         netif_tx_disable(netdev);
2560
2561         be_rx_qs_destroy(adapter);
2562
2563         for_all_evt_queues(adapter, eqo, i) {
2564                 if (msix_enabled(adapter))
2565                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2566                 else
2567                         synchronize_irq(netdev->irq);
2568                 be_eq_clean(eqo);
2569         }
2570
2571         be_irq_unregister(adapter);
2572
2573         return 0;
2574 }
2575
2576 static int be_rx_qs_create(struct be_adapter *adapter)
2577 {
2578         struct be_rx_obj *rxo;
2579         int rc, i, j;
2580         u8 rsstable[128];
2581
2582         for_all_rx_queues(adapter, rxo, i) {
2583                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2584                                     sizeof(struct be_eth_rx_d));
2585                 if (rc)
2586                         return rc;
2587         }
2588
2589         /* The FW would like the default RXQ to be created first */
2590         rxo = default_rxo(adapter);
2591         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2592                                adapter->if_handle, false, &rxo->rss_id);
2593         if (rc)
2594                 return rc;
2595
2596         for_all_rss_queues(adapter, rxo, i) {
2597                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2598                                        rx_frag_size, adapter->if_handle,
2599                                        true, &rxo->rss_id);
2600                 if (rc)
2601                         return rc;
2602         }
2603
2604         if (be_multi_rxq(adapter)) {
2605                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2606                         for_all_rss_queues(adapter, rxo, i) {
2607                                 if ((j + i) >= 128)
2608                                         break;
2609                                 rsstable[j + i] = rxo->rss_id;
2610                         }
2611                 }
2612                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2613                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2614
2615                 if (!BEx_chip(adapter))
2616                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2617                                                 RSS_ENABLE_UDP_IPV6;
2618
2619                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2620                                        128);
2621                 if (rc) {
2622                         adapter->rss_flags = 0;
2623                         return rc;
2624                 }
2625         }
2626
2627         /* First time posting */
2628         for_all_rx_queues(adapter, rxo, i)
2629                 be_post_rx_frags(rxo, GFP_KERNEL);
2630         return 0;
2631 }
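/* RSS table example: with three RSS rings whose rss_ids are 5, 6 and 7,
 * the nested loops above fill the 128-entry indirection table as
 * 5,6,7,5,6,7,... so flow hashes spread round-robin across the rings.
 * The default RXQ is deliberately not entered in the table.
 */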
2632
2633 static int be_open(struct net_device *netdev)
2634 {
2635         struct be_adapter *adapter = netdev_priv(netdev);
2636         struct be_eq_obj *eqo;
2637         struct be_rx_obj *rxo;
2638         struct be_tx_obj *txo;
2639         u8 link_status;
2640         int status, i;
2641
2642         status = be_rx_qs_create(adapter);
2643         if (status)
2644                 goto err;
2645
2646         status = be_irq_register(adapter);
2647         if (status)
2648                 goto err;
2649
2650         for_all_rx_queues(adapter, rxo, i)
2651                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2652
2653         for_all_tx_queues(adapter, txo, i)
2654                 be_cq_notify(adapter, txo->cq.id, true, 0);
2655
2656         be_async_mcc_enable(adapter);
2657
2658         for_all_evt_queues(adapter, eqo, i) {
2659                 napi_enable(&eqo->napi);
2660                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2661         }
2662         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2663
2664         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2665         if (!status)
2666                 be_link_status_update(adapter, link_status);
2667
2668         netif_tx_start_all_queues(netdev);
2669         be_roce_dev_open(adapter);
2670         return 0;
2671 err:
2672         be_close(adapter->netdev);
2673         return -EIO;
2674 }
2675
2676 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2677 {
2678         struct be_dma_mem cmd;
2679         int status = 0;
2680         u8 mac[ETH_ALEN];
2681
2682         memset(mac, 0, ETH_ALEN);
2683
2684         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2685         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2686                                     GFP_KERNEL | __GFP_ZERO);
2687         if (cmd.va == NULL)
2688                 return -ENOMEM;
2689
2690         if (enable) {
2691                 status = pci_write_config_dword(adapter->pdev,
2692                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2693                 if (status) {
2694                         dev_err(&adapter->pdev->dev,
2695                                 "Could not enable Wake-on-lan\n");
2696                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2697                                           cmd.dma);
2698                         return status;
2699                 }
2700                 status = be_cmd_enable_magic_wol(adapter,
2701                                 adapter->netdev->dev_addr, &cmd);
2702                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2703                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2704         } else {
2705                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2706                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2707                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2708         }
2709
2710         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2711         return status;
2712 }
2713
2714 /*
2715  * Generate a seed MAC address from the PF MAC Address using jhash.
2716  * MAC addresses for VFs are assigned incrementally starting from the seed.
2717  * These addresses are programmed in the ASIC by the PF and the VF driver
2718  * queries for the MAC address during its probe.
2719  */
2720 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2721 {
2722         u32 vf;
2723         int status = 0;
2724         u8 mac[ETH_ALEN];
2725         struct be_vf_cfg *vf_cfg;
2726
2727         be_vf_eth_addr_generate(adapter, mac);
2728
2729         for_all_vfs(adapter, vf_cfg, vf) {
2730                 if (lancer_chip(adapter)) {
2731                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2732                 } else {
2733                         status = be_cmd_pmac_add(adapter, mac,
2734                                                  vf_cfg->if_handle,
2735                                                  &vf_cfg->pmac_id, vf + 1);
2736                 }
2737
2738                 if (status)
2739                         dev_err(&adapter->pdev->dev,
2740                         "MAC address assignment failed for VF %d\n", vf);
2741                 else
2742                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2743
2744                 mac[5] += 1;
2745         }
2746         return status;
2747 }
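/* Note: only the last octet of the seed is incremented per VF above, so
 * it wraps within mac[5] without carrying into mac[4]. With the modest
 * VF counts these adapters support this should not collide in practice,
 * but it is an assumption worth keeping in mind.
 */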
2748
2749 static int be_vfs_mac_query(struct be_adapter *adapter)
2750 {
2751         int status, vf;
2752         u8 mac[ETH_ALEN];
2753         struct be_vf_cfg *vf_cfg;
2754         bool active;
2755
2756         for_all_vfs(adapter, vf_cfg, vf) {
2757                 be_cmd_get_mac_from_list(adapter, mac, &active,
2758                                          &vf_cfg->pmac_id, 0);
2759
2760                 status = be_cmd_mac_addr_query(adapter, mac, false,
2761                                                vf_cfg->if_handle, 0);
2762                 if (status)
2763                         return status;
2764                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2765         }
2766         return 0;
2767 }
2768
2769 static void be_vf_clear(struct be_adapter *adapter)
2770 {
2771         struct be_vf_cfg *vf_cfg;
2772         u32 vf;
2773
2774         if (be_find_vfs(adapter, ASSIGNED)) {
2775                 dev_warn(&adapter->pdev->dev,
2776                          "VFs are assigned to VMs: not disabling VFs\n");
2777                 goto done;
2778         }
2779
2780         pci_disable_sriov(adapter->pdev);
2781
2782         for_all_vfs(adapter, vf_cfg, vf) {
2783                 if (lancer_chip(adapter))
2784                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2785                 else
2786                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2787                                         vf_cfg->pmac_id, vf + 1);
2788
2789                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2790         }
2791 done:
2792         kfree(adapter->vf_cfg);
2793         adapter->num_vfs = 0;
2794 }
2795
2796 static int be_clear(struct be_adapter *adapter)
2797 {
2798         int i = 1;
2799
2800         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2801                 cancel_delayed_work_sync(&adapter->work);
2802                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2803         }
2804
2805         if (sriov_enabled(adapter))
2806                 be_vf_clear(adapter);
2807
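        /* i starts at 1: pmac_id[0] holds the primary MAC, which goes
         * away with the interface itself; only the additional unicast
         * MACs are deleted here.
         */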
2808         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2809                 be_cmd_pmac_del(adapter, adapter->if_handle,
2810                         adapter->pmac_id[i], 0);
2811
2812         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2813
2814         be_mcc_queues_destroy(adapter);
2815         be_rx_cqs_destroy(adapter);
2816         be_tx_queues_destroy(adapter);
2817         be_evt_queues_destroy(adapter);
2818
2819         kfree(adapter->pmac_id);
2820         adapter->pmac_id = NULL;
2821
2822         be_msix_disable(adapter);
2823         return 0;
2824 }
2825
2826 static int be_vfs_if_create(struct be_adapter *adapter)
2827 {
2828         struct be_vf_cfg *vf_cfg;
2829         u32 cap_flags, en_flags, vf;
2830         int status = 0;
2831
2832         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2833                     BE_IF_FLAGS_MULTICAST;
2834
2835         for_all_vfs(adapter, vf_cfg, vf) {
2836                 if (!BE3_chip(adapter))
2837                         be_cmd_get_profile_config(adapter, &cap_flags,
2838                                                   NULL, vf + 1);
2839
2840                 /* If a FW profile exists, then cap_flags are updated */
2841                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2842                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2843                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2844                                           &vf_cfg->if_handle, vf + 1);
2845                 if (status)
2846                         goto err;
2847         }
2848 err:
2849         return status;
2850 }
2851
2852 static int be_vf_setup_init(struct be_adapter *adapter)
2853 {
2854         struct be_vf_cfg *vf_cfg;
2855         int vf;
2856
2857         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2858                                   GFP_KERNEL);
2859         if (!adapter->vf_cfg)
2860                 return -ENOMEM;
2861
2862         for_all_vfs(adapter, vf_cfg, vf) {
2863                 vf_cfg->if_handle = -1;
2864                 vf_cfg->pmac_id = -1;
2865         }
2866         return 0;
2867 }
2868
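/* Bring up SR-IOV. VFs that the FW reports as already enabled (e.g.
 * after a PF driver reload) are adopted as-is; otherwise interfaces and
 * MAC addresses are freshly created for each VF. In both cases the
 * per-VF link speed and default VLAN are cached and the VF is enabled;
 * SR-IOV is turned on at the PCI level only for newly created VFs.
 */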
2869 static int be_vf_setup(struct be_adapter *adapter)
2870 {
2871         struct be_vf_cfg *vf_cfg;
2872         u16 def_vlan, lnk_speed;
2873         int status, old_vfs, vf;
2874         struct device *dev = &adapter->pdev->dev;
2875
2876         old_vfs = be_find_vfs(adapter, ENABLED);
2877         if (old_vfs) {
2878                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2879                 if (old_vfs != num_vfs)
2880                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2881                 adapter->num_vfs = old_vfs;
2882         } else {
2883                 if (num_vfs > adapter->dev_num_vfs)
2884                         dev_info(dev, "Device supports %d VFs and not %d\n",
2885                                  adapter->dev_num_vfs, num_vfs);
2886                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2887                 if (!adapter->num_vfs)
2888                         return 0;
2889         }
2890
2891         status = be_vf_setup_init(adapter);
2892         if (status)
2893                 goto err;
2894
2895         if (old_vfs) {
2896                 for_all_vfs(adapter, vf_cfg, vf) {
2897                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2898                         if (status)
2899                                 goto err;
2900                 }
2901         } else {
2902                 status = be_vfs_if_create(adapter);
2903                 if (status)
2904                         goto err;
2905         }
2906
2907         if (old_vfs) {
2908                 status = be_vfs_mac_query(adapter);
2909                 if (status)
2910                         goto err;
2911         } else {
2912                 status = be_vf_eth_addr_config(adapter);
2913                 if (status)
2914                         goto err;
2915         }
2916
2917         for_all_vfs(adapter, vf_cfg, vf) {
                /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
                 * Allow the full available bandwidth.
                 */
                if (BE3_chip(adapter) && !old_vfs)
                        be_cmd_set_qos(adapter, 1000, vf + 1);
2923
2924                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2925                                                   NULL, vf + 1);
2926                 if (!status)
2927                         vf_cfg->tx_rate = lnk_speed;
2928
2929                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2930                                                vf + 1, vf_cfg->if_handle);
2931                 if (status)
2932                         goto err;
2933                 vf_cfg->def_vid = def_vlan;
2934
2935                 be_cmd_enable_vf(adapter, vf + 1);
2936         }
2937
2938         if (!old_vfs) {
2939                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2940                 if (status) {
2941                         dev_err(dev, "SRIOV enable failed\n");
2942                         adapter->num_vfs = 0;
2943                         goto err;
2944                 }
2945         }
2946         return 0;
2947 err:
2948         dev_err(dev, "VF setup failed\n");
2949         be_vf_clear(adapter);
2950         return status;
2951 }
2952
2953 static void be_setup_init(struct be_adapter *adapter)
2954 {
2955         adapter->vlan_prio_bmap = 0xff;
2956         adapter->phy.link_speed = -1;
2957         adapter->if_handle = -1;
2958         adapter->be3_native = false;
2959         adapter->promiscuous = false;
2960         if (be_physfn(adapter))
2961                 adapter->cmd_privileges = MAX_PRIVILEGES;
2962         else
2963                 adapter->cmd_privileges = MIN_PRIVILEGES;
2964 }
2965
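/* Resolve the primary MAC: prefer the permanent address already known to
 * the netdev; otherwise ask the FW, using the MAC list on Lancer, the
 * permanent MAC on a BE3 PF, or the PF-assigned MAC on a BE3 VF.
 */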
2966 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2967                            bool *active_mac, u32 *pmac_id)
2968 {
2969         int status = 0;
2970
2971         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2972                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2973                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2974                         *active_mac = true;
2975                 else
2976                         *active_mac = false;
2977
2978                 return status;
2979         }
2980
2981         if (lancer_chip(adapter)) {
2982                 status = be_cmd_get_mac_from_list(adapter, mac,
2983                                                   active_mac, pmac_id, 0);
2984                 if (*active_mac) {
2985                         status = be_cmd_mac_addr_query(adapter, mac, false,
2986                                                        if_handle, *pmac_id);
2987                 }
2988         } else if (be_physfn(adapter)) {
2989                 /* For BE3, for PF get permanent MAC */
2990                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2991                 *active_mac = false;
2992         } else {
2993                 /* For BE3, for VF get soft MAC assigned by PF*/
2994                 status = be_cmd_mac_addr_query(adapter, mac, false,
2995                                                if_handle, 0);
2996                 *active_mac = true;
2997         }
2998         return status;
2999 }
3000
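/* Fill in per-function resource limits: from the FW function profile on
 * chips that provide one, otherwise from fixed chip defaults; finally
 * read the SR-IOV total-VFs count from PCI config space.
 */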
3001 static void be_get_resources(struct be_adapter *adapter)
3002 {
3003         u16 dev_num_vfs;
3004         int pos, status;
3005         bool profile_present = false;
3006         u16 txq_count = 0;
3007
3008         if (!BEx_chip(adapter)) {
3009                 status = be_cmd_get_func_config(adapter);
3010                 if (!status)
3011                         profile_present = true;
3012         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3013                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3014         }
3015
3016         if (profile_present) {
3017                 /* Sanity fixes for Lancer */
3018                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3019                                               BE_UC_PMAC_COUNT);
3020                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3021                                            BE_NUM_VLANS_SUPPORTED);
3022                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3023                                                BE_MAX_MC);
3024                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3025                                                MAX_TX_QS);
3026                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3027                                                 BE3_MAX_RSS_QS);
3028                 adapter->max_event_queues = min_t(u16,
3029                                                   adapter->max_event_queues,
3030                                                   BE3_MAX_RSS_QS);
3031
3032                 if (adapter->max_rss_queues &&
3033                     adapter->max_rss_queues == adapter->max_rx_queues)
3034                         adapter->max_rss_queues -= 1;
3035
3036                 if (adapter->max_event_queues < adapter->max_rss_queues)
3037                         adapter->max_rss_queues = adapter->max_event_queues;
3038
3039         } else {
3040                 if (be_physfn(adapter))
3041                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3042                 else
3043                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3044
3045                 if (adapter->function_mode & FLEX10_MODE)
                        adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3047                 else
3048                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3049
3050                 adapter->max_mcast_mac = BE_MAX_MC;
3051                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3052                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3053                                                MAX_TX_QS);
3054                 adapter->max_rss_queues = (adapter->be3_native) ?
3055                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3056                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3057
3058                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3059                                         BE_IF_FLAGS_BROADCAST |
3060                                         BE_IF_FLAGS_MULTICAST |
3061                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3062                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3063                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3064                                         BE_IF_FLAGS_PROMISCUOUS;
3065
3066                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3067                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3068         }
3069
3070         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3071         if (pos) {
3072                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3073                                      &dev_num_vfs);
3074                 if (BE3_chip(adapter))
3075                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3076                 adapter->dev_num_vfs = dev_num_vfs;
3077         }
3078 }
3079
3080 /* Routine to query per function resource limits */
3081 static int be_get_config(struct be_adapter *adapter)
3082 {
3083         int status;
3084
3085         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3086                                      &adapter->function_mode,
3087                                      &adapter->function_caps,
3088                                      &adapter->asic_rev);
3089         if (status)
3090                 goto err;
3091
3092         be_get_resources(adapter);
3093
3094         /* primary mac needs 1 pmac entry */
3095         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3096                                    sizeof(u32), GFP_KERNEL);
3097         if (!adapter->pmac_id) {
3098                 status = -ENOMEM;
3099                 goto err;
3100         }
3101
3102 err:
3103         return status;
3104 }
3105
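/* Bring-up path for the NIC function: query FW config and per-function
 * resource limits, enable MSI-x, create event/CQ/MCC queues, create the
 * interface and program its primary MAC, then apply VLAN, RX-mode and
 * flow-control settings, set up VFs (PF only) and start the worker.
 */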
3106 static int be_setup(struct be_adapter *adapter)
3107 {
3108         struct device *dev = &adapter->pdev->dev;
3109         u32 en_flags;
3110         u32 tx_fc, rx_fc;
3111         int status;
3112         u8 mac[ETH_ALEN];
3113         bool active_mac;
3114
3115         be_setup_init(adapter);
3116
3117         if (!lancer_chip(adapter))
3118                 be_cmd_req_native_mode(adapter);
3119
3120         status = be_get_config(adapter);
3121         if (status)
3122                 goto err;
3123
3124         status = be_msix_enable(adapter);
3125         if (status)
3126                 goto err;
3127
3128         status = be_evt_queues_create(adapter);
3129         if (status)
3130                 goto err;
3131
3132         status = be_tx_cqs_create(adapter);
3133         if (status)
3134                 goto err;
3135
3136         status = be_rx_cqs_create(adapter);
3137         if (status)
3138                 goto err;
3139
3140         status = be_mcc_queues_create(adapter);
3141         if (status)
3142                 goto err;
3143
3144         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3145         /* In UMC mode FW does not return right privileges.
3146          * Override with correct privilege equivalent to PF.
3147          */
3148         if (be_is_mc(adapter))
3149                 adapter->cmd_privileges = MAX_PRIVILEGES;
3150
3151         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3152                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3153
3154         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3155                 en_flags |= BE_IF_FLAGS_RSS;
3156
        en_flags &= adapter->if_cap_flags;
3158
3159         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3160                                   &adapter->if_handle, 0);
3161         if (status != 0)
3162                 goto err;
3163
3164         memset(mac, 0, ETH_ALEN);
3165         active_mac = false;
3166         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3167                                  &active_mac, &adapter->pmac_id[0]);
3168         if (status != 0)
3169                 goto err;
3170
3171         if (!active_mac) {
3172                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3173                                          &adapter->pmac_id[0], 0);
3174                 if (status != 0)
3175                         goto err;
3176         }
3177
3178         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3179                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3180                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3181         }
3182
3183         status = be_tx_qs_create(adapter);
3184         if (status)
3185                 goto err;
3186
3187         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3188
3189         if (adapter->vlans_added)
3190                 be_vid_config(adapter);
3191
3192         be_set_rx_mode(adapter->netdev);
3193
3194         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3195
3196         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3197                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3198                                         adapter->rx_fc);
3199
3200         if (be_physfn(adapter)) {
3201                 if (adapter->dev_num_vfs)
3202                         be_vf_setup(adapter);
3203                 else
3204                         dev_warn(dev, "device doesn't support SRIOV\n");
3205         }
3206
3207         status = be_cmd_get_phy_info(adapter);
3208         if (!status && be_pause_supported(adapter))
3209                 adapter->phy.fc_autoneg = 1;
3210
3211         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3212         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3213         return 0;
3214 err:
3215         be_clear(adapter);
3216         return status;
3217 }
3218
3219 #ifdef CONFIG_NET_POLL_CONTROLLER
3220 static void be_netpoll(struct net_device *netdev)
3221 {
3222         struct be_adapter *adapter = netdev_priv(netdev);
3223         struct be_eq_obj *eqo;
3224         int i;
3225
3226         for_all_evt_queues(adapter, eqo, i) {
3227                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3228                 napi_schedule(&eqo->napi);
3229         }
}
3233 #endif
3234
3235 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
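/* 32-byte cookie (two 16-byte halves, NUL-padded as needed) that marks
 * the start of the flash section-info table in a UFI image; it is
 * matched by get_fsec_info() below.
 */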
static const char flash_cookie[2][16] =     {"*** SE FLAS", "H DIRECTORY *** "};
3237
3238 static bool be_flash_redboot(struct be_adapter *adapter,
3239                         const u8 *p, u32 img_start, int image_size,
3240                         int hdr_size)
3241 {
3242         u32 crc_offset;
3243         u8 flashed_crc[4];
3244         int status;
3245
3246         crc_offset = hdr_size + img_start + image_size - 4;
3247
3248         p += crc_offset;
3249
3250         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3251                         (image_size - 4));
3252         if (status) {
                dev_err(&adapter->pdev->dev,
                        "could not get crc from flash, not flashing redboot\n");
3255                 return false;
3256         }
3257
        /* Update redboot only if the CRC does not match */
        return memcmp(flashed_crc, p, 4) != 0;
3263 }
3264
3265 static bool phy_flashing_required(struct be_adapter *adapter)
3266 {
3267         return (adapter->phy.phy_type == TN_8022 &&
3268                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3269 }
3270
3271 static bool is_comp_in_ufi(struct be_adapter *adapter,
3272                            struct flash_section_info *fsec, int type)
3273 {
3274         int i = 0, img_type = 0;
3275         struct flash_section_info_g2 *fsec_g2 = NULL;
3276
3277         if (BE2_chip(adapter))
3278                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3279
3280         for (i = 0; i < MAX_FLASH_COMP; i++) {
3281                 if (fsec_g2)
3282                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3283                 else
3284                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3285
3286                 if (img_type == type)
3287                         return true;
3288         }
3289         return false;
}
3292
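/* Walk the UFI image past the file/image headers in 32-byte steps until
 * the flash-directory cookie is found; returns the section-info table
 * that follows it, or NULL if the image is corrupted.
 */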
static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
                                                int header_size,
                                                const struct firmware *fw)
3296 {
3297         struct flash_section_info *fsec = NULL;
3298         const u8 *p = fw->data;
3299
3300         p += header_size;
3301         while (p < (fw->data + fw->size)) {
3302                 fsec = (struct flash_section_info *)p;
3303                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3304                         return fsec;
3305                 p += 32;
3306         }
3307         return NULL;
3308 }
3309
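/* Push one firmware component to flash in 32KB chunks. Intermediate
 * chunks use a SAVE op (buffered by the FW); the final chunk uses a
 * FLASH op, which commits the component. PHY FW is optional: the FW
 * rejects it with ILLEGAL_IOCTL_REQ on ports without the PHY, which is
 * not treated as an error.
 */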
3310 static int be_flash(struct be_adapter *adapter, const u8 *img,
3311                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3312 {
3313         u32 total_bytes = 0, flash_op, num_bytes = 0;
3314         int status = 0;
3315         struct be_cmd_write_flashrom *req = flash_cmd->va;
3316
3317         total_bytes = img_size;
3318         while (total_bytes) {
3319                 num_bytes = min_t(u32, 32*1024, total_bytes);
3320
3321                 total_bytes -= num_bytes;
3322
3323                 if (!total_bytes) {
3324                         if (optype == OPTYPE_PHY_FW)
3325                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3326                         else
3327                                 flash_op = FLASHROM_OPER_FLASH;
3328                 } else {
3329                         if (optype == OPTYPE_PHY_FW)
3330                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3331                         else
3332                                 flash_op = FLASHROM_OPER_SAVE;
3333                 }
3334
3335                 memcpy(req->data_buf, img, num_bytes);
3336                 img += num_bytes;
3337                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3338                                                 flash_op, num_bytes);
3339                 if (status) {
3340                         if (status == ILLEGAL_IOCTL_REQ &&
3341                             optype == OPTYPE_PHY_FW)
3342                                 break;
3343                         dev_err(&adapter->pdev->dev,
3344                                 "cmd to write to flash rom failed.\n");
3345                         return status;
3346                 }
3347         }
3348         return 0;
3349 }
3350
3351 /* For BE2, BE3 and BE3-R */
3352 static int be_flash_BEx(struct be_adapter *adapter,
3353                          const struct firmware *fw,
3354                          struct be_dma_mem *flash_cmd,
3355                          int num_of_images)
{
3358         int status = 0, i, filehdr_size = 0;
3359         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3360         const u8 *p = fw->data;
3361         const struct flash_comp *pflashcomp;
3362         int num_comp, redboot;
3363         struct flash_section_info *fsec = NULL;
3364
        static const struct flash_comp gen3_flash_types[] = {
3366                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3367                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3368                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3369                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3370                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3371                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3372                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3373                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3374                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3375                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3376                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3377                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3378                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3379                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3380                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3381                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3382                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3383                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3384                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3385                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3386         };
3387
        static const struct flash_comp gen2_flash_types[] = {
3389                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3390                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3391                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3392                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3393                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3394                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3395                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3396                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3397                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3398                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3399                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3400                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3401                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3402                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3403                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3404                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3405         };
3406
3407         if (BE3_chip(adapter)) {
3408                 pflashcomp = gen3_flash_types;
3409                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3410                 num_comp = ARRAY_SIZE(gen3_flash_types);
3411         } else {
3412                 pflashcomp = gen2_flash_types;
3413                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3414                 num_comp = ARRAY_SIZE(gen2_flash_types);
3415         }
3416
        /* Get flash section info */
        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
        if (!fsec) {
                dev_err(&adapter->pdev->dev,
                        "Invalid cookie. UFI image may be corrupted\n");
                return -1;
        }
3424         for (i = 0; i < num_comp; i++) {
3425                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3426                         continue;
3427
                /* The NCSI component can be flashed only on top of FW
                 * version 3.102.148.0 or newer.
                 */
                if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
                    memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
                        continue;
3431
                if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
                    !phy_flashing_required(adapter))
                        continue;
3435
3436                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3437                         redboot = be_flash_redboot(adapter, fw->data,
3438                                 pflashcomp[i].offset, pflashcomp[i].size,
3439                                 filehdr_size + img_hdrs_size);
3440                         if (!redboot)
3441                                 continue;
3442                 }
3443
3444                 p = fw->data;
3445                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3446                 if (p + pflashcomp[i].size > fw->data + fw->size)
3447                         return -1;
3448
3449                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3450                                         pflashcomp[i].size);
3451                 if (status) {
3452                         dev_err(&adapter->pdev->dev,
3453                                 "Flashing section type %d failed.\n",
3454                                 pflashcomp[i].img_type);
3455                         return status;
3456                 }
3457         }
3458         return 0;
3459 }
3460
3461 static int be_flash_skyhawk(struct be_adapter *adapter,
3462                 const struct firmware *fw,
3463                 struct be_dma_mem *flash_cmd, int num_of_images)
3464 {
3465         int status = 0, i, filehdr_size = 0;
3466         int img_offset, img_size, img_optype, redboot;
3467         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3468         const u8 *p = fw->data;
3469         struct flash_section_info *fsec = NULL;
3470
3471         filehdr_size = sizeof(struct flash_file_hdr_g3);
3472         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3473         if (!fsec) {
3474                 dev_err(&adapter->pdev->dev,
                        "Invalid cookie. UFI image may be corrupted\n");
3476                 return -1;
3477         }
3478
3479         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3480                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3481                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3482
3483                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3484                 case IMAGE_FIRMWARE_iSCSI:
3485                         img_optype = OPTYPE_ISCSI_ACTIVE;
3486                         break;
3487                 case IMAGE_BOOT_CODE:
3488                         img_optype = OPTYPE_REDBOOT;
3489                         break;
3490                 case IMAGE_OPTION_ROM_ISCSI:
3491                         img_optype = OPTYPE_BIOS;
3492                         break;
3493                 case IMAGE_OPTION_ROM_PXE:
3494                         img_optype = OPTYPE_PXE_BIOS;
3495                         break;
3496                 case IMAGE_OPTION_ROM_FCoE:
3497                         img_optype = OPTYPE_FCOE_BIOS;
3498                         break;
3499                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3500                         img_optype = OPTYPE_ISCSI_BACKUP;
3501                         break;
3502                 case IMAGE_NCSI:
3503                         img_optype = OPTYPE_NCSI_FW;
3504                         break;
3505                 default:
3506                         continue;
3507                 }
3508
3509                 if (img_optype == OPTYPE_REDBOOT) {
3510                         redboot = be_flash_redboot(adapter, fw->data,
3511                                         img_offset, img_size,
3512                                         filehdr_size + img_hdrs_size);
3513                         if (!redboot)
3514                                 continue;
3515                 }
3516
3517                 p = fw->data;
3518                 p += filehdr_size + img_offset + img_hdrs_size;
3519                 if (p + img_size > fw->data + fw->size)
3520                         return -1;
3521
3522                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3523                 if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Flashing section type %d failed.\n",
                                le32_to_cpu(fsec->fsec_entry[i].type));
3527                         return status;
3528                 }
3529         }
3530         return 0;
3531 }
3532
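/* Poll the SLIPORT physdev control register until the "in progress" bit
 * clears, for at most SLIPORT_IDLE_TIMEOUT seconds, before a FW reset.
 */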
3533 static int lancer_wait_idle(struct be_adapter *adapter)
3534 {
3535 #define SLIPORT_IDLE_TIMEOUT 30
3536         u32 reg_val;
3537         int status = 0, i;
3538
3539         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3540                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3541                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3542                         break;
3543
3544                 ssleep(1);
3545         }
3546
3547         if (i == SLIPORT_IDLE_TIMEOUT)
3548                 status = -1;
3549
3550         return status;
3551 }
3552
3553 static int lancer_fw_reset(struct be_adapter *adapter)
3554 {
3555         int status = 0;
3556
3557         status = lancer_wait_idle(adapter);
3558         if (status)
3559                 return status;
3560
3561         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3562                   PHYSDEV_CONTROL_OFFSET);
3563
3564         return status;
3565 }
3566
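/* Lancer FW download: stream the image to the "/prg" object in 32KB
 * chunks via WRITE_OBJECT, then issue a zero-length write to commit it.
 * Depending on the FW's change_status, either reset the FW here or tell
 * the user a reboot is needed for the new image to take effect.
 */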
3567 static int lancer_fw_download(struct be_adapter *adapter,
3568                                 const struct firmware *fw)
3569 {
3570 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3571 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3572         struct be_dma_mem flash_cmd;
3573         const u8 *data_ptr = NULL;
3574         u8 *dest_image_ptr = NULL;
3575         size_t image_size = 0;
3576         u32 chunk_size = 0;
3577         u32 data_written = 0;
3578         u32 offset = 0;
3579         int status = 0;
3580         u8 add_status = 0;
3581         u8 change_status;
3582
3583         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
                dev_err(&adapter->pdev->dev,
                        "FW image not properly aligned; length must be a multiple of 4 bytes\n");
3587                 status = -EINVAL;
3588                 goto lancer_fw_exit;
3589         }
3590
3591         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3592                                 + LANCER_FW_DOWNLOAD_CHUNK;
3593         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3594                                           &flash_cmd.dma, GFP_KERNEL);
3595         if (!flash_cmd.va) {
3596                 status = -ENOMEM;
3597                 goto lancer_fw_exit;
3598         }
3599
3600         dest_image_ptr = flash_cmd.va +
3601                                 sizeof(struct lancer_cmd_req_write_object);
3602         image_size = fw->size;
3603         data_ptr = fw->data;
3604
3605         while (image_size) {
3606                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3607
3608                 /* Copy the image chunk content. */
3609                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3610
3611                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3612                                                  chunk_size, offset,
3613                                                  LANCER_FW_DOWNLOAD_LOCATION,
3614                                                  &data_written, &change_status,
3615                                                  &add_status);
3616                 if (status)
3617                         break;
3618
3619                 offset += data_written;
3620                 data_ptr += data_written;
3621                 image_size -= data_written;
3622         }
3623
3624         if (!status) {
3625                 /* Commit the FW written */
3626                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3627                                                  0, offset,
3628                                                  LANCER_FW_DOWNLOAD_LOCATION,
3629                                                  &data_written, &change_status,
3630                                                  &add_status);
3631         }
3632
3633         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3634                                 flash_cmd.dma);
3635         if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. Status code: 0x%x Additional status: 0x%x\n",
                        status, add_status);
3640                 goto lancer_fw_exit;
3641         }
3642
3643         if (change_status == LANCER_FW_RESET_NEEDED) {
3644                 status = lancer_fw_reset(adapter);
3645                 if (status) {
                        dev_err(&adapter->pdev->dev,
                                "Adapter busy for FW reset; new FW will not be active\n");
3649                         goto lancer_fw_exit;
3650                 }
        } else if (change_status != LANCER_NO_RESET_NEEDED) {
                dev_err(&adapter->pdev->dev,
                        "System reboot required for new FW to be active\n");
        }
3656
3657         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3658 lancer_fw_exit:
3659         return status;
3660 }
3661
3662 #define UFI_TYPE2               2
3663 #define UFI_TYPE3               3
3664 #define UFI_TYPE3R              10
3665 #define UFI_TYPE4               4
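/* Map the UFI image to a type based on the first digit of its FW build
 * string and, for BE3, the ASIC revision in the header: only a matching
 * chip/UFI combination may be flashed.
 */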
3666 static int be_get_ufi_type(struct be_adapter *adapter,
3667                            struct flash_file_hdr_g3 *fhdr)
3668 {
        if (!fhdr)
3670                 goto be_get_ufi_exit;
3671
3672         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3673                 return UFI_TYPE4;
3674         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3675                 if (fhdr->asic_type_rev == 0x10)
3676                         return UFI_TYPE3R;
3677                 else
3678                         return UFI_TYPE3;
3679         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3680                 return UFI_TYPE2;
3681
3682 be_get_ufi_exit:
3683         dev_err(&adapter->pdev->dev,
3684                 "UFI and Interface are not compatible for flashing\n");
3685         return -1;
3686 }
3687
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3689 {
3690         struct flash_file_hdr_g3 *fhdr3;
3691         struct image_hdr *img_hdr_ptr = NULL;
3692         struct be_dma_mem flash_cmd;
3693         const u8 *p;
3694         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3695
3696         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3697         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3698                                           &flash_cmd.dma, GFP_KERNEL);
3699         if (!flash_cmd.va) {
3700                 status = -ENOMEM;
3701                 goto be_fw_exit;
3702         }
3703
3704         p = fw->data;
3705         fhdr3 = (struct flash_file_hdr_g3 *)p;
3706
3707         ufi_type = be_get_ufi_type(adapter, fhdr3);
3708
3709         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3710         for (i = 0; i < num_imgs; i++) {
3711                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3712                                 (sizeof(struct flash_file_hdr_g3) +
3713                                  i * sizeof(struct image_hdr)));
3714                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3715                         switch (ufi_type) {
3716                         case UFI_TYPE4:
3717                                 status = be_flash_skyhawk(adapter, fw,
3718                                                         &flash_cmd, num_imgs);
3719                                 break;
3720                         case UFI_TYPE3R:
3721                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3722                                                       num_imgs);
3723                                 break;
3724                         case UFI_TYPE3:
3725                                 /* Do not flash this ufi on BE3-R cards */
                                if (adapter->asic_rev < 0x10) {
                                        status = be_flash_BEx(adapter, fw,
                                                              &flash_cmd,
                                                              num_imgs);
                                } else {
                                        status = -1;
                                        dev_err(&adapter->pdev->dev,
                                                "Can't load BE3 UFI on BE3R\n");
                                }
3735                         }
3736                 }
3737         }
3738
3739         if (ufi_type == UFI_TYPE2)
3740                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3741         else if (ufi_type == -1)
3742                 status = -1;
3743
3744         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3745                           flash_cmd.dma);
3746         if (status) {
3747                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3748                 goto be_fw_exit;
3749         }
3750
3751         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3752
3753 be_fw_exit:
3754         return status;
3755 }
3756
3757 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3758 {
3759         const struct firmware *fw;
3760         int status;
3761
3762         if (!netif_running(adapter->netdev)) {
3763                 dev_err(&adapter->pdev->dev,
3764                         "Firmware load not allowed (interface is down)\n");
                return -ENETDOWN;
3766         }
3767
3768         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3769         if (status)
3770                 goto fw_exit;
3771
3772         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3773
3774         if (lancer_chip(adapter))
3775                 status = lancer_fw_download(adapter, fw);
3776         else
3777                 status = be_fw_download(adapter, fw);
3778
3779 fw_exit:
3780         release_firmware(fw);
3781         return status;
3782 }
3783
3784 static const struct net_device_ops be_netdev_ops = {
3785         .ndo_open               = be_open,
3786         .ndo_stop               = be_close,
3787         .ndo_start_xmit         = be_xmit,
3788         .ndo_set_rx_mode        = be_set_rx_mode,
3789         .ndo_set_mac_address    = be_mac_addr_set,
3790         .ndo_change_mtu         = be_change_mtu,
3791         .ndo_get_stats64        = be_get_stats64,
3792         .ndo_validate_addr      = eth_validate_addr,
3793         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3794         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3795         .ndo_set_vf_mac         = be_set_vf_mac,
3796         .ndo_set_vf_vlan        = be_set_vf_vlan,
3797         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3798         .ndo_get_vf_config      = be_get_vf_config,
3799 #ifdef CONFIG_NET_POLL_CONTROLLER
3800         .ndo_poll_controller    = be_netpoll,
3801 #endif
3802 };
3803
3804 static void be_netdev_init(struct net_device *netdev)
3805 {
3806         struct be_adapter *adapter = netdev_priv(netdev);
3807         struct be_eq_obj *eqo;
3808         int i;
3809
3810         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3811                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3812                 NETIF_F_HW_VLAN_CTAG_TX;
3813         if (be_multi_rxq(adapter))
3814                 netdev->hw_features |= NETIF_F_RXHASH;
3815
3816         netdev->features |= netdev->hw_features |
3817                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3818
3819         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3820                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3821
3822         netdev->priv_flags |= IFF_UNICAST_FLT;
3823
3824         netdev->flags |= IFF_MULTICAST;
3825
3826         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3827
3828         netdev->netdev_ops = &be_netdev_ops;
3829
3830         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3831
3832         for_all_evt_queues(adapter, eqo, i)
3833                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3834 }
3835
3836 static void be_unmap_pci_bars(struct be_adapter *adapter)
3837 {
3838         if (adapter->csr)
3839                 pci_iounmap(adapter->pdev, adapter->csr);
3840         if (adapter->db)
3841                 pci_iounmap(adapter->pdev, adapter->db);
3842 }
3843
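/* Doorbell BAR: BAR 0 on Lancer chips and on VFs, BAR 4 otherwise. */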
3844 static int db_bar(struct be_adapter *adapter)
3845 {
3846         if (lancer_chip(adapter) || !be_physfn(adapter))
3847                 return 0;
3848         else
3849                 return 4;
3850 }
3851
3852 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3853 {
3854         if (skyhawk_chip(adapter)) {
3855                 adapter->roce_db.size = 4096;
3856                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3857                                                               db_bar(adapter));
3858                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3859                                                                db_bar(adapter));
3860         }
3861         return 0;
3862 }
3863
3864 static int be_map_pci_bars(struct be_adapter *adapter)
3865 {
3866         u8 __iomem *addr;
3867         u32 sli_intf;
3868
3869         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3870         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3871                                 SLI_INTF_IF_TYPE_SHIFT;
3872
3873         if (BEx_chip(adapter) && be_physfn(adapter)) {
3874                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3875                 if (adapter->csr == NULL)
3876                         return -ENOMEM;
3877         }
3878
3879         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3880         if (addr == NULL)
3881                 goto pci_map_err;
3882         adapter->db = addr;
3883
3884         be_roce_map_pci_bars(adapter);
3885         return 0;
3886
3887 pci_map_err:
3888         be_unmap_pci_bars(adapter);
3889         return -ENOMEM;
3890 }
3891
3892 static void be_ctrl_cleanup(struct be_adapter *adapter)
3893 {
3894         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3895
3896         be_unmap_pci_bars(adapter);
3897
3898         if (mem->va)
3899                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3900                                   mem->dma);
3901
3902         mem = &adapter->rx_filter;
3903         if (mem->va)
3904                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3905                                   mem->dma);
3906 }
3907
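/* Read the SLI family/function type from PCI config space, map the BARs,
 * and carve a 16-byte-aligned mailbox out of a slightly oversized
 * coherent allocation; also allocate the RX-filter command buffer and
 * initialize the mailbox/MCC locks.
 */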
3908 static int be_ctrl_init(struct be_adapter *adapter)
3909 {
3910         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3911         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3912         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3913         u32 sli_intf;
3914         int status;
3915
3916         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3917         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3918                                  SLI_INTF_FAMILY_SHIFT;
3919         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3920
3921         status = be_map_pci_bars(adapter);
3922         if (status)
3923                 goto done;
3924
3925         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3926         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3927                                                 mbox_mem_alloc->size,
3928                                                 &mbox_mem_alloc->dma,
3929                                                 GFP_KERNEL);
3930         if (!mbox_mem_alloc->va) {
3931                 status = -ENOMEM;
3932                 goto unmap_pci_bars;
3933         }
3934         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3935         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3936         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3937         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3938
3939         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3940         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3941                                            &rx_filter->dma,
3942                                            GFP_KERNEL | __GFP_ZERO);
3943         if (rx_filter->va == NULL) {
3944                 status = -ENOMEM;
3945                 goto free_mbox;
3946         }
3947
3948         mutex_init(&adapter->mbox_lock);
3949         spin_lock_init(&adapter->mcc_lock);
3950         spin_lock_init(&adapter->mcc_cq_lock);
3951
3952         init_completion(&adapter->flash_compl);
3953         pci_save_state(adapter->pdev);
3954         return 0;
3955
3956 free_mbox:
3957         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3958                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3959
3960 unmap_pci_bars:
3961         be_unmap_pci_bars(adapter);
3962
3963 done:
3964         return status;
3965 }
3966
3967 static void be_stats_cleanup(struct be_adapter *adapter)
3968 {
3969         struct be_dma_mem *cmd = &adapter->stats_cmd;
3970
3971         if (cmd->va)
3972                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3973                                   cmd->va, cmd->dma);
3974 }
3975
3976 static int be_stats_init(struct be_adapter *adapter)
3977 {
3978         struct be_dma_mem *cmd = &adapter->stats_cmd;
3979
3980         if (lancer_chip(adapter))
3981                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3982         else if (BE2_chip(adapter))
3983                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3984         else
3985                 /* BE3 and Skyhawk */
3986                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3987
3988         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3989                                      GFP_KERNEL | __GFP_ZERO);
3990         if (cmd->va == NULL)
3991                 return -1;
3992         return 0;
3993 }
3994
3995 static void be_remove(struct pci_dev *pdev)
3996 {
3997         struct be_adapter *adapter = pci_get_drvdata(pdev);
3998
3999         if (!adapter)
4000                 return;
4001
4002         be_roce_dev_remove(adapter);
4003         be_intr_set(adapter, false);
4004
4005         cancel_delayed_work_sync(&adapter->func_recovery_work);
4006
4007         unregister_netdev(adapter->netdev);
4008
4009         be_clear(adapter);
4010
4011         /* tell fw we're done with firing cmds */
4012         be_cmd_fw_clean(adapter);
4013
4014         be_stats_cleanup(adapter);
4015
4016         be_ctrl_cleanup(adapter);
4017
4018         pci_disable_pcie_error_reporting(pdev);
4019
4020         pci_set_drvdata(pdev, NULL);
4021         pci_release_regions(pdev);
4022         pci_disable_device(pdev);
4023
4024         free_netdev(adapter->netdev);
4025 }
4026
4027 bool be_is_wol_supported(struct be_adapter *adapter)
4028 {
        return (adapter->wol_cap & BE_WOL_CAP) &&
               !be_is_wol_excluded(adapter);
4031 }
4032
4033 u32 be_get_fw_log_level(struct be_adapter *adapter)
4034 {
4035         struct be_dma_mem extfat_cmd;
4036         struct be_fat_conf_params *cfgs;
4037         int status;
4038         u32 level = 0;
4039         int j;
4040
4041         if (lancer_chip(adapter))
4042                 return 0;
4043
4044         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4045         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
        extfat_cmd.va = dma_alloc_coherent(&adapter->pdev->dev,
                                           extfat_cmd.size, &extfat_cmd.dma,
                                           GFP_ATOMIC);
4048
4049         if (!extfat_cmd.va) {
4050                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4051                         __func__);
4052                 goto err;
4053         }
4054
4055         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4056         if (!status) {
4057                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4058                                                 sizeof(struct be_cmd_resp_hdr));
4059                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4060                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4061                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4062                 }
4063         }
        dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
                          extfat_cmd.dma);
4066 err:
4067         return level;
4068 }
4069
4070 static int be_get_initial_config(struct be_adapter *adapter)
4071 {
4072         int status;
4073         u32 level;
4074
4075         status = be_cmd_get_cntl_attributes(adapter);
4076         if (status)
4077                 return status;
4078
4079         status = be_cmd_get_acpi_wol_cap(adapter);
4080         if (status) {
                /* in case of a failure to get wol capabilities
                 * check the exclusion list to determine WOL capability
                 */
4083                 if (!be_is_wol_excluded(adapter))
4084                         adapter->wol_cap |= BE_WOL_CAP;
4085         }
4086
4087         if (be_is_wol_supported(adapter))
4088                 adapter->wol = true;
4089
4090         /* Must be a power of 2 or else MODULO will BUG_ON */
4091         adapter->be_get_temp_freq = 64;
4092
4093         level = be_get_fw_log_level(adapter);
4094         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4095
4096         return 0;
4097 }
4098
4099 static int lancer_recover_func(struct be_adapter *adapter)
4100 {
4101         struct device *dev = &adapter->pdev->dev;
4102         int status;
4103
4104         status = lancer_test_and_set_rdy_state(adapter);
4105         if (status)
4106                 goto err;
4107
4108         if (netif_running(adapter->netdev))
4109                 be_close(adapter->netdev);
4110
4111         be_clear(adapter);
4112
4113         be_clear_all_error(adapter);
4114
4115         status = be_setup(adapter);
4116         if (status)
4117                 goto err;
4118
4119         if (netif_running(adapter->netdev)) {
4120                 status = be_open(adapter->netdev);
4121                 if (status)
4122                         goto err;
4123         }
4124
4125         dev_err(dev, "Error recovery successful\n");
4126         return 0;
4127 err:
4128         if (status == -EAGAIN)
4129                 dev_err(dev, "Waiting for resource provisioning\n");
4130         else
4131                 dev_err(dev, "Error recovery failed\n");
4132
4133         return status;
4134 }
4135
4136 static void be_func_recovery_task(struct work_struct *work)
4137 {
4138         struct be_adapter *adapter =
                container_of(work, struct be_adapter, func_recovery_work.work);
4140         int status = 0;
4141
4142         be_detect_error(adapter);
4143
4144         if (adapter->hw_error && lancer_chip(adapter)) {
4146                 rtnl_lock();
4147                 netif_device_detach(adapter->netdev);
4148                 rtnl_unlock();
4149
4150                 status = lancer_recover_func(adapter);
4151                 if (!status)
4152                         netif_device_attach(adapter->netdev);
4153         }
4154
4155         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4156          * no need to attempt further recovery.
4157          */
4158         if (!status || status == -EAGAIN)
4159                 schedule_delayed_work(&adapter->func_recovery_work,
4160                                       msecs_to_jiffies(1000));
4161 }
4162
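/* Periodic (1 second) housekeeping: fire stats and die-temperature
 * queries, replenish RX queues whose buffer posting was starved, and
 * adapt the EQ interrupt delay to the current load.
 */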
4163 static void be_worker(struct work_struct *work)
4164 {
4165         struct be_adapter *adapter =
4166                 container_of(work, struct be_adapter, work.work);
4167         struct be_rx_obj *rxo;
4168         struct be_eq_obj *eqo;
4169         int i;
4170
        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions
         */
4173         if (!netif_running(adapter->netdev)) {
4174                 local_bh_disable();
4175                 be_process_mcc(adapter);
4176                 local_bh_enable();
4177                 goto reschedule;
4178         }
4179
4180         if (!adapter->stats_cmd_sent) {
4181                 if (lancer_chip(adapter))
4182                         lancer_cmd_get_pport_stats(adapter,
4183                                                 &adapter->stats_cmd);
4184                 else
4185                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4186         }
4187
4188         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4189                 be_cmd_get_die_temperature(adapter);
4190
4191         for_all_rx_queues(adapter, rxo, i) {
4192                 if (rxo->rx_post_starved) {
4193                         rxo->rx_post_starved = false;
4194                         be_post_rx_frags(rxo, GFP_KERNEL);
4195                 }
4196         }
4197
4198         for_all_evt_queues(adapter, eqo, i)
4199                 be_eqd_update(adapter, eqo);
4200
4201 reschedule:
4202         adapter->work_counter++;
4203         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4204 }
4205
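/* The function reset (FLR) in probe is skipped when VFs are found
 * already enabled (e.g. the PF driver was reloaded while VFs were in
 * use), so that the existing VF configuration survives.
 */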
4206 static bool be_reset_required(struct be_adapter *adapter)
4207 {
        return be_find_vfs(adapter, ENABLED) <= 0;
4209 }
4210
4211 static char *mc_name(struct be_adapter *adapter)
4212 {
4213         if (adapter->function_mode & FLEX10_MODE)
4214                 return "FLEX10";
4215         else if (adapter->function_mode & VNIC_MODE)
4216                 return "vNIC";
4217         else if (adapter->function_mode & UMC_ENABLED)
4218                 return "UMC";
4219         else
4220                 return "";
4221 }
4222
4223 static inline char *func_name(struct be_adapter *adapter)
4224 {
4225         return be_physfn(adapter) ? "PF" : "VF";
4226 }
4227
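/* PCI probe: enable the device and BARs, set the DMA mask, sync with the
 * FW ready state (resetting the function first if no VFs are enabled),
 * allocate stats/config state, run be_setup() and register the netdev.
 * The recovery worker is scheduled last, once the device is functional.
 */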
4228 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4229 {
4230         int status = 0;
4231         struct be_adapter *adapter;
4232         struct net_device *netdev;
4233         char port_name;
4234
4235         status = pci_enable_device(pdev);
4236         if (status)
4237                 goto do_none;
4238
4239         status = pci_request_regions(pdev, DRV_NAME);
4240         if (status)
4241                 goto disable_dev;
4242         pci_set_master(pdev);
4243
4244         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4245         if (netdev == NULL) {
4246                 status = -ENOMEM;
4247                 goto rel_reg;
4248         }
4249         adapter = netdev_priv(netdev);
4250         adapter->pdev = pdev;
4251         pci_set_drvdata(pdev, adapter);
4252         adapter->netdev = netdev;
4253         SET_NETDEV_DEV(netdev, &pdev->dev);
4254
4255         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4256         if (!status) {
4257                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4258                 if (status < 0) {
4259                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4260                         goto free_netdev;
4261                 }
4262                 netdev->features |= NETIF_F_HIGHDMA;
4263         } else {
4264                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4265                 if (status) {
4266                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4267                         goto free_netdev;
4268                 }
4269         }
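        /* The block above prefers a 64-bit DMA mask, advertising
         * NETIF_F_HIGHDMA so the stack may hand the driver buffers in
         * high memory, and falls back to a 32-bit mask on platforms
         * that cannot satisfy 64-bit addressing.
         */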
4270
4271         status = pci_enable_pcie_error_reporting(pdev);
4272         if (status)
                dev_warn(&pdev->dev, "Could not use PCIe error reporting\n");
4274
4275         status = be_ctrl_init(adapter);
4276         if (status)
4277                 goto free_netdev;
4278
4279         /* sync up with fw's ready state */
4280         if (be_physfn(adapter)) {
4281                 status = be_fw_wait_ready(adapter);
4282                 if (status)
4283                         goto ctrl_clean;
4284         }
4285
4286         if (be_reset_required(adapter)) {
4287                 status = be_cmd_reset_function(adapter);
4288                 if (status)
4289                         goto ctrl_clean;
4290
4291                 /* Wait for interrupts to quiesce after an FLR */
4292                 msleep(100);
4293         }
4294
4295         /* Allow interrupts for other ULPs running on NIC function */
4296         be_intr_set(adapter, true);
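        /* "Other ULPs" here are the upper-layer protocol drivers that
         * share this PCI function, e.g. the RoCE driver registered via
         * be_roce_dev_add() further below.
         */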
4297
4298         /* tell fw we're ready to fire cmds */
4299         status = be_cmd_fw_init(adapter);
4300         if (status)
4301                 goto ctrl_clean;
4302
4303         status = be_stats_init(adapter);
4304         if (status)
4305                 goto ctrl_clean;
4306
4307         status = be_get_initial_config(adapter);
4308         if (status)
4309                 goto stats_clean;
4310
4311         INIT_DELAYED_WORK(&adapter->work, be_worker);
4312         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4313         adapter->rx_fc = adapter->tx_fc = true;
4314
4315         status = be_setup(adapter);
4316         if (status)
4317                 goto stats_clean;
4318
4319         be_netdev_init(netdev);
4320         status = register_netdev(netdev);
4321         if (status != 0)
4322                 goto unsetup;
4323
4324         be_roce_dev_add(adapter);
4325
4326         schedule_delayed_work(&adapter->func_recovery_work,
4327                               msecs_to_jiffies(1000));
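        /* func_recovery_work (be_func_recovery_task, defined earlier in
         * this file) handles detection and recovery of firmware errors,
         * polling once a second. It is cancelled in be_suspend(),
         * be_shutdown() and be_eeh_err_detected() so that it cannot race
         * with teardown.
         */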
4328
4329         be_cmd_query_port_name(adapter, &port_name);
4330
4331         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4332                  func_name(adapter), mc_name(adapter), port_name);
4333
4334         return 0;
4335
4336 unsetup:
4337         be_clear(adapter);
4338 stats_clean:
4339         be_stats_cleanup(adapter);
4340 ctrl_clean:
4341         be_ctrl_cleanup(adapter);
4342 free_netdev:
4343         free_netdev(netdev);
4344         pci_set_drvdata(pdev, NULL);
4345 rel_reg:
4346         pci_release_regions(pdev);
4347 disable_dev:
4348         pci_disable_device(pdev);
4349 do_none:
4350         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4351         return status;
4352 }
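
/* Note on be_probe()'s error path: the goto ladder unwinds in reverse
 * order of acquisition (netdev registration, be_setup(), stats, ctrl,
 * netdev allocation, PCI regions, PCI device), so each label releases
 * only what was successfully set up before the failing step.
 */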
4353
4354 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4355 {
4356         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4358
4359         if (adapter->wol)
4360                 be_setup_wol(adapter, true);
4361
4362         cancel_delayed_work_sync(&adapter->func_recovery_work);
4363
4364         netif_device_detach(netdev);
4365         if (netif_running(netdev)) {
4366                 rtnl_lock();
4367                 be_close(netdev);
4368                 rtnl_unlock();
4369         }
4370         be_clear(adapter);
4371
4372         pci_save_state(pdev);
4373         pci_disable_device(pdev);
4374         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4375         return 0;
4376 }
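
/* Note on be_suspend() above: WoL is configured in firmware before the
 * queues are torn down, and the netdev is detached before be_close() so
 * the stack stops using the device while its resources are freed.
 */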
4377
4378 static int be_resume(struct pci_dev *pdev)
4379 {
4380         int status = 0;
4381         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4383
4384         netif_device_detach(netdev);
4385
4386         status = pci_enable_device(pdev);
4387         if (status)
4388                 return status;
4389
        pci_set_power_state(pdev, PCI_D0);
4391         pci_restore_state(pdev);
4392
4393         /* tell fw we're ready to fire cmds */
4394         status = be_cmd_fw_init(adapter);
4395         if (status)
4396                 return status;
4397
        status = be_setup(adapter);
        if (status)
                return status;

4399         if (netif_running(netdev)) {
4400                 rtnl_lock();
4401                 be_open(netdev);
4402                 rtnl_unlock();
4403         }
4404
4405         schedule_delayed_work(&adapter->func_recovery_work,
4406                               msecs_to_jiffies(1000));
4407         netif_device_attach(netdev);
4408
4409         if (adapter->wol)
4410                 be_setup_wol(adapter, false);
4411
4412         return 0;
4413 }
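
/* Note on be_resume() above: it mirrors be_suspend() in reverse: re-enable
 * the PCI device, return it to D0 and restore config space, re-init
 * firmware, rebuild the queues via be_setup(), reattach the netdev and
 * finally disarm WoL.
 */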
4414
4415 /*
4416  * An FLR will stop BE from DMAing any data.
4417  */
4418 static void be_shutdown(struct pci_dev *pdev)
4419 {
4420         struct be_adapter *adapter = pci_get_drvdata(pdev);
4421
4422         if (!adapter)
4423                 return;
4424
4425         cancel_delayed_work_sync(&adapter->work);
4426         cancel_delayed_work_sync(&adapter->func_recovery_work);
4427
4428         netif_device_detach(adapter->netdev);
4429
4430         be_cmd_reset_function(adapter);
4431
4432         pci_disable_device(pdev);
4433 }
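
/* Note on be_shutdown() above: the workers are cancelled and the function
 * is reset so the device stops DMAing before the system reboots or a
 * kexec'ed kernel takes over.
 */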
4434
4435 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4436                                 pci_channel_state_t state)
4437 {
4438         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4440
4441         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4442
4443         if (!adapter->eeh_error) {
4444                 adapter->eeh_error = true;
4445
4446                 cancel_delayed_work_sync(&adapter->func_recovery_work);
4447
4448                 rtnl_lock();
4449                 netif_device_detach(netdev);
4450                 if (netif_running(netdev))
4451                         be_close(netdev);
4452                 rtnl_unlock();
4453
4454                 be_clear(adapter);
4455         }
4456
4457         if (state == pci_channel_io_perm_failure)
4458                 return PCI_ERS_RESULT_DISCONNECT;
4459
4460         pci_disable_device(pdev);
4461
4462         /* The error could cause the FW to trigger a flash debug dump.
4463          * Resetting the card while flash dump is in progress
4464          * can cause it not to recover; wait for it to finish.
4465          * Wait only for first function as it is needed only once per
4466          * adapter.
4467          */
4468         if (pdev->devfn == 0)
4469                 ssleep(30);
4470
4471         return PCI_ERS_RESULT_NEED_RESET;
4472 }
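
/* Note on be_eeh_err_detected() above: EEH/AER can invoke this hook more
 * than once for the same incident. The adapter->eeh_error flag makes the
 * teardown (be_close()/be_clear()) run only on the first invocation; a
 * second pass would otherwise operate on already-freed resources and
 * crash.
 */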
4473
4474 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4475 {
4476         struct be_adapter *adapter = pci_get_drvdata(pdev);
4477         int status;
4478
4479         dev_info(&adapter->pdev->dev, "EEH reset\n");
4480
4481         status = pci_enable_device(pdev);
4482         if (status)
4483                 return PCI_ERS_RESULT_DISCONNECT;
4484
4485         pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
4487         pci_restore_state(pdev);
4488
4489         /* Check if card is ok and fw is ready */
4490         dev_info(&adapter->pdev->dev,
4491                  "Waiting for FW to be ready after EEH reset\n");
4492         status = be_fw_wait_ready(adapter);
4493         if (status)
4494                 return PCI_ERS_RESULT_DISCONNECT;
4495
4496         pci_cleanup_aer_uncorrect_error_status(pdev);
4497         be_clear_all_error(adapter);
4498         return PCI_ERS_RESULT_RECOVERED;
4499 }
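
/* Note on be_eeh_reset() above: this is the ->slot_reset hook, invoked
 * after the PCI core has reset the slot. It re-enables the device,
 * restores config space and waits for firmware readiness; returning
 * PCI_ERS_RESULT_RECOVERED tells the core to proceed to the ->resume
 * hook.
 */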
4500
4501 static void be_eeh_resume(struct pci_dev *pdev)
4502 {
4503         int status = 0;
4504         struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;
4506
4507         dev_info(&adapter->pdev->dev, "EEH resume\n");
4508
4509         pci_save_state(pdev);
4510
4511         status = be_cmd_reset_function(adapter);
4512         if (status)
4513                 goto err;
4514
4515         /* tell fw we're ready to fire cmds */
4516         status = be_cmd_fw_init(adapter);
4517         if (status)
4518                 goto err;
4519
4520         status = be_setup(adapter);
4521         if (status)
4522                 goto err;
4523
4524         if (netif_running(netdev)) {
4525                 status = be_open(netdev);
4526                 if (status)
4527                         goto err;
4528         }
4529
4530         schedule_delayed_work(&adapter->func_recovery_work,
4531                               msecs_to_jiffies(1000));
4532         netif_device_attach(netdev);
4533         return;
4534 err:
4535         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4536 }
4537
4538 static const struct pci_error_handlers be_eeh_handlers = {
4539         .error_detected = be_eeh_err_detected,
4540         .slot_reset = be_eeh_reset,
4541         .resume = be_eeh_resume,
4542 };
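
/* The PCI core drives error recovery through the hooks above in order:
 * ->error_detected to quiesce the device, ->slot_reset once the link or
 * slot has been reset, and ->resume to restart traffic.
 */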
4543
4544 static struct pci_driver be_driver = {
4545         .name = DRV_NAME,
4546         .id_table = be_dev_ids,
4547         .probe = be_probe,
4548         .remove = be_remove,
4549         .suspend = be_suspend,
4550         .resume = be_resume,
4551         .shutdown = be_shutdown,
4552         .err_handler = &be_eeh_handlers
4553 };
4554
4555 static int __init be_init_module(void)
4556 {
4557         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4558             rx_frag_size != 2048) {
                pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
4562                 rx_frag_size = 2048;
4563         }
4564
4565         return pci_register_driver(&be_driver);
4566 }
4567 module_init(be_init_module);
4568
4569 static void __exit be_exit_module(void)
4570 {
4571         pci_unregister_driver(&be_driver);
4572 }
4573 module_exit(be_exit_module);