firefly-linux-kernel-4.4.55.git: drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>
#include <linux/aer.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

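/* Both are read-only module parameters, set at load time; for example
 * (assuming the standard be2net module name):
 *   modprobe be2net num_vfs=4
 */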
static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

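/* Release the DMA-coherent ring memory backing a queue, if allocated */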
static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

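/* Allocate and zero a DMA-coherent ring of 'len' entries of 'entry_size' */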
static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                          u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

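/* Enable or disable host interrupts via the membar control register in
 * PCI config space; no-op if the requested state is already in effect.
 */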
static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_error)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                              &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                               PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

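/* Doorbell helpers: notify the adapter of RQ/TQ entries posted by the
 * driver and of EQ/CQ entries it has consumed. The wmb() in the RQ/TQ
 * variants orders ring-memory writes before the doorbell write.
 */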
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                         bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_error)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

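/* ndo_set_mac_address handler: program a new primary MAC on the interface */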
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];
        bool active_mac = true;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        /* For a BE VF, the MAC address is already activated by the PF.
         * Hence the only operation left is updating netdev->dev_addr.
         * Update it only if the user passes the same MAC that the PF
         * (hypervisor) configured for this VF.
         */
        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = be_cmd_mac_addr_query(adapter, current_mac,
                                               false, adapter->if_handle, 0);
                if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
                        goto done;
                else
                        goto err;
        }

        if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
                goto done;

        /* For Lancer check if any MAC is active.
         * If active, get its mac id.
         */
        if (lancer_chip(adapter) && !be_physfn(adapter))
                be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
                                         &pmac_id, 0);

        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                 adapter->if_handle,
                                 &adapter->pmac_id[0], 0);

        if (status)
                goto err;

        if (active_mac)
                be_cmd_pmac_del(adapter, adapter->if_handle,
                                pmac_id, 0);
done:
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

/* BE2 supports only v0 cmd */
static void *hw_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        } else {
                struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;

                return &cmd->hw_stats;
        }
}

/* BE2 supports only v0 cmd */
static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
{
        if (BE2_chip(adapter)) {
                struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        } else {
                struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);

                return &hw_stats->erx;
        }
}

static void populate_be_v0_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be_v1_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

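/* Fold a 16-bit HW counter that wraps at 65535 into a 32-bit accumulator */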
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter)) {
                populate_lancer_stats(adapter);
        } else {
                if (BE2_chip(adapter))
                        populate_be_v0_stats(adapter);
                else
                        /* for BE3 and Skyhawk */
                        populate_be_v1_stats(adapter);

                /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
                for_all_rx_queues(adapter, rxo, i) {
                        /* below erx HW counter can actually wrap around after
                         * 65535. Driver accumulates a 32-bit value
                         */
                        accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                             (u16)erx->rx_drops_no_fragments[rxo->q.id]);
                }
        }
}

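/* ndo_get_stats64 handler: aggregate per-queue SW counters and the HW
 * error counters cached in drv_stats.
 */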
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                                struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                           bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

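/* Fill a TX WRB with the fragment's DMA address and length */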
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
        wrb->rsvd0 = 0;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                     struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

static int be_vlan_tag_chk(struct be_adapter *adapter, struct sk_buff *skb)
{
        return vlan_tx_tag_present(skb) || adapter->pvid;
}

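/* Populate the TX header WRB: checksum/LSO offload flags, VLAN tag, the
 * number of WRBs and the total byte count of this transmit.
 */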
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                         struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                          bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

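/* DMA-map the skb's header and frags and post a WRB for each; returns the
 * number of bytes queued, or 0 on a mapping error after unwinding any
 * mappings already made.
 */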
static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                        struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
                                             struct sk_buff *skb)
{
        u16 vlan_tag = 0;

        skb = skb_share_check(skb, GFP_ATOMIC);
        if (unlikely(!skb))
                return skb;

        if (vlan_tx_tag_present(skb)) {
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                __vlan_put_tag(skb, vlan_tag);
                skb->vlan_tci = 0;
        }

        return skb;
}

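/* ndo_start_xmit handler */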
static netdev_tx_t be_xmit(struct sk_buff *skb,
                           struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        struct iphdr *ip = NULL;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head, eth_hdr_len;
        bool dummy_wrb, stopped = false;

        eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
                VLAN_ETH_HLEN : ETH_HLEN;

        /* HW has a bug which considers padding bytes as legal
         * and modifies the IPv4 hdr's 'tot_len' field
         */
        if (skb->len <= 60 && be_vlan_tag_chk(adapter, skb) &&
                        is_ipv4_pkt(skb)) {
                ip = (struct iphdr *)ip_hdr(skb);
                pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
        }

        /* HW has a bug wherein it will calculate CSUM for VLAN
         * pkts even though it is disabled.
         * Manually insert VLAN in pkt.
         */
        if (skb->ip_summed != CHECKSUM_PARTIAL &&
                        be_vlan_tag_chk(adapter, skb)) {
                skb = be_insert_vlan_in_pkt(adapter, skb);
                if (unlikely(!skb))
                        goto tx_drop;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                int gso_segs = skb_shinfo(skb)->gso_segs;

                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; Else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!lancer_chip(adapter) && !be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
                goto ret;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

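/* ndo_set_rx_mode handler: program the promiscuous, multicast and unicast
 * filters to match the netdev's current RX filter state.
 */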
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
            netdev_mc_count(netdev) > adapter->max_mcast_mac) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;
        bool active_mac = false;
        u32 pmac_id;
        u8 old_mac[ETH_ALEN];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
                                                  &pmac_id, vf + 1);
                if (!status && active_mac)
                        be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                        pmac_id, vf + 1);

                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                            struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                          int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                             int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        if (lancer_chip(adapter))
                status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
        else
                status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

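/* Walk the PCI bus to count this PF's VFs; returns the number of VFs
 * assigned to guests, or the total VF count, depending on vf_state.
 */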
static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (!pos)
                return 0;
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                if (dev->is_virtfn && pci_physfn(dev) == pdev) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

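/* Adaptive interrupt coalescing: derive a new EQ delay from the RX
 * packet rate, sampled roughly once per second.
 */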
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                               struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

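/* Fetch the page_info for an RX frag index; unmap the page when its last
 * user is done with it and account the consumed RQ entry.
 */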
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                memcpy(skb->data, start, curr_frag_len);
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                hdr_len = ETH_HLEN;
                memcpy(skb->data, start, hdr_len);
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

1379 /* Process the RX completion indicated by rxcp when GRO is enabled */
1380 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1381                              struct be_rx_compl_info *rxcp)
1382 {
1383         struct be_adapter *adapter = rxo->adapter;
1384         struct be_rx_page_info *page_info;
1385         struct sk_buff *skb = NULL;
1386         struct be_queue_info *rxq = &rxo->q;
1387         u16 remaining, curr_frag_len;
1388         u16 i, j;
1389
1390         skb = napi_get_frags(napi);
1391         if (!skb) {
1392                 be_rx_compl_discard(rxo, rxcp);
1393                 return;
1394         }
1395
1396         remaining = rxcp->pkt_size;
1397         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1398                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1399
1400                 curr_frag_len = min(remaining, rx_frag_size);
1401
1402                 /* Coalesce all frags from the same physical page in one slot */
1403                 if (i == 0 || page_info->page_offset == 0) {
1404                         /* First frag or Fresh page */
1405                         j++;
1406                         skb_frag_set_page(skb, j, page_info->page);
1407                         skb_shinfo(skb)->frags[j].page_offset =
1408                                                         page_info->page_offset;
1409                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1410                 } else {
1411                         put_page(page_info->page);
1412                 }
1413                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1414                 skb->truesize += rx_frag_size;
1415                 remaining -= curr_frag_len;
1416                 index_inc(&rxcp->rxq_idx, rxq->len);
1417                 memset(page_info, 0, sizeof(*page_info));
1418         }
1419         BUG_ON(j > MAX_SKB_FRAGS);
1420
1421         skb_shinfo(skb)->nr_frags = j + 1;
1422         skb->len = rxcp->pkt_size;
1423         skb->data_len = rxcp->pkt_size;
1424         skb->ip_summed = CHECKSUM_UNNECESSARY;
1425         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1426         if (adapter->netdev->features & NETIF_F_RXHASH)
1427                 skb->rxhash = rxcp->rss_hash;
1428
1429         if (rxcp->vlanf)
1430                 __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1431
1432         napi_gro_frags(napi);
1433 }
1434
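     /* Extract the fields of a v1 RX completion (used in BE3 native mode) */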
1435 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1436                                  struct be_rx_compl_info *rxcp)
1437 {
1438         rxcp->pkt_size =
1439                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1440         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1441         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1442         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1443         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1444         rxcp->ip_csum =
1445                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1446         rxcp->l4_csum =
1447                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1448         rxcp->ipv6 =
1449                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1450         rxcp->rxq_idx =
1451                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1452         rxcp->num_rcvd =
1453                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1454         rxcp->pkt_type =
1455                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1456         rxcp->rss_hash =
1457                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1458         if (rxcp->vlanf) {
1459                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1460                                           compl);
1461                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1462                                                compl);
1463         }
1464         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1465 }
1466
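     /* Extract the fields of a v0 RX completion (used when not in BE3 native mode) */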
1467 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1468                                  struct be_rx_compl_info *rxcp)
1469 {
1470         rxcp->pkt_size =
1471                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1472         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1473         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1474         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1475         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1476         rxcp->ip_csum =
1477                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1478         rxcp->l4_csum =
1479                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1480         rxcp->ipv6 =
1481                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1482         rxcp->rxq_idx =
1483                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1484         rxcp->num_rcvd =
1485                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1486         rxcp->pkt_type =
1487                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1488         rxcp->rss_hash =
1489                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1490         if (rxcp->vlanf) {
1491                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1492                                           compl);
1493                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1494                                                compl);
1495         }
1496         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1497 }
1498
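     /* Return the next valid RX completion in the CQ, or NULL if there is none */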
1499 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1500 {
1501         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1502         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1503         struct be_adapter *adapter = rxo->adapter;
1504
1505         /* For checking the valid bit, it is OK to use either definition as the
1506          * valid bit is at the same position in both v0 and v1 Rx compl */
1507         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1508                 return NULL;
1509
1510         rmb();
1511         be_dws_le_to_cpu(compl, sizeof(*compl));
1512
1513         if (adapter->be3_native)
1514                 be_parse_rx_compl_v1(compl, rxcp);
1515         else
1516                 be_parse_rx_compl_v0(compl, rxcp);
1517
1518         if (rxcp->vlanf) {
1519                 /* vlanf could be wrongly set in some cards.
1520                  * ignore if vtm is not set */
1521                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1522                         rxcp->vlanf = 0;
1523
1524                 if (!lancer_chip(adapter))
1525                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1526
1527                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1528                     !adapter->vlan_tag[rxcp->vlan_tag])
1529                         rxcp->vlanf = 0;
1530         }
1531
1532         /* As the compl has been parsed, reset it; we won't touch it again */
1533         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1534
1535         queue_tail_inc(&rxo->cq);
1536         return rxcp;
1537 }
1538
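     /* Allocate a page (compound if order > 0) big enough to hold 'size' bytes */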
1539 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1540 {
1541         u32 order = get_order(size);
1542
1543         if (order > 0)
1544                 gfp |= __GFP_COMP;
1545         return alloc_pages(gfp, order);
1546 }
1547
1548 /*
1549  * Allocate a page, split it to fragments of size rx_frag_size and post as
1550  * receive buffers to BE
1551  */
1552 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1553 {
1554         struct be_adapter *adapter = rxo->adapter;
1555         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1556         struct be_queue_info *rxq = &rxo->q;
1557         struct page *pagep = NULL;
1558         struct be_eth_rx_d *rxd;
1559         u64 page_dmaaddr = 0, frag_dmaaddr;
1560         u32 posted, page_offset = 0;
1561
1562         page_info = &rxo->page_info_tbl[rxq->head];
1563         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1564                 if (!pagep) {
1565                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1566                         if (unlikely(!pagep)) {
1567                                 rx_stats(rxo)->rx_post_fail++;
1568                                 break;
1569                         }
1570                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1571                                                     0, adapter->big_page_size,
1572                                                     DMA_FROM_DEVICE);
1573                         page_info->page_offset = 0;
1574                 } else {
1575                         get_page(pagep);
1576                         page_info->page_offset = page_offset + rx_frag_size;
1577                 }
1578                 page_offset = page_info->page_offset;
1579                 page_info->page = pagep;
1580                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1581                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1582
1583                 rxd = queue_head_node(rxq);
1584                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1585                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1586
1587                 /* Any space left in the current big page for another frag? */
1588                 if ((page_offset + rx_frag_size + rx_frag_size) >
1589                                         adapter->big_page_size) {
1590                         pagep = NULL;
1591                         page_info->last_page_user = true;
1592                 }
1593
1594                 prev_page_info = page_info;
1595                 queue_head_inc(rxq);
1596                 page_info = &rxo->page_info_tbl[rxq->head];
1597         }
1598         if (pagep)
1599                 prev_page_info->last_page_user = true;
1600
1601         if (posted) {
1602                 atomic_add(posted, &rxq->used);
1603                 be_rxq_notify(adapter, rxq->id, posted);
1604         } else if (atomic_read(&rxq->used) == 0) {
1605                 /* Let be_worker replenish when memory is available */
1606                 rxo->rx_post_starved = true;
1607         }
1608 }
1609
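     /* Return the next valid TX completion in the CQ, or NULL if there is none */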
1610 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1611 {
1612         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1613
1614         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1615                 return NULL;
1616
1617         rmb();
1618         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1619
1620         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1621
1622         queue_tail_inc(tx_cq);
1623         return txcp;
1624 }
1625
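     /* Unmap and free the skb whose WRBs end at last_index; returns the
      * number of WRBs (including the header WRB) that were freed
      */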
1626 static u16 be_tx_compl_process(struct be_adapter *adapter,
1627                 struct be_tx_obj *txo, u16 last_index)
1628 {
1629         struct be_queue_info *txq = &txo->q;
1630         struct be_eth_wrb *wrb;
1631         struct sk_buff **sent_skbs = txo->sent_skb_list;
1632         struct sk_buff *sent_skb;
1633         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1634         bool unmap_skb_hdr = true;
1635
1636         sent_skb = sent_skbs[txq->tail];
1637         BUG_ON(!sent_skb);
1638         sent_skbs[txq->tail] = NULL;
1639
1640         /* skip header wrb */
1641         queue_tail_inc(txq);
1642
1643         do {
1644                 cur_index = txq->tail;
1645                 wrb = queue_tail_node(txq);
1646                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1647                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1648                 unmap_skb_hdr = false;
1649
1650                 num_wrbs++;
1651                 queue_tail_inc(txq);
1652         } while (cur_index != last_index);
1653
1654         kfree_skb(sent_skb);
1655         return num_wrbs;
1656 }
1657
1658 /* Return the number of events in the event queue */
1659 static inline int events_get(struct be_eq_obj *eqo)
1660 {
1661         struct be_eq_entry *eqe;
1662         int num = 0;
1663
1664         do {
1665                 eqe = queue_tail_node(&eqo->q);
1666                 if (eqe->evt == 0)
1667                         break;
1668
1669                 rmb();
1670                 eqe->evt = 0;
1671                 num++;
1672                 queue_tail_inc(&eqo->q);
1673         } while (true);
1674
1675         return num;
1676 }
1677
1678 /* Leaves the EQ in disarmed state */
1679 static void be_eq_clean(struct be_eq_obj *eqo)
1680 {
1681         int num = events_get(eqo);
1682
1683         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1684 }
1685
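     /* Drain the RX CQ and free the posted RX buffers that were never used */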
1686 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1687 {
1688         struct be_rx_page_info *page_info;
1689         struct be_queue_info *rxq = &rxo->q;
1690         struct be_queue_info *rx_cq = &rxo->cq;
1691         struct be_rx_compl_info *rxcp;
1692         struct be_adapter *adapter = rxo->adapter;
1693         int flush_wait = 0;
1694         u16 tail;
1695
1696         /* Consume pending rx completions.
1697          * Wait for the flush completion (identified by zero num_rcvd)
1698          * to arrive. Notify CQ even when there are no more CQ entries
1699          * for HW to flush partially coalesced CQ entries.
1700          * In Lancer, there is no need to wait for flush compl.
1701          */
1702         for (;;) {
1703                 rxcp = be_rx_compl_get(rxo);
1704                 if (!rxcp) {
1705                         if (lancer_chip(adapter))
1706                                 break;
1707
1708                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1709                                 dev_warn(&adapter->pdev->dev,
1710                                          "did not receive flush compl\n");
1711                                 break;
1712                         }
1713                         be_cq_notify(adapter, rx_cq->id, true, 0);
1714                         mdelay(1);
1715                 } else {
1716                         be_rx_compl_discard(rxo, rxcp);
1717                         be_cq_notify(adapter, rx_cq->id, true, 1);
1718                         if (rxcp->num_rcvd == 0)
1719                                 break;
1720                 }
1721         }
1722
1723         /* After cleanup, leave the CQ in unarmed state */
1724         be_cq_notify(adapter, rx_cq->id, false, 0);
1725
1726         /* Then free posted rx buffers that were not used */
1727         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1728         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1729                 page_info = get_rx_page_info(rxo, tail);
1730                 put_page(page_info->page);
1731                 memset(page_info, 0, sizeof(*page_info));
1732         }
1733         BUG_ON(atomic_read(&rxq->used));
1734         rxq->tail = rxq->head = 0;
1735 }
1736
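     /* Reap pending TX completions; then force-free any posted skbs whose
      * completions never arrived
      */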
1737 static void be_tx_compl_clean(struct be_adapter *adapter)
1738 {
1739         struct be_tx_obj *txo;
1740         struct be_queue_info *txq;
1741         struct be_eth_tx_compl *txcp;
1742         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1743         struct sk_buff *sent_skb;
1744         bool dummy_wrb;
1745         int i, pending_txqs;
1746
1747         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1748         do {
1749                 pending_txqs = adapter->num_tx_qs;
1750
1751                 for_all_tx_queues(adapter, txo, i) {
1752                         txq = &txo->q;
1753                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1754                                 end_idx =
1755                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1756                                                       wrb_index, txcp);
1757                                 num_wrbs += be_tx_compl_process(adapter, txo,
1758                                                                 end_idx);
1759                                 cmpl++;
1760                         }
1761                         if (cmpl) {
1762                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1763                                 atomic_sub(num_wrbs, &txq->used);
1764                                 cmpl = 0;
1765                                 num_wrbs = 0;
1766                         }
1767                         if (atomic_read(&txq->used) == 0)
1768                                 pending_txqs--;
1769                 }
1770
1771                 if (pending_txqs == 0 || ++timeo > 200)
1772                         break;
1773
1774                 mdelay(1);
1775         } while (true);
1776
1777         for_all_tx_queues(adapter, txo, i) {
1778                 txq = &txo->q;
1779                 if (atomic_read(&txq->used))
1780                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1781                                 atomic_read(&txq->used));
1782
1783                 /* free posted tx for which compls will never arrive */
1784                 while (atomic_read(&txq->used)) {
1785                         sent_skb = txo->sent_skb_list[txq->tail];
1786                         end_idx = txq->tail;
1787                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1788                                                    &dummy_wrb);
1789                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1790                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1791                         atomic_sub(num_wrbs, &txq->used);
1792                 }
1793         }
1794 }
1795
1796 static void be_evt_queues_destroy(struct be_adapter *adapter)
1797 {
1798         struct be_eq_obj *eqo;
1799         int i;
1800
1801         for_all_evt_queues(adapter, eqo, i) {
1802                 if (eqo->q.created) {
1803                         be_eq_clean(eqo);
1804                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1805                 }
1806                 be_queue_free(adapter, &eqo->q);
1807         }
1808 }
1809
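     /* Allocate and create one event queue per interrupt vector */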
1810 static int be_evt_queues_create(struct be_adapter *adapter)
1811 {
1812         struct be_queue_info *eq;
1813         struct be_eq_obj *eqo;
1814         int i, rc;
1815
1816         adapter->num_evt_qs = num_irqs(adapter);
1817
1818         for_all_evt_queues(adapter, eqo, i) {
1819                 eqo->adapter = adapter;
1820                 eqo->tx_budget = BE_TX_BUDGET;
1821                 eqo->idx = i;
1822                 eqo->max_eqd = BE_MAX_EQD;
1823                 eqo->enable_aic = true;
1824
1825                 eq = &eqo->q;
1826                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1827                                         sizeof(struct be_eq_entry));
1828                 if (rc)
1829                         return rc;
1830
1831                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1832                 if (rc)
1833                         return rc;
1834         }
1835         return 0;
1836 }
1837
1838 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1839 {
1840         struct be_queue_info *q;
1841
1842         q = &adapter->mcc_obj.q;
1843         if (q->created)
1844                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1845         be_queue_free(adapter, q);
1846
1847         q = &adapter->mcc_obj.cq;
1848         if (q->created)
1849                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1850         be_queue_free(adapter, q);
1851 }
1852
1853 /* Must be called only after TX qs are created as MCC shares TX EQ */
1854 static int be_mcc_queues_create(struct be_adapter *adapter)
1855 {
1856         struct be_queue_info *q, *cq;
1857
1858         cq = &adapter->mcc_obj.cq;
1859         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1860                         sizeof(struct be_mcc_compl)))
1861                 goto err;
1862
1863         /* Use the default EQ for MCC completions */
1864         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1865                 goto mcc_cq_free;
1866
1867         q = &adapter->mcc_obj.q;
1868         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1869                 goto mcc_cq_destroy;
1870
1871         if (be_cmd_mccq_create(adapter, q, cq))
1872                 goto mcc_q_free;
1873
1874         return 0;
1875
1876 mcc_q_free:
1877         be_queue_free(adapter, q);
1878 mcc_cq_destroy:
1879         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1880 mcc_cq_free:
1881         be_queue_free(adapter, cq);
1882 err:
1883         return -1;
1884 }
1885
1886 static void be_tx_queues_destroy(struct be_adapter *adapter)
1887 {
1888         struct be_queue_info *q;
1889         struct be_tx_obj *txo;
1890         u8 i;
1891
1892         for_all_tx_queues(adapter, txo, i) {
1893                 q = &txo->q;
1894                 if (q->created)
1895                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1896                 be_queue_free(adapter, q);
1897
1898                 q = &txo->cq;
1899                 if (q->created)
1900                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1901                 be_queue_free(adapter, q);
1902         }
1903 }
1904
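     /* Use a single TXQ on BE2, on non-Lancer VFs, in multi-channel mode, or
      * when SR-IOV is wanted on a non-Lancer chip; else use all the TXQs the
      * function supports
      */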
1905 static int be_num_txqs_want(struct be_adapter *adapter)
1906 {
1907         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
1908             be_is_mc(adapter) ||
1909             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
1910             BE2_chip(adapter))
1911                 return 1;
1912         else
1913                 return adapter->max_tx_queues;
1914 }
1915
1916 static int be_tx_cqs_create(struct be_adapter *adapter)
1917 {
1918         struct be_queue_info *cq, *eq;
1919         int status;
1920         struct be_tx_obj *txo;
1921         u8 i;
1922
1923         adapter->num_tx_qs = be_num_txqs_want(adapter);
1924         if (adapter->num_tx_qs != MAX_TX_QS) {
1925                 rtnl_lock();
1926                 netif_set_real_num_tx_queues(adapter->netdev,
1927                         adapter->num_tx_qs);
1928                 rtnl_unlock();
1929         }
1930
1931         for_all_tx_queues(adapter, txo, i) {
1932                 cq = &txo->cq;
1933                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1934                                         sizeof(struct be_eth_tx_compl));
1935                 if (status)
1936                         return status;
1937
1938                 /* If num_evt_qs is less than num_tx_qs, then more than
1939                  * one TXQ shares an EQ
1940                  */
1941                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1942                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1943                 if (status)
1944                         return status;
1945         }
1946         return 0;
1947 }
1948
1949 static int be_tx_qs_create(struct be_adapter *adapter)
1950 {
1951         struct be_tx_obj *txo;
1952         int i, status;
1953
1954         for_all_tx_queues(adapter, txo, i) {
1955                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1956                                         sizeof(struct be_eth_wrb));
1957                 if (status)
1958                         return status;
1959
1960                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1961                 if (status)
1962                         return status;
1963         }
1964
1965         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
1966                  adapter->num_tx_qs);
1967         return 0;
1968 }
1969
1970 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1971 {
1972         struct be_queue_info *q;
1973         struct be_rx_obj *rxo;
1974         int i;
1975
1976         for_all_rx_queues(adapter, rxo, i) {
1977                 q = &rxo->cq;
1978                 if (q->created)
1979                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1980                 be_queue_free(adapter, q);
1981         }
1982 }
1983
1984 static int be_rx_cqs_create(struct be_adapter *adapter)
1985 {
1986         struct be_queue_info *eq, *cq;
1987         struct be_rx_obj *rxo;
1988         int rc, i;
1989
1990         /* We'll create as many RSS rings as there are IRQs.
1991          * But when there's only one IRQ, there's no use creating RSS rings.
1992          */
1993         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1994                                 num_irqs(adapter) + 1 : 1;
1995         if (adapter->num_rx_qs != MAX_RX_QS) {
1996                 rtnl_lock();
1997                 netif_set_real_num_rx_queues(adapter->netdev,
1998                                              adapter->num_rx_qs);
1999                 rtnl_unlock();
2000         }
2001
2002         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2003         for_all_rx_queues(adapter, rxo, i) {
2004                 rxo->adapter = adapter;
2005                 cq = &rxo->cq;
2006                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2007                                 sizeof(struct be_eth_rx_compl));
2008                 if (rc)
2009                         return rc;
2010
2011                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2012                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2013                 if (rc)
2014                         return rc;
2015         }
2016
2017         dev_info(&adapter->pdev->dev,
2018                  "created %d RSS queue(s) and 1 default RX queue\n",
2019                  adapter->num_rx_qs - 1);
2020         return 0;
2021 }
2022
2023 static irqreturn_t be_intx(int irq, void *dev)
2024 {
2025         struct be_eq_obj *eqo = dev;
2026         struct be_adapter *adapter = eqo->adapter;
2027         int num_evts = 0;
2028
2029         /* On Lancer, clear-intr bit of the EQ DB does not work.
2030          * INTx is de-asserted only on notifying num evts.
2031          */
2032         if (lancer_chip(adapter))
2033                 num_evts = events_get(eqo);
2034
2035         /* The EQ-notify may not de-assert INTx right away, causing
2036          * the ISR to be invoked again. So, return HANDLED even when
2037          * num_evts is zero.
2038          */
2039         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2040         napi_schedule(&eqo->napi);
2041         return IRQ_HANDLED;
2042 }
2043
2044 static irqreturn_t be_msix(int irq, void *dev)
2045 {
2046         struct be_eq_obj *eqo = dev;
2047
2048         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2049         napi_schedule(&eqo->napi);
2050         return IRQ_HANDLED;
2051 }
2052
2053 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2054 {
2055         return rxcp->tcpf && !rxcp->err;
2056 }
2057
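     /* Process up to 'budget' RX completions; replenish the RXQ if it runs low */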
2058 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2059                         int budget)
2060 {
2061         struct be_adapter *adapter = rxo->adapter;
2062         struct be_queue_info *rx_cq = &rxo->cq;
2063         struct be_rx_compl_info *rxcp;
2064         u32 work_done;
2065
2066         for (work_done = 0; work_done < budget; work_done++) {
2067                 rxcp = be_rx_compl_get(rxo);
2068                 if (!rxcp)
2069                         break;
2070
2071                 /* Is it a flush compl that has no data? */
2072                 if (unlikely(rxcp->num_rcvd == 0))
2073                         goto loop_continue;
2074
2075                 /* Discard compl with partial DMA Lancer B0 */
2076                 if (unlikely(!rxcp->pkt_size)) {
2077                         be_rx_compl_discard(rxo, rxcp);
2078                         goto loop_continue;
2079                 }
2080
2081                 /* On BE, drop pkts that arrive due to imperfect filtering in
2082                  * promiscuous mode on some SKUs
2083                  */
2084                 if (unlikely(rxcp->port != adapter->port_num &&
2085                                 !lancer_chip(adapter))) {
2086                         be_rx_compl_discard(rxo, rxcp);
2087                         goto loop_continue;
2088                 }
2089
2090                 if (do_gro(rxcp))
2091                         be_rx_compl_process_gro(rxo, napi, rxcp);
2092                 else
2093                         be_rx_compl_process(rxo, rxcp);
2094 loop_continue:
2095                 be_rx_stats_update(rxo, rxcp);
2096         }
2097
2098         if (work_done) {
2099                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2100
2101                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2102                         be_post_rx_frags(rxo, GFP_ATOMIC);
2103         }
2104
2105         return work_done;
2106 }
2107
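     /* Process up to 'budget' TX completions and free the completed wrbs;
      * returns true if the TX CQ was fully drained within the budget
      */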
2108 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2109                           int budget, int idx)
2110 {
2111         struct be_eth_tx_compl *txcp;
2112         int num_wrbs = 0, work_done;
2113
2114         for (work_done = 0; work_done < budget; work_done++) {
2115                 txcp = be_tx_compl_get(&txo->cq);
2116                 if (!txcp)
2117                         break;
2118                 num_wrbs += be_tx_compl_process(adapter, txo,
2119                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2120                                         wrb_index, txcp));
2121         }
2122
2123         if (work_done) {
2124                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2125                 atomic_sub(num_wrbs, &txo->q.used);
2126
2127                 /* As Tx wrbs have been freed up, wake up netdev queue
2128                  * if it was stopped due to lack of tx wrbs. */
2129                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2130                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2131                         netif_wake_subqueue(adapter->netdev, idx);
2132                 }
2133
2134                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2135                 tx_stats(txo)->tx_compl += work_done;
2136                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2137         }
2138         return (work_done < budget); /* Done */
2139 }
2140
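     /* NAPI poll handler: services all the TXQs and RXQs mapped to this EQ,
      * and the MCC queue if this is the MCC EQ
      */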
2141 int be_poll(struct napi_struct *napi, int budget)
2142 {
2143         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2144         struct be_adapter *adapter = eqo->adapter;
2145         int max_work = 0, work, i, num_evts;
2146         bool tx_done;
2147
2148         num_evts = events_get(eqo);
2149
2150         /* Process all TXQs serviced by this EQ */
2151         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2152                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2153                                         eqo->tx_budget, i);
2154                 if (!tx_done)
2155                         max_work = budget;
2156         }
2157
2158         /* This loop iterates twice for EQ0, on which completions of the
2159          * last RXQ (the default one) are also processed.
2160          * For other EQs the loop iterates only once.
2161          */
2162         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2163                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2164                 max_work = max(work, max_work);
2165         }
2166
2167         if (is_mcc_eqo(eqo))
2168                 be_process_mcc(adapter);
2169
2170         if (max_work < budget) {
2171                 napi_complete(napi);
2172                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2173         } else {
2174                 /* As we'll continue in polling mode, count and clear events */
2175                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2176         }
2177         return max_work;
2178 }
2179
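     /* Check the SLIPORT (Lancer) or UE (BE) error registers and log any errors */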
2180 void be_detect_error(struct be_adapter *adapter)
2181 {
2182         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2183         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2184         u32 i;
2185
2186         if (be_hw_error(adapter))
2187                 return;
2188
2189         if (lancer_chip(adapter)) {
2190                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2191                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2192                         sliport_err1 = ioread32(adapter->db +
2193                                         SLIPORT_ERROR1_OFFSET);
2194                         sliport_err2 = ioread32(adapter->db +
2195                                         SLIPORT_ERROR2_OFFSET);
2196                 }
2197         } else {
2198                 pci_read_config_dword(adapter->pdev,
2199                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2200                 pci_read_config_dword(adapter->pdev,
2201                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2202                 pci_read_config_dword(adapter->pdev,
2203                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2204                 pci_read_config_dword(adapter->pdev,
2205                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2206
2207                 ue_lo = (ue_lo & ~ue_lo_mask);
2208                 ue_hi = (ue_hi & ~ue_hi_mask);
2209         }
2210
2211         /* On certain platforms BE hardware can indicate spurious UEs.
2212          * Allow the h/w to stop working completely in case of a real UE.
2213          * Hence, hw_error is not set for UE detection.
2214          */
2215         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2216                 adapter->hw_error = true;
2217                 dev_err(&adapter->pdev->dev,
2218                         "Error detected in the card\n");
2219         }
2220
2221         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2222                 dev_err(&adapter->pdev->dev,
2223                         "ERR: sliport status 0x%x\n", sliport_status);
2224                 dev_err(&adapter->pdev->dev,
2225                         "ERR: sliport error1 0x%x\n", sliport_err1);
2226                 dev_err(&adapter->pdev->dev,
2227                         "ERR: sliport error2 0x%x\n", sliport_err2);
2228         }
2229
2230         if (ue_lo) {
2231                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2232                         if (ue_lo & 1)
2233                                 dev_err(&adapter->pdev->dev,
2234                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2235                 }
2236         }
2237
2238         if (ue_hi) {
2239                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2240                         if (ue_hi & 1)
2241                                 dev_err(&adapter->pdev->dev,
2242                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2243                 }
2244         }
2246 }
2247
2248 static void be_msix_disable(struct be_adapter *adapter)
2249 {
2250         if (msix_enabled(adapter)) {
2251                 pci_disable_msix(adapter->pdev);
2252                 adapter->num_msix_vec = 0;
2253         }
2254 }
2255
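     /* RSS rings are used only when the function has the RSS capability; on
      * non-Lancer chips, also only on a PF with SR-IOV not wanted
      */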
2256 static uint be_num_rss_want(struct be_adapter *adapter)
2257 {
2258         u32 num = 0;
2259
2260         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2261             (lancer_chip(adapter) ||
2262              (!sriov_want(adapter) && be_physfn(adapter)))) {
2263                 num = adapter->max_rss_queues;
2264                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2265         }
2266         return num;
2267 }
2268
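     /* Enable as many MSI-X vectors as RSS (and RoCE, when supported)
      * processing needs; fall back to fewer vectors if the request fails
      */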
2269 static void be_msix_enable(struct be_adapter *adapter)
2270 {
2271 #define BE_MIN_MSIX_VECTORS             1
2272         int i, status, num_vec, num_roce_vec = 0;
2273         struct device *dev = &adapter->pdev->dev;
2274
2275         /* If RSS queues are not used, need a vec for default RX Q */
2276         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2277         if (be_roce_supported(adapter)) {
2278                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2279                                         (num_online_cpus() + 1));
2280                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2281                 num_vec += num_roce_vec;
2282                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2283         }
2284         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2285
2286         for (i = 0; i < num_vec; i++)
2287                 adapter->msix_entries[i].entry = i;
2288
2289         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2290         if (status == 0) {
2291                 goto done;
2292         } else if (status >= BE_MIN_MSIX_VECTORS) {
2293                 num_vec = status;
2294                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2295                                 num_vec) == 0)
2296                         goto done;
2297         }
2298
2299         dev_warn(dev, "MSIx enable failed\n");
2300         return;
2301 done:
2302         if (be_roce_supported(adapter)) {
2303                 if (num_vec > num_roce_vec) {
2304                         adapter->num_msix_vec = num_vec - num_roce_vec;
2305                         adapter->num_msix_roce_vec =
2306                                 num_vec - adapter->num_msix_vec;
2307                 } else {
2308                         adapter->num_msix_vec = num_vec;
2309                         adapter->num_msix_roce_vec = 0;
2310                 }
2311         } else
2312                 adapter->num_msix_vec = num_vec;
2313         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2315 }
2316
2317 static inline int be_msix_vec_get(struct be_adapter *adapter,
2318                                 struct be_eq_obj *eqo)
2319 {
2320         return adapter->msix_entries[eqo->idx].vector;
2321 }
2322
2323 static int be_msix_register(struct be_adapter *adapter)
2324 {
2325         struct net_device *netdev = adapter->netdev;
2326         struct be_eq_obj *eqo;
2327         int status, i, vec;
2328
2329         for_all_evt_queues(adapter, eqo, i) {
2330                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2331                 vec = be_msix_vec_get(adapter, eqo);
2332                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2333                 if (status)
2334                         goto err_msix;
2335         }
2336
2337         return 0;
2338 err_msix:
2339         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2340                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2341         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2342                 status);
2343         be_msix_disable(adapter);
2344         return status;
2345 }
2346
2347 static int be_irq_register(struct be_adapter *adapter)
2348 {
2349         struct net_device *netdev = adapter->netdev;
2350         int status;
2351
2352         if (msix_enabled(adapter)) {
2353                 status = be_msix_register(adapter);
2354                 if (status == 0)
2355                         goto done;
2356                 /* INTx is not supported for VF */
2357                 if (!be_physfn(adapter))
2358                         return status;
2359         }
2360
2361         /* INTx: only the first EQ is used */
2362         netdev->irq = adapter->pdev->irq;
2363         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2364                              &adapter->eq_obj[0]);
2365         if (status) {
2366                 dev_err(&adapter->pdev->dev,
2367                         "INTx request IRQ failed - err %d\n", status);
2368                 return status;
2369         }
2370 done:
2371         adapter->isr_registered = true;
2372         return 0;
2373 }
2374
2375 static void be_irq_unregister(struct be_adapter *adapter)
2376 {
2377         struct net_device *netdev = adapter->netdev;
2378         struct be_eq_obj *eqo;
2379         int i;
2380
2381         if (!adapter->isr_registered)
2382                 return;
2383
2384         /* INTx */
2385         if (!msix_enabled(adapter)) {
2386                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2387                 goto done;
2388         }
2389
2390         /* MSIx */
2391         for_all_evt_queues(adapter, eqo, i)
2392                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2393
2394 done:
2395         adapter->isr_registered = false;
2396 }
2397
2398 static void be_rx_qs_destroy(struct be_adapter *adapter)
2399 {
2400         struct be_queue_info *q;
2401         struct be_rx_obj *rxo;
2402         int i;
2403
2404         for_all_rx_queues(adapter, rxo, i) {
2405                 q = &rxo->q;
2406                 if (q->created) {
2407                         be_cmd_rxq_destroy(adapter, q);
2408                         /* After the rxq is invalidated, wait for a grace time
2409                          * of 1ms for all dma to end and the flush compl to
2410                          * arrive
2411                          */
2412                         mdelay(1);
2413                         be_rx_cq_clean(rxo);
2414                 }
2415                 be_queue_free(adapter, q);
2416         }
2417 }
2418
2419 static int be_close(struct net_device *netdev)
2420 {
2421         struct be_adapter *adapter = netdev_priv(netdev);
2422         struct be_eq_obj *eqo;
2423         int i;
2424
2425         be_roce_dev_close(adapter);
2426
2427         if (!lancer_chip(adapter))
2428                 be_intr_set(adapter, false);
2429
2430         for_all_evt_queues(adapter, eqo, i)
2431                 napi_disable(&eqo->napi);
2432
2433         be_async_mcc_disable(adapter);
2434
2435         /* Wait for all pending tx completions to arrive so that
2436          * all tx skbs are freed.
2437          */
2438         be_tx_compl_clean(adapter);
2439
2440         be_rx_qs_destroy(adapter);
2441
2442         for_all_evt_queues(adapter, eqo, i) {
2443                 if (msix_enabled(adapter))
2444                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2445                 else
2446                         synchronize_irq(netdev->irq);
2447                 be_eq_clean(eqo);
2448         }
2449
2450         be_irq_unregister(adapter);
2451
2452         return 0;
2453 }
2454
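     /* Create the RX rings (default RXQ first), program the RSS table and
      * post the initial RX buffers
      */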
2455 static int be_rx_qs_create(struct be_adapter *adapter)
2456 {
2457         struct be_rx_obj *rxo;
2458         int rc, i, j;
2459         u8 rsstable[128];
2460
2461         for_all_rx_queues(adapter, rxo, i) {
2462                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2463                                     sizeof(struct be_eth_rx_d));
2464                 if (rc)
2465                         return rc;
2466         }
2467
2468         /* The FW would like the default RXQ to be created first */
2469         rxo = default_rxo(adapter);
2470         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2471                                adapter->if_handle, false, &rxo->rss_id);
2472         if (rc)
2473                 return rc;
2474
2475         for_all_rss_queues(adapter, rxo, i) {
2476                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2477                                        rx_frag_size, adapter->if_handle,
2478                                        true, &rxo->rss_id);
2479                 if (rc)
2480                         return rc;
2481         }
2482
2483         if (be_multi_rxq(adapter)) {
2484                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2485                         for_all_rss_queues(adapter, rxo, i) {
2486                                 if ((j + i) >= 128)
2487                                         break;
2488                                 rsstable[j + i] = rxo->rss_id;
2489                         }
2490                 }
2491                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2492                 if (rc)
2493                         return rc;
2494         }
2495
2496         /* First time posting */
2497         for_all_rx_queues(adapter, rxo, i)
2498                 be_post_rx_frags(rxo, GFP_KERNEL);
2499         return 0;
2500 }
2501
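     /* Bring the interface up: create RX rings, register IRQs and enable
      * interrupts and NAPI
      */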
2502 static int be_open(struct net_device *netdev)
2503 {
2504         struct be_adapter *adapter = netdev_priv(netdev);
2505         struct be_eq_obj *eqo;
2506         struct be_rx_obj *rxo;
2507         struct be_tx_obj *txo;
2508         u8 link_status;
2509         int status, i;
2510
2511         status = be_rx_qs_create(adapter);
2512         if (status)
2513                 goto err;
2514
2515         be_irq_register(adapter);
2516
2517         if (!lancer_chip(adapter))
2518                 be_intr_set(adapter, true);
2519
2520         for_all_rx_queues(adapter, rxo, i)
2521                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2522
2523         for_all_tx_queues(adapter, txo, i)
2524                 be_cq_notify(adapter, txo->cq.id, true, 0);
2525
2526         be_async_mcc_enable(adapter);
2527
2528         for_all_evt_queues(adapter, eqo, i) {
2529                 napi_enable(&eqo->napi);
2530                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2531         }
2532
2533         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2534         if (!status)
2535                 be_link_status_update(adapter, link_status);
2536
2537         be_roce_dev_open(adapter);
2538         return 0;
2539 err:
2540         be_close(adapter->netdev);
2541         return -EIO;
2542 }
2543
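     /* Enable/disable Wake-on-LAN: program the magic-packet filter in the FW
      * and set the PCI wake state accordingly
      */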
2544 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2545 {
2546         struct be_dma_mem cmd;
2547         int status = 0;
2548         u8 mac[ETH_ALEN];
2549
2550         memset(mac, 0, ETH_ALEN);
2551
2552         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2553         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2554                                     GFP_KERNEL);
2555         if (!cmd.va)
2556                 return -1;
2557         memset(cmd.va, 0, cmd.size);
2558
2559         if (enable) {
2560                 status = pci_write_config_dword(adapter->pdev,
2561                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2562                 if (status) {
2563                         dev_err(&adapter->pdev->dev,
2564                                 "Could not enable Wake-on-lan\n");
2565                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2566                                           cmd.dma);
2567                         return status;
2568                 }
2569                 status = be_cmd_enable_magic_wol(adapter,
2570                                 adapter->netdev->dev_addr, &cmd);
2571                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2572                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2573         } else {
2574                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2575                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2576                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2577         }
2578
2579         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2580         return status;
2581 }
2582
2583 /*
2584  * Generate a seed MAC address from the PF MAC Address using jhash.
2585  * MAC addresses for VFs are assigned incrementally, starting from the seed.
2586  * These addresses are programmed in the ASIC by the PF and the VF driver
2587  * queries for the MAC address during its probe.
2588  */
2589 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2590 {
2591         u32 vf;
2592         int status = 0;
2593         u8 mac[ETH_ALEN];
2594         struct be_vf_cfg *vf_cfg;
2595
2596         be_vf_eth_addr_generate(adapter, mac);
2597
2598         for_all_vfs(adapter, vf_cfg, vf) {
2599                 if (lancer_chip(adapter)) {
2600                         status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2601                 } else {
2602                         status = be_cmd_pmac_add(adapter, mac,
2603                                                  vf_cfg->if_handle,
2604                                                  &vf_cfg->pmac_id, vf + 1);
2605                 }
2606
2607                 if (status)
2608                         dev_err(&adapter->pdev->dev,
2609                                 "MAC address assignment failed for VF %d\n", vf);
2610                 else
2611                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2612
2613                 mac[5] += 1;
2614         }
2615         return status;
2616 }
2617
2618 static void be_vf_clear(struct be_adapter *adapter)
2619 {
2620         struct be_vf_cfg *vf_cfg;
2621         u32 vf;
2622
2623         if (be_find_vfs(adapter, ASSIGNED)) {
2624                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2625                 goto done;
2626         }
2627
2628         for_all_vfs(adapter, vf_cfg, vf) {
2629                 if (lancer_chip(adapter))
2630                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2631                 else
2632                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2633                                         vf_cfg->pmac_id, vf + 1);
2634
2635                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2636         }
2637         pci_disable_sriov(adapter->pdev);
2638 done:
2639         kfree(adapter->vf_cfg);
2640         adapter->num_vfs = 0;
2641 }
2642
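     /* Undo be_setup(): cancel the worker and tear down VFs, MAC entries,
      * the interface, all queues and MSI-X
      */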
2643 static int be_clear(struct be_adapter *adapter)
2644 {
2645         int i = 1;
2646
2647         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2648                 cancel_delayed_work_sync(&adapter->work);
2649                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2650         }
2651
2652         if (sriov_enabled(adapter))
2653                 be_vf_clear(adapter);
2654
2655         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2656                 be_cmd_pmac_del(adapter, adapter->if_handle,
2657                         adapter->pmac_id[i], 0);
2658
2659         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2660
2661         be_mcc_queues_destroy(adapter);
2662         be_rx_cqs_destroy(adapter);
2663         be_tx_queues_destroy(adapter);
2664         be_evt_queues_destroy(adapter);
2665
2666         kfree(adapter->pmac_id);
2667         adapter->pmac_id = NULL;
2668
2669         be_msix_disable(adapter);
2670         return 0;
2671 }
2672
2673 static void be_get_vf_if_cap_flags(struct be_adapter *adapter,
2674                                    u32 *cap_flags, u8 domain)
2675 {
2676         bool profile_present = false;
2677         int status;
2678
2679         if (lancer_chip(adapter)) {
2680                 status = be_cmd_get_profile_config(adapter, cap_flags, domain);
2681                 if (!status)
2682                         profile_present = true;
2683         }
2684
2685         if (!profile_present)
2686                 *cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2687                              BE_IF_FLAGS_MULTICAST;
2688 }
2689
2690 static int be_vf_setup_init(struct be_adapter *adapter)
2691 {
2692         struct be_vf_cfg *vf_cfg;
2693         int vf;
2694
2695         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2696                                   GFP_KERNEL);
2697         if (!adapter->vf_cfg)
2698                 return -ENOMEM;
2699
2700         for_all_vfs(adapter, vf_cfg, vf) {
2701                 vf_cfg->if_handle = -1;
2702                 vf_cfg->pmac_id = -1;
2703         }
2704         return 0;
2705 }
2706
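     /* Enable SR-IOV and, for each VF, create an interface, assign a MAC,
      * set QoS and query the default vlan
      */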
2707 static int be_vf_setup(struct be_adapter *adapter)
2708 {
2709         struct be_vf_cfg *vf_cfg;
2710         struct device *dev = &adapter->pdev->dev;
2711         u32 cap_flags, en_flags, vf;
2712         u16 def_vlan, lnk_speed;
2713         int status, enabled_vfs;
2714
2715         enabled_vfs = be_find_vfs(adapter, ENABLED);
2716         if (enabled_vfs) {
2717                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2718                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2719                 return 0;
2720         }
2721
2722         if (num_vfs > adapter->dev_num_vfs) {
2723                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2724                          adapter->dev_num_vfs, num_vfs);
2725                 num_vfs = adapter->dev_num_vfs;
2726         }
2727
2728         status = pci_enable_sriov(adapter->pdev, num_vfs);
2729         if (!status) {
2730                 adapter->num_vfs = num_vfs;
2731         } else {
2732                 /* Platform doesn't support SRIOV though device supports it */
2733                 dev_warn(dev, "SRIOV enable failed\n");
2734                 return 0;
2735         }
2736
2737         status = be_vf_setup_init(adapter);
2738         if (status)
2739                 goto err;
2740
2741         for_all_vfs(adapter, vf_cfg, vf) {
2742                 be_get_vf_if_cap_flags(adapter, &cap_flags, vf + 1);
2743
2744                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2745                                         BE_IF_FLAGS_BROADCAST |
2746                                         BE_IF_FLAGS_MULTICAST);
2747
2748                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2749                                           &vf_cfg->if_handle, vf + 1);
2750                 if (status)
2751                         goto err;
2752         }
2753
2754         if (!enabled_vfs) {
2755                 status = be_vf_eth_addr_config(adapter);
2756                 if (status)
2757                         goto err;
2758         }
2759
2760         for_all_vfs(adapter, vf_cfg, vf) {
2761                 lnk_speed = 1000;
2762                 status = be_cmd_set_qos(adapter, lnk_speed, vf + 1);
2763                 if (status)
2764                         goto err;
2765                 vf_cfg->tx_rate = lnk_speed * 10;
2766
2767                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2768                                 vf + 1, vf_cfg->if_handle);
2769                 if (status)
2770                         goto err;
2771                 vf_cfg->def_vid = def_vlan;
2772
2773                 be_cmd_enable_vf(adapter, vf + 1);
2774         }
2775         return 0;
2776 err:
2777         return status;
2778 }
2779
2780 static void be_setup_init(struct be_adapter *adapter)
2781 {
2782         adapter->vlan_prio_bmap = 0xff;
2783         adapter->phy.link_speed = -1;
2784         adapter->if_handle = -1;
2785         adapter->be3_native = false;
2786         adapter->promiscuous = false;
2787         if (be_physfn(adapter))
2788                 adapter->cmd_privileges = MAX_PRIVILEGES;
2789         else
2790                 adapter->cmd_privileges = MIN_PRIVILEGES;
2791 }
2792
2793 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2794                            bool *active_mac, u32 *pmac_id)
2795 {
2796         int status = 0;
2797
2798         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2799                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2800                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2801                         *active_mac = true;
2802                 else
2803                         *active_mac = false;
2804
2805                 return status;
2806         }
2807
2808         if (lancer_chip(adapter)) {
2809                 status = be_cmd_get_mac_from_list(adapter, mac,
2810                                                   active_mac, pmac_id, 0);
2811                 if (*active_mac) {
2812                         status = be_cmd_mac_addr_query(adapter, mac, false,
2813                                                        if_handle, *pmac_id);
2814                 }
2815         } else if (be_physfn(adapter)) {
2816                 /* For BE3, for PF get permanent MAC */
2817                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2818                 *active_mac = false;
2819         } else {
2820                 /* For BE3, for VF get soft MAC assigned by PF */
2821                 status = be_cmd_mac_addr_query(adapter, mac, false,
2822                                                if_handle, 0);
2823                 *active_mac = true;
2824         }
2825         return status;
2826 }
2827
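     /* Query the resource limits from the FW profile on Lancer; otherwise
      * use chip-specific defaults
      */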
2828 static void be_get_resources(struct be_adapter *adapter)
2829 {
2830         int status;
2831         bool profile_present = false;
2832
2833         if (lancer_chip(adapter)) {
2834                 status = be_cmd_get_func_config(adapter);
2835
2836                 if (!status)
2837                         profile_present = true;
2838         }
2839
2840         if (profile_present) {
2841                 /* Sanity fixes for Lancer */
2842                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
2843                                               BE_UC_PMAC_COUNT);
2844                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
2845                                            BE_NUM_VLANS_SUPPORTED);
2846                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
2847                                                BE_MAX_MC);
2848                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
2849                                                MAX_TX_QS);
2850                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
2851                                                 BE3_MAX_RSS_QS);
2852                 adapter->max_event_queues = min_t(u16,
2853                                                   adapter->max_event_queues,
2854                                                   BE3_MAX_RSS_QS);
2855
2856                 if (adapter->max_rss_queues &&
2857                     adapter->max_rss_queues == adapter->max_rx_queues)
2858                         adapter->max_rss_queues -= 1;
2859
2860                 if (adapter->max_event_queues < adapter->max_rss_queues)
2861                         adapter->max_rss_queues = adapter->max_event_queues;
2862
2863         } else {
2864                 if (be_physfn(adapter))
2865                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
2866                 else
2867                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
2868
2869                 if (adapter->function_mode & FLEX10_MODE)
2870                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
2871                 else
2872                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
2873
2874                 adapter->max_mcast_mac = BE_MAX_MC;
2875                 adapter->max_tx_queues = MAX_TX_QS;
2876                 adapter->max_rss_queues = (adapter->be3_native) ?
2877                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2878                 adapter->max_event_queues = BE3_MAX_RSS_QS;
2879
2880                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
2881                                         BE_IF_FLAGS_BROADCAST |
2882                                         BE_IF_FLAGS_MULTICAST |
2883                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
2884                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
2885                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
2886                                         BE_IF_FLAGS_PROMISCUOUS;
2887
2888                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2889                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
2890         }
2891 }
2892
2893 /* Routine to query per function resource limits */
2894 static int be_get_config(struct be_adapter *adapter)
2895 {
2896         int pos, status;
2897         u16 dev_num_vfs;
2898
2899         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
2900                                      &adapter->function_mode,
2901                                      &adapter->function_caps);
2902         if (status)
2903                 goto err;
2904
2905         be_get_resources(adapter);
2906
2907         /* primary mac needs 1 pmac entry */
2908         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
2909                                    sizeof(u32), GFP_KERNEL);
2910         if (!adapter->pmac_id) {
2911                 status = -ENOMEM;
2912                 goto err;
2913         }
2914
2915         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2916         if (pos) {
2917                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2918                                      &dev_num_vfs);
2919                 if (!lancer_chip(adapter))
2920                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
2921                 adapter->dev_num_vfs = dev_num_vfs;
2922         }
2923 err:
2924         return status;
2925 }
2926
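/* Full function bring-up: MSI-x, event/completion/MCC queues, interface
 * creation, MAC programming, TX queues, VLAN/RX-mode/flow-control state
 * and optional SR-IOV VF setup.  Any failure unwinds via be_clear().
 */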
2927 static int be_setup(struct be_adapter *adapter)
2928 {
2929         struct device *dev = &adapter->pdev->dev;
2930         u32 en_flags;
2931         u32 tx_fc, rx_fc;
2932         int status;
2933         u8 mac[ETH_ALEN];
2934         bool active_mac;
2935
2936         be_setup_init(adapter);
2937
2938         if (!lancer_chip(adapter))
2939                 be_cmd_req_native_mode(adapter);
2940
2941         status = be_get_config(adapter);
2942         if (status)
2943                 goto err;
2944
2945         be_msix_enable(adapter);
2946
2947         status = be_evt_queues_create(adapter);
2948         if (status)
2949                 goto err;
2950
2951         status = be_tx_cqs_create(adapter);
2952         if (status)
2953                 goto err;
2954
2955         status = be_rx_cqs_create(adapter);
2956         if (status)
2957                 goto err;
2958
2959         status = be_mcc_queues_create(adapter);
2960         if (status)
2961                 goto err;
2962
2963         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
2964         /* In UMC mode FW does not return right privileges.
2965          * Override with correct privilege equivalent to PF.
2966          */
2967         if (be_is_mc(adapter))
2968                 adapter->cmd_privileges = MAX_PRIVILEGES;
2969
2970         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2971                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2972
2973         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
2974                 en_flags |= BE_IF_FLAGS_RSS;
2975
2976         en_flags &= adapter->if_cap_flags;
2977
2978         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
2979                                   &adapter->if_handle, 0);
2980         if (status != 0)
2981                 goto err;
2982
2983         memset(mac, 0, ETH_ALEN);
2984         active_mac = false;
2985         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
2986                                  &active_mac, &adapter->pmac_id[0]);
2987         if (status != 0)
2988                 goto err;
2989
2990         if (!active_mac) {
2991                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2992                                          &adapter->pmac_id[0], 0);
2993                 if (status != 0)
2994                         goto err;
2995         }
2996
2997         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
2998                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2999                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3000         }
3001
3002         status = be_tx_qs_create(adapter);
3003         if (status)
3004                 goto err;
3005
3006         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3007
3008         if (adapter->vlans_added)
3009                 be_vid_config(adapter);
3010
3011         be_set_rx_mode(adapter->netdev);
3012
3013         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3014
3015         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3016                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3017                                         adapter->rx_fc);
3018
3019         if (be_physfn(adapter) && num_vfs) {
3020                 if (adapter->dev_num_vfs)
3021                         be_vf_setup(adapter);
3022                 else
3023                         dev_warn(dev, "device doesn't support SRIOV\n");
3024         }
3025
3026         status = be_cmd_get_phy_info(adapter);
3027         if (!status && be_pause_supported(adapter))
3028                 adapter->phy.fc_autoneg = 1;
3029
3030         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3031         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3032         return 0;
3033 err:
3034         be_clear(adapter);
3035         return status;
3036 }
3037
3038 #ifdef CONFIG_NET_POLL_CONTROLLER
3039 static void be_netpoll(struct net_device *netdev)
3040 {
3041         struct be_adapter *adapter = netdev_priv(netdev);
3042         struct be_eq_obj *eqo;
3043         int i;
3044
3045         for_all_evt_queues(adapter, eqo, i) {
3046                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3047                 napi_schedule(&eqo->napi);
3048         }
3051 }
3052 #endif
3053
3054 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3055 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3056
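/* Decide whether the boot (redboot) section needs flashing by comparing
 * the last 4 bytes of the new image (its CRC) against the CRC already
 * stored in flash; returns true only when they differ.
 */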
3057 static bool be_flash_redboot(struct be_adapter *adapter,
3058                         const u8 *p, u32 img_start, int image_size,
3059                         int hdr_size)
3060 {
3061         u32 crc_offset;
3062         u8 flashed_crc[4];
3063         int status;
3064
3065         crc_offset = hdr_size + img_start + image_size - 4;
3066
3067         p += crc_offset;
3068
3069         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3070                         (image_size - 4));
3071         if (status) {
3072                 dev_err(&adapter->pdev->dev,
3073                         "could not get crc from flash, not flashing redboot\n");
3074                 return false;
3075         }
3076
3077         /* update redboot only if crc does not match */
3078         if (!memcmp(flashed_crc, p, 4))
3079                 return false;
3080         else
3081                 return true;
3082 }
3083
3084 static bool phy_flashing_required(struct be_adapter *adapter)
3085 {
3086         return (adapter->phy.phy_type == TN_8022 &&
3087                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3088 }
3089
3090 static bool is_comp_in_ufi(struct be_adapter *adapter,
3091                            struct flash_section_info *fsec, int type)
3092 {
3093         int i = 0, img_type = 0;
3094         struct flash_section_info_g2 *fsec_g2 = NULL;
3095
3096         if (BE2_chip(adapter))
3097                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3098
3099         for (i = 0; i < MAX_FLASH_COMP; i++) {
3100                 if (fsec_g2)
3101                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3102                 else
3103                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3104
3105                 if (img_type == type)
3106                         return true;
3107         }
3108         return false;
3110 }
3111
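/* Scan the UFI payload (past the file and image headers) for the flash
 * section cookie, advancing in 32-byte steps; returns NULL when no
 * section table is found.
 */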
3112 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3113                                                 int header_size,
3114                                                 const struct firmware *fw)
3115 {
3116         struct flash_section_info *fsec = NULL;
3117         const u8 *p = fw->data;
3118
3119         p += header_size;
3120         while (p < (fw->data + fw->size)) {
3121                 fsec = (struct flash_section_info *)p;
3122                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3123                         return fsec;
3124                 p += 32;
3125         }
3126         return NULL;
3127 }
3128
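/* Write one flash component in 32KB chunks.  Every chunk but the last
 * is sent with a SAVE opcode and only the final chunk uses the FLASH
 * (commit) opcode; e.g. a 100KB image goes out as three 32KB SAVE ops
 * followed by one 4KB FLASH op.
 */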
3129 static int be_flash(struct be_adapter *adapter, const u8 *img,
3130                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3131 {
3132         u32 total_bytes = 0, flash_op, num_bytes = 0;
3133         int status = 0;
3134         struct be_cmd_write_flashrom *req = flash_cmd->va;
3135
3136         total_bytes = img_size;
3137         while (total_bytes) {
3138                 num_bytes = min_t(u32, 32*1024, total_bytes);
3139
3140                 total_bytes -= num_bytes;
3141
3142                 if (!total_bytes) {
3143                         if (optype == OPTYPE_PHY_FW)
3144                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3145                         else
3146                                 flash_op = FLASHROM_OPER_FLASH;
3147                 } else {
3148                         if (optype == OPTYPE_PHY_FW)
3149                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3150                         else
3151                                 flash_op = FLASHROM_OPER_SAVE;
3152                 }
3153
3154                 memcpy(req->data_buf, img, num_bytes);
3155                 img += num_bytes;
3156                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3157                                                 flash_op, num_bytes);
3158                 if (status) {
3159                         if (status == ILLEGAL_IOCTL_REQ &&
3160                             optype == OPTYPE_PHY_FW)
3161                                 break;
3162                         dev_err(&adapter->pdev->dev,
3163                                 "cmd to write to flash rom failed.\n");
3164                         return status;
3165                 }
3166         }
3167         return 0;
3168 }
3169
3170 /* For BE2 and BE3 */
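/* Flash each known component type (FW images, BIOS option ROMs, boot
 * code, NCSI and PHY FW) that is actually present in the UFI, skipping
 * NCSI on pre-3.102.148.0 firmware, PHY FW when the fitted PHY does not
 * need it, and boot code whose CRC already matches what is in flash.
 */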
3171 static int be_flash_BEx(struct be_adapter *adapter,
3172                          const struct firmware *fw,
3173                          struct be_dma_mem *flash_cmd,
3174                          int num_of_images)
3176 {
3177         int status = 0, i, filehdr_size = 0;
3178         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3179         const u8 *p = fw->data;
3180         const struct flash_comp *pflashcomp;
3181         int num_comp, redboot;
3182         struct flash_section_info *fsec = NULL;
3183
3184         struct flash_comp gen3_flash_types[] = {
3185                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3186                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3187                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3188                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3189                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3190                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3191                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3192                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3193                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3194                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3195                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3196                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3197                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3198                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3199                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3200                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3201                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3202                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3203                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3204                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3205         };
3206
3207         struct flash_comp gen2_flash_types[] = {
3208                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3209                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3210                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3211                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3212                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3213                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3214                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3215                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3216                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3217                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3218                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3219                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3220                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3221                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3222                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3223                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3224         };
3225
3226         if (BE3_chip(adapter)) {
3227                 pflashcomp = gen3_flash_types;
3228                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3229                 num_comp = ARRAY_SIZE(gen3_flash_types);
3230         } else {
3231                 pflashcomp = gen2_flash_types;
3232                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3233                 num_comp = ARRAY_SIZE(gen2_flash_types);
3234         }
3235
3236         /* Get flash section info */
3237         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3238         if (!fsec) {
3239                 dev_err(&adapter->pdev->dev,
3240                         "Invalid Cookie. UFI corrupted?\n");
3241                 return -1;
3242         }
3243         for (i = 0; i < num_comp; i++) {
3244                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3245                         continue;
3246
3247                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3248                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3249                         continue;
3250
3251                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3252                     !phy_flashing_required(adapter))
3253                         continue;
3254
3255                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3256                         redboot = be_flash_redboot(adapter, fw->data,
3257                                 pflashcomp[i].offset, pflashcomp[i].size,
3258                                 filehdr_size + img_hdrs_size);
3259                         if (!redboot)
3260                                 continue;
3261                 }
3262
3263                 p = fw->data;
3264                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3265                 if (p + pflashcomp[i].size > fw->data + fw->size)
3266                         return -1;
3267
3268                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3269                                         pflashcomp[i].size);
3270                 if (status) {
3271                         dev_err(&adapter->pdev->dev,
3272                                 "Flashing section type %d failed.\n",
3273                                 pflashcomp[i].img_type);
3274                         return status;
3275                 }
3276         }
3277         return 0;
3278 }
3279
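/* Skyhawk UFIs are self-describing: iterate the flash-section entries,
 * map each image type to a flash opcode and skip types the driver does
 * not know how to program.
 */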
3280 static int be_flash_skyhawk(struct be_adapter *adapter,
3281                 const struct firmware *fw,
3282                 struct be_dma_mem *flash_cmd, int num_of_images)
3283 {
3284         int status = 0, i, filehdr_size = 0;
3285         int img_offset, img_size, img_optype, redboot;
3286         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3287         const u8 *p = fw->data;
3288         struct flash_section_info *fsec = NULL;
3289
3290         filehdr_size = sizeof(struct flash_file_hdr_g3);
3291         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3292         if (!fsec) {
3293                 dev_err(&adapter->pdev->dev,
3294                         "Invalid Cookie. UFI corrupted?\n");
3295                 return -1;
3296         }
3297
3298         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3299                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3300                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3301
3302                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3303                 case IMAGE_FIRMWARE_iSCSI:
3304                         img_optype = OPTYPE_ISCSI_ACTIVE;
3305                         break;
3306                 case IMAGE_BOOT_CODE:
3307                         img_optype = OPTYPE_REDBOOT;
3308                         break;
3309                 case IMAGE_OPTION_ROM_ISCSI:
3310                         img_optype = OPTYPE_BIOS;
3311                         break;
3312                 case IMAGE_OPTION_ROM_PXE:
3313                         img_optype = OPTYPE_PXE_BIOS;
3314                         break;
3315                 case IMAGE_OPTION_ROM_FCoE:
3316                         img_optype = OPTYPE_FCOE_BIOS;
3317                         break;
3318                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3319                         img_optype = OPTYPE_ISCSI_BACKUP;
3320                         break;
3321                 case IMAGE_NCSI:
3322                         img_optype = OPTYPE_NCSI_FW;
3323                         break;
3324                 default:
3325                         continue;
3326                 }
3327
3328                 if (img_optype == OPTYPE_REDBOOT) {
3329                         redboot = be_flash_redboot(adapter, fw->data,
3330                                         img_offset, img_size,
3331                                         filehdr_size + img_hdrs_size);
3332                         if (!redboot)
3333                                 continue;
3334                 }
3335
3336                 p = fw->data;
3337                 p += filehdr_size + img_offset + img_hdrs_size;
3338                 if (p + img_size > fw->data + fw->size)
3339                         return -1;
3340
3341                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3342                 if (status) {
3343                         dev_err(&adapter->pdev->dev,
3344                                 "Flashing section type %d failed.\n",
3345                                 le32_to_cpu(fsec->fsec_entry[i].type));
3346                         return status;
3347                 }
3348         }
3349         return 0;
3350 }
3351
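/* Poll the PHYSDEV control register at 1s intervals (30s max) until the
 * chip clears its in-progress bit; lancer_fw_reset() below relies on
 * this before triggering a FW reset.
 */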
3352 static int lancer_wait_idle(struct be_adapter *adapter)
3353 {
3354 #define SLIPORT_IDLE_TIMEOUT 30
3355         u32 reg_val;
3356         int status = 0, i;
3357
3358         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3359                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3360                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3361                         break;
3362
3363                 ssleep(1);
3364         }
3365
3366         if (i == SLIPORT_IDLE_TIMEOUT)
3367                 status = -1;
3368
3369         return status;
3370 }
3371
3372 static int lancer_fw_reset(struct be_adapter *adapter)
3373 {
3374         int status = 0;
3375
3376         status = lancer_wait_idle(adapter);
3377         if (status)
3378                 return status;
3379
3380         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3381                   PHYSDEV_CONTROL_OFFSET);
3382
3383         return status;
3384 }
3385
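/* Lancer FW download: the (4-byte aligned) image is written to the
 * "/prg" object on the device in 32KB chunks, then committed with a
 * zero-length write at the final offset.  change_status then indicates
 * whether a FW reset (done here) or a host reboot is needed before the
 * new image becomes active.
 */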
3386 static int lancer_fw_download(struct be_adapter *adapter,
3387                                 const struct firmware *fw)
3388 {
3389 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3390 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3391         struct be_dma_mem flash_cmd;
3392         const u8 *data_ptr = NULL;
3393         u8 *dest_image_ptr = NULL;
3394         size_t image_size = 0;
3395         u32 chunk_size = 0;
3396         u32 data_written = 0;
3397         u32 offset = 0;
3398         int status = 0;
3399         u8 add_status = 0;
3400         u8 change_status;
3401
3402         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3403                 dev_err(&adapter->pdev->dev,
3404                         "FW Image not properly aligned. "
3405                         "Length must be 4 byte aligned.\n");
3406                 status = -EINVAL;
3407                 goto lancer_fw_exit;
3408         }
3409
3410         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3411                                 + LANCER_FW_DOWNLOAD_CHUNK;
3412         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3413                                                 &flash_cmd.dma, GFP_KERNEL);
3414         if (!flash_cmd.va) {
3415                 status = -ENOMEM;
3416                 dev_err(&adapter->pdev->dev,
3417                         "Memory allocation failure while flashing\n");
3418                 goto lancer_fw_exit;
3419         }
3420
3421         dest_image_ptr = flash_cmd.va +
3422                                 sizeof(struct lancer_cmd_req_write_object);
3423         image_size = fw->size;
3424         data_ptr = fw->data;
3425
3426         while (image_size) {
3427                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3428
3429                 /* Copy the image chunk content. */
3430                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3431
3432                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3433                                                  chunk_size, offset,
3434                                                  LANCER_FW_DOWNLOAD_LOCATION,
3435                                                  &data_written, &change_status,
3436                                                  &add_status);
3437                 if (status)
3438                         break;
3439
3440                 offset += data_written;
3441                 data_ptr += data_written;
3442                 image_size -= data_written;
3443         }
3444
3445         if (!status) {
3446                 /* Commit the FW written */
3447                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3448                                                  0, offset,
3449                                                  LANCER_FW_DOWNLOAD_LOCATION,
3450                                                  &data_written, &change_status,
3451                                                  &add_status);
3452         }
3453
3454         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3455                                 flash_cmd.dma);
3456         if (status) {
3457                 dev_err(&adapter->pdev->dev,
3458                         "Firmware load error. "
3459                         "Status code: 0x%x Additional Status: 0x%x\n",
3460                         status, add_status);
3461                 goto lancer_fw_exit;
3462         }
3463
3464         if (change_status == LANCER_FW_RESET_NEEDED) {
3465                 status = lancer_fw_reset(adapter);
3466                 if (status) {
3467                         dev_err(&adapter->pdev->dev,
3468                                 "Adapter busy for FW reset.\n"
3469                                 "New FW will not be active.\n");
3470                         goto lancer_fw_exit;
3471                 }
3472         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3473                 dev_err(&adapter->pdev->dev,
3474                         "System reboot required for new FW"
3475                         " to be active\n");
3476         }
3477
3478         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3479 lancer_fw_exit:
3480         return status;
3481 }
3482
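/* The UFI file must match the ASIC generation: the first character of
 * the file header's build string identifies it ('2' = BE2, '3' = BE3,
 * '4' = Skyhawk).
 */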
3483 #define UFI_TYPE2               2
3484 #define UFI_TYPE3               3
3485 #define UFI_TYPE4               4
3486 static int be_get_ufi_type(struct be_adapter *adapter,
3487                            struct flash_file_hdr_g2 *fhdr)
3488 {
3489         if (fhdr == NULL)
3490                 goto be_get_ufi_exit;
3491
3492         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3493                 return UFI_TYPE4;
3494         else if (BE3_chip(adapter) && fhdr->build[0] == '3')
3495                 return UFI_TYPE3;
3496         else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3497                 return UFI_TYPE2;
3498
3499 be_get_ufi_exit:
3500         dev_err(&adapter->pdev->dev,
3501                 "UFI and Interface are not compatible for flashing\n");
3502         return -1;
3503 }
3504
3505 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3506 {
3507         struct flash_file_hdr_g2 *fhdr;
3508         struct flash_file_hdr_g3 *fhdr3;
3509         struct image_hdr *img_hdr_ptr = NULL;
3510         struct be_dma_mem flash_cmd;
3511         const u8 *p;
3512         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3513
3514         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3515         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3516                                           &flash_cmd.dma, GFP_KERNEL);
3517         if (!flash_cmd.va) {
3518                 status = -ENOMEM;
3519                 dev_err(&adapter->pdev->dev,
3520                         "Memory allocation failure while flashing\n");
3521                 goto be_fw_exit;
3522         }
3523
3524         p = fw->data;
3525         fhdr = (struct flash_file_hdr_g2 *)p;
3526
3527         ufi_type = be_get_ufi_type(adapter, fhdr);
3528
3529         fhdr3 = (struct flash_file_hdr_g3 *)fw->data;
3530         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3531         for (i = 0; i < num_imgs; i++) {
3532                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3533                                 (sizeof(struct flash_file_hdr_g3) +
3534                                  i * sizeof(struct image_hdr)));
3535                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3536                         if (ufi_type == UFI_TYPE4)
3537                                 status = be_flash_skyhawk(adapter, fw,
3538                                                         &flash_cmd, num_imgs);
3539                         else if (ufi_type == UFI_TYPE3)
3540                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3541                                                       num_imgs);
3542                 }
3543         }
3544
3545         if (ufi_type == UFI_TYPE2)
3546                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3547         else if (ufi_type == -1)
3548                 status = -1;
3549
3550         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3551                           flash_cmd.dma);
3552         if (status) {
3553                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3554                 goto be_fw_exit;
3555         }
3556
3557         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3558
3559 be_fw_exit:
3560         return status;
3561 }
3562
3563 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3564 {
3565         const struct firmware *fw;
3566         int status;
3567
3568         if (!netif_running(adapter->netdev)) {
3569                 dev_err(&adapter->pdev->dev,
3570                         "Firmware load not allowed (interface is down)\n");
3571                 return -1;
3572         }
3573
3574         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3575         if (status)
3576                 goto fw_exit;
3577
3578         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3579
3580         if (lancer_chip(adapter))
3581                 status = lancer_fw_download(adapter, fw);
3582         else
3583                 status = be_fw_download(adapter, fw);
3584
3585 fw_exit:
3586         release_firmware(fw);
3587         return status;
3588 }
3589
3590 static const struct net_device_ops be_netdev_ops = {
3591         .ndo_open               = be_open,
3592         .ndo_stop               = be_close,
3593         .ndo_start_xmit         = be_xmit,
3594         .ndo_set_rx_mode        = be_set_rx_mode,
3595         .ndo_set_mac_address    = be_mac_addr_set,
3596         .ndo_change_mtu         = be_change_mtu,
3597         .ndo_get_stats64        = be_get_stats64,
3598         .ndo_validate_addr      = eth_validate_addr,
3599         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3600         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3601         .ndo_set_vf_mac         = be_set_vf_mac,
3602         .ndo_set_vf_vlan        = be_set_vf_vlan,
3603         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3604         .ndo_get_vf_config      = be_get_vf_config,
3605 #ifdef CONFIG_NET_POLL_CONTROLLER
3606         .ndo_poll_controller    = be_netpoll,
3607 #endif
3608 };
3609
3610 static void be_netdev_init(struct net_device *netdev)
3611 {
3612         struct be_adapter *adapter = netdev_priv(netdev);
3613         struct be_eq_obj *eqo;
3614         int i;
3615
3616         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3617                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3618                 NETIF_F_HW_VLAN_TX;
3619         if (be_multi_rxq(adapter))
3620                 netdev->hw_features |= NETIF_F_RXHASH;
3621
3622         netdev->features |= netdev->hw_features |
3623                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3624
3625         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3626                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3627
3628         netdev->priv_flags |= IFF_UNICAST_FLT;
3629
3630         netdev->flags |= IFF_MULTICAST;
3631
3632         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3633
3634         netdev->netdev_ops = &be_netdev_ops;
3635
3636         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3637
3638         for_all_evt_queues(adapter, eqo, i)
3639                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3640 }
3641
3642 static void be_unmap_pci_bars(struct be_adapter *adapter)
3643 {
3644         if (adapter->db)
3645                 pci_iounmap(adapter->pdev, adapter->db);
3646 }
3647
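/* Doorbell BAR selection: Lancer and BE virtual functions expose their
 * doorbells on BAR 0; the BE2/BE3 physical function uses BAR 4.
 */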
3648 static int db_bar(struct be_adapter *adapter)
3649 {
3650         if (lancer_chip(adapter) || !be_physfn(adapter))
3651                 return 0;
3652         else
3653                 return 4;
3654 }
3655
3656 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3657 {
3658         if (skyhawk_chip(adapter)) {
3659                 adapter->roce_db.size = 4096;
3660                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3661                                                               db_bar(adapter));
3662                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3663                                                                db_bar(adapter));
3664         }
3665         return 0;
3666 }
3667
3668 static int be_map_pci_bars(struct be_adapter *adapter)
3669 {
3670         u8 __iomem *addr;
3671         u32 sli_intf;
3672
3673         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3674         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3675                                 SLI_INTF_IF_TYPE_SHIFT;
3676
3677         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3678         if (addr == NULL)
3679                 goto pci_map_err;
3680         adapter->db = addr;
3681
3682         be_roce_map_pci_bars(adapter);
3683         return 0;
3684
3685 pci_map_err:
3686         be_unmap_pci_bars(adapter);
3687         return -ENOMEM;
3688 }
3689
3690 static void be_ctrl_cleanup(struct be_adapter *adapter)
3691 {
3692         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3693
3694         be_unmap_pci_bars(adapter);
3695
3696         if (mem->va)
3697                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3698                                   mem->dma);
3699
3700         mem = &adapter->rx_filter;
3701         if (mem->va)
3702                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3703                                   mem->dma);
3704 }
3705
3706 static int be_ctrl_init(struct be_adapter *adapter)
3707 {
3708         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3709         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3710         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3711         u32 sli_intf;
3712         int status;
3713
3714         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3715         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3716                                  SLI_INTF_FAMILY_SHIFT;
3717         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3718
3719         status = be_map_pci_bars(adapter);
3720         if (status)
3721                 goto done;
3722
3723         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3724         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3725                                                 mbox_mem_alloc->size,
3726                                                 &mbox_mem_alloc->dma,
3727                                                 GFP_KERNEL);
3728         if (!mbox_mem_alloc->va) {
3729                 status = -ENOMEM;
3730                 goto unmap_pci_bars;
3731         }
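        /* The mailbox is used with a 16-byte aligned DMA address; the
         * buffer above is over-allocated by 16 bytes so both the virtual
         * and DMA addresses can simply be rounded up with PTR_ALIGN,
         * e.g. 0x...1008 becomes 0x...1010.
         */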
3732         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3733         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3734         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3735         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3736
3737         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3738         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3739                                         &rx_filter->dma, GFP_KERNEL);
3740         if (rx_filter->va == NULL) {
3741                 status = -ENOMEM;
3742                 goto free_mbox;
3743         }
3744         memset(rx_filter->va, 0, rx_filter->size);
3745         mutex_init(&adapter->mbox_lock);
3746         spin_lock_init(&adapter->mcc_lock);
3747         spin_lock_init(&adapter->mcc_cq_lock);
3748
3749         init_completion(&adapter->flash_compl);
3750         pci_save_state(adapter->pdev);
3751         return 0;
3752
3753 free_mbox:
3754         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3755                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3756
3757 unmap_pci_bars:
3758         be_unmap_pci_bars(adapter);
3759
3760 done:
3761         return status;
3762 }
3763
3764 static void be_stats_cleanup(struct be_adapter *adapter)
3765 {
3766         struct be_dma_mem *cmd = &adapter->stats_cmd;
3767
3768         if (cmd->va)
3769                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3770                                   cmd->va, cmd->dma);
3771 }
3772
3773 static int be_stats_init(struct be_adapter *adapter)
3774 {
3775         struct be_dma_mem *cmd = &adapter->stats_cmd;
3776
3777         if (lancer_chip(adapter))
3778                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3779         else if (BE2_chip(adapter))
3780                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3781         else
3782                 /* BE3 and Skyhawk */
3783                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3784
3785         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3786                                      GFP_KERNEL);
3787         if (cmd->va == NULL)
3788                 return -ENOMEM;
3789         memset(cmd->va, 0, cmd->size);
3790         return 0;
3791 }
3792
3793 static void be_remove(struct pci_dev *pdev)
3794 {
3795         struct be_adapter *adapter = pci_get_drvdata(pdev);
3796
3797         if (!adapter)
3798                 return;
3799
3800         be_roce_dev_remove(adapter);
3801
3802         cancel_delayed_work_sync(&adapter->func_recovery_work);
3803
3804         unregister_netdev(adapter->netdev);
3805
3806         be_clear(adapter);
3807
3808         /* tell fw we're done with firing cmds */
3809         be_cmd_fw_clean(adapter);
3810
3811         be_stats_cleanup(adapter);
3812
3813         be_ctrl_cleanup(adapter);
3814
3815         pci_disable_pcie_error_reporting(pdev);
3816
3817         pci_set_drvdata(pdev, NULL);
3818         pci_release_regions(pdev);
3819         pci_disable_device(pdev);
3820
3821         free_netdev(adapter->netdev);
3822 }
3823
3824 bool be_is_wol_supported(struct be_adapter *adapter)
3825 {
3826         return (adapter->wol_cap & BE_WOL_CAP) &&
3827                 !be_is_wol_excluded(adapter);
3828 }
3829
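/* Query the FW's UART trace level via the extended FAT capabilities;
 * be_get_initial_config() uses it to pick the default msg_enable mask.
 * Returns 0 on Lancer or on any failure.
 */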
3830 u32 be_get_fw_log_level(struct be_adapter *adapter)
3831 {
3832         struct be_dma_mem extfat_cmd;
3833         struct be_fat_conf_params *cfgs;
3834         int status;
3835         u32 level = 0;
3836         int j;
3837
3838         if (lancer_chip(adapter))
3839                 return 0;
3840
3841         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3842         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3843         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3844                                              &extfat_cmd.dma);
3845
3846         if (!extfat_cmd.va) {
3847                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3848                         __func__);
3849                 goto err;
3850         }
3851
3852         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3853         if (!status) {
3854                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3855                                                 sizeof(struct be_cmd_resp_hdr));
3856                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
3857                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3858                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3859                 }
3860         }
3861         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3862                             extfat_cmd.dma);
3863 err:
3864         return level;
3865 }
3866
3867 static int be_get_initial_config(struct be_adapter *adapter)
3868 {
3869         int status;
3870         u32 level;
3871
3872         status = be_cmd_get_cntl_attributes(adapter);
3873         if (status)
3874                 return status;
3875
3876         status = be_cmd_get_acpi_wol_cap(adapter);
3877         if (status) {
3878                 /* in case of a failure to get WOL capabilities
3879                  * check the exclusion list to determine WOL capability */
3880                 if (!be_is_wol_excluded(adapter))
3881                         adapter->wol_cap |= BE_WOL_CAP;
3882         }
3883
3884         if (be_is_wol_supported(adapter))
3885                 adapter->wol = true;
3886
3887         /* Must be a power of 2 or else MODULO will BUG_ON */
3888         adapter->be_get_temp_freq = 64;
3889
3890         level = be_get_fw_log_level(adapter);
3891         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3892
3893         return 0;
3894 }
3895
3896 static int lancer_recover_func(struct be_adapter *adapter)
3897 {
3898         int status;
3899
3900         status = lancer_test_and_set_rdy_state(adapter);
3901         if (status)
3902                 goto err;
3903
3904         if (netif_running(adapter->netdev))
3905                 be_close(adapter->netdev);
3906
3907         be_clear(adapter);
3908
3909         adapter->hw_error = false;
3910         adapter->fw_timeout = false;
3911
3912         status = be_setup(adapter);
3913         if (status)
3914                 goto err;
3915
3916         if (netif_running(adapter->netdev)) {
3917                 status = be_open(adapter->netdev);
3918                 if (status)
3919                         goto err;
3920         }
3921
3922         dev_info(&adapter->pdev->dev,
3923                  "Adapter SLIPORT recovery succeeded\n");
3924         return 0;
3925 err:
3926         if (adapter->eeh_error)
3927                 dev_err(&adapter->pdev->dev,
3928                         "Adapter SLIPORT recovery failed\n");
3929
3930         return status;
3931 }
3932
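/* Runs every second; on Lancer a detected HW error (unless it came in
 * via EEH) triggers an in-place SLIPORT recovery, with the netdev
 * detached for the duration and re-attached on success.
 */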
3933 static void be_func_recovery_task(struct work_struct *work)
3934 {
3935         struct be_adapter *adapter =
3936                 container_of(work, struct be_adapter, func_recovery_work.work);
3937         int status;
3938
3939         be_detect_error(adapter);
3940
3941         if (adapter->hw_error && lancer_chip(adapter)) {
3943                 if (adapter->eeh_error)
3944                         goto out;
3945
3946                 rtnl_lock();
3947                 netif_device_detach(adapter->netdev);
3948                 rtnl_unlock();
3949
3950                 status = lancer_recover_func(adapter);
3951
3952                 if (!status)
3953                         netif_device_attach(adapter->netdev);
3954         }
3955
3956 out:
3957         schedule_delayed_work(&adapter->func_recovery_work,
3958                               msecs_to_jiffies(1000));
3959 }
3960
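/* 1-second housekeeping: reaps MCC completions while interrupts are
 * down, kicks off stats and (every 64 ticks) die-temperature queries,
 * replenishes starved RX rings and adapts EQ interrupt delays.
 */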
3961 static void be_worker(struct work_struct *work)
3962 {
3963         struct be_adapter *adapter =
3964                 container_of(work, struct be_adapter, work.work);
3965         struct be_rx_obj *rxo;
3966         struct be_eq_obj *eqo;
3967         int i;
3968
3969         /* when interrupts are not yet enabled, just reap any pending
3970          * mcc completions */
3971         if (!netif_running(adapter->netdev)) {
3972                 local_bh_disable();
3973                 be_process_mcc(adapter);
3974                 local_bh_enable();
3975                 goto reschedule;
3976         }
3977
3978         if (!adapter->stats_cmd_sent) {
3979                 if (lancer_chip(adapter))
3980                         lancer_cmd_get_pport_stats(adapter,
3981                                                 &adapter->stats_cmd);
3982                 else
3983                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3984         }
3985
3986         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
3987                 be_cmd_get_die_temperature(adapter);
3988
3989         for_all_rx_queues(adapter, rxo, i) {
3990                 if (rxo->rx_post_starved) {
3991                         rxo->rx_post_starved = false;
3992                         be_post_rx_frags(rxo, GFP_KERNEL);
3993                 }
3994         }
3995
3996         for_all_evt_queues(adapter, eqo, i)
3997                 be_eqd_update(adapter, eqo);
3998
3999 reschedule:
4000         adapter->work_counter++;
4001         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4002 }
4003
4004 static bool be_reset_required(struct be_adapter *adapter)
4005 {
4006         return be_find_vfs(adapter, ENABLED) > 0 ? false : true;
4007 }
4008
4009 static char *mc_name(struct be_adapter *adapter)
4010 {
4011         if (adapter->function_mode & FLEX10_MODE)
4012                 return "FLEX10";
4013         else if (adapter->function_mode & VNIC_MODE)
4014                 return "vNIC";
4015         else if (adapter->function_mode & UMC_ENABLED)
4016                 return "UMC";
4017         else
4018                 return "";
4019 }
4020
4021 static inline char *func_name(struct be_adapter *adapter)
4022 {
4023         return be_physfn(adapter) ? "PF" : "VF";
4024 }
4025
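/* PCI probe: order matters here - PCI/DMA setup, mailbox and doorbell
 * mapping (be_ctrl_init), FW-ready handshake and optional function
 * reset, stats/config queries, full queue and interface bring-up
 * (be_setup), and only then netdev registration.
 */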
4026 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4027 {
4028         int status = 0;
4029         struct be_adapter *adapter;
4030         struct net_device *netdev;
4031         char port_name;
4032
4033         status = pci_enable_device(pdev);
4034         if (status)
4035                 goto do_none;
4036
4037         status = pci_request_regions(pdev, DRV_NAME);
4038         if (status)
4039                 goto disable_dev;
4040         pci_set_master(pdev);
4041
4042         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4043         if (netdev == NULL) {
4044                 status = -ENOMEM;
4045                 goto rel_reg;
4046         }
4047         adapter = netdev_priv(netdev);
4048         adapter->pdev = pdev;
4049         pci_set_drvdata(pdev, adapter);
4050         adapter->netdev = netdev;
4051         SET_NETDEV_DEV(netdev, &pdev->dev);
4052
4053         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4054         if (!status) {
4055                 netdev->features |= NETIF_F_HIGHDMA;
4056         } else {
4057                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4058                 if (status) {
4059                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4060                         goto free_netdev;
4061                 }
4062         }
4063
4064         status = pci_enable_pcie_error_reporting(pdev);
4065         if (status)
4066                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4067
4068         status = be_ctrl_init(adapter);
4069         if (status)
4070                 goto free_netdev;
4071
4072         /* sync up with fw's ready state */
4073         if (be_physfn(adapter)) {
4074                 status = be_fw_wait_ready(adapter);
4075                 if (status)
4076                         goto ctrl_clean;
4077         }
4078
4079         /* tell fw we're ready to fire cmds */
4080         status = be_cmd_fw_init(adapter);
4081         if (status)
4082                 goto ctrl_clean;
4083
4084         if (be_reset_required(adapter)) {
4085                 status = be_cmd_reset_function(adapter);
4086                 if (status)
4087                         goto ctrl_clean;
4088         }
4089
4090         /* The INTR bit may be set in the card when probed by a kdump kernel
4091          * after a crash.
4092          */
4093         if (!lancer_chip(adapter))
4094                 be_intr_set(adapter, false);
4095
4096         status = be_stats_init(adapter);
4097         if (status)
4098                 goto ctrl_clean;
4099
4100         status = be_get_initial_config(adapter);
4101         if (status)
4102                 goto stats_clean;
4103
4104         INIT_DELAYED_WORK(&adapter->work, be_worker);
4105         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4106         adapter->rx_fc = adapter->tx_fc = true;
4107
4108         status = be_setup(adapter);
4109         if (status)
4110                 goto stats_clean;
4111
4112         be_netdev_init(netdev);
4113         status = register_netdev(netdev);
4114         if (status != 0)
4115                 goto unsetup;
4116
4117         be_roce_dev_add(adapter);
4118
4119         schedule_delayed_work(&adapter->func_recovery_work,
4120                               msecs_to_jiffies(1000));
4121
4122         be_cmd_query_port_name(adapter, &port_name);
4123
4124         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4125                  func_name(adapter), mc_name(adapter), port_name);
4126
4127         return 0;
4128
4129 unsetup:
4130         be_clear(adapter);
4131 stats_clean:
4132         be_stats_cleanup(adapter);
4133 ctrl_clean:
4134         be_ctrl_cleanup(adapter);
4135 free_netdev:
4136         free_netdev(netdev);
4137         pci_set_drvdata(pdev, NULL);
4138 rel_reg:
4139         pci_release_regions(pdev);
4140 disable_dev:
4141         pci_disable_device(pdev);
4142 do_none:
4143         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4144         return status;
4145 }
4146
4147 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4148 {
4149         struct be_adapter *adapter = pci_get_drvdata(pdev);
4150         struct net_device *netdev = adapter->netdev;
4151
4152         if (adapter->wol)
4153                 be_setup_wol(adapter, true);
4154
4155         cancel_delayed_work_sync(&adapter->func_recovery_work);
4156
4157         netif_device_detach(netdev);
4158         if (netif_running(netdev)) {
4159                 rtnl_lock();
4160                 be_close(netdev);
4161                 rtnl_unlock();
4162         }
4163         be_clear(adapter);
4164
4165         pci_save_state(pdev);
4166         pci_disable_device(pdev);
4167         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4168         return 0;
4169 }
4170
4171 static int be_resume(struct pci_dev *pdev)
4172 {
4173         int status = 0;
4174         struct be_adapter *adapter = pci_get_drvdata(pdev);
4175         struct net_device *netdev = adapter->netdev;
4176
4177         netif_device_detach(netdev);
4178
4179         status = pci_enable_device(pdev);
4180         if (status)
4181                 return status;
4182
4183         pci_set_power_state(pdev, 0);
4184         pci_restore_state(pdev);
4185
4186         /* tell fw we're ready to fire cmds */
4187         status = be_cmd_fw_init(adapter);
4188         if (status)
4189                 return status;
4190
4191         be_setup(adapter);
4192         if (netif_running(netdev)) {
4193                 rtnl_lock();
4194                 be_open(netdev);
4195                 rtnl_unlock();
4196         }
4197
4198         schedule_delayed_work(&adapter->func_recovery_work,
4199                               msecs_to_jiffies(1000));
4200         netif_device_attach(netdev);
4201
4202         if (adapter->wol)
4203                 be_setup_wol(adapter, false);
4204
4205         return 0;
4206 }
4207
4208 /*
4209  * An FLR will stop BE from DMAing any data.
4210  */
4211 static void be_shutdown(struct pci_dev *pdev)
4212 {
4213         struct be_adapter *adapter = pci_get_drvdata(pdev);
4214
4215         if (!adapter)
4216                 return;
4217
4218         cancel_delayed_work_sync(&adapter->work);
4219         cancel_delayed_work_sync(&adapter->func_recovery_work);
4220
4221         netif_device_detach(adapter->netdev);
4222
4223         be_cmd_reset_function(adapter);
4224
4225         pci_disable_device(pdev);
4226 }
4227
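/* EEH callbacks: error_detected tears the port down (waiting out a
 * possible FW flash dump on function 0 before the slot reset),
 * slot_reset re-enables the device and waits for FW readiness, and
 * resume re-runs the normal setup path.
 */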
4228 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
4229                                 pci_channel_state_t state)
4230 {
4231         struct be_adapter *adapter = pci_get_drvdata(pdev);
4232         struct net_device *netdev = adapter->netdev;
4233
4234         dev_err(&adapter->pdev->dev, "EEH error detected\n");
4235
4236         adapter->eeh_error = true;
4237
4238         cancel_delayed_work_sync(&adapter->func_recovery_work);
4239
4240         rtnl_lock();
4241         netif_device_detach(netdev);
4242         rtnl_unlock();
4243
4244         if (netif_running(netdev)) {
4245                 rtnl_lock();
4246                 be_close(netdev);
4247                 rtnl_unlock();
4248         }
4249         be_clear(adapter);
4250
4251         if (state == pci_channel_io_perm_failure)
4252                 return PCI_ERS_RESULT_DISCONNECT;
4253
4254         pci_disable_device(pdev);
4255
4256         /* The error could cause the FW to trigger a flash debug dump.
4257          * Resetting the card while flash dump is in progress
4258          * can cause it not to recover; wait for it to finish.
4259          * Wait only for first function as it is needed only once per
4260          * adapter.
4261          */
4262         if (pdev->devfn == 0)
4263                 ssleep(30);
4264
4265         return PCI_ERS_RESULT_NEED_RESET;
4266 }
4267
4268 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
4269 {
4270         struct be_adapter *adapter = pci_get_drvdata(pdev);
4271         int status;
4272
4273         dev_info(&adapter->pdev->dev, "EEH reset\n");
4274         be_clear_all_error(adapter);
4275
4276         status = pci_enable_device(pdev);
4277         if (status)
4278                 return PCI_ERS_RESULT_DISCONNECT;
4279
4280         pci_set_master(pdev);
4281         pci_set_power_state(pdev, 0);
4282         pci_restore_state(pdev);
4283
4284         /* Check if card is ok and fw is ready */
4285         status = be_fw_wait_ready(adapter);
4286         if (status)
4287                 return PCI_ERS_RESULT_DISCONNECT;
4288
4289         pci_cleanup_aer_uncorrect_error_status(pdev);
4290         return PCI_ERS_RESULT_RECOVERED;
4291 }
4292
4293 static void be_eeh_resume(struct pci_dev *pdev)
4294 {
4295         int status = 0;
4296         struct be_adapter *adapter = pci_get_drvdata(pdev);
4297         struct net_device *netdev = adapter->netdev;
4298
4299         dev_info(&adapter->pdev->dev, "EEH resume\n");
4300
4301         pci_save_state(pdev);
4302
4303         /* tell fw we're ready to fire cmds */
4304         status = be_cmd_fw_init(adapter);
4305         if (status)
4306                 goto err;
4307
4308         status = be_cmd_reset_function(adapter);
4309         if (status)
4310                 goto err;
4311
4312         status = be_setup(adapter);
4313         if (status)
4314                 goto err;
4315
4316         if (netif_running(netdev)) {
4317                 status = be_open(netdev);
4318                 if (status)
4319                         goto err;
4320         }
4321
4322         schedule_delayed_work(&adapter->func_recovery_work,
4323                               msecs_to_jiffies(1000));
4324         netif_device_attach(netdev);
4325         return;
4326 err:
4327         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4328 }
4329
4330 static const struct pci_error_handlers be_eeh_handlers = {
4331         .error_detected = be_eeh_err_detected,
4332         .slot_reset = be_eeh_reset,
4333         .resume = be_eeh_resume,
4334 };
4335
4336 static struct pci_driver be_driver = {
4337         .name = DRV_NAME,
4338         .id_table = be_dev_ids,
4339         .probe = be_probe,
4340         .remove = be_remove,
4341         .suspend = be_suspend,
4342         .resume = be_resume,
4343         .shutdown = be_shutdown,
4344         .err_handler = &be_eeh_handlers
4345 };
4346
4347 static int __init be_init_module(void)
4348 {
4349         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4350             rx_frag_size != 2048) {
4351                 printk(KERN_WARNING DRV_NAME
4352                         " : Module param rx_frag_size must be 2048/4096/8192."
4353                         " Using 2048\n");
4354                 rx_frag_size = 2048;
4355         }
4356
4357         return pci_register_driver(&be_driver);
4358 }
4359 module_init(be_init_module);
4360
4361 static void __exit be_exit_module(void)
4362 {
4363         pci_unregister_driver(&be_driver);
4364 }
4365 module_exit(be_exit_module);