1 /*
2  * Copyright (C) 2005 - 2013 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24
25 MODULE_VERSION(DRV_VER);
27 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
28 MODULE_AUTHOR("Emulex Corporation");
29 MODULE_LICENSE("GPL");
30
31 static unsigned int num_vfs;
32 module_param(num_vfs, uint, S_IRUGO);
33 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
34
35 static ushort rx_frag_size = 2048;
36 module_param(rx_frag_size, ushort, S_IRUGO);
37 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
38
39 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
40         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
41         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
45         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
46         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
48         { 0 }
49 };
50 MODULE_DEVICE_TABLE(pci, be_dev_ids);
51 /* UE Status Low CSR */
52 static const char * const ue_status_low_desc[] = {
53         "CEV",
54         "CTX",
55         "DBUF",
56         "ERX",
57         "Host",
58         "MPU",
59         "NDMA",
60         "PTC ",
61         "RDMA ",
62         "RXF ",
63         "RXIPS ",
64         "RXULP0 ",
65         "RXULP1 ",
66         "RXULP2 ",
67         "TIM ",
68         "TPOST ",
69         "TPRE ",
70         "TXIPS ",
71         "TXULP0 ",
72         "TXULP1 ",
73         "UC ",
74         "WDMA ",
75         "TXULP2 ",
76         "HOST1 ",
77         "P0_OB_LINK ",
78         "P1_OB_LINK ",
79         "HOST_GPIO ",
80         "MBOX ",
81         "AXGMAC0",
82         "AXGMAC1",
83         "JTAG",
84         "MPU_INTPEND"
85 };
86 /* UE Status High CSR */
87 static const char * const ue_status_hi_desc[] = {
88         "LPCMEMHOST",
89         "MGMT_MAC",
90         "PCS0ONLINE",
91         "MPU_IRAM",
92         "PCS1ONLINE",
93         "PCTL0",
94         "PCTL1",
95         "PMEM",
96         "RR",
97         "TXPB",
98         "RXPP",
99         "XAUI",
100         "TXP",
101         "ARM",
102         "IPC",
103         "HOST2",
104         "HOST3",
105         "HOST4",
106         "HOST5",
107         "HOST6",
108         "HOST7",
109         "HOST8",
110         "HOST9",
111         "NETC",
112         "Unknown",
113         "Unknown",
114         "Unknown",
115         "Unknown",
116         "Unknown",
117         "Unknown",
118         "Unknown",
119         "Unknown"
120 };
121
122 /* Is BE in multi-channel mode? */
123 static inline bool be_is_mc(struct be_adapter *adapter)
124 {
125         return (adapter->function_mode & FLEX10_MODE ||
126                 adapter->function_mode & VNIC_MODE ||
127                 adapter->function_mode & UMC_ENABLED);
128 }
128
129 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
130 {
131         struct be_dma_mem *mem = &q->dma_mem;
132         if (mem->va) {
133                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
134                                   mem->dma);
135                 mem->va = NULL;
136         }
137 }
138
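/* Allocate zeroed DMA-coherent memory for a queue and record its
 * length and entry size.
 */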
139 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
140                 u16 len, u16 entry_size)
141 {
142         struct be_dma_mem *mem = &q->dma_mem;
143
144         memset(q, 0, sizeof(*q));
145         q->len = len;
146         q->entry_size = entry_size;
147         mem->size = len * entry_size;
148         mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
149                                      GFP_KERNEL | __GFP_ZERO);
150         if (!mem->va)
151                 return -ENOMEM;
152         return 0;
153 }
154
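/* Enable or disable host interrupts via the HOSTINTR bit of the membar
 * control register in PCI config space; the register is written back
 * only when the state actually changes.
 */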
155 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
156 {
157         u32 reg, enabled;
158
159         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160                                 &reg);
161         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162
163         if (!enabled && enable)
164                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165         else if (enabled && !enable)
166                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167         else
168                 return;
169
170         pci_write_config_dword(adapter->pdev,
171                         PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173
174 static void be_intr_set(struct be_adapter *adapter, bool enable)
175 {
176         int status = 0;
177
178         /* On lancer interrupts can't be controlled via this register */
179         if (lancer_chip(adapter))
180                 return;
181
182         if (adapter->eeh_error)
183                 return;
184
185         status = be_cmd_intr_set(adapter, enable);
186         if (status)
187                 be_reg_intr_set(adapter, enable);
188 }
189
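/* Ring the RQ doorbell to hand 'posted' newly refilled RX buffers to HW */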
190 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
191 {
192         u32 val = 0;
193         val |= qid & DB_RQ_RING_ID_MASK;
194         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
195
196         wmb();
197         iowrite32(val, adapter->db + DB_RQ_OFFSET);
198 }
199
200 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
201                           u16 posted)
202 {
203         u32 val = 0;
204         val |= txo->q.id & DB_TXULP_RING_ID_MASK;
205         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
206
207         wmb();
208         iowrite32(val, adapter->db + txo->db_offset);
209 }
210
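/* Ring the EQ doorbell: ack 'num_popped' events and optionally re-arm
 * the EQ and clear the interrupt.
 */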
211 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
212                 bool arm, bool clear_int, u16 num_popped)
213 {
214         u32 val = 0;
215         val |= qid & DB_EQ_RING_ID_MASK;
216         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
217                         DB_EQ_RING_ID_EXT_MASK_SHIFT);
218
219         if (adapter->eeh_error)
220                 return;
221
222         if (arm)
223                 val |= 1 << DB_EQ_REARM_SHIFT;
224         if (clear_int)
225                 val |= 1 << DB_EQ_CLR_SHIFT;
226         val |= 1 << DB_EQ_EVNT_SHIFT;
227         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
228         iowrite32(val, adapter->db + DB_EQ_OFFSET);
229 }
230
231 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
232 {
233         u32 val = 0;
234         val |= qid & DB_CQ_RING_ID_MASK;
235         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
236                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
237
238         if (adapter->eeh_error)
239                 return;
240
241         if (arm)
242                 val |= 1 << DB_CQ_REARM_SHIFT;
243         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
244         iowrite32(val, adapter->db + DB_CQ_OFFSET);
245 }
246
247 static int be_mac_addr_set(struct net_device *netdev, void *p)
248 {
249         struct be_adapter *adapter = netdev_priv(netdev);
250         struct sockaddr *addr = p;
251         int status = 0;
252         u8 current_mac[ETH_ALEN];
253         u32 pmac_id = adapter->pmac_id[0];
254         bool active_mac = true;
255
256         if (!is_valid_ether_addr(addr->sa_data))
257                 return -EADDRNOTAVAIL;
258
259         /* For a BE VF, the MAC address is already activated by the PF.
260          * Hence the only operation left is updating netdev->dev_addr.
261          * Update it only if the user passes the same MAC that was used
262          * while configuring the VF MAC from the PF (hypervisor).
263          */
264         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
265                 status = be_cmd_mac_addr_query(adapter, current_mac,
266                                                false, adapter->if_handle, 0);
267                 if (!status && !memcmp(current_mac, addr->sa_data, ETH_ALEN))
268                         goto done;
269                 else
270                         goto err;
271         }
272
273         if (!memcmp(addr->sa_data, netdev->dev_addr, ETH_ALEN))
274                 goto done;
275
276         /* For Lancer check if any MAC is active.
277          * If active, get its mac id.
278          */
279         if (lancer_chip(adapter) && !be_physfn(adapter))
280                 be_cmd_get_mac_from_list(adapter, current_mac, &active_mac,
281                                          &pmac_id, 0);
282
283         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
284                                  adapter->if_handle,
285                                  &adapter->pmac_id[0], 0);
286
287         if (status)
288                 goto err;
289
290         if (active_mac)
291                 be_cmd_pmac_del(adapter, adapter->if_handle,
292                                 pmac_id, 0);
293 done:
294         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
295         return 0;
296 err:
297         dev_err(&adapter->pdev->dev, "MAC %pM set failed\n", addr->sa_data);
298         return status;
299 }
300
301 /* BE2 supports only v0 cmd */
302 static void *hw_stats_from_cmd(struct be_adapter *adapter)
303 {
304         if (BE2_chip(adapter)) {
305                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
306
307                 return &cmd->hw_stats;
308         } else  {
309                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
310
311                 return &cmd->hw_stats;
312         }
313 }
314
315 /* BE2 supports only v0 cmd */
316 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
317 {
318         if (BE2_chip(adapter)) {
319                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
320
321                 return &hw_stats->erx;
322         } else {
323                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
324
325                 return &hw_stats->erx;
326         }
327 }
328
329 static void populate_be_v0_stats(struct be_adapter *adapter)
330 {
331         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
332         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
333         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
334         struct be_port_rxf_stats_v0 *port_stats =
335                                         &rxf_stats->port[adapter->port_num];
336         struct be_drv_stats *drvs = &adapter->drv_stats;
337
338         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
339         drvs->rx_pause_frames = port_stats->rx_pause_frames;
340         drvs->rx_crc_errors = port_stats->rx_crc_errors;
341         drvs->rx_control_frames = port_stats->rx_control_frames;
342         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
343         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
344         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
345         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
346         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
347         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
348         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
349         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
350         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
351         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
352         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
353         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
354         drvs->rx_dropped_header_too_small =
355                 port_stats->rx_dropped_header_too_small;
356         drvs->rx_address_filtered =
357                                         port_stats->rx_address_filtered +
358                                         port_stats->rx_vlan_filtered;
359         drvs->rx_alignment_symbol_errors =
360                 port_stats->rx_alignment_symbol_errors;
361
362         drvs->tx_pauseframes = port_stats->tx_pauseframes;
363         drvs->tx_controlframes = port_stats->tx_controlframes;
364
365         if (adapter->port_num)
366                 drvs->jabber_events = rxf_stats->port1_jabber_events;
367         else
368                 drvs->jabber_events = rxf_stats->port0_jabber_events;
369         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
370         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
371         drvs->forwarded_packets = rxf_stats->forwarded_packets;
372         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
373         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
374         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
375         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
376 }
377
378 static void populate_be_v1_stats(struct be_adapter *adapter)
379 {
380         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
381         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
382         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
383         struct be_port_rxf_stats_v1 *port_stats =
384                                         &rxf_stats->port[adapter->port_num];
385         struct be_drv_stats *drvs = &adapter->drv_stats;
386
387         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
388         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
389         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
390         drvs->rx_pause_frames = port_stats->rx_pause_frames;
391         drvs->rx_crc_errors = port_stats->rx_crc_errors;
392         drvs->rx_control_frames = port_stats->rx_control_frames;
393         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
394         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
395         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
396         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
397         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
398         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
399         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
400         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
401         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
402         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
403         drvs->rx_dropped_header_too_small =
404                 port_stats->rx_dropped_header_too_small;
405         drvs->rx_input_fifo_overflow_drop =
406                 port_stats->rx_input_fifo_overflow_drop;
407         drvs->rx_address_filtered = port_stats->rx_address_filtered;
408         drvs->rx_alignment_symbol_errors =
409                 port_stats->rx_alignment_symbol_errors;
410         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
411         drvs->tx_pauseframes = port_stats->tx_pauseframes;
412         drvs->tx_controlframes = port_stats->tx_controlframes;
413         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
414         drvs->jabber_events = port_stats->jabber_events;
415         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
416         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
417         drvs->forwarded_packets = rxf_stats->forwarded_packets;
418         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
419         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
420         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
421         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
422 }
423
424 static void populate_lancer_stats(struct be_adapter *adapter)
425 {
427         struct be_drv_stats *drvs = &adapter->drv_stats;
428         struct lancer_pport_stats *pport_stats =
429                                         pport_stats_from_cmd(adapter);
430
431         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
432         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
433         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
434         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
435         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
436         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
437         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
438         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
439         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
440         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
441         drvs->rx_dropped_tcp_length =
442                                 pport_stats->rx_dropped_invalid_tcp_length;
443         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
444         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
445         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
446         drvs->rx_dropped_header_too_small =
447                                 pport_stats->rx_dropped_header_too_small;
448         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
449         drvs->rx_address_filtered =
450                                         pport_stats->rx_address_filtered +
451                                         pport_stats->rx_vlan_filtered;
452         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
453         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
454         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
455         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
456         drvs->jabber_events = pport_stats->rx_jabbers;
457         drvs->forwarded_packets = pport_stats->num_forwards_lo;
458         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
459         drvs->rx_drops_too_many_frags =
460                                 pport_stats->rx_drops_too_many_frags_lo;
461 }
462
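/* Accumulate a 16-bit HW counter that wraps at 65535 into a 32-bit SW
 * total: the low 16 bits mirror the current HW value and the upper bits
 * count the wraps.
 */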
463 static void accumulate_16bit_val(u32 *acc, u16 val)
464 {
465 #define lo(x)                   (x & 0xFFFF)
466 #define hi(x)                   (x & 0xFFFF0000)
467         bool wrapped = val < lo(*acc);
468         u32 newacc = hi(*acc) + val;
469
470         if (wrapped)
471                 newacc += 65536;
472         ACCESS_ONCE(*acc) = newacc;
473 }
474
475 void populate_erx_stats(struct be_adapter *adapter,
476                         struct be_rx_obj *rxo,
477                         u32 erx_stat)
478 {
479         if (!BEx_chip(adapter))
480                 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
481         else
482                 /* The erx HW counter below can wrap around after
483                  * 65535. The driver accumulates it into a 32-bit value.
484                  */
485                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
486                                      (u16)erx_stat);
487 }
488
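/* Populate driver stats from the stats cmd response: Lancer pport stats,
 * or v0/v1 cmd stats for BE2/BE3/Skyhawk, plus the per-RXQ erx drop
 * counters.
 */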
489 void be_parse_stats(struct be_adapter *adapter)
490 {
491         struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
492         struct be_rx_obj *rxo;
493         int i;
494         u32 erx_stat;
495
496         if (lancer_chip(adapter)) {
497                 populate_lancer_stats(adapter);
498         } else {
499                 if (BE2_chip(adapter))
500                         populate_be_v0_stats(adapter);
501                 else
502                         /* for BE3 and Skyhawk */
503                         populate_be_v1_stats(adapter);
504
505                 /* as erx_v1 is longer than v0, ok to use v1 for v0 access */
506                 for_all_rx_queues(adapter, rxo, i) {
507                         erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
508                         populate_erx_stats(adapter, rxo, erx_stat);
509                 }
510         }
511 }
512
513 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
514                                         struct rtnl_link_stats64 *stats)
515 {
516         struct be_adapter *adapter = netdev_priv(netdev);
517         struct be_drv_stats *drvs = &adapter->drv_stats;
518         struct be_rx_obj *rxo;
519         struct be_tx_obj *txo;
520         u64 pkts, bytes;
521         unsigned int start;
522         int i;
523
524         for_all_rx_queues(adapter, rxo, i) {
525                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
526                 do {
527                         start = u64_stats_fetch_begin_bh(&rx_stats->sync);
528                         pkts = rx_stats(rxo)->rx_pkts;
529                         bytes = rx_stats(rxo)->rx_bytes;
530                 } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
531                 stats->rx_packets += pkts;
532                 stats->rx_bytes += bytes;
533                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
534                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
535                                         rx_stats(rxo)->rx_drops_no_frags;
536         }
537
538         for_all_tx_queues(adapter, txo, i) {
539                 const struct be_tx_stats *tx_stats = tx_stats(txo);
540                 do {
541                         start = u64_stats_fetch_begin_bh(&tx_stats->sync);
542                         pkts = tx_stats(txo)->tx_pkts;
543                         bytes = tx_stats(txo)->tx_bytes;
544                 } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
545                 stats->tx_packets += pkts;
546                 stats->tx_bytes += bytes;
547         }
548
549         /* bad pkts received */
550         stats->rx_errors = drvs->rx_crc_errors +
551                 drvs->rx_alignment_symbol_errors +
552                 drvs->rx_in_range_errors +
553                 drvs->rx_out_range_errors +
554                 drvs->rx_frame_too_long +
555                 drvs->rx_dropped_too_small +
556                 drvs->rx_dropped_too_short +
557                 drvs->rx_dropped_header_too_small +
558                 drvs->rx_dropped_tcp_length +
559                 drvs->rx_dropped_runt;
560
561         /* detailed rx errors */
562         stats->rx_length_errors = drvs->rx_in_range_errors +
563                 drvs->rx_out_range_errors +
564                 drvs->rx_frame_too_long;
565
566         stats->rx_crc_errors = drvs->rx_crc_errors;
567
568         /* frame alignment errors */
569         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
570
571         /* receiver fifo overrun */
572         /* drops_no_pbuf is not per i/f; it's per BE card */
573         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
574                                 drvs->rx_input_fifo_overflow_drop +
575                                 drvs->rx_drops_no_pbuf;
576         return stats;
577 }
578
579 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
580 {
581         struct net_device *netdev = adapter->netdev;
582
583         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
584                 netif_carrier_off(netdev);
585                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
586         }
587
588         if ((link_status & LINK_STATUS_MASK) == LINK_UP)
589                 netif_carrier_on(netdev);
590         else
591                 netif_carrier_off(netdev);
592 }
593
594 static void be_tx_stats_update(struct be_tx_obj *txo,
595                         u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
596 {
597         struct be_tx_stats *stats = tx_stats(txo);
598
599         u64_stats_update_begin(&stats->sync);
600         stats->tx_reqs++;
601         stats->tx_wrbs += wrb_cnt;
602         stats->tx_bytes += copied;
603         stats->tx_pkts += (gso_segs ? gso_segs : 1);
604         if (stopped)
605                 stats->tx_stops++;
606         u64_stats_update_end(&stats->sync);
607 }
608
609 /* Determine number of WRB entries needed to xmit data in an skb */
610 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
611                                                                 bool *dummy)
612 {
613         int cnt = (skb->len > skb->data_len);
614
615         cnt += skb_shinfo(skb)->nr_frags;
616
617         /* to account for hdr wrb */
618         cnt++;
619         if (lancer_chip(adapter) || !(cnt & 1)) {
620                 *dummy = false;
621         } else {
622                 /* add a dummy to make it an even num */
623                 cnt++;
624                 *dummy = true;
625         }
626         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
627         return cnt;
628 }
629
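/* Fill a TX WRB with the DMA address and length of one fragment */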
630 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
631 {
632         wrb->frag_pa_hi = upper_32_bits(addr);
633         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
634         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
635         wrb->rsvd0 = 0;
636 }
637
638 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
639                                         struct sk_buff *skb)
640 {
641         u8 vlan_prio;
642         u16 vlan_tag;
643
644         vlan_tag = vlan_tx_tag_get(skb);
645         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
646         /* If vlan priority provided by OS is NOT in available bmap */
647         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
648                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
649                                 adapter->recommended_prio;
650
651         return vlan_tag;
652 }
653
654 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
655                 struct sk_buff *skb, u32 wrb_cnt, u32 len, bool skip_hw_vlan)
656 {
657         u16 vlan_tag;
658
659         memset(hdr, 0, sizeof(*hdr));
660
661         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
662
663         if (skb_is_gso(skb)) {
664                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
665                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
666                         hdr, skb_shinfo(skb)->gso_size);
667                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
668                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
669         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
670                 if (is_tcp_pkt(skb))
671                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
672                 else if (is_udp_pkt(skb))
673                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
674         }
675
676         if (vlan_tx_tag_present(skb)) {
677                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
678                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
679                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
680         }
681
682         /* To skip HW VLAN tagging: evt = 1, compl = 0 */
683         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
684         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
685         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
686         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
687 }
688
689 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
690                 bool unmap_single)
691 {
692         dma_addr_t dma;
693
694         be_dws_le_to_cpu(wrb, sizeof(*wrb));
695
696         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
697         if (wrb->frag_len) {
698                 if (unmap_single)
699                         dma_unmap_single(dev, dma, wrb->frag_len,
700                                          DMA_TO_DEVICE);
701                 else
702                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
703         }
704 }
705
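/* Map the skb head and frags for DMA and fill one WRB per fragment,
 * preceded by a header WRB (plus an optional dummy WRB to make the WRB
 * count even). Returns the number of bytes queued, or 0 on a DMA mapping
 * error, in which case all mappings made so far are unwound.
 */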
706 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
707                 struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
708                 bool skip_hw_vlan)
709 {
710         dma_addr_t busaddr;
711         int i, copied = 0;
712         struct device *dev = &adapter->pdev->dev;
713         struct sk_buff *first_skb = skb;
714         struct be_eth_wrb *wrb;
715         struct be_eth_hdr_wrb *hdr;
716         bool map_single = false;
717         u16 map_head;
718
719         hdr = queue_head_node(txq);
720         queue_head_inc(txq);
721         map_head = txq->head;
722
723         if (skb->len > skb->data_len) {
724                 int len = skb_headlen(skb);
725                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
726                 if (dma_mapping_error(dev, busaddr))
727                         goto dma_err;
728                 map_single = true;
729                 wrb = queue_head_node(txq);
730                 wrb_fill(wrb, busaddr, len);
731                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
732                 queue_head_inc(txq);
733                 copied += len;
734         }
735
736         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
737                 const struct skb_frag_struct *frag =
738                         &skb_shinfo(skb)->frags[i];
739                 busaddr = skb_frag_dma_map(dev, frag, 0,
740                                            skb_frag_size(frag), DMA_TO_DEVICE);
741                 if (dma_mapping_error(dev, busaddr))
742                         goto dma_err;
743                 wrb = queue_head_node(txq);
744                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
745                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
746                 queue_head_inc(txq);
747                 copied += skb_frag_size(frag);
748         }
749
750         if (dummy_wrb) {
751                 wrb = queue_head_node(txq);
752                 wrb_fill(wrb, 0, 0);
753                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
754                 queue_head_inc(txq);
755         }
756
757         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
758         be_dws_cpu_to_le(hdr, sizeof(*hdr));
759
760         return copied;
761 dma_err:
762         txq->head = map_head;
763         while (copied) {
764                 wrb = queue_head_node(txq);
765                 unmap_tx_frag(dev, wrb, map_single);
766                 map_single = false;
767                 copied -= wrb->frag_len;
768                 queue_head_inc(txq);
769         }
770         return 0;
771 }
772
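/* Insert the VLAN tag (and an outer QnQ tag, if configured) into the
 * packet data itself and flag the caller to skip HW VLAN insertion.
 */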
773 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
774                                              struct sk_buff *skb,
775                                              bool *skip_hw_vlan)
776 {
777         u16 vlan_tag = 0;
778
779         skb = skb_share_check(skb, GFP_ATOMIC);
780         if (unlikely(!skb))
781                 return skb;
782
783         if (vlan_tx_tag_present(skb))
784                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
785
786         if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
787                 if (!vlan_tag)
788                         vlan_tag = adapter->pvid;
789                 /* f/w workaround: setting skip_hw_vlan = 1 informs the
790                  * f/w to skip VLAN insertion
791                  */
792                 if (skip_hw_vlan)
793                         *skip_hw_vlan = true;
794         }
795
796         if (vlan_tag) {
797                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
798                 if (unlikely(!skb))
799                         return skb;
800                 skb->vlan_tci = 0;
801         }
802
803         /* Insert the outer VLAN, if any */
804         if (adapter->qnq_vid) {
805                 vlan_tag = adapter->qnq_vid;
806                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
807                 if (unlikely(!skb))
808                         return skb;
809                 if (skip_hw_vlan)
810                         *skip_hw_vlan = true;
811         }
812
813         return skb;
814 }
815
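/* Check for the IPv6 extension-header pattern (a hdrlen byte of 0xff
 * right after the IPv6 header) that can stall the HW when VLAN tagging
 * is requested.
 */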
816 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
817 {
818         struct ethhdr *eh = (struct ethhdr *)skb->data;
819         u16 offset = ETH_HLEN;
820
821         if (eh->h_proto == htons(ETH_P_IPV6)) {
822                 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
823
824                 offset += sizeof(struct ipv6hdr);
825                 if (ip6h->nexthdr != NEXTHDR_TCP &&
826                     ip6h->nexthdr != NEXTHDR_UDP) {
827                         struct ipv6_opt_hdr *ehdr =
828                                 (struct ipv6_opt_hdr *) (skb->data + offset);
829
830                         /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
831                         if (ehdr->hdrlen == 0xff)
832                                 return true;
833                 }
834         }
835         return false;
836 }
837
838 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
839 {
840         return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
841 }
842
843 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
844 {
845         return BE3_chip(adapter) &&
846                 be_ipv6_exthdr_check(skb);
847 }
848
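/* ndo_start_xmit handler: applies the padding and VLAN workarounds,
 * builds the WRBs for the skb and rings the TX doorbell.
 */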
849 static netdev_tx_t be_xmit(struct sk_buff *skb,
850                         struct net_device *netdev)
851 {
852         struct be_adapter *adapter = netdev_priv(netdev);
853         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
854         struct be_queue_info *txq = &txo->q;
855         struct iphdr *ip = NULL;
856         u32 wrb_cnt = 0, copied = 0;
857         u32 start = txq->head, eth_hdr_len;
858         bool dummy_wrb, stopped = false;
859         bool skip_hw_vlan = false;
860         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
861
862         eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
863                 VLAN_ETH_HLEN : ETH_HLEN;
864
865         /* For padded packets, BE HW modifies the tot_len field in the IP
866          * header incorrectly when a VLAN tag is inserted by HW.
867          */
868         if (skb->len <= 60 && vlan_tx_tag_present(skb) && is_ipv4_pkt(skb)) {
869                 ip = (struct iphdr *)ip_hdr(skb);
870                 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
871         }
872
873         /* If vlan tag is already inlined in the packet, skip HW VLAN
874          * tagging in UMC mode
875          */
876         if ((adapter->function_mode & UMC_ENABLED) &&
877             veh->h_vlan_proto == htons(ETH_P_8021Q))
878                 skip_hw_vlan = true;
879
880         /* HW has a bug wherein it will calculate CSUM for VLAN
881          * pkts even though it is disabled.
882          * Manually insert VLAN in pkt.
883          */
884         if (skb->ip_summed != CHECKSUM_PARTIAL &&
885                         vlan_tx_tag_present(skb)) {
886                 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
887                 if (unlikely(!skb))
888                         goto tx_drop;
889         }
890
891         /* HW may lock up when VLAN HW tagging is requested on
892          * certain ipv6 packets. Drop such pkts if the HW workaround to
893          * skip HW tagging is not enabled by FW.
894          */
895         if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
896                      (adapter->pvid || adapter->qnq_vid) &&
897                      !qnq_async_evt_rcvd(adapter)))
898                 goto tx_drop;
899
900         /* Manual VLAN tag insertion to prevent an ASIC lockup when the
901          * ASIC inserts a VLAN tag into certain ipv6 packets: insert the
902          * VLAN tag in the driver instead, and set the event, completion
903          * and vlan bits accordingly in the Tx WRB.
904          */
906         if (be_ipv6_tx_stall_chk(adapter, skb) &&
907             be_vlan_tag_tx_chk(adapter, skb)) {
908                 skb = be_insert_vlan_in_pkt(adapter, skb, &skip_hw_vlan);
909                 if (unlikely(!skb))
910                         goto tx_drop;
911         }
912
913         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
914
915         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
916                               skip_hw_vlan);
917         if (copied) {
918                 int gso_segs = skb_shinfo(skb)->gso_segs;
919
920                 /* record the sent skb in the sent_skb table */
921                 BUG_ON(txo->sent_skb_list[start]);
922                 txo->sent_skb_list[start] = skb;
923
924                 /* Ensure txq has space for the next skb; else stop the queue
925                  * *BEFORE* ringing the tx doorbell, so that we serialize the
926                  * tx compls of the current transmit, which will wake the queue
927                  */
928                 atomic_add(wrb_cnt, &txq->used);
929                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
930                                                                 txq->len) {
931                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
932                         stopped = true;
933                 }
934
935                 be_txq_notify(adapter, txo, wrb_cnt);
936
937                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
938         } else {
939                 txq->head = start;
940                 dev_kfree_skb_any(skb);
941         }
942 tx_drop:
943         return NETDEV_TX_OK;
944 }
945
946 static int be_change_mtu(struct net_device *netdev, int new_mtu)
947 {
948         struct be_adapter *adapter = netdev_priv(netdev);
949         if (new_mtu < BE_MIN_MTU ||
950                         new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
951                                         (ETH_HLEN + ETH_FCS_LEN))) {
952                 dev_info(&adapter->pdev->dev,
953                         "MTU must be between %d and %d bytes\n",
954                         BE_MIN_MTU,
955                         (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
956                 return -EINVAL;
957         }
958         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
959                         netdev->mtu, new_mtu);
960         netdev->mtu = new_mtu;
961         return 0;
962 }
963
964 /*
965  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
966  * If the user configures more, place BE in vlan promiscuous mode.
967  */
968 static int be_vid_config(struct be_adapter *adapter)
969 {
970         u16 vids[BE_NUM_VLANS_SUPPORTED];
971         u16 num = 0, i;
972         int status = 0;
973
974         /* No need to further configure vids if in promiscuous mode */
975         if (adapter->promiscuous)
976                 return 0;
977
978         if (adapter->vlans_added > adapter->max_vlans)
979                 goto set_vlan_promisc;
980
981         /* Construct VLAN Table to give to HW */
982         for (i = 0; i < VLAN_N_VID; i++)
983                 if (adapter->vlan_tag[i])
984                         vids[num++] = cpu_to_le16(i);
985
986         status = be_cmd_vlan_config(adapter, adapter->if_handle,
987                                     vids, num, 1, 0);
988
989         /* Set to VLAN promisc mode as setting VLAN filter failed */
990         if (status) {
991                 dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
992                 dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
993                 goto set_vlan_promisc;
994         }
995
996         return status;
997
998 set_vlan_promisc:
999         status = be_cmd_vlan_config(adapter, adapter->if_handle,
1000                                     NULL, 0, 1, 1);
1001         return status;
1002 }
1003
1004 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1005 {
1006         struct be_adapter *adapter = netdev_priv(netdev);
1007         int status = 0;
1008
1009         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1010                 status = -EINVAL;
1011                 goto ret;
1012         }
1013
1014         /* Packets with VID 0 are always received by Lancer by default */
1015         if (lancer_chip(adapter) && vid == 0)
1016                 goto ret;
1017
1018         adapter->vlan_tag[vid] = 1;
1019         if (adapter->vlans_added <= (adapter->max_vlans + 1))
1020                 status = be_vid_config(adapter);
1021
1022         if (!status)
1023                 adapter->vlans_added++;
1024         else
1025                 adapter->vlan_tag[vid] = 0;
1026 ret:
1027         return status;
1028 }
1029
1030 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1031 {
1032         struct be_adapter *adapter = netdev_priv(netdev);
1033         int status = 0;
1034
1035         if (!lancer_chip(adapter) && !be_physfn(adapter)) {
1036                 status = -EINVAL;
1037                 goto ret;
1038         }
1039
1040         /* Packets with VID 0 are always received by Lancer by default */
1041         if (lancer_chip(adapter) && vid == 0)
1042                 goto ret;
1043
1044         adapter->vlan_tag[vid] = 0;
1045         if (adapter->vlans_added <= adapter->max_vlans)
1046                 status = be_vid_config(adapter);
1047
1048         if (!status)
1049                 adapter->vlans_added--;
1050         else
1051                 adapter->vlan_tag[vid] = 1;
1052 ret:
1053         return status;
1054 }
1055
1056 static void be_set_rx_mode(struct net_device *netdev)
1057 {
1058         struct be_adapter *adapter = netdev_priv(netdev);
1059         int status;
1060
1061         if (netdev->flags & IFF_PROMISC) {
1062                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1063                 adapter->promiscuous = true;
1064                 goto done;
1065         }
1066
1067         /* BE was previously in promiscuous mode; disable it */
1068         if (adapter->promiscuous) {
1069                 adapter->promiscuous = false;
1070                 be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1071
1072                 if (adapter->vlans_added)
1073                         be_vid_config(adapter);
1074         }
1075
1076         /* Enable multicast promisc if the number configured exceeds what we support */
1077         if (netdev->flags & IFF_ALLMULTI ||
1078             netdev_mc_count(netdev) > adapter->max_mcast_mac) {
1079                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1080                 goto done;
1081         }
1082
1083         if (netdev_uc_count(netdev) != adapter->uc_macs) {
1084                 struct netdev_hw_addr *ha;
1085                 int i = 1; /* First slot is claimed by the Primary MAC */
1086
1087                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1088                         be_cmd_pmac_del(adapter, adapter->if_handle,
1089                                         adapter->pmac_id[i], 0);
1090                 }
1091
1092                 if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
1093                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1094                         adapter->promiscuous = true;
1095                         goto done;
1096                 }
1097
1098                 netdev_for_each_uc_addr(ha, adapter->netdev) {
1099                         adapter->uc_macs++; /* First slot is for Primary MAC */
1100                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1101                                         adapter->if_handle,
1102                                         &adapter->pmac_id[adapter->uc_macs], 0);
1103                 }
1104         }
1105
1106         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1107
1108         /* Set to MCAST promisc mode if setting MULTICAST address fails */
1109         if (status) {
1110                 dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
1111                 dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
1112                 be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1113         }
1114 done:
1115         return;
1116 }
1117
1118 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1119 {
1120         struct be_adapter *adapter = netdev_priv(netdev);
1121         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1122         int status;
1123         bool active_mac = false;
1124         u32 pmac_id;
1125         u8 old_mac[ETH_ALEN];
1126
1127         if (!sriov_enabled(adapter))
1128                 return -EPERM;
1129
1130         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1131                 return -EINVAL;
1132
1133         if (lancer_chip(adapter)) {
1134                 status = be_cmd_get_mac_from_list(adapter, old_mac, &active_mac,
1135                                                   &pmac_id, vf + 1);
1136                 if (!status && active_mac)
1137                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1138                                         pmac_id, vf + 1);
1139
1140                 status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
1141         } else {
1142                 status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
1143                                          vf_cfg->pmac_id, vf + 1);
1144
1145                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1146                                          &vf_cfg->pmac_id, vf + 1);
1147         }
1148
1149         if (status)
1150                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed\n",
1151                                 mac, vf);
1152         else
1153                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
1154
1155         return status;
1156 }
1157
1158 static int be_get_vf_config(struct net_device *netdev, int vf,
1159                         struct ifla_vf_info *vi)
1160 {
1161         struct be_adapter *adapter = netdev_priv(netdev);
1162         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1163
1164         if (!sriov_enabled(adapter))
1165                 return -EPERM;
1166
1167         if (vf >= adapter->num_vfs)
1168                 return -EINVAL;
1169
1170         vi->vf = vf;
1171         vi->tx_rate = vf_cfg->tx_rate;
1172         vi->vlan = vf_cfg->vlan_tag;
1173         vi->qos = 0;
1174         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1175
1176         return 0;
1177 }
1178
1179 static int be_set_vf_vlan(struct net_device *netdev,
1180                         int vf, u16 vlan, u8 qos)
1181 {
1182         struct be_adapter *adapter = netdev_priv(netdev);
1183         int status = 0;
1184
1185         if (!sriov_enabled(adapter))
1186                 return -EPERM;
1187
1188         if (vf >= adapter->num_vfs || vlan > 4095)
1189                 return -EINVAL;
1190
1191         if (vlan) {
1192                 if (adapter->vf_cfg[vf].vlan_tag != vlan) {
1193                         /* If this is a new value, program it; else skip. */
1194                         adapter->vf_cfg[vf].vlan_tag = vlan;
1195
1196                         status = be_cmd_set_hsw_config(adapter, vlan,
1197                                 vf + 1, adapter->vf_cfg[vf].if_handle);
1198                 }
1199         } else {
1200                 /* Reset Transparent Vlan Tagging. */
1201                 adapter->vf_cfg[vf].vlan_tag = 0;
1202                 vlan = adapter->vf_cfg[vf].def_vid;
1203                 status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1204                         adapter->vf_cfg[vf].if_handle);
1205         }
1206
1208         if (status)
1209                 dev_info(&adapter->pdev->dev,
1210                                 "VLAN %d config on VF %d failed\n", vlan, vf);
1211         return status;
1212 }
1213
1214 static int be_set_vf_tx_rate(struct net_device *netdev,
1215                         int vf, int rate)
1216 {
1217         struct be_adapter *adapter = netdev_priv(netdev);
1218         int status = 0;
1219
1220         if (!sriov_enabled(adapter))
1221                 return -EPERM;
1222
1223         if (vf >= adapter->num_vfs)
1224                 return -EINVAL;
1225
1226         if (rate < 100 || rate > 10000) {
1227                 dev_err(&adapter->pdev->dev,
1228                         "tx rate must be between 100 and 10000 Mbps\n");
1229                 return -EINVAL;
1230         }
1231
1232         if (lancer_chip(adapter))
1233                 status = be_cmd_set_profile_config(adapter, rate / 10, vf + 1);
1234         else
1235                 status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
1236
1237         if (status)
1238                 dev_err(&adapter->pdev->dev,
1239                                 "tx rate %d on VF %d failed\n", rate, vf);
1240         else
1241                 adapter->vf_cfg[vf].tx_rate = rate;
1242         return status;
1243 }
1244
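/* Count this adapter's VFs by walking the PCI device list; with
 * vf_state == ASSIGNED, count only the VFs assigned to a guest.
 */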
1245 static int be_find_vfs(struct be_adapter *adapter, int vf_state)
1246 {
1247         struct pci_dev *dev, *pdev = adapter->pdev;
1248         int vfs = 0, assigned_vfs = 0, pos;
1249         u16 offset, stride;
1250
1251         pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
1252         if (!pos)
1253                 return 0;
1254         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
1255         pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);
1256
1257         dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
1258         while (dev) {
1259                 if (dev->is_virtfn && pci_physfn(dev) == pdev) {
1260                         vfs++;
1261                         if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
1262                                 assigned_vfs++;
1263                 }
1264                 dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
1265         }
1266         return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
1267 }
1268
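/* Adaptive interrupt coalescing: once a second, derive a new EQ delay
 * from the observed RX packet rate, clamp it to [min_eqd, max_eqd] and
 * program it if it changed.
 */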
1269 static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
1270 {
1271         struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1272         ulong now = jiffies;
1273         ulong delta = now - stats->rx_jiffies;
1274         u64 pkts;
1275         unsigned int start, eqd;
1276
1277         if (!eqo->enable_aic) {
1278                 eqd = eqo->eqd;
1279                 goto modify_eqd;
1280         }
1281
1282         if (eqo->idx >= adapter->num_rx_qs)
1283                 return;
1284
1285         stats = rx_stats(&adapter->rx_obj[eqo->idx]);
1286
1287         /* Wrapped around */
1288         if (time_before(now, stats->rx_jiffies)) {
1289                 stats->rx_jiffies = now;
1290                 return;
1291         }
1292
1293         /* Update once a second */
1294         if (delta < HZ)
1295                 return;
1296
1297         do {
1298                 start = u64_stats_fetch_begin_bh(&stats->sync);
1299                 pkts = stats->rx_pkts;
1300         } while (u64_stats_fetch_retry_bh(&stats->sync, start));
1301
1302         stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1303         stats->rx_pkts_prev = pkts;
1304         stats->rx_jiffies = now;
1305         eqd = (stats->rx_pps / 110000) << 3;
1306         eqd = min(eqd, eqo->max_eqd);
1307         eqd = max(eqd, eqo->min_eqd);
1308         if (eqd < 10)
1309                 eqd = 0;
1310
1311 modify_eqd:
1312         if (eqd != eqo->cur_eqd) {
1313                 be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
1314                 eqo->cur_eqd = eqd;
1315         }
1316 }
1317
1318 static void be_rx_stats_update(struct be_rx_obj *rxo,
1319                 struct be_rx_compl_info *rxcp)
1320 {
1321         struct be_rx_stats *stats = rx_stats(rxo);
1322
1323         u64_stats_update_begin(&stats->sync);
1324         stats->rx_compl++;
1325         stats->rx_bytes += rxcp->pkt_size;
1326         stats->rx_pkts++;
1327         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1328                 stats->rx_mcast_pkts++;
1329         if (rxcp->err)
1330                 stats->rx_compl_err++;
1331         u64_stats_update_end(&stats->sync);
1332 }
1333
1334 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1335 {
1336         /* L4 checksum is not reliable for non-TCP/UDP packets.
1337          * Also ignore ipcksm for ipv6 pkts */
1338         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1339                                 (rxcp->ip_csum || rxcp->ipv6);
1340 }
1341
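/* Return the page_info for the given rxq index, unmapping the DMA page
 * once its last buffer has been consumed.
 */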
1342 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
1343                                                 u16 frag_idx)
1344 {
1345         struct be_adapter *adapter = rxo->adapter;
1346         struct be_rx_page_info *rx_page_info;
1347         struct be_queue_info *rxq = &rxo->q;
1348
1349         rx_page_info = &rxo->page_info_tbl[frag_idx];
1350         BUG_ON(!rx_page_info->page);
1351
1352         if (rx_page_info->last_page_user) {
1353                 dma_unmap_page(&adapter->pdev->dev,
1354                                dma_unmap_addr(rx_page_info, bus),
1355                                adapter->big_page_size, DMA_FROM_DEVICE);
1356                 rx_page_info->last_page_user = false;
1357         }
1358
1359         atomic_dec(&rxq->used);
1360         return rx_page_info;
1361 }
1362
1363 /* Throw away the data in the Rx completion */
1364 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1365                                 struct be_rx_compl_info *rxcp)
1366 {
1367         struct be_queue_info *rxq = &rxo->q;
1368         struct be_rx_page_info *page_info;
1369         u16 i, num_rcvd = rxcp->num_rcvd;
1370
1371         for (i = 0; i < num_rcvd; i++) {
1372                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1373                 put_page(page_info->page);
1374                 memset(page_info, 0, sizeof(*page_info));
1375                 index_inc(&rxcp->rxq_idx, rxq->len);
1376         }
1377 }
1378
1379 /*
1380  * skb_fill_rx_data forms a complete skb for an ether frame
1381  * indicated by rxcp.
1382  */
1383 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1384                              struct be_rx_compl_info *rxcp)
1385 {
1386         struct be_queue_info *rxq = &rxo->q;
1387         struct be_rx_page_info *page_info;
1388         u16 i, j;
1389         u16 hdr_len, curr_frag_len, remaining;
1390         u8 *start;
1391
1392         page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1393         start = page_address(page_info->page) + page_info->page_offset;
1394         prefetch(start);
1395
1396         /* Copy data in the first descriptor of this completion */
1397         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1398
1399         skb->len = curr_frag_len;
1400         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1401                 memcpy(skb->data, start, curr_frag_len);
1402                 /* Complete packet has now been moved to data */
1403                 put_page(page_info->page);
1404                 skb->data_len = 0;
1405                 skb->tail += curr_frag_len;
1406         } else {
1407                 hdr_len = ETH_HLEN;
1408                 memcpy(skb->data, start, hdr_len);
1409                 skb_shinfo(skb)->nr_frags = 1;
1410                 skb_frag_set_page(skb, 0, page_info->page);
1411                 skb_shinfo(skb)->frags[0].page_offset =
1412                                         page_info->page_offset + hdr_len;
1413                 skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1414                 skb->data_len = curr_frag_len - hdr_len;
1415                 skb->truesize += rx_frag_size;
1416                 skb->tail += hdr_len;
1417         }
1418         page_info->page = NULL;
1419
1420         if (rxcp->pkt_size <= rx_frag_size) {
1421                 BUG_ON(rxcp->num_rcvd != 1);
1422                 return;
1423         }
1424
1425         /* More frags present for this completion */
1426         index_inc(&rxcp->rxq_idx, rxq->len);
1427         remaining = rxcp->pkt_size - curr_frag_len;
1428         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1429                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1430                 curr_frag_len = min(remaining, rx_frag_size);
1431
1432                 /* Coalesce all frags from the same physical page in one slot */
1433                 if (page_info->page_offset == 0) {
1434                         /* Fresh page */
1435                         j++;
1436                         skb_frag_set_page(skb, j, page_info->page);
1437                         skb_shinfo(skb)->frags[j].page_offset =
1438                                                         page_info->page_offset;
1439                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1440                         skb_shinfo(skb)->nr_frags++;
1441                 } else {
1442                         put_page(page_info->page);
1443                 }
1444
1445                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1446                 skb->len += curr_frag_len;
1447                 skb->data_len += curr_frag_len;
1448                 skb->truesize += rx_frag_size;
1449                 remaining -= curr_frag_len;
1450                 index_inc(&rxcp->rxq_idx, rxq->len);
1451                 page_info->page = NULL;
1452         }
1453         BUG_ON(j > MAX_SKB_FRAGS);
1454 }
1455
1456 /* Process the RX completion indicated by rxcp when GRO is disabled */
1457 static void be_rx_compl_process(struct be_rx_obj *rxo,
1458                                 struct be_rx_compl_info *rxcp)
1459 {
1460         struct be_adapter *adapter = rxo->adapter;
1461         struct net_device *netdev = adapter->netdev;
1462         struct sk_buff *skb;
1463
1464         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1465         if (unlikely(!skb)) {
1466                 rx_stats(rxo)->rx_drops_no_skbs++;
1467                 be_rx_compl_discard(rxo, rxcp);
1468                 return;
1469         }
1470
1471         skb_fill_rx_data(rxo, skb, rxcp);
1472
1473         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1474                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1475         else
1476                 skb_checksum_none_assert(skb);
1477
1478         skb->protocol = eth_type_trans(skb, netdev);
1479         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1480         if (netdev->features & NETIF_F_RXHASH)
1481                 skb->rxhash = rxcp->rss_hash;
1482
1484         if (rxcp->vlanf)
1485                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1486
1487         netif_receive_skb(skb);
1488 }
1489
1490 /* Process the RX completion indicated by rxcp when GRO is enabled */
1491 void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
1492                              struct be_rx_compl_info *rxcp)
1493 {
1494         struct be_adapter *adapter = rxo->adapter;
1495         struct be_rx_page_info *page_info;
1496         struct sk_buff *skb = NULL;
1497         struct be_queue_info *rxq = &rxo->q;
1498         u16 remaining, curr_frag_len;
1499         u16 i, j;
1500
1501         skb = napi_get_frags(napi);
1502         if (!skb) {
1503                 be_rx_compl_discard(rxo, rxcp);
1504                 return;
1505         }
1506
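             /* Attach every completed frag to the NAPI-owned skb as a page
              * fragment, coalescing frags that continue within the same big
              * page into one slot.  Note j starts at -1 (wraps for u16) and
              * is bumped to 0 on the first frag.
              */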
1507         remaining = rxcp->pkt_size;
1508         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1509                 page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
1510
1511                 curr_frag_len = min(remaining, rx_frag_size);
1512
1513                 /* Coalesce all frags from the same physical page in one slot */
1514                 if (i == 0 || page_info->page_offset == 0) {
1515                         /* First frag or Fresh page */
1516                         j++;
1517                         skb_frag_set_page(skb, j, page_info->page);
1518                         skb_shinfo(skb)->frags[j].page_offset =
1519                                                         page_info->page_offset;
1520                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1521                 } else {
1522                         put_page(page_info->page);
1523                 }
1524                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1525                 skb->truesize += rx_frag_size;
1526                 remaining -= curr_frag_len;
1527                 index_inc(&rxcp->rxq_idx, rxq->len);
1528                 memset(page_info, 0, sizeof(*page_info));
1529         }
1530         BUG_ON(j > MAX_SKB_FRAGS);
1531
1532         skb_shinfo(skb)->nr_frags = j + 1;
1533         skb->len = rxcp->pkt_size;
1534         skb->data_len = rxcp->pkt_size;
1535         skb->ip_summed = CHECKSUM_UNNECESSARY;
1536         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1537         if (adapter->netdev->features & NETIF_F_RXHASH)
1538                 skb->rxhash = rxcp->rss_hash;
1539
1540         if (rxcp->vlanf)
1541                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1542
1543         napi_gro_frags(napi);
1544 }
1545
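     /* The two parse helpers below decode a raw RX completion descriptor
      * into be_rx_compl_info using AMAP_GET_BITS field extraction: v1 is
      * the BE3-native layout, v0 the legacy one (which also carries the
      * ip_frag bit).
      */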
1546 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1547                                  struct be_rx_compl_info *rxcp)
1548 {
1549         rxcp->pkt_size =
1550                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1551         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1552         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1553         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1554         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1555         rxcp->ip_csum =
1556                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1557         rxcp->l4_csum =
1558                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1559         rxcp->ipv6 =
1560                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1561         rxcp->rxq_idx =
1562                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1563         rxcp->num_rcvd =
1564                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1565         rxcp->pkt_type =
1566                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1567         rxcp->rss_hash =
1568                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1569         if (rxcp->vlanf) {
1570                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1571                                           compl);
1572                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1573                                                compl);
1574         }
1575         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1576 }
1577
1578 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1579                                  struct be_rx_compl_info *rxcp)
1580 {
1581         rxcp->pkt_size =
1582                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1583         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1584         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1585         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1586         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1587         rxcp->ip_csum =
1588                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1589         rxcp->l4_csum =
1590                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1591         rxcp->ipv6 =
1592                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1593         rxcp->rxq_idx =
1594                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1595         rxcp->num_rcvd =
1596                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1597         rxcp->pkt_type =
1598                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1599         rxcp->rss_hash =
1600                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1601         if (rxcp->vlanf) {
1602                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1603                                           compl);
1604                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1605                                                compl);
1606         }
1607         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1608         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1609                                       ip_frag, compl);
1610 }
1611
1612 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1613 {
1614         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1615         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1616         struct be_adapter *adapter = rxo->adapter;
1617
1618         /* For checking the valid bit it is OK to use either definition, as the
1619          * valid bit is at the same position in both v0 and v1 Rx compl */
1620         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1621                 return NULL;
1622
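             /* Read the completion body only after the valid bit is seen;
              * the rmb() orders the check above against the loads below.
              */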
1623         rmb();
1624         be_dws_le_to_cpu(compl, sizeof(*compl));
1625
1626         if (adapter->be3_native)
1627                 be_parse_rx_compl_v1(compl, rxcp);
1628         else
1629                 be_parse_rx_compl_v0(compl, rxcp);
1630
1631         if (rxcp->ip_frag)
1632                 rxcp->l4_csum = 0;
1633
1634         if (rxcp->vlanf) {
1635                 /* vlanf could be wrongly set in some cards.
1636                  * Ignore it if vtm is not set */
1637                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1638                         rxcp->vlanf = 0;
1639
1640                 if (!lancer_chip(adapter))
1641                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1642
1643                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1644                     !adapter->vlan_tag[rxcp->vlan_tag])
1645                         rxcp->vlanf = 0;
1646         }
1647
1648         /* As the compl has been parsed, reset it; we won't touch it again */
1649         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1650
1651         queue_tail_inc(&rxo->cq);
1652         return rxcp;
1653 }
1654
1655 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1656 {
1657         u32 order = get_order(size);
1658
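             /* Multi-order pages are shared across several RX frags via
              * get_page(), so they must be allocated as compound pages.
              */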
1659         if (order > 0)
1660                 gfp |= __GFP_COMP;
1661         return  alloc_pages(gfp, order);
1662 }
1663
1664 /*
1665  * Allocate a page, split it to fragments of size rx_frag_size and post as
1666  * receive buffers to BE
1667  */
1668 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1669 {
1670         struct be_adapter *adapter = rxo->adapter;
1671         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1672         struct be_queue_info *rxq = &rxo->q;
1673         struct page *pagep = NULL;
1674         struct be_eth_rx_d *rxd;
1675         u64 page_dmaaddr = 0, frag_dmaaddr;
1676         u32 posted, page_offset = 0;
1677
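             /* Post up to MAX_RX_POST buffers, stopping early if the next
              * page_info slot is still in use (i.e. the ring is full).
              */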
1678         page_info = &rxo->page_info_tbl[rxq->head];
1679         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1680                 if (!pagep) {
1681                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1682                         if (unlikely(!pagep)) {
1683                                 rx_stats(rxo)->rx_post_fail++;
1684                                 break;
1685                         }
1686                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1687                                                     0, adapter->big_page_size,
1688                                                     DMA_FROM_DEVICE);
                             if (dma_mapping_error(&adapter->pdev->dev,
                                                   page_dmaaddr)) {
                                     /* Mapping failed: drop the page and
                                      * let be_worker retry the post later.
                                      */
                                     put_page(pagep);
                                     pagep = NULL;
                                     rx_stats(rxo)->rx_post_fail++;
                                     break;
                             }
1689                         page_info->page_offset = 0;
1690                 } else {
1691                         get_page(pagep);
1692                         page_info->page_offset = page_offset + rx_frag_size;
1693                 }
1694                 page_offset = page_info->page_offset;
1695                 page_info->page = pagep;
1696                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1697                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1698
1699                 rxd = queue_head_node(rxq);
1700                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1701                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1702
1703                 /* Any space left in the current big page for another frag? */
1704                 if ((page_offset + rx_frag_size + rx_frag_size) >
1705                                         adapter->big_page_size) {
1706                         pagep = NULL;
1707                         page_info->last_page_user = true;
1708                 }
1709
1710                 prev_page_info = page_info;
1711                 queue_head_inc(rxq);
1712                 page_info = &rxo->page_info_tbl[rxq->head];
1713         }
1714         if (pagep)
1715                 prev_page_info->last_page_user = true;
1716
1717         if (posted) {
1718                 atomic_add(posted, &rxq->used);
1719                 be_rxq_notify(adapter, rxq->id, posted);
1720         } else if (atomic_read(&rxq->used) == 0) {
1721                 /* Let be_worker replenish when memory is available */
1722                 rxo->rx_post_starved = true;
1723         }
1724 }
1725
1726 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1727 {
1728         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1729
1730         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1731                 return NULL;
1732
1733         rmb();
1734         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1735
1736         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1737
1738         queue_tail_inc(tx_cq);
1739         return txcp;
1740 }
1741
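     /* Walks the wrbs of the completed skb from txq->tail up to last_index,
      * unmapping each frag, and returns the number of wrbs reclaimed
      * (num_wrbs starts at 1 to account for the header wrb).
      */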
1742 static u16 be_tx_compl_process(struct be_adapter *adapter,
1743                 struct be_tx_obj *txo, u16 last_index)
1744 {
1745         struct be_queue_info *txq = &txo->q;
1746         struct be_eth_wrb *wrb;
1747         struct sk_buff **sent_skbs = txo->sent_skb_list;
1748         struct sk_buff *sent_skb;
1749         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1750         bool unmap_skb_hdr = true;
1751
1752         sent_skb = sent_skbs[txq->tail];
1753         BUG_ON(!sent_skb);
1754         sent_skbs[txq->tail] = NULL;
1755
1756         /* skip header wrb */
1757         queue_tail_inc(txq);
1758
1759         do {
1760                 cur_index = txq->tail;
1761                 wrb = queue_tail_node(txq);
1762                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1763                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1764                 unmap_skb_hdr = false;
1765
1766                 num_wrbs++;
1767                 queue_tail_inc(txq);
1768         } while (cur_index != last_index);
1769
1770         kfree_skb(sent_skb);
1771         return num_wrbs;
1772 }
1773
1774 /* Return the number of events in the event queue */
1775 static inline int events_get(struct be_eq_obj *eqo)
1776 {
1777         struct be_eq_entry *eqe;
1778         int num = 0;
1779
1780         do {
1781                 eqe = queue_tail_node(&eqo->q);
1782                 if (eqe->evt == 0)
1783                         break;
1784
1785                 rmb();
1786                 eqe->evt = 0;
1787                 num++;
1788                 queue_tail_inc(&eqo->q);
1789         } while (true);
1790
1791         return num;
1792 }
1793
1794 /* Leaves the EQ in a disarmed state */
1795 static void be_eq_clean(struct be_eq_obj *eqo)
1796 {
1797         int num = events_get(eqo);
1798
1799         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1800 }
1801
1802 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1803 {
1804         struct be_rx_page_info *page_info;
1805         struct be_queue_info *rxq = &rxo->q;
1806         struct be_queue_info *rx_cq = &rxo->cq;
1807         struct be_rx_compl_info *rxcp;
1808         struct be_adapter *adapter = rxo->adapter;
1809         int flush_wait = 0;
1810         u16 tail;
1811
1812         /* Consume pending rx completions.
1813          * Wait for the flush completion (identified by zero num_rcvd)
1814          * to arrive. Notify CQ even when there are no more CQ entries
1815          * for HW to flush partially coalesced CQ entries.
1816          * In Lancer, there is no need to wait for flush compl.
1817          */
1818         for (;;) {
1819                 rxcp = be_rx_compl_get(rxo);
1820                 if (rxcp == NULL) {
1821                         if (lancer_chip(adapter))
1822                                 break;
1823
1824                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
1825                                 dev_warn(&adapter->pdev->dev,
1826                                          "did not receive flush compl\n");
1827                                 break;
1828                         }
1829                         be_cq_notify(adapter, rx_cq->id, true, 0);
1830                         mdelay(1);
1831                 } else {
1832                         be_rx_compl_discard(rxo, rxcp);
1833                         be_cq_notify(adapter, rx_cq->id, false, 1);
1834                         if (rxcp->num_rcvd == 0)
1835                                 break;
1836                 }
1837         }
1838
1839         /* After cleanup, leave the CQ in unarmed state */
1840         be_cq_notify(adapter, rx_cq->id, false, 0);
1841
1842         /* Then free posted rx buffers that were not used */
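             /* rxq->head is the next slot to post, so stepping back 'used'
              * slots (mod queue length) lands on the oldest posted frag.
              */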
1843         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1844         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1845                 page_info = get_rx_page_info(rxo, tail);
1846                 put_page(page_info->page);
1847                 memset(page_info, 0, sizeof(*page_info));
1848         }
1849         BUG_ON(atomic_read(&rxq->used));
1850         rxq->tail = rxq->head = 0;
1851 }
1852
1853 static void be_tx_compl_clean(struct be_adapter *adapter)
1854 {
1855         struct be_tx_obj *txo;
1856         struct be_queue_info *txq;
1857         struct be_eth_tx_compl *txcp;
1858         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1859         struct sk_buff *sent_skb;
1860         bool dummy_wrb;
1861         int i, pending_txqs;
1862
1863         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1864         do {
1865                 pending_txqs = adapter->num_tx_qs;
1866
1867                 for_all_tx_queues(adapter, txo, i) {
1868                         txq = &txo->q;
1869                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1870                                 end_idx =
1871                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1872                                                       wrb_index, txcp);
1873                                 num_wrbs += be_tx_compl_process(adapter, txo,
1874                                                                 end_idx);
1875                                 cmpl++;
1876                         }
1877                         if (cmpl) {
1878                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1879                                 atomic_sub(num_wrbs, &txq->used);
1880                                 cmpl = 0;
1881                                 num_wrbs = 0;
1882                         }
1883                         if (atomic_read(&txq->used) == 0)
1884                                 pending_txqs--;
1885                 }
1886
1887                 if (pending_txqs == 0 || ++timeo > 200)
1888                         break;
1889
1890                 mdelay(1);
1891         } while (true);
1892
1893         for_all_tx_queues(adapter, txo, i) {
1894                 txq = &txo->q;
1895                 if (atomic_read(&txq->used))
1896                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1897                                 atomic_read(&txq->used));
1898
1899                 /* free posted tx for which compls will never arrive */
1900                 while (atomic_read(&txq->used)) {
1901                         sent_skb = txo->sent_skb_list[txq->tail];
1902                         end_idx = txq->tail;
1903                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1904                                                    &dummy_wrb);
1905                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1906                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1907                         atomic_sub(num_wrbs, &txq->used);
1908                 }
1909         }
1910 }
1911
1912 static void be_evt_queues_destroy(struct be_adapter *adapter)
1913 {
1914         struct be_eq_obj *eqo;
1915         int i;
1916
1917         for_all_evt_queues(adapter, eqo, i) {
1918                 if (eqo->q.created) {
1919                         be_eq_clean(eqo);
1920                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1921                 }
1922                 be_queue_free(adapter, &eqo->q);
1923         }
1924 }
1925
1926 static int be_evt_queues_create(struct be_adapter *adapter)
1927 {
1928         struct be_queue_info *eq;
1929         struct be_eq_obj *eqo;
1930         int i, rc;
1931
1932         adapter->num_evt_qs = num_irqs(adapter);
1933
1934         for_all_evt_queues(adapter, eqo, i) {
1935                 eqo->adapter = adapter;
1936                 eqo->tx_budget = BE_TX_BUDGET;
1937                 eqo->idx = i;
1938                 eqo->max_eqd = BE_MAX_EQD;
1939                 eqo->enable_aic = true;
1940
1941                 eq = &eqo->q;
1942                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1943                                         sizeof(struct be_eq_entry));
1944                 if (rc)
1945                         return rc;
1946
1947                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1948                 if (rc)
1949                         return rc;
1950         }
1951         return 0;
1952 }
1953
1954 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1955 {
1956         struct be_queue_info *q;
1957
1958         q = &adapter->mcc_obj.q;
1959         if (q->created)
1960                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1961         be_queue_free(adapter, q);
1962
1963         q = &adapter->mcc_obj.cq;
1964         if (q->created)
1965                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1966         be_queue_free(adapter, q);
1967 }
1968
1969 /* Must be called only after TX qs are created as MCC shares TX EQ */
1970 static int be_mcc_queues_create(struct be_adapter *adapter)
1971 {
1972         struct be_queue_info *q, *cq;
1973
1974         cq = &adapter->mcc_obj.cq;
1975         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1976                         sizeof(struct be_mcc_compl)))
1977                 goto err;
1978
1979         /* Use the default EQ for MCC completions */
1980         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1981                 goto mcc_cq_free;
1982
1983         q = &adapter->mcc_obj.q;
1984         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1985                 goto mcc_cq_destroy;
1986
1987         if (be_cmd_mccq_create(adapter, q, cq))
1988                 goto mcc_q_free;
1989
1990         return 0;
1991
1992 mcc_q_free:
1993         be_queue_free(adapter, q);
1994 mcc_cq_destroy:
1995         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1996 mcc_cq_free:
1997         be_queue_free(adapter, cq);
1998 err:
1999         return -1;
2000 }
2001
2002 static void be_tx_queues_destroy(struct be_adapter *adapter)
2003 {
2004         struct be_queue_info *q;
2005         struct be_tx_obj *txo;
2006         u8 i;
2007
2008         for_all_tx_queues(adapter, txo, i) {
2009                 q = &txo->q;
2010                 if (q->created)
2011                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2012                 be_queue_free(adapter, q);
2013
2014                 q = &txo->cq;
2015                 if (q->created)
2016                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2017                 be_queue_free(adapter, q);
2018         }
2019 }
2020
2021 static int be_num_txqs_want(struct be_adapter *adapter)
2022 {
2023         if ((!lancer_chip(adapter) && sriov_want(adapter)) ||
2024             be_is_mc(adapter) ||
2025             (!lancer_chip(adapter) && !be_physfn(adapter)) ||
2026             BE2_chip(adapter))
2027                 return 1;
2028         else
2029                 return adapter->max_tx_queues;
2030 }
2031
2032 static int be_tx_cqs_create(struct be_adapter *adapter)
2033 {
2034         struct be_queue_info *cq, *eq;
2035         int status;
2036         struct be_tx_obj *txo;
2037         u8 i;
2038
2039         adapter->num_tx_qs = be_num_txqs_want(adapter);
2040         if (adapter->num_tx_qs != MAX_TX_QS) {
2041                 rtnl_lock();
2042                 netif_set_real_num_tx_queues(adapter->netdev,
2043                         adapter->num_tx_qs);
2044                 rtnl_unlock();
2045         }
2046
2047         for_all_tx_queues(adapter, txo, i) {
2048                 cq = &txo->cq;
2049                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2050                                         sizeof(struct be_eth_tx_compl));
2051                 if (status)
2052                         return status;
2053
2054                 /* If num_evt_qs is less than num_tx_qs, then more than
2055                  * one txq shares an eq
2056                  */
2057                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2058                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2059                 if (status)
2060                         return status;
2061         }
2062         return 0;
2063 }
2064
2065 static int be_tx_qs_create(struct be_adapter *adapter)
2066 {
2067         struct be_tx_obj *txo;
2068         int i, status;
2069
2070         for_all_tx_queues(adapter, txo, i) {
2071                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2072                                         sizeof(struct be_eth_wrb));
2073                 if (status)
2074                         return status;
2075
2076                 status = be_cmd_txq_create(adapter, txo);
2077                 if (status)
2078                         return status;
2079         }
2080
2081         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2082                  adapter->num_tx_qs);
2083         return 0;
2084 }
2085
2086 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2087 {
2088         struct be_queue_info *q;
2089         struct be_rx_obj *rxo;
2090         int i;
2091
2092         for_all_rx_queues(adapter, rxo, i) {
2093                 q = &rxo->cq;
2094                 if (q->created)
2095                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2096                 be_queue_free(adapter, q);
2097         }
2098 }
2099
2100 static int be_rx_cqs_create(struct be_adapter *adapter)
2101 {
2102         struct be_queue_info *eq, *cq;
2103         struct be_rx_obj *rxo;
2104         int rc, i;
2105
2106         /* We'll create as many RSS rings as there are irqs.
2107          * But when there's only one irq there's no use creating RSS rings
2108          */
2109         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
2110                                 num_irqs(adapter) + 1 : 1;
2111         if (adapter->num_rx_qs != MAX_RX_QS) {
2112                 rtnl_lock();
2113                 netif_set_real_num_rx_queues(adapter->netdev,
2114                                              adapter->num_rx_qs);
2115                 rtnl_unlock();
2116         }
2117
2118         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2119         for_all_rx_queues(adapter, rxo, i) {
2120                 rxo->adapter = adapter;
2121                 cq = &rxo->cq;
2122                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2123                                 sizeof(struct be_eth_rx_compl));
2124                 if (rc)
2125                         return rc;
2126
2127                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2128                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2129                 if (rc)
2130                         return rc;
2131         }
2132
2133         dev_info(&adapter->pdev->dev,
2134                  "created %d RSS queue(s) and 1 default RX queue\n",
2135                  adapter->num_rx_qs - 1);
2136         return 0;
2137 }
2138
2139 static irqreturn_t be_intx(int irq, void *dev)
2140 {
2141         struct be_eq_obj *eqo = dev;
2142         struct be_adapter *adapter = eqo->adapter;
2143         int num_evts = 0;
2144
2145         /* IRQ is not expected when NAPI is scheduled as the EQ
2146          * will not be armed.
2147          * But, this can happen on Lancer INTx where it takes
2148          * a while to de-assert INTx or in BE2 where occasionally
2149          * an interrupt may be raised even when the EQ is unarmed.
2150          * If NAPI is already scheduled, then counting & notifying
2151          * events will orphan them.
2152          */
2153         if (napi_schedule_prep(&eqo->napi)) {
2154                 num_evts = events_get(eqo);
2155                 __napi_schedule(&eqo->napi);
2156                 if (num_evts)
2157                         eqo->spurious_intr = 0;
2158         }
2159         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2160
2161         /* Return IRQ_HANDLED only for the first spurious intr
2162          * after a valid intr to stop the kernel from branding
2163          * this irq as a bad one!
2164          */
2165         if (num_evts || eqo->spurious_intr++ == 0)
2166                 return IRQ_HANDLED;
2167         else
2168                 return IRQ_NONE;
2169 }
2170
2171 static irqreturn_t be_msix(int irq, void *dev)
2172 {
2173         struct be_eq_obj *eqo = dev;
2174
2175         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2176         napi_schedule(&eqo->napi);
2177         return IRQ_HANDLED;
2178 }
2179
2180 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2181 {
2182         return rxcp->tcpf && !rxcp->err && rxcp->l4_csum;
2183 }
2184
2185 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2186                         int budget)
2187 {
2188         struct be_adapter *adapter = rxo->adapter;
2189         struct be_queue_info *rx_cq = &rxo->cq;
2190         struct be_rx_compl_info *rxcp;
2191         u32 work_done;
2192
2193         for (work_done = 0; work_done < budget; work_done++) {
2194                 rxcp = be_rx_compl_get(rxo);
2195                 if (!rxcp)
2196                         break;
2197
2198                 /* Is it a flush compl that has no data? */
2199                 if (unlikely(rxcp->num_rcvd == 0))
2200                         goto loop_continue;
2201
2202                 /* Discard compl with partial DMA Lancer B0 */
2203                 if (unlikely(!rxcp->pkt_size)) {
2204                         be_rx_compl_discard(rxo, rxcp);
2205                         goto loop_continue;
2206                 }
2207
2208                 /* On BE drop pkts that arrive due to imperfect filtering in
2209                  * promiscuous mode on some SKUs
2210                  */
2211                 if (unlikely(rxcp->port != adapter->port_num &&
2212                                 !lancer_chip(adapter))) {
2213                         be_rx_compl_discard(rxo, rxcp);
2214                         goto loop_continue;
2215                 }
2216
2217                 if (do_gro(rxcp))
2218                         be_rx_compl_process_gro(rxo, napi, rxcp);
2219                 else
2220                         be_rx_compl_process(rxo, rxcp);
2221 loop_continue:
2222                 be_rx_stats_update(rxo, rxcp);
2223         }
2224
2225         if (work_done) {
2226                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2227
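                     /* Replenish from softirq context (GFP_ATOMIC) once the
                      * number of posted buffers drops below the watermark.
                      */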
2228                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
2229                         be_post_rx_frags(rxo, GFP_ATOMIC);
2230         }
2231
2232         return work_done;
2233 }
2234
2235 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2236                           int budget, int idx)
2237 {
2238         struct be_eth_tx_compl *txcp;
2239         int num_wrbs = 0, work_done;
2240
2241         for (work_done = 0; work_done < budget; work_done++) {
2242                 txcp = be_tx_compl_get(&txo->cq);
2243                 if (!txcp)
2244                         break;
2245                 num_wrbs += be_tx_compl_process(adapter, txo,
2246                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2247                                         wrb_index, txcp));
2248         }
2249
2250         if (work_done) {
2251                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2252                 atomic_sub(num_wrbs, &txo->q.used);
2253
2254                 /* As Tx wrbs have been freed up, wake up netdev queue
2255                  * if it was stopped due to lack of tx wrbs.  */
2256                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2257                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2258                         netif_wake_subqueue(adapter->netdev, idx);
2259                 }
2260
2261                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2262                 tx_stats(txo)->tx_compl += work_done;
2263                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2264         }
2265         return (work_done < budget); /* Done */
2266 }
2267
2268 int be_poll(struct napi_struct *napi, int budget)
2269 {
2270         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2271         struct be_adapter *adapter = eqo->adapter;
2272         int max_work = 0, work, i, num_evts;
2273         bool tx_done;
2274
2275         num_evts = events_get(eqo);
2276
2277         /* Process all TXQs serviced by this EQ */
2278         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2279                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2280                                         eqo->tx_budget, i);
2281                 if (!tx_done)
2282                         max_work = budget;
2283         }
2284
2285         /* This loop will iterate twice for EQ0, in which
2286          * completions of the last RXQ (the default one) are also processed.
2287          * For other EQs the loop iterates only once.
2288          */
2289         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2290                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2291                 max_work = max(work, max_work);
2292         }
2293
2294         if (is_mcc_eqo(eqo))
2295                 be_process_mcc(adapter);
2296
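             /* Events were counted and consumed up front; re-arm the EQ
              * (acking num_evts) only when all work fit in the budget,
              * otherwise keep polling with the EQ unarmed.
              */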
2297         if (max_work < budget) {
2298                 napi_complete(napi);
2299                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2300         } else {
2301                 /* As we'll continue in polling mode, count and clear events */
2302                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2303         }
2304         return max_work;
2305 }
2306
2307 void be_detect_error(struct be_adapter *adapter)
2308 {
2309         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2310         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2311         u32 i;
2312
2313         if (be_hw_error(adapter))
2314                 return;
2315
2316         if (lancer_chip(adapter)) {
2317                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2318                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2319                         sliport_err1 = ioread32(adapter->db +
2320                                         SLIPORT_ERROR1_OFFSET);
2321                         sliport_err2 = ioread32(adapter->db +
2322                                         SLIPORT_ERROR2_OFFSET);
2323                 }
2324         } else {
2325                 pci_read_config_dword(adapter->pdev,
2326                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2327                 pci_read_config_dword(adapter->pdev,
2328                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2329                 pci_read_config_dword(adapter->pdev,
2330                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2331                 pci_read_config_dword(adapter->pdev,
2332                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2333
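                     /* Bits set in the UE mask registers mark known-spurious
                      * sources; clear them so only real UE bits remain.
                      */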
2334                 ue_lo = (ue_lo & ~ue_lo_mask);
2335                 ue_hi = (ue_hi & ~ue_hi_mask);
2336         }
2337
2338         /* On certain platforms BE hardware can indicate spurious UEs.
2339          * In case of a real UE the h/w will stop working completely anyway,
2340          * so hw_error is not set on UE detection.
2341          */
2342         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2343                 adapter->hw_error = true;
2344                 dev_err(&adapter->pdev->dev,
2345                         "Error detected in the card\n");
2346                 dev_err(&adapter->pdev->dev,
2347                         "ERR: sliport status 0x%x\n", sliport_status);
2348                 dev_err(&adapter->pdev->dev,
2349                         "ERR: sliport error1 0x%x\n", sliport_err1);
2350                 dev_err(&adapter->pdev->dev,
2351                         "ERR: sliport error2 0x%x\n", sliport_err2);
2352         }
2356
2357         if (ue_lo) {
2358                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2359                         if (ue_lo & 1)
2360                                 dev_err(&adapter->pdev->dev,
2361                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2362                 }
2363         }
2364
2365         if (ue_hi) {
2366                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2367                         if (ue_hi & 1)
2368                                 dev_err(&adapter->pdev->dev,
2369                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2370                 }
2371         }
2373 }
2374
2375 static void be_msix_disable(struct be_adapter *adapter)
2376 {
2377         if (msix_enabled(adapter)) {
2378                 pci_disable_msix(adapter->pdev);
2379                 adapter->num_msix_vec = 0;
2380         }
2381 }
2382
2383 static uint be_num_rss_want(struct be_adapter *adapter)
2384 {
2385         u32 num = 0;
2386
2387         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2388             (lancer_chip(adapter) ||
2389              (!sriov_want(adapter) && be_physfn(adapter)))) {
2390                 num = adapter->max_rss_queues;
2391                 num = min_t(u32, num, (u32)netif_get_num_default_rss_queues());
2392         }
2393         return num;
2394 }
2395
2396 static int be_msix_enable(struct be_adapter *adapter)
2397 {
2398 #define BE_MIN_MSIX_VECTORS             1
2399         int i, status, num_vec, num_roce_vec = 0;
2400         struct device *dev = &adapter->pdev->dev;
2401
2402         /* If RSS queues are not used, need a vec for default RX Q */
2403         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2404         if (be_roce_supported(adapter)) {
2405                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2406                                         (num_online_cpus() + 1));
2407                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2408                 num_vec += num_roce_vec;
2409                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2410         }
2411         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2412
2413         for (i = 0; i < num_vec; i++)
2414                 adapter->msix_entries[i].entry = i;
2415
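             /* With the old pci_enable_msix() semantics a positive return
              * value is the number of vectors actually available, so retry
              * once with that smaller count before giving up.
              */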
2416         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2417         if (status == 0) {
2418                 goto done;
2419         } else if (status >= BE_MIN_MSIX_VECTORS) {
2420                 num_vec = status;
2421                 status = pci_enable_msix(adapter->pdev, adapter->msix_entries,
2422                                          num_vec);
2423                 if (!status)
2424                         goto done;
2425         }
2426
2427         dev_warn(dev, "MSIx enable failed\n");
2428         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2429         if (!be_physfn(adapter))
2430                 return status;
2431         return 0;
2432 done:
2433         if (be_roce_supported(adapter)) {
2434                 if (num_vec > num_roce_vec) {
2435                         adapter->num_msix_vec = num_vec - num_roce_vec;
2436                         adapter->num_msix_roce_vec =
2437                                 num_vec - adapter->num_msix_vec;
2438                 } else {
2439                         adapter->num_msix_vec = num_vec;
2440                         adapter->num_msix_roce_vec = 0;
2441                 }
2442         } else
2443                 adapter->num_msix_vec = num_vec;
2444         dev_info(dev, "enabled %d MSI-x vector(s)\n", adapter->num_msix_vec);
2445         return 0;
2446 }
2447
2448 static inline int be_msix_vec_get(struct be_adapter *adapter,
2449                                 struct be_eq_obj *eqo)
2450 {
2451         return adapter->msix_entries[eqo->idx].vector;
2452 }
2453
2454 static int be_msix_register(struct be_adapter *adapter)
2455 {
2456         struct net_device *netdev = adapter->netdev;
2457         struct be_eq_obj *eqo;
2458         int status, i, vec;
2459
2460         for_all_evt_queues(adapter, eqo, i) {
2461                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2462                 vec = be_msix_vec_get(adapter, eqo);
2463                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2464                 if (status)
2465                         goto err_msix;
2466         }
2467
2468         return 0;
2469 err_msix:
2470         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2471                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2472         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2473                 status);
2474         be_msix_disable(adapter);
2475         return status;
2476 }
2477
2478 static int be_irq_register(struct be_adapter *adapter)
2479 {
2480         struct net_device *netdev = adapter->netdev;
2481         int status;
2482
2483         if (msix_enabled(adapter)) {
2484                 status = be_msix_register(adapter);
2485                 if (status == 0)
2486                         goto done;
2487                 /* INTx is not supported for VF */
2488                 if (!be_physfn(adapter))
2489                         return status;
2490         }
2491
2492         /* INTx: only the first EQ is used */
2493         netdev->irq = adapter->pdev->irq;
2494         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2495                              &adapter->eq_obj[0]);
2496         if (status) {
2497                 dev_err(&adapter->pdev->dev,
2498                         "INTx request IRQ failed - err %d\n", status);
2499                 return status;
2500         }
2501 done:
2502         adapter->isr_registered = true;
2503         return 0;
2504 }
2505
2506 static void be_irq_unregister(struct be_adapter *adapter)
2507 {
2508         struct net_device *netdev = adapter->netdev;
2509         struct be_eq_obj *eqo;
2510         int i;
2511
2512         if (!adapter->isr_registered)
2513                 return;
2514
2515         /* INTx */
2516         if (!msix_enabled(adapter)) {
2517                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2518                 goto done;
2519         }
2520
2521         /* MSIx */
2522         for_all_evt_queues(adapter, eqo, i)
2523                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2524
2525 done:
2526         adapter->isr_registered = false;
2527 }
2528
2529 static void be_rx_qs_destroy(struct be_adapter *adapter)
2530 {
2531         struct be_queue_info *q;
2532         struct be_rx_obj *rxo;
2533         int i;
2534
2535         for_all_rx_queues(adapter, rxo, i) {
2536                 q = &rxo->q;
2537                 if (q->created) {
2538                         be_cmd_rxq_destroy(adapter, q);
2539                         be_rx_cq_clean(rxo);
2540                 }
2541                 be_queue_free(adapter, q);
2542         }
2543 }
2544
2545 static int be_close(struct net_device *netdev)
2546 {
2547         struct be_adapter *adapter = netdev_priv(netdev);
2548         struct be_eq_obj *eqo;
2549         int i;
2550
2551         be_roce_dev_close(adapter);
2552
2553         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2554                 for_all_evt_queues(adapter, eqo, i)
2555                         napi_disable(&eqo->napi);
2556                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2557         }
2558
2559         be_async_mcc_disable(adapter);
2560
2561         /* Wait for all pending tx completions to arrive so that
2562          * all tx skbs are freed.
2563          */
2564         netif_tx_disable(netdev);
2565         be_tx_compl_clean(adapter);
2566
2567         be_rx_qs_destroy(adapter);
2568
2569         for_all_evt_queues(adapter, eqo, i) {
2570                 if (msix_enabled(adapter))
2571                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2572                 else
2573                         synchronize_irq(netdev->irq);
2574                 be_eq_clean(eqo);
2575         }
2576
2577         be_irq_unregister(adapter);
2578
2579         return 0;
2580 }
2581
2582 static int be_rx_qs_create(struct be_adapter *adapter)
2583 {
2584         struct be_rx_obj *rxo;
2585         int rc, i, j;
2586         u8 rsstable[128];
2587
2588         for_all_rx_queues(adapter, rxo, i) {
2589                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2590                                     sizeof(struct be_eth_rx_d));
2591                 if (rc)
2592                         return rc;
2593         }
2594
2595         /* The FW would like the default RXQ to be created first */
2596         rxo = default_rxo(adapter);
2597         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2598                                adapter->if_handle, false, &rxo->rss_id);
2599         if (rc)
2600                 return rc;
2601
2602         for_all_rss_queues(adapter, rxo, i) {
2603                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2604                                        rx_frag_size, adapter->if_handle,
2605                                        true, &rxo->rss_id);
2606                 if (rc)
2607                         return rc;
2608         }
2609
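             /* Fill the 128-entry RSS indirection table by striping the
              * RSS ring ids round-robin across all slots.
              */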
2610         if (be_multi_rxq(adapter)) {
2611                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2612                         for_all_rss_queues(adapter, rxo, i) {
2613                                 if ((j + i) >= 128)
2614                                         break;
2615                                 rsstable[j + i] = rxo->rss_id;
2616                         }
2617                 }
2618                 adapter->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2619                                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2620
2621                 if (!BEx_chip(adapter))
2622                         adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2623                                                 RSS_ENABLE_UDP_IPV6;
2624
2625                 rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
2626                                        128);
2627                 if (rc) {
2628                         adapter->rss_flags = 0;
2629                         return rc;
2630                 }
2631         }
2632
2633         /* First time posting */
2634         for_all_rx_queues(adapter, rxo, i)
2635                 be_post_rx_frags(rxo, GFP_KERNEL);
2636         return 0;
2637 }
2638
2639 static int be_open(struct net_device *netdev)
2640 {
2641         struct be_adapter *adapter = netdev_priv(netdev);
2642         struct be_eq_obj *eqo;
2643         struct be_rx_obj *rxo;
2644         struct be_tx_obj *txo;
2645         u8 link_status;
2646         int status, i;
2647
2648         status = be_rx_qs_create(adapter);
2649         if (status)
2650                 goto err;
2651
2652         status = be_irq_register(adapter);
2653         if (status)
2654                 goto err;
2655
2656         for_all_rx_queues(adapter, rxo, i)
2657                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2658
2659         for_all_tx_queues(adapter, txo, i)
2660                 be_cq_notify(adapter, txo->cq.id, true, 0);
2661
2662         be_async_mcc_enable(adapter);
2663
2664         for_all_evt_queues(adapter, eqo, i) {
2665                 napi_enable(&eqo->napi);
2666                 be_eq_notify(adapter, eqo->q.id, true, true, 0);
2667         }
2668         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2669
2670         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2671         if (!status)
2672                 be_link_status_update(adapter, link_status);
2673
2674         netif_tx_start_all_queues(netdev);
2675         be_roce_dev_open(adapter);
2676         return 0;
2677 err:
2678         be_close(adapter->netdev);
2679         return -EIO;
2680 }
2681
2682 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2683 {
2684         struct be_dma_mem cmd;
2685         int status = 0;
2686         u8 mac[ETH_ALEN];
2687
2688         memset(mac, 0, ETH_ALEN);
2689
2690         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2691         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2692                                     GFP_KERNEL | __GFP_ZERO);
2693         if (cmd.va == NULL)
2694                 return -ENOMEM;
2695
2696         if (enable) {
2697                 status = pci_write_config_dword(adapter->pdev,
2698                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2699                 if (status) {
2700                         dev_err(&adapter->pdev->dev,
2701                                 "Could not enable Wake-on-LAN\n");
2702                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2703                                           cmd.dma);
2704                         return status;
2705                 }
2706                 status = be_cmd_enable_magic_wol(adapter,
2707                                 adapter->netdev->dev_addr, &cmd);
2708                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2709                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2710         } else {
2711                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2712                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2713                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2714         }
2715
2716         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2717         return status;
2718 }
2719
2720 /*
2721  * Generate a seed MAC address from the PF MAC Address using jhash.
2722  * MAC Address for VFs are assigned incrementally starting from the seed.
2723  * These addresses are programmed in the ASIC by the PF and the VF driver
2724  * queries for the MAC address during its probe.
2725  */
2726 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2727 {
2728         u32 vf;
2729         int status = 0;
2730         u8 mac[ETH_ALEN];
2731         struct be_vf_cfg *vf_cfg;
2732
2733         be_vf_eth_addr_generate(adapter, mac);
2734
2735         for_all_vfs(adapter, vf_cfg, vf) {
2736                 if (lancer_chip(adapter)) {
2737                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2738                 } else {
2739                         status = be_cmd_pmac_add(adapter, mac,
2740                                                  vf_cfg->if_handle,
2741                                                  &vf_cfg->pmac_id, vf + 1);
2742                 }
2743
2744                 if (status)
2745                         dev_err(&adapter->pdev->dev,
2746                         "MAC address assignment failed for VF %d\n", vf);
2747                 else
2748                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2749
2750                 mac[5] += 1;
2751         }
2752         return status;
2753 }
2754
2755 static int be_vfs_mac_query(struct be_adapter *adapter)
2756 {
2757         int status, vf;
2758         u8 mac[ETH_ALEN];
2759         struct be_vf_cfg *vf_cfg;
2760         bool active;
2761
2762         for_all_vfs(adapter, vf_cfg, vf) {
2763                 be_cmd_get_mac_from_list(adapter, mac, &active,
2764                                          &vf_cfg->pmac_id, 0);
2765
2766                 status = be_cmd_mac_addr_query(adapter, mac, false,
2767                                                vf_cfg->if_handle, 0);
2768                 if (status)
2769                         return status;
2770                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2771         }
2772         return 0;
2773 }
2774
2775 static void be_vf_clear(struct be_adapter *adapter)
2776 {
2777         struct be_vf_cfg *vf_cfg;
2778         u32 vf;
2779
2780         if (be_find_vfs(adapter, ASSIGNED)) {
2781                 dev_warn(&adapter->pdev->dev,
2782                          "VFs are assigned to VMs: not disabling VFs\n");
2783                 goto done;
2784         }
2785
2786         pci_disable_sriov(adapter->pdev);
2787
2788         for_all_vfs(adapter, vf_cfg, vf) {
2789                 if (lancer_chip(adapter))
2790                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2791                 else
2792                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2793                                         vf_cfg->pmac_id, vf + 1);
2794
2795                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2796         }
2797 done:
2798         kfree(adapter->vf_cfg);
2799         adapter->num_vfs = 0;
2800 }
2801
2802 static int be_clear(struct be_adapter *adapter)
2803 {
2804         int i = 1;
2805
2806         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2807                 cancel_delayed_work_sync(&adapter->work);
2808                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2809         }
2810
2811         if (sriov_enabled(adapter))
2812                 be_vf_clear(adapter);
2813
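             /* pmac_id[0] holds the primary MAC; extra unicast MACs start
              * at index 1, which is why i is initialized to 1 above.
              */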
2814         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2815                 be_cmd_pmac_del(adapter, adapter->if_handle,
2816                         adapter->pmac_id[i], 0);
2817
2818         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2819
2820         be_mcc_queues_destroy(adapter);
2821         be_rx_cqs_destroy(adapter);
2822         be_tx_queues_destroy(adapter);
2823         be_evt_queues_destroy(adapter);
2824
2825         kfree(adapter->pmac_id);
2826         adapter->pmac_id = NULL;
2827
2828         be_msix_disable(adapter);
2829         return 0;
2830 }
2831
2832 static int be_vfs_if_create(struct be_adapter *adapter)
2833 {
2834         struct be_vf_cfg *vf_cfg;
2835         u32 cap_flags, en_flags, vf;
2836         int status = 0;
2837
2838         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2839                     BE_IF_FLAGS_MULTICAST;
2840
2841         for_all_vfs(adapter, vf_cfg, vf) {
2842                 if (!BE3_chip(adapter))
2843                         be_cmd_get_profile_config(adapter, &cap_flags,
2844                                                   NULL, vf + 1);
2845
2846                 /* If a FW profile exists, then cap_flags are updated */
2847                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
2848                            BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_MULTICAST);
2849                 status = be_cmd_if_create(adapter, cap_flags, en_flags,
2850                                           &vf_cfg->if_handle, vf + 1);
2851                 if (status)
2852                         goto err;
2853         }
2854 err:
2855         return status;
2856 }
2857
2858 static int be_vf_setup_init(struct be_adapter *adapter)
2859 {
2860         struct be_vf_cfg *vf_cfg;
2861         int vf;
2862
2863         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2864                                   GFP_KERNEL);
2865         if (!adapter->vf_cfg)
2866                 return -ENOMEM;
2867
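        /* mark handles invalid until the interface and pmac are created */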
2868         for_all_vfs(adapter, vf_cfg, vf) {
2869                 vf_cfg->if_handle = -1;
2870                 vf_cfg->pmac_id = -1;
2871         }
2872         return 0;
2873 }
2874
2875 static int be_vf_setup(struct be_adapter *adapter)
2876 {
2877         struct be_vf_cfg *vf_cfg;
2878         u16 def_vlan, lnk_speed;
2879         int status, old_vfs, vf;
2880         struct device *dev = &adapter->pdev->dev;
2881
2882         old_vfs = be_find_vfs(adapter, ENABLED);
2883         if (old_vfs) {
2884                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
2885                 if (old_vfs != num_vfs)
2886                         dev_warn(dev, "Ignoring num_vfs=%u setting\n", num_vfs);
2887                 adapter->num_vfs = old_vfs;
2888         } else {
2889                 if (num_vfs > adapter->dev_num_vfs)
2890                         dev_info(dev, "Device supports %d VFs and not %u\n",
2891                                  adapter->dev_num_vfs, num_vfs);
2892                 adapter->num_vfs = min_t(u16, num_vfs, adapter->dev_num_vfs);
2893                 if (!adapter->num_vfs)
2894                         return 0;
2895         }
2896
2897         status = be_vf_setup_init(adapter);
2898         if (status)
2899                 goto err;
2900
2901         if (old_vfs) {
2902                 for_all_vfs(adapter, vf_cfg, vf) {
2903                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
2904                         if (status)
2905                                 goto err;
2906                 }
2907         } else {
2908                 status = be_vfs_if_create(adapter);
2909                 if (status)
2910                         goto err;
2911         }
2912
2913         if (old_vfs) {
2914                 status = be_vfs_mac_query(adapter);
2915                 if (status)
2916                         goto err;
2917         } else {
2918                 status = be_vf_eth_addr_config(adapter);
2919                 if (status)
2920                         goto err;
2921         }
2922
2923         for_all_vfs(adapter, vf_cfg, vf) {
2924                 /* BE3 FW, by default, caps VF TX-rate to 100 Mbps.
2925                  * Allow full available bandwidth
2926                  */
2927                 if (BE3_chip(adapter) && !old_vfs)
2928                         be_cmd_set_qos(adapter, 1000, vf + 1);
2929
2930                 status = be_cmd_link_status_query(adapter, &lnk_speed,
2931                                                   NULL, vf + 1);
2932                 if (!status)
2933                         vf_cfg->tx_rate = lnk_speed;
2934
2935                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2936                                                vf + 1, vf_cfg->if_handle);
2937                 if (status)
2938                         goto err;
2939                 vf_cfg->def_vid = def_vlan;
2940
2941                 be_cmd_enable_vf(adapter, vf + 1);
2942         }
2943
2944         if (!old_vfs) {
2945                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2946                 if (status) {
2947                         dev_err(dev, "SRIOV enable failed\n");
2948                         adapter->num_vfs = 0;
2949                         goto err;
2950                 }
2951         }
2952         return 0;
2953 err:
2954         dev_err(dev, "VF setup failed\n");
2955         be_vf_clear(adapter);
2956         return status;
2957 }
2958
2959 static void be_setup_init(struct be_adapter *adapter)
2960 {
2961         adapter->vlan_prio_bmap = 0xff;
2962         adapter->phy.link_speed = -1;
2963         adapter->if_handle = -1;
2964         adapter->be3_native = false;
2965         adapter->promiscuous = false;
2966         if (be_physfn(adapter))
2967                 adapter->cmd_privileges = MAX_PRIVILEGES;
2968         else
2969                 adapter->cmd_privileges = MIN_PRIVILEGES;
2970 }
2971
2972 static int be_get_mac_addr(struct be_adapter *adapter, u8 *mac, u32 if_handle,
2973                            bool *active_mac, u32 *pmac_id)
2974 {
2975         int status = 0;
2976
2977         if (!is_zero_ether_addr(adapter->netdev->perm_addr)) {
2978                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
2979                 if (!lancer_chip(adapter) && !be_physfn(adapter))
2980                         *active_mac = true;
2981                 else
2982                         *active_mac = false;
2983
2984                 return status;
2985         }
2986
2987         if (lancer_chip(adapter)) {
2988                 status = be_cmd_get_mac_from_list(adapter, mac,
2989                                                   active_mac, pmac_id, 0);
2990                 if (*active_mac) {
2991                         status = be_cmd_mac_addr_query(adapter, mac, false,
2992                                                        if_handle, *pmac_id);
2993                 }
2994         } else if (be_physfn(adapter)) {
2995                 /* For BE3, for PF get permanent MAC */
2996                 status = be_cmd_mac_addr_query(adapter, mac, true, 0, 0);
2997                 *active_mac = false;
2998         } else {
2999                 /* For BE3, for VF get soft MAC assigned by PF*/
3000                 status = be_cmd_mac_addr_query(adapter, mac, false,
3001                                                if_handle, 0);
3002                 *active_mac = true;
3003         }
3004         return status;
3005 }
3006
3007 static void be_get_resources(struct be_adapter *adapter)
3008 {
3009         u16 dev_num_vfs;
3010         int pos, status;
3011         bool profile_present = false;
3012         u16 txq_count = 0;
3013
3014         if (!BEx_chip(adapter)) {
3015                 status = be_cmd_get_func_config(adapter);
3016                 if (!status)
3017                         profile_present = true;
3018         } else if (BE3_chip(adapter) && be_physfn(adapter)) {
3019                 be_cmd_get_profile_config(adapter, NULL, &txq_count, 0);
3020         }
3021
3022         if (profile_present) {
3023                 /* Sanity fixes for Lancer */
3024                 adapter->max_pmac_cnt = min_t(u16, adapter->max_pmac_cnt,
3025                                               BE_UC_PMAC_COUNT);
3026                 adapter->max_vlans = min_t(u16, adapter->max_vlans,
3027                                            BE_NUM_VLANS_SUPPORTED);
3028                 adapter->max_mcast_mac = min_t(u16, adapter->max_mcast_mac,
3029                                                BE_MAX_MC);
3030                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3031                                                MAX_TX_QS);
3032                 adapter->max_rss_queues = min_t(u16, adapter->max_rss_queues,
3033                                                 BE3_MAX_RSS_QS);
3034                 adapter->max_event_queues = min_t(u16,
3035                                                   adapter->max_event_queues,
3036                                                   BE3_MAX_RSS_QS);
3037
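                /* one RX ring is reserved as the default non-RSS queue, so
                 * cap the RSS rings at one less than the total RX rings */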
3038                 if (adapter->max_rss_queues &&
3039                     adapter->max_rss_queues == adapter->max_rx_queues)
3040                         adapter->max_rss_queues -= 1;
3041
3042                 if (adapter->max_event_queues < adapter->max_rss_queues)
3043                         adapter->max_rss_queues = adapter->max_event_queues;
3044
3045         } else {
3046                 if (be_physfn(adapter))
3047                         adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3048                 else
3049                         adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3050
3051                 if (adapter->function_mode & FLEX10_MODE)
3052                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED / 8;
3053                 else
3054                         adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3055
3056                 adapter->max_mcast_mac = BE_MAX_MC;
3057                 adapter->max_tx_queues = txq_count ? txq_count : MAX_TX_QS;
3058                 adapter->max_tx_queues = min_t(u16, adapter->max_tx_queues,
3059                                                MAX_TX_QS);
3060                 adapter->max_rss_queues = (adapter->be3_native) ?
3061                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3062                 adapter->max_event_queues = BE3_MAX_RSS_QS;
3063
3064                 adapter->if_cap_flags = BE_IF_FLAGS_UNTAGGED |
3065                                         BE_IF_FLAGS_BROADCAST |
3066                                         BE_IF_FLAGS_MULTICAST |
3067                                         BE_IF_FLAGS_PASS_L3L4_ERRORS |
3068                                         BE_IF_FLAGS_MCAST_PROMISCUOUS |
3069                                         BE_IF_FLAGS_VLAN_PROMISCUOUS |
3070                                         BE_IF_FLAGS_PROMISCUOUS;
3071
3072                 if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3073                         adapter->if_cap_flags |= BE_IF_FLAGS_RSS;
3074         }
3075
3076         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
3077         if (pos) {
3078                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
3079                                      &dev_num_vfs);
3080                 if (BE3_chip(adapter))
3081                         dev_num_vfs = min_t(u16, dev_num_vfs, MAX_VFS);
3082                 adapter->dev_num_vfs = dev_num_vfs;
3083         }
3084 }
3085
3086 /* Routine to query per function resource limits */
3087 static int be_get_config(struct be_adapter *adapter)
3088 {
3089         int status;
3090
3091         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3092                                      &adapter->function_mode,
3093                                      &adapter->function_caps,
3094                                      &adapter->asic_rev);
3095         if (status)
3096                 goto err;
3097
3098         be_get_resources(adapter);
3099
3100         /* primary mac needs 1 pmac entry */
3101         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3102                                    sizeof(u32), GFP_KERNEL);
3103         if (!adapter->pmac_id) {
3104                 status = -ENOMEM;
3105                 goto err;
3106         }
3107
3108 err:
3109         return status;
3110 }
3111
3112 static int be_setup(struct be_adapter *adapter)
3113 {
3114         struct device *dev = &adapter->pdev->dev;
3115         u32 en_flags;
3116         u32 tx_fc, rx_fc;
3117         int status;
3118         u8 mac[ETH_ALEN];
3119         bool active_mac;
3120
3121         be_setup_init(adapter);
3122
3123         if (!lancer_chip(adapter))
3124                 be_cmd_req_native_mode(adapter);
3125
3126         status = be_get_config(adapter);
3127         if (status)
3128                 goto err;
3129
3130         status = be_msix_enable(adapter);
3131         if (status)
3132                 goto err;
3133
3134         status = be_evt_queues_create(adapter);
3135         if (status)
3136                 goto err;
3137
3138         status = be_tx_cqs_create(adapter);
3139         if (status)
3140                 goto err;
3141
3142         status = be_rx_cqs_create(adapter);
3143         if (status)
3144                 goto err;
3145
3146         status = be_mcc_queues_create(adapter);
3147         if (status)
3148                 goto err;
3149
3150         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3151         /* In UMC mode FW does not return right privileges.
3152          * Override with correct privilege equivalent to PF.
3153          */
3154         if (be_is_mc(adapter))
3155                 adapter->cmd_privileges = MAX_PRIVILEGES;
3156
3157         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3158                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3159
3160         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3161                 en_flags |= BE_IF_FLAGS_RSS;
3162
3163         en_flags &= adapter->if_cap_flags;
3164
3165         status = be_cmd_if_create(adapter, adapter->if_cap_flags, en_flags,
3166                                   &adapter->if_handle, 0);
3167         if (status != 0)
3168                 goto err;
3169
3170         memset(mac, 0, ETH_ALEN);
3171         active_mac = false;
3172         status = be_get_mac_addr(adapter, mac, adapter->if_handle,
3173                                  &active_mac, &adapter->pmac_id[0]);
3174         if (status != 0)
3175                 goto err;
3176
3177         if (!active_mac) {
3178                 status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3179                                          &adapter->pmac_id[0], 0);
3180                 if (status != 0)
3181                         goto err;
3182         }
3183
3184         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3185                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3186                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3187         }
3188
3189         status = be_tx_qs_create(adapter);
3190         if (status)
3191                 goto err;
3192
3193         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
3194
3195         if (adapter->vlans_added)
3196                 be_vid_config(adapter);
3197
3198         be_set_rx_mode(adapter->netdev);
3199
3200         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3201
3202         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3203                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3204                                         adapter->rx_fc);
3205
3206         if (be_physfn(adapter)) {
3207                 if (adapter->dev_num_vfs)
3208                         be_vf_setup(adapter);
3209                 else
3210                         dev_warn(dev, "device doesn't support SRIOV\n");
3211         }
3212
3213         status = be_cmd_get_phy_info(adapter);
3214         if (!status && be_pause_supported(adapter))
3215                 adapter->phy.fc_autoneg = 1;
3216
3217         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3218         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3219         return 0;
3220 err:
3221         be_clear(adapter);
3222         return status;
3223 }
3224
3225 #ifdef CONFIG_NET_POLL_CONTROLLER
3226 static void be_netpoll(struct net_device *netdev)
3227 {
3228         struct be_adapter *adapter = netdev_priv(netdev);
3229         struct be_eq_obj *eqo;
3230         int i;
3231
3232         for_all_evt_queues(adapter, eqo, i) {
3233                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3234                 napi_schedule(&eqo->napi);
3235         }
3238 }
3239 #endif
3240
3241 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
3242 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3243
3244 static bool be_flash_redboot(struct be_adapter *adapter,
3245                         const u8 *p, u32 img_start, int image_size,
3246                         int hdr_size)
3247 {
3248         u32 crc_offset;
3249         u8 flashed_crc[4];
3250         int status;
3251
3252         crc_offset = hdr_size + img_start + image_size - 4;
3253
3254         p += crc_offset;
3255
3256         status = be_cmd_get_flash_crc(adapter, flashed_crc,
3257                         (image_size - 4));
3258         if (status) {
3259                 dev_err(&adapter->pdev->dev,
3260                         "could not get CRC from flash, not flashing redboot\n");
3261                 return false;
3262         }
3263
3264         /* update redboot only if the CRC does not match */
3265         if (!memcmp(flashed_crc, p, 4))
3266                 return false;
3267
3268         return true;
3269 }
3270
3271 static bool phy_flashing_required(struct be_adapter *adapter)
3272 {
3273         return (adapter->phy.phy_type == TN_8022 &&
3274                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3275 }
3276
3277 static bool is_comp_in_ufi(struct be_adapter *adapter,
3278                            struct flash_section_info *fsec, int type)
3279 {
3280         int i = 0, img_type = 0;
3281         struct flash_section_info_g2 *fsec_g2 = NULL;
3282
3283         if (BE2_chip(adapter))
3284                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3285
3286         for (i = 0; i < MAX_FLASH_COMP; i++) {
3287                 if (fsec_g2)
3288                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3289                 else
3290                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3291
3292                 if (img_type == type)
3293                         return true;
3294         }
3295         return false;
3296 }
3298
3299 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3300                                                 int header_size,
3301                                                 const struct firmware *fw)
3302 {
3303         struct flash_section_info *fsec = NULL;
3304         const u8 *p = fw->data;
3305
3306         p += header_size;
3307         while (p < (fw->data + fw->size)) {
3308                 fsec = (struct flash_section_info *)p;
3309                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3310                         return fsec;
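                /* the cookie is only looked for on 32-byte boundaries */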
3311                 p += 32;
3312         }
3313         return NULL;
3314 }
3315
3316 static int be_flash(struct be_adapter *adapter, const u8 *img,
3317                 struct be_dma_mem *flash_cmd, int optype, int img_size)
3318 {
3319         u32 total_bytes = 0, flash_op, num_bytes = 0;
3320         int status = 0;
3321         struct be_cmd_write_flashrom *req = flash_cmd->va;
3322
3323         total_bytes = img_size;
3324         while (total_bytes) {
3325                 num_bytes = min_t(u32, 32 * 1024, total_bytes);
3326
3327                 total_bytes -= num_bytes;
3328
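                /* intermediate chunks are staged with a SAVE op; only the
                 * final chunk issues a FLASH op to commit the image */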
3329                 if (!total_bytes) {
3330                         if (optype == OPTYPE_PHY_FW)
3331                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3332                         else
3333                                 flash_op = FLASHROM_OPER_FLASH;
3334                 } else {
3335                         if (optype == OPTYPE_PHY_FW)
3336                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3337                         else
3338                                 flash_op = FLASHROM_OPER_SAVE;
3339                 }
3340
3341                 memcpy(req->data_buf, img, num_bytes);
3342                 img += num_bytes;
3343                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3344                                                 flash_op, num_bytes);
3345                 if (status) {
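                        /* a FW build without PHY support rejects PHY-flash
                         * ops; treat that as nothing-to-do, not a failure */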
3346                         if (status == ILLEGAL_IOCTL_REQ &&
3347                             optype == OPTYPE_PHY_FW)
3348                                 break;
3349                         dev_err(&adapter->pdev->dev,
3350                                 "cmd to write to flash rom failed.\n");
3351                         return status;
3352                 }
3353         }
3354         return 0;
3355 }
3356
3357 /* For BE2, BE3 and BE3-R */
3358 static int be_flash_BEx(struct be_adapter *adapter,
3359                          const struct firmware *fw,
3360                          struct be_dma_mem *flash_cmd,
3361                          int num_of_images)
3362 {
3363
3364         int status = 0, i, filehdr_size = 0;
3365         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3366         const u8 *p = fw->data;
3367         const struct flash_comp *pflashcomp;
3368         int num_comp, redboot;
3369         struct flash_section_info *fsec = NULL;
3370
3371         struct flash_comp gen3_flash_types[] = {
3372                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3373                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3374                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3375                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3376                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3377                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3378                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3379                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3380                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3381                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3382                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3383                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3384                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3385                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3386                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3387                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3388                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3389                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3390                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3391                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3392         };
3393
3394         struct flash_comp gen2_flash_types[] = {
3395                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3396                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3397                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3398                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3399                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3400                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3401                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3402                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3403                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3404                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3405                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3406                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3407                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3408                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3409                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3410                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3411         };
3412
3413         if (BE3_chip(adapter)) {
3414                 pflashcomp = gen3_flash_types;
3415                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3416                 num_comp = ARRAY_SIZE(gen3_flash_types);
3417         } else {
3418                 pflashcomp = gen2_flash_types;
3419                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3420                 num_comp = ARRAY_SIZE(gen2_flash_types);
3421         }
3422
3423         /* Get flash section info*/
3424         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3425         if (!fsec) {
3426                 dev_err(&adapter->pdev->dev,
3427                         "Invalid Cookie. UFI corrupted?\n");
3428                 return -EINVAL;
3429         }
3430         for (i = 0; i < num_comp; i++) {
3431                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3432                         continue;
3433
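                /* flash the NCSI image only if the FW on the card is
                 * 3.102.148.0 or newer */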
3434                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3435                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3436                         continue;
3437
3438                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
3439                     !phy_flashing_required(adapter))
3440                         continue;
3441
3442                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3443                         redboot = be_flash_redboot(adapter, fw->data,
3444                                 pflashcomp[i].offset, pflashcomp[i].size,
3445                                 filehdr_size + img_hdrs_size);
3446                         if (!redboot)
3447                                 continue;
3448                 }
3449
3450                 p = fw->data;
3451                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
3452                 if (p + pflashcomp[i].size > fw->data + fw->size)
3453                         return -EINVAL;
3454
3455                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3456                                         pflashcomp[i].size);
3457                 if (status) {
3458                         dev_err(&adapter->pdev->dev,
3459                                 "Flashing section type %d failed.\n",
3460                                 pflashcomp[i].img_type);
3461                         return status;
3462                 }
3463         }
3464         return 0;
3465 }
3466
3467 static int be_flash_skyhawk(struct be_adapter *adapter,
3468                 const struct firmware *fw,
3469                 struct be_dma_mem *flash_cmd, int num_of_images)
3470 {
3471         int status = 0, i, filehdr_size = 0;
3472         int img_offset, img_size, img_optype, redboot;
3473         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3474         const u8 *p = fw->data;
3475         struct flash_section_info *fsec = NULL;
3476
3477         filehdr_size = sizeof(struct flash_file_hdr_g3);
3478         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3479         if (!fsec) {
3480                 dev_err(&adapter->pdev->dev,
3481                         "Invalid Cookie. UFI corrupted?\n");
3482                 return -EINVAL;
3483         }
3484
3485         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3486                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3487                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3488
3489                 switch (le32_to_cpu(fsec->fsec_entry[i].type)) {
3490                 case IMAGE_FIRMWARE_iSCSI:
3491                         img_optype = OPTYPE_ISCSI_ACTIVE;
3492                         break;
3493                 case IMAGE_BOOT_CODE:
3494                         img_optype = OPTYPE_REDBOOT;
3495                         break;
3496                 case IMAGE_OPTION_ROM_ISCSI:
3497                         img_optype = OPTYPE_BIOS;
3498                         break;
3499                 case IMAGE_OPTION_ROM_PXE:
3500                         img_optype = OPTYPE_PXE_BIOS;
3501                         break;
3502                 case IMAGE_OPTION_ROM_FCoE:
3503                         img_optype = OPTYPE_FCOE_BIOS;
3504                         break;
3505                 case IMAGE_FIRMWARE_BACKUP_iSCSI:
3506                         img_optype = OPTYPE_ISCSI_BACKUP;
3507                         break;
3508                 case IMAGE_NCSI:
3509                         img_optype = OPTYPE_NCSI_FW;
3510                         break;
3511                 default:
3512                         continue;
3513                 }
3514
3515                 if (img_optype == OPTYPE_REDBOOT) {
3516                         redboot = be_flash_redboot(adapter, fw->data,
3517                                         img_offset, img_size,
3518                                         filehdr_size + img_hdrs_size);
3519                         if (!redboot)
3520                                 continue;
3521                 }
3522
3523                 p = fw->data;
3524                 p += filehdr_size + img_offset + img_hdrs_size;
3525                 if (p + img_size > fw->data + fw->size)
3526                         return -EINVAL;
3527
3528                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
3529                 if (status) {
3530                         dev_err(&adapter->pdev->dev,
3531                                 "Flashing section type %d failed.\n",
3532                                 le32_to_cpu(fsec->fsec_entry[i].type));
3533                         return status;
3534                 }
3535         }
3536         return 0;
3537 }
3538
3539 static int lancer_wait_idle(struct be_adapter *adapter)
3540 {
3541 #define SLIPORT_IDLE_TIMEOUT 30
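/* the in-progress bit is polled once per second, so ~30s worst case */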
3542         u32 reg_val;
3543         int status = 0, i;
3544
3545         for (i = 0; i < SLIPORT_IDLE_TIMEOUT; i++) {
3546                 reg_val = ioread32(adapter->db + PHYSDEV_CONTROL_OFFSET);
3547                 if ((reg_val & PHYSDEV_CONTROL_INP_MASK) == 0)
3548                         break;
3549
3550                 ssleep(1);
3551         }
3552
3553         if (i == SLIPORT_IDLE_TIMEOUT)
3554                 status = -ETIMEDOUT;
3555
3556         return status;
3557 }
3558
3559 static int lancer_fw_reset(struct be_adapter *adapter)
3560 {
3561         int status = 0;
3562
3563         status = lancer_wait_idle(adapter);
3564         if (status)
3565                 return status;
3566
3567         iowrite32(PHYSDEV_CONTROL_FW_RESET_MASK, adapter->db +
3568                   PHYSDEV_CONTROL_OFFSET);
3569
3570         return status;
3571 }
3572
3573 static int lancer_fw_download(struct be_adapter *adapter,
3574                                 const struct firmware *fw)
3575 {
3576 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3577 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3578         struct be_dma_mem flash_cmd;
3579         const u8 *data_ptr = NULL;
3580         u8 *dest_image_ptr = NULL;
3581         size_t image_size = 0;
3582         u32 chunk_size = 0;
3583         u32 data_written = 0;
3584         u32 offset = 0;
3585         int status = 0;
3586         u8 add_status = 0;
3587         u8 change_status;
3588
3589         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3590                 dev_err(&adapter->pdev->dev,
3591                         "FW Image not properly aligned. "
3592                         "Length must be 4-byte aligned.\n");
3593                 status = -EINVAL;
3594                 goto lancer_fw_exit;
3595         }
3596
3597         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3598                                 + LANCER_FW_DOWNLOAD_CHUNK;
3599         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3600                                           &flash_cmd.dma, GFP_KERNEL);
3601         if (!flash_cmd.va) {
3602                 status = -ENOMEM;
3603                 goto lancer_fw_exit;
3604         }
3605
3606         dest_image_ptr = flash_cmd.va +
3607                                 sizeof(struct lancer_cmd_req_write_object);
3608         image_size = fw->size;
3609         data_ptr = fw->data;
3610
3611         while (image_size) {
3612                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3613
3614                 /* Copy the image chunk content. */
3615                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3616
3617                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3618                                                  chunk_size, offset,
3619                                                  LANCER_FW_DOWNLOAD_LOCATION,
3620                                                  &data_written, &change_status,
3621                                                  &add_status);
3622                 if (status)
3623                         break;
3624
3625                 offset += data_written;
3626                 data_ptr += data_written;
3627                 image_size -= data_written;
3628         }
3629
3630         if (!status) {
3631                 /* Commit the FW written */
3632                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3633                                                  0, offset,
3634                                                  LANCER_FW_DOWNLOAD_LOCATION,
3635                                                  &data_written, &change_status,
3636                                                  &add_status);
3637         }
3638
3639         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3640                                 flash_cmd.dma);
3641         if (status) {
3642                 dev_err(&adapter->pdev->dev,
3643                         "Firmware load error. "
3644                         "Status code: 0x%x Additional Status: 0x%x\n",
3645                         status, add_status);
3646                 goto lancer_fw_exit;
3647         }
3648
3649         if (change_status == LANCER_FW_RESET_NEEDED) {
3650                 status = lancer_fw_reset(adapter);
3651                 if (status) {
3652                         dev_err(&adapter->pdev->dev,
3653                                 "Adapter busy for FW reset.\n"
3654                                 "New FW will not be active.\n");
3655                         goto lancer_fw_exit;
3656                 }
3657         } else if (change_status != LANCER_NO_RESET_NEEDED) {
3658                 dev_err(&adapter->pdev->dev,
3659                         "System reboot required for new FW"
3660                         " to be active\n");
3661         }
3662
3663         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3664 lancer_fw_exit:
3665         return status;
3666 }
3667
3668 #define UFI_TYPE2               2
3669 #define UFI_TYPE3               3
3670 #define UFI_TYPE3R              10
3671 #define UFI_TYPE4               4
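/* UFI compatibility: TYPE2 -> BE2, TYPE3 -> BE3, TYPE3R -> BE3-R
 * (asic_type_rev 0x10), TYPE4 -> Skyhawk; the type is derived below from
 * the first digit of fhdr->build and the adapter family.
 */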
3672 static int be_get_ufi_type(struct be_adapter *adapter,
3673                            struct flash_file_hdr_g3 *fhdr)
3674 {
3675         if (fhdr == NULL)
3676                 goto be_get_ufi_exit;
3677
3678         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
3679                 return UFI_TYPE4;
3680         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
3681                 if (fhdr->asic_type_rev == 0x10)
3682                         return UFI_TYPE3R;
3683                 else
3684                         return UFI_TYPE3;
3685         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
3686                 return UFI_TYPE2;
3687
3688 be_get_ufi_exit:
3689         dev_err(&adapter->pdev->dev,
3690                 "UFI and Interface are not compatible for flashing\n");
3691         return -1;
3692 }
3693
3694 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3695 {
3696         struct flash_file_hdr_g3 *fhdr3;
3697         struct image_hdr *img_hdr_ptr = NULL;
3698         struct be_dma_mem flash_cmd;
3699         const u8 *p;
3700         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
3701
3702         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
3703         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3704                                           &flash_cmd.dma, GFP_KERNEL);
3705         if (!flash_cmd.va) {
3706                 status = -ENOMEM;
3707                 goto be_fw_exit;
3708         }
3709
3710         p = fw->data;
3711         fhdr3 = (struct flash_file_hdr_g3 *)p;
3712
3713         ufi_type = be_get_ufi_type(adapter, fhdr3);
3714
3715         num_imgs = le32_to_cpu(fhdr3->num_imgs);
3716         for (i = 0; i < num_imgs; i++) {
3717                 img_hdr_ptr = (struct image_hdr *)(fw->data +
3718                                 (sizeof(struct flash_file_hdr_g3) +
3719                                  i * sizeof(struct image_hdr)));
3720                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
3721                         switch (ufi_type) {
3722                         case UFI_TYPE4:
3723                                 status = be_flash_skyhawk(adapter, fw,
3724                                                         &flash_cmd, num_imgs);
3725                                 break;
3726                         case UFI_TYPE3R:
3727                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
3728                                                       num_imgs);
3729                                 break;
3730                         case UFI_TYPE3:
3731                                 /* Do not flash this ufi on BE3-R cards */
3732                                 if (adapter->asic_rev < 0x10)
3733                                         status = be_flash_BEx(adapter, fw,
3734                                                               &flash_cmd,
3735                                                               num_imgs);
3736                                 else {
3737                                         status = -EINVAL;
3738                                         dev_err(&adapter->pdev->dev,
3739                                                 "Can't load BE3 UFI on BE3R\n");
3740                                 }
3741                         }
3742                 }
3743         }
3744
3745         if (ufi_type == UFI_TYPE2)
3746                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
3747         else if (ufi_type == -1)
3748                 status = -EINVAL;
3749
3750         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3751                           flash_cmd.dma);
3752         if (status) {
3753                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3754                 goto be_fw_exit;
3755         }
3756
3757         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3758
3759 be_fw_exit:
3760         return status;
3761 }
3762
3763 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3764 {
3765         const struct firmware *fw;
3766         int status;
3767
3768         if (!netif_running(adapter->netdev)) {
3769                 dev_err(&adapter->pdev->dev,
3770                         "Firmware load not allowed (interface is down)\n");
3771                 return -ENETDOWN;
3772         }
3773
3774         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3775         if (status)
3776                 goto fw_exit;
3777
3778         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3779
3780         if (lancer_chip(adapter))
3781                 status = lancer_fw_download(adapter, fw);
3782         else
3783                 status = be_fw_download(adapter, fw);
3784
3785 fw_exit:
3786         release_firmware(fw);
3787         return status;
3788 }
3789
3790 static const struct net_device_ops be_netdev_ops = {
3791         .ndo_open               = be_open,
3792         .ndo_stop               = be_close,
3793         .ndo_start_xmit         = be_xmit,
3794         .ndo_set_rx_mode        = be_set_rx_mode,
3795         .ndo_set_mac_address    = be_mac_addr_set,
3796         .ndo_change_mtu         = be_change_mtu,
3797         .ndo_get_stats64        = be_get_stats64,
3798         .ndo_validate_addr      = eth_validate_addr,
3799         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3800         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3801         .ndo_set_vf_mac         = be_set_vf_mac,
3802         .ndo_set_vf_vlan        = be_set_vf_vlan,
3803         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3804         .ndo_get_vf_config      = be_get_vf_config,
3805 #ifdef CONFIG_NET_POLL_CONTROLLER
3806         .ndo_poll_controller    = be_netpoll,
3807 #endif
3808 };
3809
3810 static void be_netdev_init(struct net_device *netdev)
3811 {
3812         struct be_adapter *adapter = netdev_priv(netdev);
3813         struct be_eq_obj *eqo;
3814         int i;
3815
3816         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3817                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3818                 NETIF_F_HW_VLAN_CTAG_TX;
3819         if (be_multi_rxq(adapter))
3820                 netdev->hw_features |= NETIF_F_RXHASH;
3821
3822         netdev->features |= netdev->hw_features |
3823                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
3824
3825         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3826                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3827
3828         netdev->priv_flags |= IFF_UNICAST_FLT;
3829
3830         netdev->flags |= IFF_MULTICAST;
3831
3832         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
3833
3834         netdev->netdev_ops = &be_netdev_ops;
3835
3836         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3837
3838         for_all_evt_queues(adapter, eqo, i)
3839                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3840 }
3841
3842 static void be_unmap_pci_bars(struct be_adapter *adapter)
3843 {
3844         if (adapter->csr)
3845                 pci_iounmap(adapter->pdev, adapter->csr);
3846         if (adapter->db)
3847                 pci_iounmap(adapter->pdev, adapter->db);
3848 }
3849
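/* Lancer and VFs expose the doorbell registers in BAR 0; BE-x PFs use BAR 4 */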
3850 static int db_bar(struct be_adapter *adapter)
3851 {
3852         if (lancer_chip(adapter) || !be_physfn(adapter))
3853                 return 0;
3854         else
3855                 return 4;
3856 }
3857
3858 static int be_roce_map_pci_bars(struct be_adapter *adapter)
3859 {
3860         if (skyhawk_chip(adapter)) {
3861                 adapter->roce_db.size = 4096;
3862                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
3863                                                               db_bar(adapter));
3864                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
3865                                                                db_bar(adapter));
3866         }
3867         return 0;
3868 }
3869
3870 static int be_map_pci_bars(struct be_adapter *adapter)
3871 {
3872         u8 __iomem *addr;
3873         u32 sli_intf;
3874
3875         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3876         adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3877                                 SLI_INTF_IF_TYPE_SHIFT;
3878
3879         if (BEx_chip(adapter) && be_physfn(adapter)) {
3880                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
3881                 if (adapter->csr == NULL)
3882                         return -ENOMEM;
3883         }
3884
3885         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
3886         if (addr == NULL)
3887                 goto pci_map_err;
3888         adapter->db = addr;
3889
3890         be_roce_map_pci_bars(adapter);
3891         return 0;
3892
3893 pci_map_err:
3894         be_unmap_pci_bars(adapter);
3895         return -ENOMEM;
3896 }
3897
3898 static void be_ctrl_cleanup(struct be_adapter *adapter)
3899 {
3900         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3901
3902         be_unmap_pci_bars(adapter);
3903
3904         if (mem->va)
3905                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3906                                   mem->dma);
3907
3908         mem = &adapter->rx_filter;
3909         if (mem->va)
3910                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3911                                   mem->dma);
3912 }
3913
3914 static int be_ctrl_init(struct be_adapter *adapter)
3915 {
3916         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3917         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3918         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3919         u32 sli_intf;
3920         int status;
3921
3922         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3923         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
3924                                  SLI_INTF_FAMILY_SHIFT;
3925         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3926
3927         status = be_map_pci_bars(adapter);
3928         if (status)
3929                 goto done;
3930
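        /* over-allocate by 16 bytes so the va/dma addresses of the mailbox
         * can be rounded up to a 16-byte boundary below */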
3931         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3932         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3933                                                 mbox_mem_alloc->size,
3934                                                 &mbox_mem_alloc->dma,
3935                                                 GFP_KERNEL);
3936         if (!mbox_mem_alloc->va) {
3937                 status = -ENOMEM;
3938                 goto unmap_pci_bars;
3939         }
3940         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3941         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3942         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3943         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3944
3945         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3946         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3947                                            &rx_filter->dma,
3948                                            GFP_KERNEL | __GFP_ZERO);
3949         if (rx_filter->va == NULL) {
3950                 status = -ENOMEM;
3951                 goto free_mbox;
3952         }
3953
3954         mutex_init(&adapter->mbox_lock);
3955         spin_lock_init(&adapter->mcc_lock);
3956         spin_lock_init(&adapter->mcc_cq_lock);
3957
3958         init_completion(&adapter->flash_compl);
3959         pci_save_state(adapter->pdev);
3960         return 0;
3961
3962 free_mbox:
3963         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3964                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3965
3966 unmap_pci_bars:
3967         be_unmap_pci_bars(adapter);
3968
3969 done:
3970         return status;
3971 }
3972
3973 static void be_stats_cleanup(struct be_adapter *adapter)
3974 {
3975         struct be_dma_mem *cmd = &adapter->stats_cmd;
3976
3977         if (cmd->va)
3978                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3979                                   cmd->va, cmd->dma);
3980 }
3981
3982 static int be_stats_init(struct be_adapter *adapter)
3983 {
3984         struct be_dma_mem *cmd = &adapter->stats_cmd;
3985
3986         if (lancer_chip(adapter))
3987                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3988         else if (BE2_chip(adapter))
3989                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3990         else
3991                 /* BE3 and Skyhawk */
3992                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3993
3994         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3995                                      GFP_KERNEL | __GFP_ZERO);
3996         if (cmd->va == NULL)
3997                 return -ENOMEM;
3998         return 0;
3999 }
4000
4001 static void be_remove(struct pci_dev *pdev)
4002 {
4003         struct be_adapter *adapter = pci_get_drvdata(pdev);
4004
4005         if (!adapter)
4006                 return;
4007
4008         be_roce_dev_remove(adapter);
4009         be_intr_set(adapter, false);
4010
4011         cancel_delayed_work_sync(&adapter->func_recovery_work);
4012
4013         unregister_netdev(adapter->netdev);
4014
4015         be_clear(adapter);
4016
4017         /* tell fw we're done with firing cmds */
4018         be_cmd_fw_clean(adapter);
4019
4020         be_stats_cleanup(adapter);
4021
4022         be_ctrl_cleanup(adapter);
4023
4024         pci_disable_pcie_error_reporting(pdev);
4025
4026         pci_set_drvdata(pdev, NULL);
4027         pci_release_regions(pdev);
4028         pci_disable_device(pdev);
4029
4030         free_netdev(adapter->netdev);
4031 }
4032
4033 bool be_is_wol_supported(struct be_adapter *adapter)
4034 {
4035         return (adapter->wol_cap & BE_WOL_CAP) &&
4036                !be_is_wol_excluded(adapter);
4037 }
4038
4039 u32 be_get_fw_log_level(struct be_adapter *adapter)
4040 {
4041         struct be_dma_mem extfat_cmd;
4042         struct be_fat_conf_params *cfgs;
4043         int status;
4044         u32 level = 0;
4045         int j;
4046
4047         if (lancer_chip(adapter))
4048                 return 0;
4049
4050         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
4051         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
4052         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
4053                                              &extfat_cmd.dma);
4054
4055         if (!extfat_cmd.va) {
4056                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
4057                         __func__);
4058                 goto err;
4059         }
4060
4061         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
4062         if (!status) {
4063                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
4064                                                 sizeof(struct be_cmd_resp_hdr));
4065                 for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
4066                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
4067                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
4068                 }
4069         }
4070         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
4071                             extfat_cmd.dma);
4072 err:
4073         return level;
4074 }
4075
4076 static int be_get_initial_config(struct be_adapter *adapter)
4077 {
4078         int status;
4079         u32 level;
4080
4081         status = be_cmd_get_cntl_attributes(adapter);
4082         if (status)
4083                 return status;
4084
4085         status = be_cmd_get_acpi_wol_cap(adapter);
4086         if (status) {
4087                 /* in case of a failure to get wol capabilities
4088                  * check the exclusion list to determine WOL capability */
4089                 if (!be_is_wol_excluded(adapter))
4090                         adapter->wol_cap |= BE_WOL_CAP;
4091         }
4092
4093         if (be_is_wol_supported(adapter))
4094                 adapter->wol = true;
4095
4096         /* Must be a power of 2 or else MODULO will BUG_ON */
4097         adapter->be_get_temp_freq = 64;
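        /* be_worker() runs every second, so the die temperature is queried
         * roughly once a minute */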
4098
4099         level = be_get_fw_log_level(adapter);
4100         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4101
4102         return 0;
4103 }
4104
4105 static int lancer_recover_func(struct be_adapter *adapter)
4106 {
4107         struct device *dev = &adapter->pdev->dev;
4108         int status;
4109
4110         status = lancer_test_and_set_rdy_state(adapter);
4111         if (status)
4112                 goto err;
4113
4114         if (netif_running(adapter->netdev))
4115                 be_close(adapter->netdev);
4116
4117         be_clear(adapter);
4118
4119         be_clear_all_error(adapter);
4120
4121         status = be_setup(adapter);
4122         if (status)
4123                 goto err;
4124
4125         if (netif_running(adapter->netdev)) {
4126                 status = be_open(adapter->netdev);
4127                 if (status)
4128                         goto err;
4129         }
4130
4131         dev_info(dev, "Error recovery successful\n");
4132         return 0;
4133 err:
4134         if (status == -EAGAIN)
4135                 dev_err(dev, "Waiting for resource provisioning\n");
4136         else
4137                 dev_err(dev, "Error recovery failed\n");
4138
4139         return status;
4140 }
4141
4142 static void be_func_recovery_task(struct work_struct *work)
4143 {
4144         struct be_adapter *adapter =
4145                 container_of(work, struct be_adapter, func_recovery_work.work);
4146         int status = 0;
4147
4148         be_detect_error(adapter);
4149
4150         if (adapter->hw_error && lancer_chip(adapter)) {
4151
4152                 rtnl_lock();
4153                 netif_device_detach(adapter->netdev);
4154                 rtnl_unlock();
4155
4156                 status = lancer_recover_func(adapter);
4157                 if (!status)
4158                         netif_device_attach(adapter->netdev);
4159         }
4160
4161         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4162          * no need to attempt further recovery.
4163          */
4164         if (!status || status == -EAGAIN)
4165                 schedule_delayed_work(&adapter->func_recovery_work,
4166                                       msecs_to_jiffies(1000));
4167 }
4168
4169 static void be_worker(struct work_struct *work)
4170 {
4171         struct be_adapter *adapter =
4172                 container_of(work, struct be_adapter, work.work);
4173         struct be_rx_obj *rxo;
4174         struct be_eq_obj *eqo;
4175         int i;
4176
4177         /* when interrupts are not yet enabled, just reap any pending
4178          * mcc completions */
4179         if (!netif_running(adapter->netdev)) {
4180                 local_bh_disable();
4181                 be_process_mcc(adapter);
4182                 local_bh_enable();
4183                 goto reschedule;
4184         }
4185
4186         if (!adapter->stats_cmd_sent) {
4187                 if (lancer_chip(adapter))
4188                         lancer_cmd_get_pport_stats(adapter,
4189                                                 &adapter->stats_cmd);
4190                 else
4191                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4192         }
4193
4194         if (MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4195                 be_cmd_get_die_temperature(adapter);
4196
4197         for_all_rx_queues(adapter, rxo, i) {
4198                 if (rxo->rx_post_starved) {
4199                         rxo->rx_post_starved = false;
4200                         be_post_rx_frags(rxo, GFP_KERNEL);
4201                 }
4202         }
4203
4204         for_all_evt_queues(adapter, eqo, i)
4205                 be_eqd_update(adapter, eqo);
4206
4207 reschedule:
4208         adapter->work_counter++;
4209         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4210 }
4211
4212 static bool be_reset_required(struct be_adapter *adapter)
4213 {
4214         return be_find_vfs(adapter, ENABLED) <= 0;
4215 }
4216
4217 static char *mc_name(struct be_adapter *adapter)
4218 {
4219         if (adapter->function_mode & FLEX10_MODE)
4220                 return "FLEX10";
4221         else if (adapter->function_mode & VNIC_MODE)
4222                 return "vNIC";
4223         else if (adapter->function_mode & UMC_ENABLED)
4224                 return "UMC";
4225         else
4226                 return "";
4227 }
4228
4229 static inline char *func_name(struct be_adapter *adapter)
4230 {
4231         return be_physfn(adapter) ? "PF" : "VF";
4232 }
4233
4234 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4235 {
4236         int status = 0;
4237         struct be_adapter *adapter;
4238         struct net_device *netdev;
4239         char port_name;
4240
4241         status = pci_enable_device(pdev);
4242         if (status)
4243                 goto do_none;
4244
4245         status = pci_request_regions(pdev, DRV_NAME);
4246         if (status)
4247                 goto disable_dev;
4248         pci_set_master(pdev);
4249
4250         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4251         if (netdev == NULL) {
4252                 status = -ENOMEM;
4253                 goto rel_reg;
4254         }
4255         adapter = netdev_priv(netdev);
4256         adapter->pdev = pdev;
4257         pci_set_drvdata(pdev, adapter);
4258         adapter->netdev = netdev;
4259         SET_NETDEV_DEV(netdev, &pdev->dev);
4260
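        /* prefer 64-bit DMA and fall back to a 32-bit mask if unsupported */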
4261         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4262         if (!status) {
4263                 status = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
4264                 if (status < 0) {
4265                         dev_err(&pdev->dev, "dma_set_coherent_mask failed\n");
4266                         goto free_netdev;
4267                 }
4268                 netdev->features |= NETIF_F_HIGHDMA;
4269         } else {
4270                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4271                 if (!status)
4272                         status = dma_set_coherent_mask(&pdev->dev,
4273                                                        DMA_BIT_MASK(32));
4274                 if (status) {
4275                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4276                         goto free_netdev;
4277                 }
4278         }
4279
4280         status = pci_enable_pcie_error_reporting(pdev);
4281         if (status)
4282                 dev_err(&pdev->dev, "Could not use PCIe error reporting\n");
4283
4284         status = be_ctrl_init(adapter);
4285         if (status)
4286                 goto free_netdev;
4287
4288         /* sync up with fw's ready state */
4289         if (be_physfn(adapter)) {
4290                 status = be_fw_wait_ready(adapter);
4291                 if (status)
4292                         goto ctrl_clean;
4293         }
4294
4295         if (be_reset_required(adapter)) {
4296                 status = be_cmd_reset_function(adapter);
4297                 if (status)
4298                         goto ctrl_clean;
4299
4300                 /* Wait for interrupts to quiesce after an FLR */
4301                 msleep(100);
4302         }
4303
4304         /* Allow interrupts for other ULPs running on NIC function */
4305         be_intr_set(adapter, true);
4306
4307         /* tell fw we're ready to fire cmds */
4308         status = be_cmd_fw_init(adapter);
4309         if (status)
4310                 goto ctrl_clean;
4311
4312         status = be_stats_init(adapter);
4313         if (status)
4314                 goto ctrl_clean;
4315
4316         status = be_get_initial_config(adapter);
4317         if (status)
4318                 goto stats_clean;
4319
4320         INIT_DELAYED_WORK(&adapter->work, be_worker);
4321         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4322         adapter->rx_fc = adapter->tx_fc = true;
4323
4324         status = be_setup(adapter);
4325         if (status)
4326                 goto stats_clean;
4327
4328         be_netdev_init(netdev);
4329         status = register_netdev(netdev);
4330         if (status != 0)
4331                 goto unsetup;
4332
4333         be_roce_dev_add(adapter);
4334
4335         schedule_delayed_work(&adapter->func_recovery_work,
4336                               msecs_to_jiffies(1000));
4337
4338         be_cmd_query_port_name(adapter, &port_name);
4339
4340         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4341                  func_name(adapter), mc_name(adapter), port_name);
4342
4343         return 0;
4344
4345 unsetup:
4346         be_clear(adapter);
4347 stats_clean:
4348         be_stats_cleanup(adapter);
4349 ctrl_clean:
4350         be_ctrl_cleanup(adapter);
4351 free_netdev:
4352         free_netdev(netdev);
4353         pci_set_drvdata(pdev, NULL);
4354 rel_reg:
4355         pci_release_regions(pdev);
4356 disable_dev:
4357         pci_disable_device(pdev);
4358 do_none:
4359         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4360         return status;
4361 }
4362
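/* Legacy PM suspend: arm wake-on-LAN if configured, stop the recovery
 * worker, close the interface under rtnl, tear down HW resources and
 * drop the device to the requested power state.
 */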
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        if (adapter->wol)
                be_setup_wol(adapter, true);

        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

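/* Legacy PM resume: mirror of be_suspend() - re-enable the device,
 * restore PCI state, re-init the FW and rebuild HW resources before
 * reopening the interface and restarting the recovery worker.
 */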
static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        status = be_setup(adapter);
        if (status)
                return status;

        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        return 0;
}

/*
 * An FLR will stop BE from DMAing any data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);
        cancel_delayed_work_sync(&adapter->func_recovery_work);

        netif_device_detach(adapter->netdev);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

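/* PCI error recovery (EEH/AER) callbacks. The core calls
 * error_detected() when the channel fails, slot_reset() after the slot
 * has been reset, and resume() once traffic may restart; returning
 * PCI_ERS_RESULT_DISCONNECT at any stage aborts recovery.
 */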
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                            pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        if (!adapter->eeh_error) {
                adapter->eeh_error = true;

                cancel_delayed_work_sync(&adapter->func_recovery_work);

                rtnl_lock();
                netif_device_detach(netdev);
                if (netif_running(netdev))
                        be_close(netdev);
                rtnl_unlock();

                be_clear(adapter);
        }

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        /* The error could cause the FW to trigger a flash debug dump.
         * Resetting the card while the flash dump is in progress
         * can cause it not to recover; wait for it to finish.
         * Wait only on the first function, as this is needed only once
         * per adapter.
         */
        if (pdev->devfn == 0)
                ssleep(30);

        return PCI_ERS_RESULT_NEED_RESET;
}

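/* Slot reset: the link is back up after a reset; re-enable the device,
 * restore config space and wait for FW readiness before declaring the
 * slot recovered.
 */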
static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        dev_info(&adapter->pdev->dev,
                 "Waiting for FW to be ready after EEH reset\n");
        status = be_fw_wait_ready(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_cleanup_aer_uncorrect_error_status(pdev);
        be_clear_all_error(adapter);
        return PCI_ERS_RESULT_RECOVERED;
}

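/* Final recovery stage: reset and re-init the function, rebuild HW
 * resources and reopen the interface, then restart the recovery worker.
 */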
static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        status = be_cmd_reset_function(adapter);
        if (status)
                goto err;

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }

        schedule_delayed_work(&adapter->func_recovery_work,
                              msecs_to_jiffies(1000));
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static const struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

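/* Module init: validate rx_frag_size (only 2048, 4096 and 8192 byte
 * fragments are supported; anything else falls back to 2048) and
 * register the PCI driver. Illustrative load line, assuming the usual
 * "be2net" module name:
 *
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 */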
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                pr_warn(DRV_NAME " : Module param rx_frag_size must be 2048/4096/8192. Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);