HID: picolcd: sanity check report size in raw_event() callback
[firefly-linux-kernel-4.4.55.git] / drivers / net / ethernet / emulex / benet / be_main.c
1 /*
2  * Copyright (C) 2005 - 2014 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 #include <linux/aer.h>
24 #include <linux/if_bridge.h>
25 #include <net/busy_poll.h>
26 #include <net/vxlan.h>
27
28 MODULE_VERSION(DRV_VER);
29 MODULE_DEVICE_TABLE(pci, be_dev_ids);
30 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
31 MODULE_AUTHOR("Emulex Corporation");
32 MODULE_LICENSE("GPL");
33
34 static unsigned int num_vfs;
35 module_param(num_vfs, uint, S_IRUGO);
36 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
37
38 static ushort rx_frag_size = 2048;
39 module_param(rx_frag_size, ushort, S_IRUGO);
40 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
41
42 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
43         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
44         { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
45         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
46         { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
47         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
48         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
49         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
50         { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID6)},
51         { 0 }
52 };
53 MODULE_DEVICE_TABLE(pci, be_dev_ids);
54 /* UE Status Low CSR */
55 static const char * const ue_status_low_desc[] = {
56         "CEV",
57         "CTX",
58         "DBUF",
59         "ERX",
60         "Host",
61         "MPU",
62         "NDMA",
63         "PTC ",
64         "RDMA ",
65         "RXF ",
66         "RXIPS ",
67         "RXULP0 ",
68         "RXULP1 ",
69         "RXULP2 ",
70         "TIM ",
71         "TPOST ",
72         "TPRE ",
73         "TXIPS ",
74         "TXULP0 ",
75         "TXULP1 ",
76         "UC ",
77         "WDMA ",
78         "TXULP2 ",
79         "HOST1 ",
80         "P0_OB_LINK ",
81         "P1_OB_LINK ",
82         "HOST_GPIO ",
83         "MBOX ",
84         "ERX2 ",
85         "SPARE ",
86         "JTAG ",
87         "MPU_INTPEND "
88 };
89 /* UE Status High CSR */
90 static const char * const ue_status_hi_desc[] = {
91         "LPCMEMHOST",
92         "MGMT_MAC",
93         "PCS0ONLINE",
94         "MPU_IRAM",
95         "PCS1ONLINE",
96         "PCTL0",
97         "PCTL1",
98         "PMEM",
99         "RR",
100         "TXPB",
101         "RXPP",
102         "XAUI",
103         "TXP",
104         "ARM",
105         "IPC",
106         "HOST2",
107         "HOST3",
108         "HOST4",
109         "HOST5",
110         "HOST6",
111         "HOST7",
112         "ECRC",
113         "Poison TLP",
114         "NETC",
115         "PERIPH",
116         "LLTXULP",
117         "D2P",
118         "RCON",
119         "LDMA",
120         "LLTXP",
121         "LLTXPB",
122         "Unknown"
123 };
124
125
126 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
127 {
128         struct be_dma_mem *mem = &q->dma_mem;
129         if (mem->va) {
130                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
131                                   mem->dma);
132                 mem->va = NULL;
133         }
134 }
135
136 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
137                           u16 len, u16 entry_size)
138 {
139         struct be_dma_mem *mem = &q->dma_mem;
140
141         memset(q, 0, sizeof(*q));
142         q->len = len;
143         q->entry_size = entry_size;
144         mem->size = len * entry_size;
145         mem->va = dma_zalloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
146                                       GFP_KERNEL);
147         if (!mem->va)
148                 return -ENOMEM;
149         return 0;
150 }
151
152 static void be_reg_intr_set(struct be_adapter *adapter, bool enable)
153 {
154         u32 reg, enabled;
155
156         pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
157                               &reg);
158         enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
159
160         if (!enabled && enable)
161                 reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162         else if (enabled && !enable)
163                 reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
164         else
165                 return;
166
167         pci_write_config_dword(adapter->pdev,
168                                PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
169 }
170
171 static void be_intr_set(struct be_adapter *adapter, bool enable)
172 {
173         int status = 0;
174
175         /* On lancer interrupts can't be controlled via this register */
176         if (lancer_chip(adapter))
177                 return;
178
179         if (adapter->eeh_error)
180                 return;
181
182         status = be_cmd_intr_set(adapter, enable);
183         if (status)
184                 be_reg_intr_set(adapter, enable);
185 }
186
187 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
188 {
189         u32 val = 0;
190         val |= qid & DB_RQ_RING_ID_MASK;
191         val |= posted << DB_RQ_NUM_POSTED_SHIFT;
192
193         wmb();
194         iowrite32(val, adapter->db + DB_RQ_OFFSET);
195 }
196
197 static void be_txq_notify(struct be_adapter *adapter, struct be_tx_obj *txo,
198                           u16 posted)
199 {
200         u32 val = 0;
201         val |= txo->q.id & DB_TXULP_RING_ID_MASK;
202         val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
203
204         wmb();
205         iowrite32(val, adapter->db + txo->db_offset);
206 }
207
208 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
209                          bool arm, bool clear_int, u16 num_popped)
210 {
211         u32 val = 0;
212         val |= qid & DB_EQ_RING_ID_MASK;
213         val |= ((qid & DB_EQ_RING_ID_EXT_MASK) << DB_EQ_RING_ID_EXT_MASK_SHIFT);
214
215         if (adapter->eeh_error)
216                 return;
217
218         if (arm)
219                 val |= 1 << DB_EQ_REARM_SHIFT;
220         if (clear_int)
221                 val |= 1 << DB_EQ_CLR_SHIFT;
222         val |= 1 << DB_EQ_EVNT_SHIFT;
223         val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
224         iowrite32(val, adapter->db + DB_EQ_OFFSET);
225 }
226
227 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
228 {
229         u32 val = 0;
230         val |= qid & DB_CQ_RING_ID_MASK;
231         val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
232                         DB_CQ_RING_ID_EXT_MASK_SHIFT);
233
234         if (adapter->eeh_error)
235                 return;
236
237         if (arm)
238                 val |= 1 << DB_CQ_REARM_SHIFT;
239         val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
240         iowrite32(val, adapter->db + DB_CQ_OFFSET);
241 }
242
243 static int be_mac_addr_set(struct net_device *netdev, void *p)
244 {
245         struct be_adapter *adapter = netdev_priv(netdev);
246         struct device *dev = &adapter->pdev->dev;
247         struct sockaddr *addr = p;
248         int status;
249         u8 mac[ETH_ALEN];
250         u32 old_pmac_id = adapter->pmac_id[0], curr_pmac_id = 0;
251
252         if (!is_valid_ether_addr(addr->sa_data))
253                 return -EADDRNOTAVAIL;
254
255         /* Proceed further only if, User provided MAC is different
256          * from active MAC
257          */
258         if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
259                 return 0;
260
261         /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
262          * privilege or if PF did not provision the new MAC address.
263          * On BE3, this cmd will always fail if the VF doesn't have the
264          * FILTMGMT privilege. This failure is OK, only if the PF programmed
265          * the MAC for the VF.
266          */
267         status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
268                                  adapter->if_handle, &adapter->pmac_id[0], 0);
269         if (!status) {
270                 curr_pmac_id = adapter->pmac_id[0];
271
272                 /* Delete the old programmed MAC. This call may fail if the
273                  * old MAC was already deleted by the PF driver.
274                  */
275                 if (adapter->pmac_id[0] != old_pmac_id)
276                         be_cmd_pmac_del(adapter, adapter->if_handle,
277                                         old_pmac_id, 0);
278         }
279
280         /* Decide if the new MAC is successfully activated only after
281          * querying the FW
282          */
283         status = be_cmd_get_active_mac(adapter, curr_pmac_id, mac,
284                                        adapter->if_handle, true, 0);
285         if (status)
286                 goto err;
287
288         /* The MAC change did not happen, either due to lack of privilege
289          * or PF didn't pre-provision.
290          */
291         if (!ether_addr_equal(addr->sa_data, mac)) {
292                 status = -EPERM;
293                 goto err;
294         }
295
296         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
297         dev_info(dev, "MAC address changed to %pM\n", mac);
298         return 0;
299 err:
300         dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
301         return status;
302 }
303
304 /* BE2 supports only v0 cmd */
305 static void *hw_stats_from_cmd(struct be_adapter *adapter)
306 {
307         if (BE2_chip(adapter)) {
308                 struct be_cmd_resp_get_stats_v0 *cmd = adapter->stats_cmd.va;
309
310                 return &cmd->hw_stats;
311         } else if (BE3_chip(adapter)) {
312                 struct be_cmd_resp_get_stats_v1 *cmd = adapter->stats_cmd.va;
313
314                 return &cmd->hw_stats;
315         } else {
316                 struct be_cmd_resp_get_stats_v2 *cmd = adapter->stats_cmd.va;
317
318                 return &cmd->hw_stats;
319         }
320 }
321
322 /* BE2 supports only v0 cmd */
323 static void *be_erx_stats_from_cmd(struct be_adapter *adapter)
324 {
325         if (BE2_chip(adapter)) {
326                 struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
327
328                 return &hw_stats->erx;
329         } else if (BE3_chip(adapter)) {
330                 struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
331
332                 return &hw_stats->erx;
333         } else {
334                 struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
335
336                 return &hw_stats->erx;
337         }
338 }
339
340 static void populate_be_v0_stats(struct be_adapter *adapter)
341 {
342         struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
343         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
344         struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
345         struct be_port_rxf_stats_v0 *port_stats =
346                                         &rxf_stats->port[adapter->port_num];
347         struct be_drv_stats *drvs = &adapter->drv_stats;
348
349         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
350         drvs->rx_pause_frames = port_stats->rx_pause_frames;
351         drvs->rx_crc_errors = port_stats->rx_crc_errors;
352         drvs->rx_control_frames = port_stats->rx_control_frames;
353         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
354         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
355         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
356         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
357         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
358         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
359         drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
360         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
361         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
362         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
363         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
364         drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
365         drvs->rx_dropped_header_too_small =
366                 port_stats->rx_dropped_header_too_small;
367         drvs->rx_address_filtered =
368                                         port_stats->rx_address_filtered +
369                                         port_stats->rx_vlan_filtered;
370         drvs->rx_alignment_symbol_errors =
371                 port_stats->rx_alignment_symbol_errors;
372
373         drvs->tx_pauseframes = port_stats->tx_pauseframes;
374         drvs->tx_controlframes = port_stats->tx_controlframes;
375
376         if (adapter->port_num)
377                 drvs->jabber_events = rxf_stats->port1_jabber_events;
378         else
379                 drvs->jabber_events = rxf_stats->port0_jabber_events;
380         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
381         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
382         drvs->forwarded_packets = rxf_stats->forwarded_packets;
383         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
384         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
385         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
386         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
387 }
388
389 static void populate_be_v1_stats(struct be_adapter *adapter)
390 {
391         struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
392         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
393         struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
394         struct be_port_rxf_stats_v1 *port_stats =
395                                         &rxf_stats->port[adapter->port_num];
396         struct be_drv_stats *drvs = &adapter->drv_stats;
397
398         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
399         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
400         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
401         drvs->rx_pause_frames = port_stats->rx_pause_frames;
402         drvs->rx_crc_errors = port_stats->rx_crc_errors;
403         drvs->rx_control_frames = port_stats->rx_control_frames;
404         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
405         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
406         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
407         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
408         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
409         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
410         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
411         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
412         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
413         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
414         drvs->rx_dropped_header_too_small =
415                 port_stats->rx_dropped_header_too_small;
416         drvs->rx_input_fifo_overflow_drop =
417                 port_stats->rx_input_fifo_overflow_drop;
418         drvs->rx_address_filtered = port_stats->rx_address_filtered;
419         drvs->rx_alignment_symbol_errors =
420                 port_stats->rx_alignment_symbol_errors;
421         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
422         drvs->tx_pauseframes = port_stats->tx_pauseframes;
423         drvs->tx_controlframes = port_stats->tx_controlframes;
424         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
425         drvs->jabber_events = port_stats->jabber_events;
426         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
427         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
428         drvs->forwarded_packets = rxf_stats->forwarded_packets;
429         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
430         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
431         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
432         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
433 }
434
435 static void populate_be_v2_stats(struct be_adapter *adapter)
436 {
437         struct be_hw_stats_v2 *hw_stats = hw_stats_from_cmd(adapter);
438         struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
439         struct be_rxf_stats_v2 *rxf_stats = &hw_stats->rxf;
440         struct be_port_rxf_stats_v2 *port_stats =
441                                         &rxf_stats->port[adapter->port_num];
442         struct be_drv_stats *drvs = &adapter->drv_stats;
443
444         be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
445         drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
446         drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
447         drvs->rx_pause_frames = port_stats->rx_pause_frames;
448         drvs->rx_crc_errors = port_stats->rx_crc_errors;
449         drvs->rx_control_frames = port_stats->rx_control_frames;
450         drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
451         drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
452         drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
453         drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
454         drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
455         drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
456         drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
457         drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
458         drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
459         drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
460         drvs->rx_dropped_header_too_small =
461                 port_stats->rx_dropped_header_too_small;
462         drvs->rx_input_fifo_overflow_drop =
463                 port_stats->rx_input_fifo_overflow_drop;
464         drvs->rx_address_filtered = port_stats->rx_address_filtered;
465         drvs->rx_alignment_symbol_errors =
466                 port_stats->rx_alignment_symbol_errors;
467         drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
468         drvs->tx_pauseframes = port_stats->tx_pauseframes;
469         drvs->tx_controlframes = port_stats->tx_controlframes;
470         drvs->tx_priority_pauseframes = port_stats->tx_priority_pauseframes;
471         drvs->jabber_events = port_stats->jabber_events;
472         drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
473         drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
474         drvs->forwarded_packets = rxf_stats->forwarded_packets;
475         drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
476         drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
477         drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
478         adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
479         if (be_roce_supported(adapter)) {
480                 drvs->rx_roce_bytes_lsd = port_stats->roce_bytes_received_lsd;
481                 drvs->rx_roce_bytes_msd = port_stats->roce_bytes_received_msd;
482                 drvs->rx_roce_frames = port_stats->roce_frames_received;
483                 drvs->roce_drops_crc = port_stats->roce_drops_crc;
484                 drvs->roce_drops_payload_len =
485                         port_stats->roce_drops_payload_len;
486         }
487 }
488
489 static void populate_lancer_stats(struct be_adapter *adapter)
490 {
491
492         struct be_drv_stats *drvs = &adapter->drv_stats;
493         struct lancer_pport_stats *pport_stats = pport_stats_from_cmd(adapter);
494
495         be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
496         drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
497         drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
498         drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
499         drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
500         drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
501         drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
502         drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
503         drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
504         drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
505         drvs->rx_dropped_tcp_length =
506                                 pport_stats->rx_dropped_invalid_tcp_length;
507         drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
508         drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
509         drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
510         drvs->rx_dropped_header_too_small =
511                                 pport_stats->rx_dropped_header_too_small;
512         drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
513         drvs->rx_address_filtered =
514                                         pport_stats->rx_address_filtered +
515                                         pport_stats->rx_vlan_filtered;
516         drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
517         drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
518         drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
519         drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
520         drvs->jabber_events = pport_stats->rx_jabbers;
521         drvs->forwarded_packets = pport_stats->num_forwards_lo;
522         drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
523         drvs->rx_drops_too_many_frags =
524                                 pport_stats->rx_drops_too_many_frags_lo;
525 }
526
527 static void accumulate_16bit_val(u32 *acc, u16 val)
528 {
529 #define lo(x)                   (x & 0xFFFF)
530 #define hi(x)                   (x & 0xFFFF0000)
531         bool wrapped = val < lo(*acc);
532         u32 newacc = hi(*acc) + val;
533
534         if (wrapped)
535                 newacc += 65536;
536         ACCESS_ONCE(*acc) = newacc;
537 }
538
539 static void populate_erx_stats(struct be_adapter *adapter,
540                                struct be_rx_obj *rxo, u32 erx_stat)
541 {
542         if (!BEx_chip(adapter))
543                 rx_stats(rxo)->rx_drops_no_frags = erx_stat;
544         else
545                 /* below erx HW counter can actually wrap around after
546                  * 65535. Driver accumulates a 32-bit value
547                  */
548                 accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
549                                      (u16)erx_stat);
550 }
551
552 void be_parse_stats(struct be_adapter *adapter)
553 {
554         struct be_erx_stats_v2 *erx = be_erx_stats_from_cmd(adapter);
555         struct be_rx_obj *rxo;
556         int i;
557         u32 erx_stat;
558
559         if (lancer_chip(adapter)) {
560                 populate_lancer_stats(adapter);
561         } else {
562                 if (BE2_chip(adapter))
563                         populate_be_v0_stats(adapter);
564                 else if (BE3_chip(adapter))
565                         /* for BE3 */
566                         populate_be_v1_stats(adapter);
567                 else
568                         populate_be_v2_stats(adapter);
569
570                 /* erx_v2 is longer than v0, v1. use v2 for v0, v1 access */
571                 for_all_rx_queues(adapter, rxo, i) {
572                         erx_stat = erx->rx_drops_no_fragments[rxo->q.id];
573                         populate_erx_stats(adapter, rxo, erx_stat);
574                 }
575         }
576 }
577
578 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
579                                                 struct rtnl_link_stats64 *stats)
580 {
581         struct be_adapter *adapter = netdev_priv(netdev);
582         struct be_drv_stats *drvs = &adapter->drv_stats;
583         struct be_rx_obj *rxo;
584         struct be_tx_obj *txo;
585         u64 pkts, bytes;
586         unsigned int start;
587         int i;
588
589         for_all_rx_queues(adapter, rxo, i) {
590                 const struct be_rx_stats *rx_stats = rx_stats(rxo);
591                 do {
592                         start = u64_stats_fetch_begin_irq(&rx_stats->sync);
593                         pkts = rx_stats(rxo)->rx_pkts;
594                         bytes = rx_stats(rxo)->rx_bytes;
595                 } while (u64_stats_fetch_retry_irq(&rx_stats->sync, start));
596                 stats->rx_packets += pkts;
597                 stats->rx_bytes += bytes;
598                 stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
599                 stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
600                                         rx_stats(rxo)->rx_drops_no_frags;
601         }
602
603         for_all_tx_queues(adapter, txo, i) {
604                 const struct be_tx_stats *tx_stats = tx_stats(txo);
605                 do {
606                         start = u64_stats_fetch_begin_irq(&tx_stats->sync);
607                         pkts = tx_stats(txo)->tx_pkts;
608                         bytes = tx_stats(txo)->tx_bytes;
609                 } while (u64_stats_fetch_retry_irq(&tx_stats->sync, start));
610                 stats->tx_packets += pkts;
611                 stats->tx_bytes += bytes;
612         }
613
614         /* bad pkts received */
615         stats->rx_errors = drvs->rx_crc_errors +
616                 drvs->rx_alignment_symbol_errors +
617                 drvs->rx_in_range_errors +
618                 drvs->rx_out_range_errors +
619                 drvs->rx_frame_too_long +
620                 drvs->rx_dropped_too_small +
621                 drvs->rx_dropped_too_short +
622                 drvs->rx_dropped_header_too_small +
623                 drvs->rx_dropped_tcp_length +
624                 drvs->rx_dropped_runt;
625
626         /* detailed rx errors */
627         stats->rx_length_errors = drvs->rx_in_range_errors +
628                 drvs->rx_out_range_errors +
629                 drvs->rx_frame_too_long;
630
631         stats->rx_crc_errors = drvs->rx_crc_errors;
632
633         /* frame alignment errors */
634         stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
635
636         /* receiver fifo overrun */
637         /* drops_no_pbuf is no per i/f, it's per BE card */
638         stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
639                                 drvs->rx_input_fifo_overflow_drop +
640                                 drvs->rx_drops_no_pbuf;
641         return stats;
642 }
643
644 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
645 {
646         struct net_device *netdev = adapter->netdev;
647
648         if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
649                 netif_carrier_off(netdev);
650                 adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
651         }
652
653         if (link_status)
654                 netif_carrier_on(netdev);
655         else
656                 netif_carrier_off(netdev);
657 }
658
659 static void be_tx_stats_update(struct be_tx_obj *txo,
660                                u32 wrb_cnt, u32 copied, u32 gso_segs,
661                                bool stopped)
662 {
663         struct be_tx_stats *stats = tx_stats(txo);
664
665         u64_stats_update_begin(&stats->sync);
666         stats->tx_reqs++;
667         stats->tx_wrbs += wrb_cnt;
668         stats->tx_bytes += copied;
669         stats->tx_pkts += (gso_segs ? gso_segs : 1);
670         if (stopped)
671                 stats->tx_stops++;
672         u64_stats_update_end(&stats->sync);
673 }
674
675 /* Determine number of WRB entries needed to xmit data in an skb */
676 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
677                            bool *dummy)
678 {
679         int cnt = (skb->len > skb->data_len);
680
681         cnt += skb_shinfo(skb)->nr_frags;
682
683         /* to account for hdr wrb */
684         cnt++;
685         if (lancer_chip(adapter) || !(cnt & 1)) {
686                 *dummy = false;
687         } else {
688                 /* add a dummy to make it an even num */
689                 cnt++;
690                 *dummy = true;
691         }
692         BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
693         return cnt;
694 }
695
696 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
697 {
698         wrb->frag_pa_hi = upper_32_bits(addr);
699         wrb->frag_pa_lo = addr & 0xFFFFFFFF;
700         wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
701         wrb->rsvd0 = 0;
702 }
703
704 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
705                                      struct sk_buff *skb)
706 {
707         u8 vlan_prio;
708         u16 vlan_tag;
709
710         vlan_tag = vlan_tx_tag_get(skb);
711         vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
712         /* If vlan priority provided by OS is NOT in available bmap */
713         if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
714                 vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
715                                 adapter->recommended_prio;
716
717         return vlan_tag;
718 }
719
720 /* Used only for IP tunnel packets */
721 static u16 skb_inner_ip_proto(struct sk_buff *skb)
722 {
723         return (inner_ip_hdr(skb)->version == 4) ?
724                 inner_ip_hdr(skb)->protocol : inner_ipv6_hdr(skb)->nexthdr;
725 }
726
727 static u16 skb_ip_proto(struct sk_buff *skb)
728 {
729         return (ip_hdr(skb)->version == 4) ?
730                 ip_hdr(skb)->protocol : ipv6_hdr(skb)->nexthdr;
731 }
732
733 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
734                          struct sk_buff *skb, u32 wrb_cnt, u32 len,
735                          bool skip_hw_vlan)
736 {
737         u16 vlan_tag, proto;
738
739         memset(hdr, 0, sizeof(*hdr));
740
741         AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
742
743         if (skb_is_gso(skb)) {
744                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
745                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
746                         hdr, skb_shinfo(skb)->gso_size);
747                 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
748                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
749         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
750                 if (skb->encapsulation) {
751                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
752                         proto = skb_inner_ip_proto(skb);
753                 } else {
754                         proto = skb_ip_proto(skb);
755                 }
756                 if (proto == IPPROTO_TCP)
757                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
758                 else if (proto == IPPROTO_UDP)
759                         AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
760         }
761
762         if (vlan_tx_tag_present(skb)) {
763                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
764                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
765                 AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
766         }
767
768         /* To skip HW VLAN tagging: evt = 1, compl = 0 */
769         AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, !skip_hw_vlan);
770         AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
771         AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
772         AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
773 }
774
775 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
776                           bool unmap_single)
777 {
778         dma_addr_t dma;
779
780         be_dws_le_to_cpu(wrb, sizeof(*wrb));
781
782         dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
783         if (wrb->frag_len) {
784                 if (unmap_single)
785                         dma_unmap_single(dev, dma, wrb->frag_len,
786                                          DMA_TO_DEVICE);
787                 else
788                         dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
789         }
790 }
791
792 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
793                         struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb,
794                         bool skip_hw_vlan)
795 {
796         dma_addr_t busaddr;
797         int i, copied = 0;
798         struct device *dev = &adapter->pdev->dev;
799         struct sk_buff *first_skb = skb;
800         struct be_eth_wrb *wrb;
801         struct be_eth_hdr_wrb *hdr;
802         bool map_single = false;
803         u16 map_head;
804
805         hdr = queue_head_node(txq);
806         queue_head_inc(txq);
807         map_head = txq->head;
808
809         if (skb->len > skb->data_len) {
810                 int len = skb_headlen(skb);
811                 busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
812                 if (dma_mapping_error(dev, busaddr))
813                         goto dma_err;
814                 map_single = true;
815                 wrb = queue_head_node(txq);
816                 wrb_fill(wrb, busaddr, len);
817                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
818                 queue_head_inc(txq);
819                 copied += len;
820         }
821
822         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
823                 const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
824                 busaddr = skb_frag_dma_map(dev, frag, 0,
825                                            skb_frag_size(frag), DMA_TO_DEVICE);
826                 if (dma_mapping_error(dev, busaddr))
827                         goto dma_err;
828                 wrb = queue_head_node(txq);
829                 wrb_fill(wrb, busaddr, skb_frag_size(frag));
830                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
831                 queue_head_inc(txq);
832                 copied += skb_frag_size(frag);
833         }
834
835         if (dummy_wrb) {
836                 wrb = queue_head_node(txq);
837                 wrb_fill(wrb, 0, 0);
838                 be_dws_cpu_to_le(wrb, sizeof(*wrb));
839                 queue_head_inc(txq);
840         }
841
842         wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied, skip_hw_vlan);
843         be_dws_cpu_to_le(hdr, sizeof(*hdr));
844
845         return copied;
846 dma_err:
847         txq->head = map_head;
848         while (copied) {
849                 wrb = queue_head_node(txq);
850                 unmap_tx_frag(dev, wrb, map_single);
851                 map_single = false;
852                 copied -= wrb->frag_len;
853                 queue_head_inc(txq);
854         }
855         return 0;
856 }
857
858 static struct sk_buff *be_insert_vlan_in_pkt(struct be_adapter *adapter,
859                                              struct sk_buff *skb,
860                                              bool *skip_hw_vlan)
861 {
862         u16 vlan_tag = 0;
863
864         skb = skb_share_check(skb, GFP_ATOMIC);
865         if (unlikely(!skb))
866                 return skb;
867
868         if (vlan_tx_tag_present(skb))
869                 vlan_tag = be_get_tx_vlan_tag(adapter, skb);
870
871         if (qnq_async_evt_rcvd(adapter) && adapter->pvid) {
872                 if (!vlan_tag)
873                         vlan_tag = adapter->pvid;
874                 /* f/w workaround to set skip_hw_vlan = 1, informs the F/W to
875                  * skip VLAN insertion
876                  */
877                 if (skip_hw_vlan)
878                         *skip_hw_vlan = true;
879         }
880
881         if (vlan_tag) {
882                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
883                 if (unlikely(!skb))
884                         return skb;
885                 skb->vlan_tci = 0;
886         }
887
888         /* Insert the outer VLAN, if any */
889         if (adapter->qnq_vid) {
890                 vlan_tag = adapter->qnq_vid;
891                 skb = __vlan_put_tag(skb, htons(ETH_P_8021Q), vlan_tag);
892                 if (unlikely(!skb))
893                         return skb;
894                 if (skip_hw_vlan)
895                         *skip_hw_vlan = true;
896         }
897
898         return skb;
899 }
900
901 static bool be_ipv6_exthdr_check(struct sk_buff *skb)
902 {
903         struct ethhdr *eh = (struct ethhdr *)skb->data;
904         u16 offset = ETH_HLEN;
905
906         if (eh->h_proto == htons(ETH_P_IPV6)) {
907                 struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + offset);
908
909                 offset += sizeof(struct ipv6hdr);
910                 if (ip6h->nexthdr != NEXTHDR_TCP &&
911                     ip6h->nexthdr != NEXTHDR_UDP) {
912                         struct ipv6_opt_hdr *ehdr =
913                                 (struct ipv6_opt_hdr *) (skb->data + offset);
914
915                         /* offending pkt: 2nd byte following IPv6 hdr is 0xff */
916                         if (ehdr->hdrlen == 0xff)
917                                 return true;
918                 }
919         }
920         return false;
921 }
922
923 static int be_vlan_tag_tx_chk(struct be_adapter *adapter, struct sk_buff *skb)
924 {
925         return vlan_tx_tag_present(skb) || adapter->pvid || adapter->qnq_vid;
926 }
927
928 static int be_ipv6_tx_stall_chk(struct be_adapter *adapter, struct sk_buff *skb)
929 {
930         return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
931 }
932
933 static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
934                                                   struct sk_buff *skb,
935                                                   bool *skip_hw_vlan)
936 {
937         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
938         unsigned int eth_hdr_len;
939         struct iphdr *ip;
940
941         /* For padded packets, BE HW modifies tot_len field in IP header
942          * incorrecly when VLAN tag is inserted by HW.
943          * For padded packets, Lancer computes incorrect checksum.
944          */
945         eth_hdr_len = ntohs(skb->protocol) == ETH_P_8021Q ?
946                                                 VLAN_ETH_HLEN : ETH_HLEN;
947         if (skb->len <= 60 &&
948             (lancer_chip(adapter) || vlan_tx_tag_present(skb)) &&
949             is_ipv4_pkt(skb)) {
950                 ip = (struct iphdr *)ip_hdr(skb);
951                 pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len));
952         }
953
954         /* If vlan tag is already inlined in the packet, skip HW VLAN
955          * tagging in pvid-tagging mode
956          */
957         if (be_pvid_tagging_enabled(adapter) &&
958             veh->h_vlan_proto == htons(ETH_P_8021Q))
959                 *skip_hw_vlan = true;
960
961         /* HW has a bug wherein it will calculate CSUM for VLAN
962          * pkts even though it is disabled.
963          * Manually insert VLAN in pkt.
964          */
965         if (skb->ip_summed != CHECKSUM_PARTIAL &&
966             vlan_tx_tag_present(skb)) {
967                 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
968                 if (unlikely(!skb))
969                         goto err;
970         }
971
972         /* HW may lockup when VLAN HW tagging is requested on
973          * certain ipv6 packets. Drop such pkts if the HW workaround to
974          * skip HW tagging is not enabled by FW.
975          */
976         if (unlikely(be_ipv6_tx_stall_chk(adapter, skb) &&
977             (adapter->pvid || adapter->qnq_vid) &&
978             !qnq_async_evt_rcvd(adapter)))
979                 goto tx_drop;
980
981         /* Manual VLAN tag insertion to prevent:
982          * ASIC lockup when the ASIC inserts VLAN tag into
983          * certain ipv6 packets. Insert VLAN tags in driver,
984          * and set event, completion, vlan bits accordingly
985          * in the Tx WRB.
986          */
987         if (be_ipv6_tx_stall_chk(adapter, skb) &&
988             be_vlan_tag_tx_chk(adapter, skb)) {
989                 skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
990                 if (unlikely(!skb))
991                         goto err;
992         }
993
994         return skb;
995 tx_drop:
996         dev_kfree_skb_any(skb);
997 err:
998         return NULL;
999 }
1000
1001 static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
1002                                            struct sk_buff *skb,
1003                                            bool *skip_hw_vlan)
1004 {
1005         /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
1006          * less may cause a transmit stall on that port. So the work-around is
1007          * to pad short packets (<= 32 bytes) to a 36-byte length.
1008          */
1009         if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
1010                 if (skb_padto(skb, 36))
1011                         return NULL;
1012                 skb->len = 36;
1013         }
1014
1015         if (BEx_chip(adapter) || lancer_chip(adapter)) {
1016                 skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
1017                 if (!skb)
1018                         return NULL;
1019         }
1020
1021         return skb;
1022 }
1023
1024 static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
1025 {
1026         struct be_adapter *adapter = netdev_priv(netdev);
1027         struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
1028         struct be_queue_info *txq = &txo->q;
1029         bool dummy_wrb, stopped = false;
1030         u32 wrb_cnt = 0, copied = 0;
1031         bool skip_hw_vlan = false;
1032         u32 start = txq->head;
1033
1034         skb = be_xmit_workarounds(adapter, skb, &skip_hw_vlan);
1035         if (!skb) {
1036                 tx_stats(txo)->tx_drv_drops++;
1037                 return NETDEV_TX_OK;
1038         }
1039
1040         wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
1041
1042         copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb,
1043                               skip_hw_vlan);
1044         if (copied) {
1045                 int gso_segs = skb_shinfo(skb)->gso_segs;
1046
1047                 /* record the sent skb in the sent_skb table */
1048                 BUG_ON(txo->sent_skb_list[start]);
1049                 txo->sent_skb_list[start] = skb;
1050
1051                 /* Ensure txq has space for the next skb; Else stop the queue
1052                  * *BEFORE* ringing the tx doorbell, so that we serialze the
1053                  * tx compls of the current transmit which'll wake up the queue
1054                  */
1055                 atomic_add(wrb_cnt, &txq->used);
1056                 if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
1057                                                                 txq->len) {
1058                         netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
1059                         stopped = true;
1060                 }
1061
1062                 be_txq_notify(adapter, txo, wrb_cnt);
1063
1064                 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
1065         } else {
1066                 txq->head = start;
1067                 tx_stats(txo)->tx_drv_drops++;
1068                 dev_kfree_skb_any(skb);
1069         }
1070         return NETDEV_TX_OK;
1071 }
1072
1073 static int be_change_mtu(struct net_device *netdev, int new_mtu)
1074 {
1075         struct be_adapter *adapter = netdev_priv(netdev);
1076         if (new_mtu < BE_MIN_MTU ||
1077             new_mtu > (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN))) {
1078                 dev_info(&adapter->pdev->dev,
1079                          "MTU must be between %d and %d bytes\n",
1080                          BE_MIN_MTU,
1081                          (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
1082                 return -EINVAL;
1083         }
1084         dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
1085                  netdev->mtu, new_mtu);
1086         netdev->mtu = new_mtu;
1087         return 0;
1088 }
1089
1090 /*
1091  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
1092  * If the user configures more, place BE in vlan promiscuous mode.
1093  */
1094 static int be_vid_config(struct be_adapter *adapter)
1095 {
1096         u16 vids[BE_NUM_VLANS_SUPPORTED];
1097         u16 num = 0, i = 0;
1098         int status = 0;
1099
1100         /* No need to further configure vids if in promiscuous mode */
1101         if (adapter->promiscuous)
1102                 return 0;
1103
1104         if (adapter->vlans_added > be_max_vlans(adapter))
1105                 goto set_vlan_promisc;
1106
1107         /* Construct VLAN Table to give to HW */
1108         for_each_set_bit(i, adapter->vids, VLAN_N_VID)
1109                 vids[num++] = cpu_to_le16(i);
1110
1111         status = be_cmd_vlan_config(adapter, adapter->if_handle, vids, num);
1112         if (status) {
1113                 /* Set to VLAN promisc mode as setting VLAN filter failed */
1114                 if (addl_status(status) ==
1115                                 MCC_ADDL_STATUS_INSUFFICIENT_RESOURCES)
1116                         goto set_vlan_promisc;
1117                 dev_err(&adapter->pdev->dev,
1118                         "Setting HW VLAN filtering failed.\n");
1119         } else {
1120                 if (adapter->flags & BE_FLAGS_VLAN_PROMISC) {
1121                         /* hw VLAN filtering re-enabled. */
1122                         status = be_cmd_rx_filter(adapter,
1123                                                   BE_FLAGS_VLAN_PROMISC, OFF);
1124                         if (!status) {
1125                                 dev_info(&adapter->pdev->dev,
1126                                          "Disabling VLAN Promiscuous mode.\n");
1127                                 adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
1128                         }
1129                 }
1130         }
1131
1132         return status;
1133
1134 set_vlan_promisc:
1135         if (adapter->flags & BE_FLAGS_VLAN_PROMISC)
1136                 return 0;
1137
1138         status = be_cmd_rx_filter(adapter, BE_FLAGS_VLAN_PROMISC, ON);
1139         if (!status) {
1140                 dev_info(&adapter->pdev->dev, "Enable VLAN Promiscuous mode\n");
1141                 adapter->flags |= BE_FLAGS_VLAN_PROMISC;
1142         } else
1143                 dev_err(&adapter->pdev->dev,
1144                         "Failed to enable VLAN Promiscuous mode.\n");
1145         return status;
1146 }
1147
1148 static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
1149 {
1150         struct be_adapter *adapter = netdev_priv(netdev);
1151         int status = 0;
1152
1153         /* Packets with VID 0 are always received by Lancer by default */
1154         if (lancer_chip(adapter) && vid == 0)
1155                 return status;
1156
1157         if (test_bit(vid, adapter->vids))
1158                 return status;
1159
1160         set_bit(vid, adapter->vids);
1161         adapter->vlans_added++;
1162
1163         status = be_vid_config(adapter);
1164         if (status) {
1165                 adapter->vlans_added--;
1166                 clear_bit(vid, adapter->vids);
1167         }
1168
1169         return status;
1170 }
1171
1172 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
1173 {
1174         struct be_adapter *adapter = netdev_priv(netdev);
1175
1176         /* Packets with VID 0 are always received by Lancer by default */
1177         if (lancer_chip(adapter) && vid == 0)
1178                 return 0;
1179
1180         clear_bit(vid, adapter->vids);
1181         adapter->vlans_added--;
1182
1183         return be_vid_config(adapter);
1184 }
1185
1186 static void be_clear_promisc(struct be_adapter *adapter)
1187 {
1188         adapter->promiscuous = false;
1189         adapter->flags &= ~(BE_FLAGS_VLAN_PROMISC | BE_FLAGS_MCAST_PROMISC);
1190
1191         be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
1192 }
1193
1194 static void be_set_rx_mode(struct net_device *netdev)
1195 {
1196         struct be_adapter *adapter = netdev_priv(netdev);
1197         int status;
1198
1199         if (netdev->flags & IFF_PROMISC) {
1200                 be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1201                 adapter->promiscuous = true;
1202                 goto done;
1203         }
1204
1205         /* BE was previously in promiscuous mode; disable it */
1206         if (adapter->promiscuous) {
1207                 be_clear_promisc(adapter);
1208                 if (adapter->vlans_added)
1209                         be_vid_config(adapter);
1210         }
1211
1212         /* Enable multicast promisc if num configured exceeds what we support */
1213         if (netdev->flags & IFF_ALLMULTI ||
1214             netdev_mc_count(netdev) > be_max_mc(adapter))
1215                 goto set_mcast_promisc;
1216
1217         if (netdev_uc_count(netdev) != adapter->uc_macs) {
1218                 struct netdev_hw_addr *ha;
1219                 int i = 1; /* First slot is claimed by the Primary MAC */
1220
1221                 for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
1222                         be_cmd_pmac_del(adapter, adapter->if_handle,
1223                                         adapter->pmac_id[i], 0);
1224                 }
1225
1226                 if (netdev_uc_count(netdev) > be_max_uc(adapter)) {
1227                         be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
1228                         adapter->promiscuous = true;
1229                         goto done;
1230                 }
1231
1232                 netdev_for_each_uc_addr(ha, adapter->netdev) {
1233                         adapter->uc_macs++; /* First slot is for Primary MAC */
1234                         be_cmd_pmac_add(adapter, (u8 *)ha->addr,
1235                                         adapter->if_handle,
1236                                         &adapter->pmac_id[adapter->uc_macs], 0);
1237                 }
1238         }
1239
1240         status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
1241         if (!status) {
1242                 if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1243                         adapter->flags &= ~BE_FLAGS_MCAST_PROMISC;
1244                 goto done;
1245         }
1246
1247 set_mcast_promisc:
1248         if (adapter->flags & BE_FLAGS_MCAST_PROMISC)
1249                 return;
1250
1251         /* Set to MCAST promisc mode if setting MULTICAST address fails
1252          * or if num configured exceeds what we support
1253          */
1254         status = be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
1255         if (!status)
1256                 adapter->flags |= BE_FLAGS_MCAST_PROMISC;
1257 done:
1258         return;
1259 }
1260
1261 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1262 {
1263         struct be_adapter *adapter = netdev_priv(netdev);
1264         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1265         int status;
1266
1267         if (!sriov_enabled(adapter))
1268                 return -EPERM;
1269
1270         if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
1271                 return -EINVAL;
1272
1273         /* Proceed further only if user provided MAC is different
1274          * from active MAC
1275          */
1276         if (ether_addr_equal(mac, vf_cfg->mac_addr))
1277                 return 0;
1278
1279         if (BEx_chip(adapter)) {
1280                 be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
1281                                 vf + 1);
1282
1283                 status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
1284                                          &vf_cfg->pmac_id, vf + 1);
1285         } else {
1286                 status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
1287                                         vf + 1);
1288         }
1289
1290         if (status) {
1291                 dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed: %#x",
1292                         mac, vf, status);
1293                 return be_cmd_status(status);
1294         }
1295
1296         ether_addr_copy(vf_cfg->mac_addr, mac);
1297
1298         return 0;
1299 }
1300
1301 static int be_get_vf_config(struct net_device *netdev, int vf,
1302                             struct ifla_vf_info *vi)
1303 {
1304         struct be_adapter *adapter = netdev_priv(netdev);
1305         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1306
1307         if (!sriov_enabled(adapter))
1308                 return -EPERM;
1309
1310         if (vf >= adapter->num_vfs)
1311                 return -EINVAL;
1312
1313         vi->vf = vf;
1314         vi->max_tx_rate = vf_cfg->tx_rate;
1315         vi->min_tx_rate = 0;
1316         vi->vlan = vf_cfg->vlan_tag & VLAN_VID_MASK;
1317         vi->qos = vf_cfg->vlan_tag >> VLAN_PRIO_SHIFT;
1318         memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
1319         vi->linkstate = adapter->vf_cfg[vf].plink_tracking;
1320
1321         return 0;
1322 }
1323
1324 static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
1325 {
1326         struct be_adapter *adapter = netdev_priv(netdev);
1327         struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
1328         int status = 0;
1329
1330         if (!sriov_enabled(adapter))
1331                 return -EPERM;
1332
1333         if (vf >= adapter->num_vfs || vlan > 4095 || qos > 7)
1334                 return -EINVAL;
1335
1336         if (vlan || qos) {
1337                 vlan |= qos << VLAN_PRIO_SHIFT;
1338                 if (vf_cfg->vlan_tag != vlan)
1339                         status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
1340                                                        vf_cfg->if_handle, 0);
1341         } else {
1342                 /* Reset Transparent Vlan Tagging. */
1343                 status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
1344                                                vf + 1, vf_cfg->if_handle, 0);
1345         }
1346
1347         if (status) {
1348                 dev_err(&adapter->pdev->dev,
1349                         "VLAN %d config on VF %d failed : %#x\n", vlan,
1350                         vf, status);
1351                 return be_cmd_status(status);
1352         }
1353
1354         vf_cfg->vlan_tag = vlan;
1355
1356         return 0;
1357 }
1358
1359 static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
1360                              int min_tx_rate, int max_tx_rate)
1361 {
1362         struct be_adapter *adapter = netdev_priv(netdev);
1363         struct device *dev = &adapter->pdev->dev;
1364         int percent_rate, status = 0;
1365         u16 link_speed = 0;
1366         u8 link_status;
1367
1368         if (!sriov_enabled(adapter))
1369                 return -EPERM;
1370
1371         if (vf >= adapter->num_vfs)
1372                 return -EINVAL;
1373
1374         if (min_tx_rate)
1375                 return -EINVAL;
1376
1377         if (!max_tx_rate)
1378                 goto config_qos;
1379
1380         status = be_cmd_link_status_query(adapter, &link_speed,
1381                                           &link_status, 0);
1382         if (status)
1383                 goto err;
1384
1385         if (!link_status) {
1386                 dev_err(dev, "TX-rate setting not allowed when link is down\n");
1387                 status = -ENETDOWN;
1388                 goto err;
1389         }
1390
1391         if (max_tx_rate < 100 || max_tx_rate > link_speed) {
1392                 dev_err(dev, "TX-rate must be between 100 and %d Mbps\n",
1393                         link_speed);
1394                 status = -EINVAL;
1395                 goto err;
1396         }
1397
1398         /* On Skyhawk the QOS setting must be done only as a % value */
1399         percent_rate = link_speed / 100;
1400         if (skyhawk_chip(adapter) && (max_tx_rate % percent_rate)) {
1401                 dev_err(dev, "TX-rate must be a multiple of %d Mbps\n",
1402                         percent_rate);
1403                 status = -EINVAL;
1404                 goto err;
1405         }
1406
1407 config_qos:
1408         status = be_cmd_config_qos(adapter, max_tx_rate, link_speed, vf + 1);
1409         if (status)
1410                 goto err;
1411
1412         adapter->vf_cfg[vf].tx_rate = max_tx_rate;
1413         return 0;
1414
1415 err:
1416         dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
1417                 max_tx_rate, vf);
1418         return be_cmd_status(status);
1419 }
1420 static int be_set_vf_link_state(struct net_device *netdev, int vf,
1421                                 int link_state)
1422 {
1423         struct be_adapter *adapter = netdev_priv(netdev);
1424         int status;
1425
1426         if (!sriov_enabled(adapter))
1427                 return -EPERM;
1428
1429         if (vf >= adapter->num_vfs)
1430                 return -EINVAL;
1431
1432         status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
1433         if (status) {
1434                 dev_err(&adapter->pdev->dev,
1435                         "Link state change on VF %d failed: %#x\n", vf, status);
1436                 return be_cmd_status(status);
1437         }
1438
1439         adapter->vf_cfg[vf].plink_tracking = link_state;
1440
1441         return 0;
1442 }
1443
1444 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
1445                           ulong now)
1446 {
1447         aic->rx_pkts_prev = rx_pkts;
1448         aic->tx_reqs_prev = tx_pkts;
1449         aic->jiffies = now;
1450 }
1451
1452 static void be_eqd_update(struct be_adapter *adapter)
1453 {
1454         struct be_set_eqd set_eqd[MAX_EVT_QS];
1455         int eqd, i, num = 0, start;
1456         struct be_aic_obj *aic;
1457         struct be_eq_obj *eqo;
1458         struct be_rx_obj *rxo;
1459         struct be_tx_obj *txo;
1460         u64 rx_pkts, tx_pkts;
1461         ulong now;
1462         u32 pps, delta;
1463
1464         for_all_evt_queues(adapter, eqo, i) {
1465                 aic = &adapter->aic_obj[eqo->idx];
1466                 if (!aic->enable) {
1467                         if (aic->jiffies)
1468                                 aic->jiffies = 0;
1469                         eqd = aic->et_eqd;
1470                         goto modify_eqd;
1471                 }
1472
1473                 rxo = &adapter->rx_obj[eqo->idx];
1474                 do {
1475                         start = u64_stats_fetch_begin_irq(&rxo->stats.sync);
1476                         rx_pkts = rxo->stats.rx_pkts;
1477                 } while (u64_stats_fetch_retry_irq(&rxo->stats.sync, start));
1478
1479                 txo = &adapter->tx_obj[eqo->idx];
1480                 do {
1481                         start = u64_stats_fetch_begin_irq(&txo->stats.sync);
1482                         tx_pkts = txo->stats.tx_reqs;
1483                 } while (u64_stats_fetch_retry_irq(&txo->stats.sync, start));
1484
1485
1486                 /* Skip, if wrapped around or first calculation */
1487                 now = jiffies;
1488                 if (!aic->jiffies || time_before(now, aic->jiffies) ||
1489                     rx_pkts < aic->rx_pkts_prev ||
1490                     tx_pkts < aic->tx_reqs_prev) {
1491                         be_aic_update(aic, rx_pkts, tx_pkts, now);
1492                         continue;
1493                 }
1494
1495                 delta = jiffies_to_msecs(now - aic->jiffies);
1496                 pps = (((u32)(rx_pkts - aic->rx_pkts_prev) * 1000) / delta) +
1497                         (((u32)(tx_pkts - aic->tx_reqs_prev) * 1000) / delta);
1498                 eqd = (pps / 15000) << 2;
1499
1500                 if (eqd < 8)
1501                         eqd = 0;
1502                 eqd = min_t(u32, eqd, aic->max_eqd);
1503                 eqd = max_t(u32, eqd, aic->min_eqd);
1504
1505                 be_aic_update(aic, rx_pkts, tx_pkts, now);
1506 modify_eqd:
1507                 if (eqd != aic->prev_eqd) {
1508                         set_eqd[num].delay_multiplier = (eqd * 65)/100;
1509                         set_eqd[num].eq_id = eqo->q.id;
1510                         aic->prev_eqd = eqd;
1511                         num++;
1512                 }
1513         }
1514
1515         if (num)
1516                 be_cmd_modify_eqd(adapter, set_eqd, num);
1517 }
1518
1519 static void be_rx_stats_update(struct be_rx_obj *rxo,
1520                                struct be_rx_compl_info *rxcp)
1521 {
1522         struct be_rx_stats *stats = rx_stats(rxo);
1523
1524         u64_stats_update_begin(&stats->sync);
1525         stats->rx_compl++;
1526         stats->rx_bytes += rxcp->pkt_size;
1527         stats->rx_pkts++;
1528         if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1529                 stats->rx_mcast_pkts++;
1530         if (rxcp->err)
1531                 stats->rx_compl_err++;
1532         u64_stats_update_end(&stats->sync);
1533 }
1534
1535 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1536 {
1537         /* L4 checksum is not reliable for non TCP/UDP packets.
1538          * Also ignore ipcksm for ipv6 pkts
1539          */
1540         return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1541                 (rxcp->ip_csum || rxcp->ipv6) && !rxcp->err;
1542 }
1543
1544 static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo)
1545 {
1546         struct be_adapter *adapter = rxo->adapter;
1547         struct be_rx_page_info *rx_page_info;
1548         struct be_queue_info *rxq = &rxo->q;
1549         u16 frag_idx = rxq->tail;
1550
1551         rx_page_info = &rxo->page_info_tbl[frag_idx];
1552         BUG_ON(!rx_page_info->page);
1553
1554         if (rx_page_info->last_frag) {
1555                 dma_unmap_page(&adapter->pdev->dev,
1556                                dma_unmap_addr(rx_page_info, bus),
1557                                adapter->big_page_size, DMA_FROM_DEVICE);
1558                 rx_page_info->last_frag = false;
1559         } else {
1560                 dma_sync_single_for_cpu(&adapter->pdev->dev,
1561                                         dma_unmap_addr(rx_page_info, bus),
1562                                         rx_frag_size, DMA_FROM_DEVICE);
1563         }
1564
1565         queue_tail_inc(rxq);
1566         atomic_dec(&rxq->used);
1567         return rx_page_info;
1568 }
1569
1570 /* Throwaway the data in the Rx completion */
1571 static void be_rx_compl_discard(struct be_rx_obj *rxo,
1572                                 struct be_rx_compl_info *rxcp)
1573 {
1574         struct be_rx_page_info *page_info;
1575         u16 i, num_rcvd = rxcp->num_rcvd;
1576
1577         for (i = 0; i < num_rcvd; i++) {
1578                 page_info = get_rx_page_info(rxo);
1579                 put_page(page_info->page);
1580                 memset(page_info, 0, sizeof(*page_info));
1581         }
1582 }
1583
1584 /*
1585  * skb_fill_rx_data forms a complete skb for an ether frame
1586  * indicated by rxcp.
1587  */
1588 static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
1589                              struct be_rx_compl_info *rxcp)
1590 {
1591         struct be_rx_page_info *page_info;
1592         u16 i, j;
1593         u16 hdr_len, curr_frag_len, remaining;
1594         u8 *start;
1595
1596         page_info = get_rx_page_info(rxo);
1597         start = page_address(page_info->page) + page_info->page_offset;
1598         prefetch(start);
1599
1600         /* Copy data in the first descriptor of this completion */
1601         curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1602
1603         skb->len = curr_frag_len;
1604         if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1605                 memcpy(skb->data, start, curr_frag_len);
1606                 /* Complete packet has now been moved to data */
1607                 put_page(page_info->page);
1608                 skb->data_len = 0;
1609                 skb->tail += curr_frag_len;
1610         } else {
1611                 hdr_len = ETH_HLEN;
1612                 memcpy(skb->data, start, hdr_len);
1613                 skb_shinfo(skb)->nr_frags = 1;
1614                 skb_frag_set_page(skb, 0, page_info->page);
1615                 skb_shinfo(skb)->frags[0].page_offset =
1616                                         page_info->page_offset + hdr_len;
1617                 skb_frag_size_set(&skb_shinfo(skb)->frags[0],
1618                                   curr_frag_len - hdr_len);
1619                 skb->data_len = curr_frag_len - hdr_len;
1620                 skb->truesize += rx_frag_size;
1621                 skb->tail += hdr_len;
1622         }
1623         page_info->page = NULL;
1624
1625         if (rxcp->pkt_size <= rx_frag_size) {
1626                 BUG_ON(rxcp->num_rcvd != 1);
1627                 return;
1628         }
1629
1630         /* More frags present for this completion */
1631         remaining = rxcp->pkt_size - curr_frag_len;
1632         for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1633                 page_info = get_rx_page_info(rxo);
1634                 curr_frag_len = min(remaining, rx_frag_size);
1635
1636                 /* Coalesce all frags from the same physical page in one slot */
1637                 if (page_info->page_offset == 0) {
1638                         /* Fresh page */
1639                         j++;
1640                         skb_frag_set_page(skb, j, page_info->page);
1641                         skb_shinfo(skb)->frags[j].page_offset =
1642                                                         page_info->page_offset;
1643                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1644                         skb_shinfo(skb)->nr_frags++;
1645                 } else {
1646                         put_page(page_info->page);
1647                 }
1648
1649                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1650                 skb->len += curr_frag_len;
1651                 skb->data_len += curr_frag_len;
1652                 skb->truesize += rx_frag_size;
1653                 remaining -= curr_frag_len;
1654                 page_info->page = NULL;
1655         }
1656         BUG_ON(j > MAX_SKB_FRAGS);
1657 }
1658
1659 /* Process the RX completion indicated by rxcp when GRO is disabled */
1660 static void be_rx_compl_process(struct be_rx_obj *rxo, struct napi_struct *napi,
1661                                 struct be_rx_compl_info *rxcp)
1662 {
1663         struct be_adapter *adapter = rxo->adapter;
1664         struct net_device *netdev = adapter->netdev;
1665         struct sk_buff *skb;
1666
1667         skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
1668         if (unlikely(!skb)) {
1669                 rx_stats(rxo)->rx_drops_no_skbs++;
1670                 be_rx_compl_discard(rxo, rxcp);
1671                 return;
1672         }
1673
1674         skb_fill_rx_data(rxo, skb, rxcp);
1675
1676         if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1677                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1678         else
1679                 skb_checksum_none_assert(skb);
1680
1681         skb->protocol = eth_type_trans(skb, netdev);
1682         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1683         if (netdev->features & NETIF_F_RXHASH)
1684                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1685
1686         skb->encapsulation = rxcp->tunneled;
1687         skb_mark_napi_id(skb, napi);
1688
1689         if (rxcp->vlanf)
1690                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1691
1692         netif_receive_skb(skb);
1693 }
1694
1695 /* Process the RX completion indicated by rxcp when GRO is enabled */
1696 static void be_rx_compl_process_gro(struct be_rx_obj *rxo,
1697                                     struct napi_struct *napi,
1698                                     struct be_rx_compl_info *rxcp)
1699 {
1700         struct be_adapter *adapter = rxo->adapter;
1701         struct be_rx_page_info *page_info;
1702         struct sk_buff *skb = NULL;
1703         u16 remaining, curr_frag_len;
1704         u16 i, j;
1705
1706         skb = napi_get_frags(napi);
1707         if (!skb) {
1708                 be_rx_compl_discard(rxo, rxcp);
1709                 return;
1710         }
1711
1712         remaining = rxcp->pkt_size;
1713         for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1714                 page_info = get_rx_page_info(rxo);
1715
1716                 curr_frag_len = min(remaining, rx_frag_size);
1717
1718                 /* Coalesce all frags from the same physical page in one slot */
1719                 if (i == 0 || page_info->page_offset == 0) {
1720                         /* First frag or Fresh page */
1721                         j++;
1722                         skb_frag_set_page(skb, j, page_info->page);
1723                         skb_shinfo(skb)->frags[j].page_offset =
1724                                                         page_info->page_offset;
1725                         skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1726                 } else {
1727                         put_page(page_info->page);
1728                 }
1729                 skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1730                 skb->truesize += rx_frag_size;
1731                 remaining -= curr_frag_len;
1732                 memset(page_info, 0, sizeof(*page_info));
1733         }
1734         BUG_ON(j > MAX_SKB_FRAGS);
1735
1736         skb_shinfo(skb)->nr_frags = j + 1;
1737         skb->len = rxcp->pkt_size;
1738         skb->data_len = rxcp->pkt_size;
1739         skb->ip_summed = CHECKSUM_UNNECESSARY;
1740         skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
1741         if (adapter->netdev->features & NETIF_F_RXHASH)
1742                 skb_set_hash(skb, rxcp->rss_hash, PKT_HASH_TYPE_L3);
1743
1744         skb->encapsulation = rxcp->tunneled;
1745         skb_mark_napi_id(skb, napi);
1746
1747         if (rxcp->vlanf)
1748                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), rxcp->vlan_tag);
1749
1750         napi_gro_frags(napi);
1751 }
1752
1753 static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
1754                                  struct be_rx_compl_info *rxcp)
1755 {
1756         rxcp->pkt_size =
1757                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1758         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1759         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1760         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1761         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1762         rxcp->ip_csum =
1763                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1764         rxcp->l4_csum =
1765                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1766         rxcp->ipv6 =
1767                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1768         rxcp->num_rcvd =
1769                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1770         rxcp->pkt_type =
1771                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1772         rxcp->rss_hash =
1773                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1774         if (rxcp->vlanf) {
1775                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, qnq,
1776                                           compl);
1777                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1,
1778                                                vlan_tag, compl);
1779         }
1780         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1781         rxcp->tunneled =
1782                 AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tunneled, compl);
1783 }
1784
1785 static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
1786                                  struct be_rx_compl_info *rxcp)
1787 {
1788         rxcp->pkt_size =
1789                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1790         rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1791         rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1792         rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1793         rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1794         rxcp->ip_csum =
1795                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1796         rxcp->l4_csum =
1797                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1798         rxcp->ipv6 =
1799                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1800         rxcp->num_rcvd =
1801                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1802         rxcp->pkt_type =
1803                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1804         rxcp->rss_hash =
1805                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1806         if (rxcp->vlanf) {
1807                 rxcp->qnq = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, qnq,
1808                                           compl);
1809                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1810                                                vlan_tag, compl);
1811         }
1812         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1813         rxcp->ip_frag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0,
1814                                       ip_frag, compl);
1815 }
1816
1817 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1818 {
1819         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1820         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1821         struct be_adapter *adapter = rxo->adapter;
1822
1823         /* For checking the valid bit it is Ok to use either definition as the
1824          * valid bit is at the same position in both v0 and v1 Rx compl */
1825         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1826                 return NULL;
1827
1828         rmb();
1829         be_dws_le_to_cpu(compl, sizeof(*compl));
1830
1831         if (adapter->be3_native)
1832                 be_parse_rx_compl_v1(compl, rxcp);
1833         else
1834                 be_parse_rx_compl_v0(compl, rxcp);
1835
1836         if (rxcp->ip_frag)
1837                 rxcp->l4_csum = 0;
1838
1839         if (rxcp->vlanf) {
1840                 /* In QNQ modes, if qnq bit is not set, then the packet was
1841                  * tagged only with the transparent outer vlan-tag and must
1842                  * not be treated as a vlan packet by host
1843                  */
1844                 if (be_is_qnq_mode(adapter) && !rxcp->qnq)
1845                         rxcp->vlanf = 0;
1846
1847                 if (!lancer_chip(adapter))
1848                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1849
1850                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1851                     !test_bit(rxcp->vlan_tag, adapter->vids))
1852                         rxcp->vlanf = 0;
1853         }
1854
1855         /* As the compl has been parsed, reset it; we wont touch it again */
1856         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1857
1858         queue_tail_inc(&rxo->cq);
1859         return rxcp;
1860 }
1861
1862 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1863 {
1864         u32 order = get_order(size);
1865
1866         if (order > 0)
1867                 gfp |= __GFP_COMP;
1868         return  alloc_pages(gfp, order);
1869 }
1870
1871 /*
1872  * Allocate a page, split it to fragments of size rx_frag_size and post as
1873  * receive buffers to BE
1874  */
1875 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1876 {
1877         struct be_adapter *adapter = rxo->adapter;
1878         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1879         struct be_queue_info *rxq = &rxo->q;
1880         struct page *pagep = NULL;
1881         struct device *dev = &adapter->pdev->dev;
1882         struct be_eth_rx_d *rxd;
1883         u64 page_dmaaddr = 0, frag_dmaaddr;
1884         u32 posted, page_offset = 0;
1885
1886         page_info = &rxo->page_info_tbl[rxq->head];
1887         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1888                 if (!pagep) {
1889                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1890                         if (unlikely(!pagep)) {
1891                                 rx_stats(rxo)->rx_post_fail++;
1892                                 break;
1893                         }
1894                         page_dmaaddr = dma_map_page(dev, pagep, 0,
1895                                                     adapter->big_page_size,
1896                                                     DMA_FROM_DEVICE);
1897                         if (dma_mapping_error(dev, page_dmaaddr)) {
1898                                 put_page(pagep);
1899                                 pagep = NULL;
1900                                 rx_stats(rxo)->rx_post_fail++;
1901                                 break;
1902                         }
1903                         page_offset = 0;
1904                 } else {
1905                         get_page(pagep);
1906                         page_offset += rx_frag_size;
1907                 }
1908                 page_info->page_offset = page_offset;
1909                 page_info->page = pagep;
1910
1911                 rxd = queue_head_node(rxq);
1912                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1913                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1914                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1915
1916                 /* Any space left in the current big page for another frag? */
1917                 if ((page_offset + rx_frag_size + rx_frag_size) >
1918                                         adapter->big_page_size) {
1919                         pagep = NULL;
1920                         page_info->last_frag = true;
1921                         dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1922                 } else {
1923                         dma_unmap_addr_set(page_info, bus, frag_dmaaddr);
1924                 }
1925
1926                 prev_page_info = page_info;
1927                 queue_head_inc(rxq);
1928                 page_info = &rxo->page_info_tbl[rxq->head];
1929         }
1930
1931         /* Mark the last frag of a page when we break out of the above loop
1932          * with no more slots available in the RXQ
1933          */
1934         if (pagep) {
1935                 prev_page_info->last_frag = true;
1936                 dma_unmap_addr_set(prev_page_info, bus, page_dmaaddr);
1937         }
1938
1939         if (posted) {
1940                 atomic_add(posted, &rxq->used);
1941                 if (rxo->rx_post_starved)
1942                         rxo->rx_post_starved = false;
1943                 be_rxq_notify(adapter, rxq->id, posted);
1944         } else if (atomic_read(&rxq->used) == 0) {
1945                 /* Let be_worker replenish when memory is available */
1946                 rxo->rx_post_starved = true;
1947         }
1948 }
1949
1950 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1951 {
1952         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1953
1954         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1955                 return NULL;
1956
1957         rmb();
1958         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1959
1960         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1961
1962         queue_tail_inc(tx_cq);
1963         return txcp;
1964 }
1965
1966 static u16 be_tx_compl_process(struct be_adapter *adapter,
1967                                struct be_tx_obj *txo, u16 last_index)
1968 {
1969         struct be_queue_info *txq = &txo->q;
1970         struct be_eth_wrb *wrb;
1971         struct sk_buff **sent_skbs = txo->sent_skb_list;
1972         struct sk_buff *sent_skb;
1973         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1974         bool unmap_skb_hdr = true;
1975
1976         sent_skb = sent_skbs[txq->tail];
1977         BUG_ON(!sent_skb);
1978         sent_skbs[txq->tail] = NULL;
1979
1980         /* skip header wrb */
1981         queue_tail_inc(txq);
1982
1983         do {
1984                 cur_index = txq->tail;
1985                 wrb = queue_tail_node(txq);
1986                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1987                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1988                 unmap_skb_hdr = false;
1989
1990                 num_wrbs++;
1991                 queue_tail_inc(txq);
1992         } while (cur_index != last_index);
1993
1994         dev_kfree_skb_any(sent_skb);
1995         return num_wrbs;
1996 }
1997
1998 /* Return the number of events in the event queue */
1999 static inline int events_get(struct be_eq_obj *eqo)
2000 {
2001         struct be_eq_entry *eqe;
2002         int num = 0;
2003
2004         do {
2005                 eqe = queue_tail_node(&eqo->q);
2006                 if (eqe->evt == 0)
2007                         break;
2008
2009                 rmb();
2010                 eqe->evt = 0;
2011                 num++;
2012                 queue_tail_inc(&eqo->q);
2013         } while (true);
2014
2015         return num;
2016 }
2017
2018 /* Leaves the EQ is disarmed state */
2019 static void be_eq_clean(struct be_eq_obj *eqo)
2020 {
2021         int num = events_get(eqo);
2022
2023         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
2024 }
2025
2026 static void be_rx_cq_clean(struct be_rx_obj *rxo)
2027 {
2028         struct be_rx_page_info *page_info;
2029         struct be_queue_info *rxq = &rxo->q;
2030         struct be_queue_info *rx_cq = &rxo->cq;
2031         struct be_rx_compl_info *rxcp;
2032         struct be_adapter *adapter = rxo->adapter;
2033         int flush_wait = 0;
2034
2035         /* Consume pending rx completions.
2036          * Wait for the flush completion (identified by zero num_rcvd)
2037          * to arrive. Notify CQ even when there are no more CQ entries
2038          * for HW to flush partially coalesced CQ entries.
2039          * In Lancer, there is no need to wait for flush compl.
2040          */
2041         for (;;) {
2042                 rxcp = be_rx_compl_get(rxo);
2043                 if (!rxcp) {
2044                         if (lancer_chip(adapter))
2045                                 break;
2046
2047                         if (flush_wait++ > 10 || be_hw_error(adapter)) {
2048                                 dev_warn(&adapter->pdev->dev,
2049                                          "did not receive flush compl\n");
2050                                 break;
2051                         }
2052                         be_cq_notify(adapter, rx_cq->id, true, 0);
2053                         mdelay(1);
2054                 } else {
2055                         be_rx_compl_discard(rxo, rxcp);
2056                         be_cq_notify(adapter, rx_cq->id, false, 1);
2057                         if (rxcp->num_rcvd == 0)
2058                                 break;
2059                 }
2060         }
2061
2062         /* After cleanup, leave the CQ in unarmed state */
2063         be_cq_notify(adapter, rx_cq->id, false, 0);
2064
2065         /* Then free posted rx buffers that were not used */
2066         while (atomic_read(&rxq->used) > 0) {
2067                 page_info = get_rx_page_info(rxo);
2068                 put_page(page_info->page);
2069                 memset(page_info, 0, sizeof(*page_info));
2070         }
2071         BUG_ON(atomic_read(&rxq->used));
2072         rxq->tail = rxq->head = 0;
2073 }
2074
2075 static void be_tx_compl_clean(struct be_adapter *adapter)
2076 {
2077         struct be_tx_obj *txo;
2078         struct be_queue_info *txq;
2079         struct be_eth_tx_compl *txcp;
2080         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
2081         struct sk_buff *sent_skb;
2082         bool dummy_wrb;
2083         int i, pending_txqs;
2084
2085         /* Stop polling for compls when HW has been silent for 10ms */
2086         do {
2087                 pending_txqs = adapter->num_tx_qs;
2088
2089                 for_all_tx_queues(adapter, txo, i) {
2090                         cmpl = 0;
2091                         num_wrbs = 0;
2092                         txq = &txo->q;
2093                         while ((txcp = be_tx_compl_get(&txo->cq))) {
2094                                 end_idx =
2095                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
2096                                                       wrb_index, txcp);
2097                                 num_wrbs += be_tx_compl_process(adapter, txo,
2098                                                                 end_idx);
2099                                 cmpl++;
2100                         }
2101                         if (cmpl) {
2102                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
2103                                 atomic_sub(num_wrbs, &txq->used);
2104                                 timeo = 0;
2105                         }
2106                         if (atomic_read(&txq->used) == 0)
2107                                 pending_txqs--;
2108                 }
2109
2110                 if (pending_txqs == 0 || ++timeo > 10 || be_hw_error(adapter))
2111                         break;
2112
2113                 mdelay(1);
2114         } while (true);
2115
2116         for_all_tx_queues(adapter, txo, i) {
2117                 txq = &txo->q;
2118                 if (atomic_read(&txq->used))
2119                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
2120                                 atomic_read(&txq->used));
2121
2122                 /* free posted tx for which compls will never arrive */
2123                 while (atomic_read(&txq->used)) {
2124                         sent_skb = txo->sent_skb_list[txq->tail];
2125                         end_idx = txq->tail;
2126                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
2127                                                    &dummy_wrb);
2128                         index_adv(&end_idx, num_wrbs - 1, txq->len);
2129                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
2130                         atomic_sub(num_wrbs, &txq->used);
2131                 }
2132         }
2133 }
2134
2135 static void be_evt_queues_destroy(struct be_adapter *adapter)
2136 {
2137         struct be_eq_obj *eqo;
2138         int i;
2139
2140         for_all_evt_queues(adapter, eqo, i) {
2141                 if (eqo->q.created) {
2142                         be_eq_clean(eqo);
2143                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2144                         napi_hash_del(&eqo->napi);
2145                         netif_napi_del(&eqo->napi);
2146                 }
2147                 be_queue_free(adapter, &eqo->q);
2148         }
2149 }
2150
2151 static int be_evt_queues_create(struct be_adapter *adapter)
2152 {
2153         struct be_queue_info *eq;
2154         struct be_eq_obj *eqo;
2155         struct be_aic_obj *aic;
2156         int i, rc;
2157
2158         adapter->num_evt_qs = min_t(u16, num_irqs(adapter),
2159                                     adapter->cfg_num_qs);
2160
2161         for_all_evt_queues(adapter, eqo, i) {
2162                 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2163                                BE_NAPI_WEIGHT);
2164                 napi_hash_add(&eqo->napi);
2165                 aic = &adapter->aic_obj[i];
2166                 eqo->adapter = adapter;
2167                 eqo->tx_budget = BE_TX_BUDGET;
2168                 eqo->idx = i;
2169                 aic->max_eqd = BE_MAX_EQD;
2170                 aic->enable = true;
2171
2172                 eq = &eqo->q;
2173                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
2174                                     sizeof(struct be_eq_entry));
2175                 if (rc)
2176                         return rc;
2177
2178                 rc = be_cmd_eq_create(adapter, eqo);
2179                 if (rc)
2180                         return rc;
2181         }
2182         return 0;
2183 }
2184
2185 static void be_mcc_queues_destroy(struct be_adapter *adapter)
2186 {
2187         struct be_queue_info *q;
2188
2189         q = &adapter->mcc_obj.q;
2190         if (q->created)
2191                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
2192         be_queue_free(adapter, q);
2193
2194         q = &adapter->mcc_obj.cq;
2195         if (q->created)
2196                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2197         be_queue_free(adapter, q);
2198 }
2199
2200 /* Must be called only after TX qs are created as MCC shares TX EQ */
2201 static int be_mcc_queues_create(struct be_adapter *adapter)
2202 {
2203         struct be_queue_info *q, *cq;
2204
2205         cq = &adapter->mcc_obj.cq;
2206         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
2207                            sizeof(struct be_mcc_compl)))
2208                 goto err;
2209
2210         /* Use the default EQ for MCC completions */
2211         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
2212                 goto mcc_cq_free;
2213
2214         q = &adapter->mcc_obj.q;
2215         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
2216                 goto mcc_cq_destroy;
2217
2218         if (be_cmd_mccq_create(adapter, q, cq))
2219                 goto mcc_q_free;
2220
2221         return 0;
2222
2223 mcc_q_free:
2224         be_queue_free(adapter, q);
2225 mcc_cq_destroy:
2226         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
2227 mcc_cq_free:
2228         be_queue_free(adapter, cq);
2229 err:
2230         return -1;
2231 }
2232
2233 static void be_tx_queues_destroy(struct be_adapter *adapter)
2234 {
2235         struct be_queue_info *q;
2236         struct be_tx_obj *txo;
2237         u8 i;
2238
2239         for_all_tx_queues(adapter, txo, i) {
2240                 q = &txo->q;
2241                 if (q->created)
2242                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
2243                 be_queue_free(adapter, q);
2244
2245                 q = &txo->cq;
2246                 if (q->created)
2247                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2248                 be_queue_free(adapter, q);
2249         }
2250 }
2251
2252 static int be_tx_qs_create(struct be_adapter *adapter)
2253 {
2254         struct be_queue_info *cq, *eq;
2255         struct be_tx_obj *txo;
2256         int status, i;
2257
2258         adapter->num_tx_qs = min(adapter->num_evt_qs, be_max_txqs(adapter));
2259
2260         for_all_tx_queues(adapter, txo, i) {
2261                 cq = &txo->cq;
2262                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
2263                                         sizeof(struct be_eth_tx_compl));
2264                 if (status)
2265                         return status;
2266
2267                 u64_stats_init(&txo->stats.sync);
2268                 u64_stats_init(&txo->stats.sync_compl);
2269
2270                 /* If num_evt_qs is less than num_tx_qs, then more than
2271                  * one txq share an eq
2272                  */
2273                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2274                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
2275                 if (status)
2276                         return status;
2277
2278                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
2279                                         sizeof(struct be_eth_wrb));
2280                 if (status)
2281                         return status;
2282
2283                 status = be_cmd_txq_create(adapter, txo);
2284                 if (status)
2285                         return status;
2286         }
2287
2288         dev_info(&adapter->pdev->dev, "created %d TX queue(s)\n",
2289                  adapter->num_tx_qs);
2290         return 0;
2291 }
2292
2293 static void be_rx_cqs_destroy(struct be_adapter *adapter)
2294 {
2295         struct be_queue_info *q;
2296         struct be_rx_obj *rxo;
2297         int i;
2298
2299         for_all_rx_queues(adapter, rxo, i) {
2300                 q = &rxo->cq;
2301                 if (q->created)
2302                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
2303                 be_queue_free(adapter, q);
2304         }
2305 }
2306
2307 static int be_rx_cqs_create(struct be_adapter *adapter)
2308 {
2309         struct be_queue_info *eq, *cq;
2310         struct be_rx_obj *rxo;
2311         int rc, i;
2312
2313         /* We can create as many RSS rings as there are EQs. */
2314         adapter->num_rx_qs = adapter->num_evt_qs;
2315
2316         /* We'll use RSS only if atleast 2 RSS rings are supported.
2317          * When RSS is used, we'll need a default RXQ for non-IP traffic.
2318          */
2319         if (adapter->num_rx_qs > 1)
2320                 adapter->num_rx_qs++;
2321
2322         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
2323         for_all_rx_queues(adapter, rxo, i) {
2324                 rxo->adapter = adapter;
2325                 cq = &rxo->cq;
2326                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
2327                                     sizeof(struct be_eth_rx_compl));
2328                 if (rc)
2329                         return rc;
2330
2331                 u64_stats_init(&rxo->stats.sync);
2332                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
2333                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
2334                 if (rc)
2335                         return rc;
2336         }
2337
2338         dev_info(&adapter->pdev->dev,
2339                  "created %d RSS queue(s) and 1 default RX queue\n",
2340                  adapter->num_rx_qs - 1);
2341         return 0;
2342 }
2343
2344 static irqreturn_t be_intx(int irq, void *dev)
2345 {
2346         struct be_eq_obj *eqo = dev;
2347         struct be_adapter *adapter = eqo->adapter;
2348         int num_evts = 0;
2349
2350         /* IRQ is not expected when NAPI is scheduled as the EQ
2351          * will not be armed.
2352          * But, this can happen on Lancer INTx where it takes
2353          * a while to de-assert INTx or in BE2 where occasionaly
2354          * an interrupt may be raised even when EQ is unarmed.
2355          * If NAPI is already scheduled, then counting & notifying
2356          * events will orphan them.
2357          */
2358         if (napi_schedule_prep(&eqo->napi)) {
2359                 num_evts = events_get(eqo);
2360                 __napi_schedule(&eqo->napi);
2361                 if (num_evts)
2362                         eqo->spurious_intr = 0;
2363         }
2364         be_eq_notify(adapter, eqo->q.id, false, true, num_evts);
2365
2366         /* Return IRQ_HANDLED only for the the first spurious intr
2367          * after a valid intr to stop the kernel from branding
2368          * this irq as a bad one!
2369          */
2370         if (num_evts || eqo->spurious_intr++ == 0)
2371                 return IRQ_HANDLED;
2372         else
2373                 return IRQ_NONE;
2374 }
2375
2376 static irqreturn_t be_msix(int irq, void *dev)
2377 {
2378         struct be_eq_obj *eqo = dev;
2379
2380         be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
2381         napi_schedule(&eqo->napi);
2382         return IRQ_HANDLED;
2383 }
2384
2385 static inline bool do_gro(struct be_rx_compl_info *rxcp)
2386 {
2387         return (rxcp->tcpf && !rxcp->err && rxcp->l4_csum) ? true : false;
2388 }
2389
2390 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
2391                          int budget, int polling)
2392 {
2393         struct be_adapter *adapter = rxo->adapter;
2394         struct be_queue_info *rx_cq = &rxo->cq;
2395         struct be_rx_compl_info *rxcp;
2396         u32 work_done;
2397
2398         for (work_done = 0; work_done < budget; work_done++) {
2399                 rxcp = be_rx_compl_get(rxo);
2400                 if (!rxcp)
2401                         break;
2402
2403                 /* Is it a flush compl that has no data */
2404                 if (unlikely(rxcp->num_rcvd == 0))
2405                         goto loop_continue;
2406
2407                 /* Discard compl with partial DMA Lancer B0 */
2408                 if (unlikely(!rxcp->pkt_size)) {
2409                         be_rx_compl_discard(rxo, rxcp);
2410                         goto loop_continue;
2411                 }
2412
2413                 /* On BE drop pkts that arrive due to imperfect filtering in
2414                  * promiscuous mode on some skews
2415                  */
2416                 if (unlikely(rxcp->port != adapter->port_num &&
2417                              !lancer_chip(adapter))) {
2418                         be_rx_compl_discard(rxo, rxcp);
2419                         goto loop_continue;
2420                 }
2421
2422                 /* Don't do gro when we're busy_polling */
2423                 if (do_gro(rxcp) && polling != BUSY_POLLING)
2424                         be_rx_compl_process_gro(rxo, napi, rxcp);
2425                 else
2426                         be_rx_compl_process(rxo, napi, rxcp);
2427
2428 loop_continue:
2429                 be_rx_stats_update(rxo, rxcp);
2430         }
2431
2432         if (work_done) {
2433                 be_cq_notify(adapter, rx_cq->id, true, work_done);
2434
2435                 /* When an rx-obj gets into post_starved state, just
2436                  * let be_worker do the posting.
2437                  */
2438                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM &&
2439                     !rxo->rx_post_starved)
2440                         be_post_rx_frags(rxo, GFP_ATOMIC);
2441         }
2442
2443         return work_done;
2444 }
2445
2446 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
2447                           int budget, int idx)
2448 {
2449         struct be_eth_tx_compl *txcp;
2450         int num_wrbs = 0, work_done;
2451
2452         for (work_done = 0; work_done < budget; work_done++) {
2453                 txcp = be_tx_compl_get(&txo->cq);
2454                 if (!txcp)
2455                         break;
2456                 num_wrbs += be_tx_compl_process(adapter, txo,
2457                                                 AMAP_GET_BITS(struct
2458                                                               amap_eth_tx_compl,
2459                                                               wrb_index, txcp));
2460         }
2461
2462         if (work_done) {
2463                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2464                 atomic_sub(num_wrbs, &txo->q.used);
2465
2466                 /* As Tx wrbs have been freed up, wake up netdev queue
2467                  * if it was stopped due to lack of tx wrbs.  */
2468                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2469                     atomic_read(&txo->q.used) < txo->q.len / 2) {
2470                         netif_wake_subqueue(adapter->netdev, idx);
2471                 }
2472
2473                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2474                 tx_stats(txo)->tx_compl += work_done;
2475                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2476         }
2477         return (work_done < budget); /* Done */
2478 }
2479
2480 int be_poll(struct napi_struct *napi, int budget)
2481 {
2482         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2483         struct be_adapter *adapter = eqo->adapter;
2484         int max_work = 0, work, i, num_evts;
2485         struct be_rx_obj *rxo;
2486         bool tx_done;
2487
2488         num_evts = events_get(eqo);
2489
2490         /* Process all TXQs serviced by this EQ */
2491         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2492                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2493                                         eqo->tx_budget, i);
2494                 if (!tx_done)
2495                         max_work = budget;
2496         }
2497
2498         if (be_lock_napi(eqo)) {
2499                 /* This loop will iterate twice for EQ0 in which
2500                  * completions of the last RXQ (default one) are also processed
2501                  * For other EQs the loop iterates only once
2502                  */
2503                 for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2504                         work = be_process_rx(rxo, napi, budget, NAPI_POLLING);
2505                         max_work = max(work, max_work);
2506                 }
2507                 be_unlock_napi(eqo);
2508         } else {
2509                 max_work = budget;
2510         }
2511
2512         if (is_mcc_eqo(eqo))
2513                 be_process_mcc(adapter);
2514
2515         if (max_work < budget) {
2516                 napi_complete(napi);
2517                 be_eq_notify(adapter, eqo->q.id, true, false, num_evts);
2518         } else {
2519                 /* As we'll continue in polling mode, count and clear events */
2520                 be_eq_notify(adapter, eqo->q.id, false, false, num_evts);
2521         }
2522         return max_work;
2523 }
2524
2525 #ifdef CONFIG_NET_RX_BUSY_POLL
2526 static int be_busy_poll(struct napi_struct *napi)
2527 {
2528         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2529         struct be_adapter *adapter = eqo->adapter;
2530         struct be_rx_obj *rxo;
2531         int i, work = 0;
2532
2533         if (!be_lock_busy_poll(eqo))
2534                 return LL_FLUSH_BUSY;
2535
2536         for_all_rx_queues_on_eq(adapter, eqo, rxo, i) {
2537                 work = be_process_rx(rxo, napi, 4, BUSY_POLLING);
2538                 if (work)
2539                         break;
2540         }
2541
2542         be_unlock_busy_poll(eqo);
2543         return work;
2544 }
2545 #endif
2546
2547 void be_detect_error(struct be_adapter *adapter)
2548 {
2549         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2550         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2551         u32 i;
2552         bool error_detected = false;
2553         struct device *dev = &adapter->pdev->dev;
2554         struct net_device *netdev = adapter->netdev;
2555
2556         if (be_hw_error(adapter))
2557                 return;
2558
2559         if (lancer_chip(adapter)) {
2560                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2561                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2562                         sliport_err1 = ioread32(adapter->db +
2563                                                 SLIPORT_ERROR1_OFFSET);
2564                         sliport_err2 = ioread32(adapter->db +
2565                                                 SLIPORT_ERROR2_OFFSET);
2566                         adapter->hw_error = true;
2567                         /* Do not log error messages if its a FW reset */
2568                         if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
2569                             sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
2570                                 dev_info(dev, "Firmware update in progress\n");
2571                         } else {
2572                                 error_detected = true;
2573                                 dev_err(dev, "Error detected in the card\n");
2574                                 dev_err(dev, "ERR: sliport status 0x%x\n",
2575                                         sliport_status);
2576                                 dev_err(dev, "ERR: sliport error1 0x%x\n",
2577                                         sliport_err1);
2578                                 dev_err(dev, "ERR: sliport error2 0x%x\n",
2579                                         sliport_err2);
2580                         }
2581                 }
2582         } else {
2583                 pci_read_config_dword(adapter->pdev,
2584                                       PCICFG_UE_STATUS_LOW, &ue_lo);
2585                 pci_read_config_dword(adapter->pdev,
2586                                       PCICFG_UE_STATUS_HIGH, &ue_hi);
2587                 pci_read_config_dword(adapter->pdev,
2588                                       PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2589                 pci_read_config_dword(adapter->pdev,
2590                                       PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2591
2592                 ue_lo = (ue_lo & ~ue_lo_mask);
2593                 ue_hi = (ue_hi & ~ue_hi_mask);
2594
2595                 /* On certain platforms BE hardware can indicate spurious UEs.
2596                  * Allow HW to stop working completely in case of a real UE.
2597                  * Hence not setting the hw_error for UE detection.
2598                  */
2599
2600                 if (ue_lo || ue_hi) {
2601                         error_detected = true;
2602                         dev_err(dev,
2603                                 "Unrecoverable Error detected in the adapter");
2604                         dev_err(dev, "Please reboot server to recover");
2605                         if (skyhawk_chip(adapter))
2606                                 adapter->hw_error = true;
2607                         for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2608                                 if (ue_lo & 1)
2609                                         dev_err(dev, "UE: %s bit set\n",
2610                                                 ue_status_low_desc[i]);
2611                         }
2612                         for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2613                                 if (ue_hi & 1)
2614                                         dev_err(dev, "UE: %s bit set\n",
2615                                                 ue_status_hi_desc[i]);
2616                         }
2617                 }
2618         }
2619         if (error_detected)
2620                 netif_carrier_off(netdev);
2621 }
2622
2623 static void be_msix_disable(struct be_adapter *adapter)
2624 {
2625         if (msix_enabled(adapter)) {
2626                 pci_disable_msix(adapter->pdev);
2627                 adapter->num_msix_vec = 0;
2628                 adapter->num_msix_roce_vec = 0;
2629         }
2630 }
2631
2632 static int be_msix_enable(struct be_adapter *adapter)
2633 {
2634         int i, num_vec;
2635         struct device *dev = &adapter->pdev->dev;
2636
2637         /* If RoCE is supported, program the max number of NIC vectors that
2638          * may be configured via set-channels, along with vectors needed for
2639          * RoCe. Else, just program the number we'll use initially.
2640          */
2641         if (be_roce_supported(adapter))
2642                 num_vec = min_t(int, 2 * be_max_eqs(adapter),
2643                                 2 * num_online_cpus());
2644         else
2645                 num_vec = adapter->cfg_num_qs;
2646
2647         for (i = 0; i < num_vec; i++)
2648                 adapter->msix_entries[i].entry = i;
2649
2650         num_vec = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
2651                                         MIN_MSIX_VECTORS, num_vec);
2652         if (num_vec < 0)
2653                 goto fail;
2654
2655         if (be_roce_supported(adapter) && num_vec > MIN_MSIX_VECTORS) {
2656                 adapter->num_msix_roce_vec = num_vec / 2;
2657                 dev_info(dev, "enabled %d MSI-x vector(s) for RoCE\n",
2658                          adapter->num_msix_roce_vec);
2659         }
2660
2661         adapter->num_msix_vec = num_vec - adapter->num_msix_roce_vec;
2662
2663         dev_info(dev, "enabled %d MSI-x vector(s) for NIC\n",
2664                  adapter->num_msix_vec);
2665         return 0;
2666
2667 fail:
2668         dev_warn(dev, "MSIx enable failed\n");
2669
2670         /* INTx is not supported in VFs, so fail probe if enable_msix fails */
2671         if (!be_physfn(adapter))
2672                 return num_vec;
2673         return 0;
2674 }
2675
2676 static inline int be_msix_vec_get(struct be_adapter *adapter,
2677                                   struct be_eq_obj *eqo)
2678 {
2679         return adapter->msix_entries[eqo->msix_idx].vector;
2680 }
2681
2682 static int be_msix_register(struct be_adapter *adapter)
2683 {
2684         struct net_device *netdev = adapter->netdev;
2685         struct be_eq_obj *eqo;
2686         int status, i, vec;
2687
2688         for_all_evt_queues(adapter, eqo, i) {
2689                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2690                 vec = be_msix_vec_get(adapter, eqo);
2691                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2692                 if (status)
2693                         goto err_msix;
2694         }
2695
2696         return 0;
2697 err_msix:
2698         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2699                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2700         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2701                  status);
2702         be_msix_disable(adapter);
2703         return status;
2704 }
2705
2706 static int be_irq_register(struct be_adapter *adapter)
2707 {
2708         struct net_device *netdev = adapter->netdev;
2709         int status;
2710
2711         if (msix_enabled(adapter)) {
2712                 status = be_msix_register(adapter);
2713                 if (status == 0)
2714                         goto done;
2715                 /* INTx is not supported for VF */
2716                 if (!be_physfn(adapter))
2717                         return status;
2718         }
2719
2720         /* INTx: only the first EQ is used */
2721         netdev->irq = adapter->pdev->irq;
2722         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2723                              &adapter->eq_obj[0]);
2724         if (status) {
2725                 dev_err(&adapter->pdev->dev,
2726                         "INTx request IRQ failed - err %d\n", status);
2727                 return status;
2728         }
2729 done:
2730         adapter->isr_registered = true;
2731         return 0;
2732 }
2733
2734 static void be_irq_unregister(struct be_adapter *adapter)
2735 {
2736         struct net_device *netdev = adapter->netdev;
2737         struct be_eq_obj *eqo;
2738         int i;
2739
2740         if (!adapter->isr_registered)
2741                 return;
2742
2743         /* INTx */
2744         if (!msix_enabled(adapter)) {
2745                 free_irq(netdev->irq, &adapter->eq_obj[0]);
2746                 goto done;
2747         }
2748
2749         /* MSIx */
2750         for_all_evt_queues(adapter, eqo, i)
2751                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2752
2753 done:
2754         adapter->isr_registered = false;
2755 }
2756
2757 static void be_rx_qs_destroy(struct be_adapter *adapter)
2758 {
2759         struct be_queue_info *q;
2760         struct be_rx_obj *rxo;
2761         int i;
2762
2763         for_all_rx_queues(adapter, rxo, i) {
2764                 q = &rxo->q;
2765                 if (q->created) {
2766                         be_cmd_rxq_destroy(adapter, q);
2767                         be_rx_cq_clean(rxo);
2768                 }
2769                 be_queue_free(adapter, q);
2770         }
2771 }
2772
2773 static int be_close(struct net_device *netdev)
2774 {
2775         struct be_adapter *adapter = netdev_priv(netdev);
2776         struct be_eq_obj *eqo;
2777         int i;
2778
2779         /* This protection is needed as be_close() may be called even when the
2780          * adapter is in cleared state (after eeh perm failure)
2781          */
2782         if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
2783                 return 0;
2784
2785         be_roce_dev_close(adapter);
2786
2787         if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
2788                 for_all_evt_queues(adapter, eqo, i) {
2789                         napi_disable(&eqo->napi);
2790                         be_disable_busy_poll(eqo);
2791                 }
2792                 adapter->flags &= ~BE_FLAGS_NAPI_ENABLED;
2793         }
2794
2795         be_async_mcc_disable(adapter);
2796
2797         /* Wait for all pending tx completions to arrive so that
2798          * all tx skbs are freed.
2799          */
2800         netif_tx_disable(netdev);
2801         be_tx_compl_clean(adapter);
2802
2803         be_rx_qs_destroy(adapter);
2804
2805         for (i = 1; i < (adapter->uc_macs + 1); i++)
2806                 be_cmd_pmac_del(adapter, adapter->if_handle,
2807                                 adapter->pmac_id[i], 0);
2808         adapter->uc_macs = 0;
2809
2810         for_all_evt_queues(adapter, eqo, i) {
2811                 if (msix_enabled(adapter))
2812                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2813                 else
2814                         synchronize_irq(netdev->irq);
2815                 be_eq_clean(eqo);
2816         }
2817
2818         be_irq_unregister(adapter);
2819
2820         return 0;
2821 }
2822
2823 static int be_rx_qs_create(struct be_adapter *adapter)
2824 {
2825         struct be_rx_obj *rxo;
2826         int rc, i, j;
2827         u8 rss_hkey[RSS_HASH_KEY_LEN];
2828         struct rss_info *rss = &adapter->rss_info;
2829
2830         for_all_rx_queues(adapter, rxo, i) {
2831                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2832                                     sizeof(struct be_eth_rx_d));
2833                 if (rc)
2834                         return rc;
2835         }
2836
2837         /* The FW would like the default RXQ to be created first */
2838         rxo = default_rxo(adapter);
2839         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2840                                adapter->if_handle, false, &rxo->rss_id);
2841         if (rc)
2842                 return rc;
2843
2844         for_all_rss_queues(adapter, rxo, i) {
2845                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2846                                        rx_frag_size, adapter->if_handle,
2847                                        true, &rxo->rss_id);
2848                 if (rc)
2849                         return rc;
2850         }
2851
2852         if (be_multi_rxq(adapter)) {
2853                 for (j = 0; j < RSS_INDIR_TABLE_LEN;
2854                         j += adapter->num_rx_qs - 1) {
2855                         for_all_rss_queues(adapter, rxo, i) {
2856                                 if ((j + i) >= RSS_INDIR_TABLE_LEN)
2857                                         break;
2858                                 rss->rsstable[j + i] = rxo->rss_id;
2859                                 rss->rss_queue[j + i] = i;
2860                         }
2861                 }
2862                 rss->rss_flags = RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 |
2863                         RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6;
2864
2865                 if (!BEx_chip(adapter))
2866                         rss->rss_flags |= RSS_ENABLE_UDP_IPV4 |
2867                                 RSS_ENABLE_UDP_IPV6;
2868         } else {
2869                 /* Disable RSS, if only default RX Q is created */
2870                 rss->rss_flags = RSS_ENABLE_NONE;
2871         }
2872
2873         get_random_bytes(rss_hkey, RSS_HASH_KEY_LEN);
2874         rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
2875                                128, rss_hkey);
2876         if (rc) {
2877                 rss->rss_flags = RSS_ENABLE_NONE;
2878                 return rc;
2879         }
2880
2881         memcpy(rss->rss_hkey, rss_hkey, RSS_HASH_KEY_LEN);
2882
2883         /* First time posting */
2884         for_all_rx_queues(adapter, rxo, i)
2885                 be_post_rx_frags(rxo, GFP_KERNEL);
2886         return 0;
2887 }
2888
2889 static int be_open(struct net_device *netdev)
2890 {
2891         struct be_adapter *adapter = netdev_priv(netdev);
2892         struct be_eq_obj *eqo;
2893         struct be_rx_obj *rxo;
2894         struct be_tx_obj *txo;
2895         u8 link_status;
2896         int status, i;
2897
2898         status = be_rx_qs_create(adapter);
2899         if (status)
2900                 goto err;
2901
2902         status = be_irq_register(adapter);
2903         if (status)
2904                 goto err;
2905
2906         for_all_rx_queues(adapter, rxo, i)
2907                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2908
2909         for_all_tx_queues(adapter, txo, i)
2910                 be_cq_notify(adapter, txo->cq.id, true, 0);
2911
2912         be_async_mcc_enable(adapter);
2913
2914         for_all_evt_queues(adapter, eqo, i) {
2915                 napi_enable(&eqo->napi);
2916                 be_enable_busy_poll(eqo);
2917                 be_eq_notify(adapter, eqo->q.id, true, true, 0);
2918         }
2919         adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2920
2921         status = be_cmd_link_status_query(adapter, NULL, &link_status, 0);
2922         if (!status)
2923                 be_link_status_update(adapter, link_status);
2924
2925         netif_tx_start_all_queues(netdev);
2926         be_roce_dev_open(adapter);
2927
2928 #ifdef CONFIG_BE2NET_VXLAN
2929         if (skyhawk_chip(adapter))
2930                 vxlan_get_rx_port(netdev);
2931 #endif
2932
2933         return 0;
2934 err:
2935         be_close(adapter->netdev);
2936         return -EIO;
2937 }
2938
2939 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2940 {
2941         struct be_dma_mem cmd;
2942         int status = 0;
2943         u8 mac[ETH_ALEN];
2944
2945         memset(mac, 0, ETH_ALEN);
2946
2947         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2948         cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2949                                      GFP_KERNEL);
2950         if (!cmd.va)
2951                 return -ENOMEM;
2952
2953         if (enable) {
2954                 status = pci_write_config_dword(adapter->pdev,
2955                                                 PCICFG_PM_CONTROL_OFFSET,
2956                                                 PCICFG_PM_CONTROL_MASK);
2957                 if (status) {
2958                         dev_err(&adapter->pdev->dev,
2959                                 "Could not enable Wake-on-lan\n");
2960                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2961                                           cmd.dma);
2962                         return status;
2963                 }
2964                 status = be_cmd_enable_magic_wol(adapter,
2965                                                  adapter->netdev->dev_addr,
2966                                                  &cmd);
2967                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2968                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2969         } else {
2970                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2971                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2972                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2973         }
2974
2975         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2976         return status;
2977 }
2978
2979 /*
2980  * Generate a seed MAC address from the PF MAC Address using jhash.
2981  * MAC Address for VFs are assigned incrementally starting from the seed.
2982  * These addresses are programmed in the ASIC by the PF and the VF driver
2983  * queries for the MAC address during its probe.
2984  */
2985 static int be_vf_eth_addr_config(struct be_adapter *adapter)
2986 {
2987         u32 vf;
2988         int status = 0;
2989         u8 mac[ETH_ALEN];
2990         struct be_vf_cfg *vf_cfg;
2991
2992         be_vf_eth_addr_generate(adapter, mac);
2993
2994         for_all_vfs(adapter, vf_cfg, vf) {
2995                 if (BEx_chip(adapter))
2996                         status = be_cmd_pmac_add(adapter, mac,
2997                                                  vf_cfg->if_handle,
2998                                                  &vf_cfg->pmac_id, vf + 1);
2999                 else
3000                         status = be_cmd_set_mac(adapter, mac, vf_cfg->if_handle,
3001                                                 vf + 1);
3002
3003                 if (status)
3004                         dev_err(&adapter->pdev->dev,
3005                                 "Mac address assignment failed for VF %d\n",
3006                                 vf);
3007                 else
3008                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3009
3010                 mac[5] += 1;
3011         }
3012         return status;
3013 }
3014
3015 static int be_vfs_mac_query(struct be_adapter *adapter)
3016 {
3017         int status, vf;
3018         u8 mac[ETH_ALEN];
3019         struct be_vf_cfg *vf_cfg;
3020
3021         for_all_vfs(adapter, vf_cfg, vf) {
3022                 status = be_cmd_get_active_mac(adapter, vf_cfg->pmac_id,
3023                                                mac, vf_cfg->if_handle,
3024                                                false, vf+1);
3025                 if (status)
3026                         return status;
3027                 memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
3028         }
3029         return 0;
3030 }
3031
3032 static void be_vf_clear(struct be_adapter *adapter)
3033 {
3034         struct be_vf_cfg *vf_cfg;
3035         u32 vf;
3036
3037         if (pci_vfs_assigned(adapter->pdev)) {
3038                 dev_warn(&adapter->pdev->dev,
3039                          "VFs are assigned to VMs: not disabling VFs\n");
3040                 goto done;
3041         }
3042
3043         pci_disable_sriov(adapter->pdev);
3044
3045         for_all_vfs(adapter, vf_cfg, vf) {
3046                 if (BEx_chip(adapter))
3047                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
3048                                         vf_cfg->pmac_id, vf + 1);
3049                 else
3050                         be_cmd_set_mac(adapter, NULL, vf_cfg->if_handle,
3051                                        vf + 1);
3052
3053                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
3054         }
3055 done:
3056         kfree(adapter->vf_cfg);
3057         adapter->num_vfs = 0;
3058         adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
3059 }
3060
3061 static void be_clear_queues(struct be_adapter *adapter)
3062 {
3063         be_mcc_queues_destroy(adapter);
3064         be_rx_cqs_destroy(adapter);
3065         be_tx_queues_destroy(adapter);
3066         be_evt_queues_destroy(adapter);
3067 }
3068
3069 static void be_cancel_worker(struct be_adapter *adapter)
3070 {
3071         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
3072                 cancel_delayed_work_sync(&adapter->work);
3073                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
3074         }
3075 }
3076
3077 static void be_mac_clear(struct be_adapter *adapter)
3078 {
3079         int i;
3080
3081         if (adapter->pmac_id) {
3082                 for (i = 0; i < (adapter->uc_macs + 1); i++)
3083                         be_cmd_pmac_del(adapter, adapter->if_handle,
3084                                         adapter->pmac_id[i], 0);
3085                 adapter->uc_macs = 0;
3086
3087                 kfree(adapter->pmac_id);
3088                 adapter->pmac_id = NULL;
3089         }
3090 }
3091
3092 #ifdef CONFIG_BE2NET_VXLAN
3093 static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3094 {
3095         if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)
3096                 be_cmd_manage_iface(adapter, adapter->if_handle,
3097                                     OP_CONVERT_TUNNEL_TO_NORMAL);
3098
3099         if (adapter->vxlan_port)
3100                 be_cmd_set_vxlan_port(adapter, 0);
3101
3102         adapter->flags &= ~BE_FLAGS_VXLAN_OFFLOADS;
3103         adapter->vxlan_port = 0;
3104 }
3105 #endif
3106
3107 static int be_clear(struct be_adapter *adapter)
3108 {
3109         be_cancel_worker(adapter);
3110
3111         if (sriov_enabled(adapter))
3112                 be_vf_clear(adapter);
3113
3114         /* Re-configure FW to distribute resources evenly across max-supported
3115          * number of VFs, only when VFs are not already enabled.
3116          */
3117         if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
3118                 be_cmd_set_sriov_config(adapter, adapter->pool_res,
3119                                         pci_sriov_get_totalvfs(adapter->pdev));
3120
3121 #ifdef CONFIG_BE2NET_VXLAN
3122         be_disable_vxlan_offloads(adapter);
3123 #endif
3124         /* delete the primary mac along with the uc-mac list */
3125         be_mac_clear(adapter);
3126
3127         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
3128
3129         be_clear_queues(adapter);
3130
3131         be_msix_disable(adapter);
3132         adapter->flags &= ~BE_FLAGS_SETUP_DONE;
3133         return 0;
3134 }
3135
3136 static int be_vfs_if_create(struct be_adapter *adapter)
3137 {
3138         struct be_resources res = {0};
3139         struct be_vf_cfg *vf_cfg;
3140         u32 cap_flags, en_flags, vf;
3141         int status = 0;
3142
3143         cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3144                     BE_IF_FLAGS_MULTICAST;
3145
3146         for_all_vfs(adapter, vf_cfg, vf) {
3147                 if (!BE3_chip(adapter)) {
3148                         status = be_cmd_get_profile_config(adapter, &res,
3149                                                            vf + 1);
3150                         if (!status)
3151                                 cap_flags = res.if_cap_flags;
3152                 }
3153
3154                 /* If a FW profile exists, then cap_flags are updated */
3155                 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3156                                         BE_IF_FLAGS_BROADCAST |
3157                                         BE_IF_FLAGS_MULTICAST);
3158                 status =
3159                     be_cmd_if_create(adapter, cap_flags, en_flags,
3160                                      &vf_cfg->if_handle, vf + 1);
3161                 if (status)
3162                         goto err;
3163         }
3164 err:
3165         return status;
3166 }
3167
3168 static int be_vf_setup_init(struct be_adapter *adapter)
3169 {
3170         struct be_vf_cfg *vf_cfg;
3171         int vf;
3172
3173         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
3174                                   GFP_KERNEL);
3175         if (!adapter->vf_cfg)
3176                 return -ENOMEM;
3177
3178         for_all_vfs(adapter, vf_cfg, vf) {
3179                 vf_cfg->if_handle = -1;
3180                 vf_cfg->pmac_id = -1;
3181         }
3182         return 0;
3183 }
3184
3185 static int be_vf_setup(struct be_adapter *adapter)
3186 {
3187         struct device *dev = &adapter->pdev->dev;
3188         struct be_vf_cfg *vf_cfg;
3189         int status, old_vfs, vf;
3190         u32 privileges;
3191
3192         old_vfs = pci_num_vf(adapter->pdev);
3193
3194         status = be_vf_setup_init(adapter);
3195         if (status)
3196                 goto err;
3197
3198         if (old_vfs) {
3199                 for_all_vfs(adapter, vf_cfg, vf) {
3200                         status = be_cmd_get_if_id(adapter, vf_cfg, vf);
3201                         if (status)
3202                                 goto err;
3203                 }
3204
3205                 status = be_vfs_mac_query(adapter);
3206                 if (status)
3207                         goto err;
3208         } else {
3209                 status = be_vfs_if_create(adapter);
3210                 if (status)
3211                         goto err;
3212
3213                 status = be_vf_eth_addr_config(adapter);
3214                 if (status)
3215                         goto err;
3216         }
3217
3218         for_all_vfs(adapter, vf_cfg, vf) {
3219                 /* Allow VFs to programs MAC/VLAN filters */
3220                 status = be_cmd_get_fn_privileges(adapter, &privileges, vf + 1);
3221                 if (!status && !(privileges & BE_PRIV_FILTMGMT)) {
3222                         status = be_cmd_set_fn_privileges(adapter,
3223                                                           privileges |
3224                                                           BE_PRIV_FILTMGMT,
3225                                                           vf + 1);
3226                         if (!status)
3227                                 dev_info(dev, "VF%d has FILTMGMT privilege\n",
3228                                          vf);
3229                 }
3230
3231                 /* Allow full available bandwidth */
3232                 if (!old_vfs)
3233                         be_cmd_config_qos(adapter, 0, 0, vf + 1);
3234
3235                 if (!old_vfs) {
3236                         be_cmd_enable_vf(adapter, vf + 1);
3237                         be_cmd_set_logical_link_config(adapter,
3238                                                        IFLA_VF_LINK_STATE_AUTO,
3239                                                        vf+1);
3240                 }
3241         }
3242
3243         if (!old_vfs) {
3244                 status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
3245                 if (status) {
3246                         dev_err(dev, "SRIOV enable failed\n");
3247                         adapter->num_vfs = 0;
3248                         goto err;
3249                 }
3250         }
3251
3252         adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
3253         return 0;
3254 err:
3255         dev_err(dev, "VF setup failed\n");
3256         be_vf_clear(adapter);
3257         return status;
3258 }
3259
3260 /* Converting function_mode bits on BE3 to SH mc_type enums */
3261
3262 static u8 be_convert_mc_type(u32 function_mode)
3263 {
3264         if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
3265                 return vNIC1;
3266         else if (function_mode & QNQ_MODE)
3267                 return FLEX10;
3268         else if (function_mode & VNIC_MODE)
3269                 return vNIC2;
3270         else if (function_mode & UMC_ENABLED)
3271                 return UMC;
3272         else
3273                 return MC_NONE;
3274 }
3275
3276 /* On BE2/BE3 FW does not suggest the supported limits */
3277 static void BEx_get_resources(struct be_adapter *adapter,
3278                               struct be_resources *res)
3279 {
3280         bool use_sriov = adapter->num_vfs ? 1 : 0;
3281
3282         if (be_physfn(adapter))
3283                 res->max_uc_mac = BE_UC_PMAC_COUNT;
3284         else
3285                 res->max_uc_mac = BE_VF_UC_PMAC_COUNT;
3286
3287         adapter->mc_type = be_convert_mc_type(adapter->function_mode);
3288
3289         if (be_is_mc(adapter)) {
3290                 /* Assuming that there are 4 channels per port,
3291                  * when multi-channel is enabled
3292                  */
3293                 if (be_is_qnq_mode(adapter))
3294                         res->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3295                 else
3296                         /* In a non-qnq multichannel mode, the pvid
3297                          * takes up one vlan entry
3298                          */
3299                         res->max_vlans = (BE_NUM_VLANS_SUPPORTED / 4) - 1;
3300         } else {
3301                 res->max_vlans = BE_NUM_VLANS_SUPPORTED;
3302         }
3303
3304         res->max_mcast_mac = BE_MAX_MC;
3305
3306         /* 1) For BE3 1Gb ports, FW does not support multiple TXQs
3307          * 2) Create multiple TX rings on a BE3-R multi-channel interface
3308          *    *only* if it is RSS-capable.
3309          */
3310         if (BE2_chip(adapter) || use_sriov ||  (adapter->port_num > 1) ||
3311             !be_physfn(adapter) || (be_is_mc(adapter) &&
3312             !(adapter->function_caps & BE_FUNCTION_CAPS_RSS)))
3313                 res->max_tx_qs = 1;
3314         else
3315                 res->max_tx_qs = BE3_MAX_TX_QS;
3316
3317         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
3318             !use_sriov && be_physfn(adapter))
3319                 res->max_rss_qs = (adapter->be3_native) ?
3320                                            BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
3321         res->max_rx_qs = res->max_rss_qs + 1;
3322
3323         if (be_physfn(adapter))
3324                 res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
3325                                         BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
3326         else
3327                 res->max_evt_qs = 1;
3328
3329         res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
3330         if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
3331                 res->if_cap_flags &= ~BE_IF_FLAGS_RSS;
3332 }
3333
3334 static void be_setup_init(struct be_adapter *adapter)
3335 {
3336         adapter->vlan_prio_bmap = 0xff;
3337         adapter->phy.link_speed = -1;
3338         adapter->if_handle = -1;
3339         adapter->be3_native = false;
3340         adapter->promiscuous = false;
3341         if (be_physfn(adapter))
3342                 adapter->cmd_privileges = MAX_PRIVILEGES;
3343         else
3344                 adapter->cmd_privileges = MIN_PRIVILEGES;
3345 }
3346
3347 static int be_get_sriov_config(struct be_adapter *adapter)
3348 {
3349         struct device *dev = &adapter->pdev->dev;
3350         struct be_resources res = {0};
3351         int max_vfs, old_vfs;
3352
3353         /* Some old versions of BE3 FW don't report max_vfs value */
3354         be_cmd_get_profile_config(adapter, &res, 0);
3355
3356         if (BE3_chip(adapter) && !res.max_vfs) {
3357                 max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
3358                 res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
3359         }
3360
3361         adapter->pool_res = res;
3362
3363         if (!be_max_vfs(adapter)) {
3364                 if (num_vfs)
3365                         dev_warn(dev, "device doesn't support SRIOV\n");
3366                 adapter->num_vfs = 0;
3367                 return 0;
3368         }
3369
3370         pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
3371
3372         /* validate num_vfs module param */
3373         old_vfs = pci_num_vf(adapter->pdev);
3374         if (old_vfs) {
3375                 dev_info(dev, "%d VFs are already enabled\n", old_vfs);
3376                 if (old_vfs != num_vfs)
3377                         dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
3378                 adapter->num_vfs = old_vfs;
3379         } else {
3380                 if (num_vfs > be_max_vfs(adapter)) {
3381                         dev_info(dev, "Resources unavailable to init %d VFs\n",
3382                                  num_vfs);
3383                         dev_info(dev, "Limiting to %d VFs\n",
3384                                  be_max_vfs(adapter));
3385                 }
3386                 adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
3387         }
3388
3389         return 0;
3390 }
3391
3392 static int be_get_resources(struct be_adapter *adapter)
3393 {
3394         struct device *dev = &adapter->pdev->dev;
3395         struct be_resources res = {0};
3396         int status;
3397
3398         if (BEx_chip(adapter)) {
3399                 BEx_get_resources(adapter, &res);
3400                 adapter->res = res;
3401         }
3402
3403         /* For Lancer, SH etc read per-function resource limits from FW.
3404          * GET_FUNC_CONFIG returns per function guaranteed limits.
3405          * GET_PROFILE_CONFIG returns PCI-E related limits PF-pool limits
3406          */
3407         if (!BEx_chip(adapter)) {
3408                 status = be_cmd_get_func_config(adapter, &res);
3409                 if (status)
3410                         return status;
3411
3412                 /* If RoCE may be enabled stash away half the EQs for RoCE */
3413                 if (be_roce_supported(adapter))
3414                         res.max_evt_qs /= 2;
3415                 adapter->res = res;
3416
3417                 dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
3418                          be_max_txqs(adapter), be_max_rxqs(adapter),
3419                          be_max_rss(adapter), be_max_eqs(adapter),
3420                          be_max_vfs(adapter));
3421                 dev_info(dev, "Max: uc-macs %d, mc-macs %d, vlans %d\n",
3422                          be_max_uc(adapter), be_max_mc(adapter),
3423                          be_max_vlans(adapter));
3424         }
3425
3426         return 0;
3427 }
3428
3429 static void be_sriov_config(struct be_adapter *adapter)
3430 {
3431         struct device *dev = &adapter->pdev->dev;
3432         int status;
3433
3434         status = be_get_sriov_config(adapter);
3435         if (status) {
3436                 dev_err(dev, "Failed to query SR-IOV configuration\n");
3437                 dev_err(dev, "SR-IOV cannot be enabled\n");
3438                 return;
3439         }
3440
3441         /* When the HW is in SRIOV capable configuration, the PF-pool
3442          * resources are equally distributed across the max-number of
3443          * VFs. The user may request only a subset of the max-vfs to be
3444          * enabled. Based on num_vfs, redistribute the resources across
3445          * num_vfs so that each VF will have access to more number of
3446          * resources. This facility is not available in BE3 FW.
3447          * Also, this is done by FW in Lancer chip.
3448          */
3449         if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
3450                 status = be_cmd_set_sriov_config(adapter,
3451                                                  adapter->pool_res,
3452                                                  adapter->num_vfs);
3453                 if (status)
3454                         dev_err(dev, "Failed to optimize SR-IOV resources\n");
3455         }
3456 }
3457
3458 static int be_get_config(struct be_adapter *adapter)
3459 {
3460         u16 profile_id;
3461         int status;
3462
3463         status = be_cmd_query_fw_cfg(adapter);
3464         if (status)
3465                 return status;
3466
3467          if (be_physfn(adapter)) {
3468                 status = be_cmd_get_active_profile(adapter, &profile_id);
3469                 if (!status)
3470                         dev_info(&adapter->pdev->dev,
3471                                  "Using profile 0x%x\n", profile_id);
3472         }
3473
3474         if (!BE2_chip(adapter) && be_physfn(adapter))
3475                 be_sriov_config(adapter);
3476
3477         status = be_get_resources(adapter);
3478         if (status)
3479                 return status;
3480
3481         adapter->pmac_id = kcalloc(be_max_uc(adapter),
3482                                    sizeof(*adapter->pmac_id), GFP_KERNEL);
3483         if (!adapter->pmac_id)
3484                 return -ENOMEM;
3485
3486         /* Sanitize cfg_num_qs based on HW and platform limits */
3487         adapter->cfg_num_qs = min(adapter->cfg_num_qs, be_max_qs(adapter));
3488
3489         return 0;
3490 }
3491
3492 static int be_mac_setup(struct be_adapter *adapter)
3493 {
3494         u8 mac[ETH_ALEN];
3495         int status;
3496
3497         if (is_zero_ether_addr(adapter->netdev->dev_addr)) {
3498                 status = be_cmd_get_perm_mac(adapter, mac);
3499                 if (status)
3500                         return status;
3501
3502                 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
3503                 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
3504         } else {
3505                 /* Maybe the HW was reset; dev_addr must be re-programmed */
3506                 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
3507         }
3508
3509         /* For BE3-R VFs, the PF programs the initial MAC address */
3510         if (!(BEx_chip(adapter) && be_virtfn(adapter)))
3511                 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
3512                                 &adapter->pmac_id[0], 0);
3513         return 0;
3514 }
3515
3516 static void be_schedule_worker(struct be_adapter *adapter)
3517 {
3518         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3519         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
3520 }
3521
3522 static int be_setup_queues(struct be_adapter *adapter)
3523 {
3524         struct net_device *netdev = adapter->netdev;
3525         int status;
3526
3527         status = be_evt_queues_create(adapter);
3528         if (status)
3529                 goto err;
3530
3531         status = be_tx_qs_create(adapter);
3532         if (status)
3533                 goto err;
3534
3535         status = be_rx_cqs_create(adapter);
3536         if (status)
3537                 goto err;
3538
3539         status = be_mcc_queues_create(adapter);
3540         if (status)
3541                 goto err;
3542
3543         status = netif_set_real_num_rx_queues(netdev, adapter->num_rx_qs);
3544         if (status)
3545                 goto err;
3546
3547         status = netif_set_real_num_tx_queues(netdev, adapter->num_tx_qs);
3548         if (status)
3549                 goto err;
3550
3551         return 0;
3552 err:
3553         dev_err(&adapter->pdev->dev, "queue_setup failed\n");
3554         return status;
3555 }
3556
3557 int be_update_queues(struct be_adapter *adapter)
3558 {
3559         struct net_device *netdev = adapter->netdev;
3560         int status;
3561
3562         if (netif_running(netdev))
3563                 be_close(netdev);
3564
3565         be_cancel_worker(adapter);
3566
3567         /* If any vectors have been shared with RoCE we cannot re-program
3568          * the MSIx table.
3569          */
3570         if (!adapter->num_msix_roce_vec)
3571                 be_msix_disable(adapter);
3572
3573         be_clear_queues(adapter);
3574
3575         if (!msix_enabled(adapter)) {
3576                 status = be_msix_enable(adapter);
3577                 if (status)
3578                         return status;
3579         }
3580
3581         status = be_setup_queues(adapter);
3582         if (status)
3583                 return status;
3584
3585         be_schedule_worker(adapter);
3586
3587         if (netif_running(netdev))
3588                 status = be_open(netdev);
3589
3590         return status;
3591 }
3592
3593 static int be_setup(struct be_adapter *adapter)
3594 {
3595         struct device *dev = &adapter->pdev->dev;
3596         u32 tx_fc, rx_fc, en_flags;
3597         int status;
3598
3599         be_setup_init(adapter);
3600
3601         if (!lancer_chip(adapter))
3602                 be_cmd_req_native_mode(adapter);
3603
3604         status = be_get_config(adapter);
3605         if (status)
3606                 goto err;
3607
3608         status = be_msix_enable(adapter);
3609         if (status)
3610                 goto err;
3611
3612         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3613                    BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
3614         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS)
3615                 en_flags |= BE_IF_FLAGS_RSS;
3616         en_flags = en_flags & be_if_cap_flags(adapter);
3617         status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
3618                                   &adapter->if_handle, 0);
3619         if (status)
3620                 goto err;
3621
3622         /* Updating real_num_tx/rx_queues() requires rtnl_lock() */
3623         rtnl_lock();
3624         status = be_setup_queues(adapter);
3625         rtnl_unlock();
3626         if (status)
3627                 goto err;
3628
3629         be_cmd_get_fn_privileges(adapter, &adapter->cmd_privileges, 0);
3630
3631         status = be_mac_setup(adapter);
3632         if (status)
3633                 goto err;
3634
3635         be_cmd_get_fw_ver(adapter);
3636
3637         if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
3638                 dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
3639                         adapter->fw_ver);
3640                 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
3641         }
3642
3643         if (adapter->vlans_added)
3644                 be_vid_config(adapter);
3645
3646         be_set_rx_mode(adapter->netdev);
3647
3648         be_cmd_get_acpi_wol_cap(adapter);
3649
3650         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
3651
3652         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
3653                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
3654                                         adapter->rx_fc);
3655
3656         if (be_physfn(adapter))
3657                 be_cmd_set_logical_link_config(adapter,
3658                                                IFLA_VF_LINK_STATE_AUTO, 0);
3659
3660         if (adapter->num_vfs)
3661                 be_vf_setup(adapter);
3662
3663         status = be_cmd_get_phy_info(adapter);
3664         if (!status && be_pause_supported(adapter))
3665                 adapter->phy.fc_autoneg = 1;
3666
3667         be_schedule_worker(adapter);
3668         adapter->flags |= BE_FLAGS_SETUP_DONE;
3669         return 0;
3670 err:
3671         be_clear(adapter);
3672         return status;
3673 }
3674
3675 #ifdef CONFIG_NET_POLL_CONTROLLER
3676 static void be_netpoll(struct net_device *netdev)
3677 {
3678         struct be_adapter *adapter = netdev_priv(netdev);
3679         struct be_eq_obj *eqo;
3680         int i;
3681
3682         for_all_evt_queues(adapter, eqo, i) {
3683                 be_eq_notify(eqo->adapter, eqo->q.id, false, true, 0);
3684                 napi_schedule(&eqo->napi);
3685         }
3686
3687         return;
3688 }
3689 #endif
3690
3691 static char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
3692
3693 static bool phy_flashing_required(struct be_adapter *adapter)
3694 {
3695         return (adapter->phy.phy_type == TN_8022 &&
3696                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
3697 }
3698
3699 static bool is_comp_in_ufi(struct be_adapter *adapter,
3700                            struct flash_section_info *fsec, int type)
3701 {
3702         int i = 0, img_type = 0;
3703         struct flash_section_info_g2 *fsec_g2 = NULL;
3704
3705         if (BE2_chip(adapter))
3706                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
3707
3708         for (i = 0; i < MAX_FLASH_COMP; i++) {
3709                 if (fsec_g2)
3710                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
3711                 else
3712                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
3713
3714                 if (img_type == type)
3715                         return true;
3716         }
3717         return false;
3718
3719 }
3720
3721 static struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
3722                                                 int header_size,
3723                                                 const struct firmware *fw)
3724 {
3725         struct flash_section_info *fsec = NULL;
3726         const u8 *p = fw->data;
3727
3728         p += header_size;
3729         while (p < (fw->data + fw->size)) {
3730                 fsec = (struct flash_section_info *)p;
3731                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
3732                         return fsec;
3733                 p += 32;
3734         }
3735         return NULL;
3736 }
3737
3738 static int be_check_flash_crc(struct be_adapter *adapter, const u8 *p,
3739                               u32 img_offset, u32 img_size, int hdr_size,
3740                               u16 img_optype, bool *crc_match)
3741 {
3742         u32 crc_offset;
3743         int status;
3744         u8 crc[4];
3745
3746         status = be_cmd_get_flash_crc(adapter, crc, img_optype, img_size - 4);
3747         if (status)
3748                 return status;
3749
3750         crc_offset = hdr_size + img_offset + img_size - 4;
3751
3752         /* Skip flashing, if crc of flashed region matches */
3753         if (!memcmp(crc, p + crc_offset, 4))
3754                 *crc_match = true;
3755         else
3756                 *crc_match = false;
3757
3758         return status;
3759 }
3760
3761 static int be_flash(struct be_adapter *adapter, const u8 *img,
3762                     struct be_dma_mem *flash_cmd, int optype, int img_size)
3763 {
3764         struct be_cmd_write_flashrom *req = flash_cmd->va;
3765         u32 total_bytes, flash_op, num_bytes;
3766         int status;
3767
3768         total_bytes = img_size;
3769         while (total_bytes) {
3770                 num_bytes = min_t(u32, 32*1024, total_bytes);
3771
3772                 total_bytes -= num_bytes;
3773
3774                 if (!total_bytes) {
3775                         if (optype == OPTYPE_PHY_FW)
3776                                 flash_op = FLASHROM_OPER_PHY_FLASH;
3777                         else
3778                                 flash_op = FLASHROM_OPER_FLASH;
3779                 } else {
3780                         if (optype == OPTYPE_PHY_FW)
3781                                 flash_op = FLASHROM_OPER_PHY_SAVE;
3782                         else
3783                                 flash_op = FLASHROM_OPER_SAVE;
3784                 }
3785
3786                 memcpy(req->data_buf, img, num_bytes);
3787                 img += num_bytes;
3788                 status = be_cmd_write_flashrom(adapter, flash_cmd, optype,
3789                                                flash_op, num_bytes);
3790                 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST &&
3791                     optype == OPTYPE_PHY_FW)
3792                         break;
3793                 else if (status)
3794                         return status;
3795         }
3796         return 0;
3797 }
3798
3799 /* For BE2, BE3 and BE3-R */
3800 static int be_flash_BEx(struct be_adapter *adapter,
3801                         const struct firmware *fw,
3802                         struct be_dma_mem *flash_cmd, int num_of_images)
3803 {
3804         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
3805         struct device *dev = &adapter->pdev->dev;
3806         struct flash_section_info *fsec = NULL;
3807         int status, i, filehdr_size, num_comp;
3808         const struct flash_comp *pflashcomp;
3809         bool crc_match;
3810         const u8 *p;
3811
3812         struct flash_comp gen3_flash_types[] = {
3813                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
3814                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
3815                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
3816                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
3817                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
3818                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
3819                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
3820                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
3821                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
3822                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
3823                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
3824                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
3825                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
3826                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
3827                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
3828                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
3829                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
3830                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
3831                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
3832                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
3833         };
3834
3835         struct flash_comp gen2_flash_types[] = {
3836                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
3837                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
3838                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
3839                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
3840                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
3841                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
3842                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
3843                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
3844                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
3845                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
3846                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
3847                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
3848                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
3849                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
3850                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
3851                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
3852         };
3853
3854         if (BE3_chip(adapter)) {
3855                 pflashcomp = gen3_flash_types;
3856                 filehdr_size = sizeof(struct flash_file_hdr_g3);
3857                 num_comp = ARRAY_SIZE(gen3_flash_types);
3858         } else {
3859                 pflashcomp = gen2_flash_types;
3860                 filehdr_size = sizeof(struct flash_file_hdr_g2);
3861                 num_comp = ARRAY_SIZE(gen2_flash_types);
3862         }
3863
3864         /* Get flash section info*/
3865         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3866         if (!fsec) {
3867                 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3868                 return -1;
3869         }
3870         for (i = 0; i < num_comp; i++) {
3871                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
3872                         continue;
3873
3874                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
3875                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
3876                         continue;
3877
3878                 if (pflashcomp[i].optype == OPTYPE_PHY_FW  &&
3879                     !phy_flashing_required(adapter))
3880                                 continue;
3881
3882                 if (pflashcomp[i].optype == OPTYPE_REDBOOT) {
3883                         status = be_check_flash_crc(adapter, fw->data,
3884                                                     pflashcomp[i].offset,
3885                                                     pflashcomp[i].size,
3886                                                     filehdr_size +
3887                                                     img_hdrs_size,
3888                                                     OPTYPE_REDBOOT, &crc_match);
3889                         if (status) {
3890                                 dev_err(dev,
3891                                         "Could not get CRC for 0x%x region\n",
3892                                         pflashcomp[i].optype);
3893                                 continue;
3894                         }
3895
3896                         if (crc_match)
3897                                 continue;
3898                 }
3899
3900                 p = fw->data + filehdr_size + pflashcomp[i].offset +
3901                         img_hdrs_size;
3902                 if (p + pflashcomp[i].size > fw->data + fw->size)
3903                         return -1;
3904
3905                 status = be_flash(adapter, p, flash_cmd, pflashcomp[i].optype,
3906                                   pflashcomp[i].size);
3907                 if (status) {
3908                         dev_err(dev, "Flashing section type 0x%x failed\n",
3909                                 pflashcomp[i].img_type);
3910                         return status;
3911                 }
3912         }
3913         return 0;
3914 }
3915
3916 static u16 be_get_img_optype(struct flash_section_entry fsec_entry)
3917 {
3918         u32 img_type = le32_to_cpu(fsec_entry.type);
3919         u16 img_optype = le16_to_cpu(fsec_entry.optype);
3920
3921         if (img_optype != 0xFFFF)
3922                 return img_optype;
3923
3924         switch (img_type) {
3925         case IMAGE_FIRMWARE_iSCSI:
3926                 img_optype = OPTYPE_ISCSI_ACTIVE;
3927                 break;
3928         case IMAGE_BOOT_CODE:
3929                 img_optype = OPTYPE_REDBOOT;
3930                 break;
3931         case IMAGE_OPTION_ROM_ISCSI:
3932                 img_optype = OPTYPE_BIOS;
3933                 break;
3934         case IMAGE_OPTION_ROM_PXE:
3935                 img_optype = OPTYPE_PXE_BIOS;
3936                 break;
3937         case IMAGE_OPTION_ROM_FCoE:
3938                 img_optype = OPTYPE_FCOE_BIOS;
3939                 break;
3940         case IMAGE_FIRMWARE_BACKUP_iSCSI:
3941                 img_optype = OPTYPE_ISCSI_BACKUP;
3942                 break;
3943         case IMAGE_NCSI:
3944                 img_optype = OPTYPE_NCSI_FW;
3945                 break;
3946         case IMAGE_FLASHISM_JUMPVECTOR:
3947                 img_optype = OPTYPE_FLASHISM_JUMPVECTOR;
3948                 break;
3949         case IMAGE_FIRMWARE_PHY:
3950                 img_optype = OPTYPE_SH_PHY_FW;
3951                 break;
3952         case IMAGE_REDBOOT_DIR:
3953                 img_optype = OPTYPE_REDBOOT_DIR;
3954                 break;
3955         case IMAGE_REDBOOT_CONFIG:
3956                 img_optype = OPTYPE_REDBOOT_CONFIG;
3957                 break;
3958         case IMAGE_UFI_DIR:
3959                 img_optype = OPTYPE_UFI_DIR;
3960                 break;
3961         default:
3962                 break;
3963         }
3964
3965         return img_optype;
3966 }
3967
3968 static int be_flash_skyhawk(struct be_adapter *adapter,
3969                             const struct firmware *fw,
3970                             struct be_dma_mem *flash_cmd, int num_of_images)
3971 {
3972         int img_hdrs_size = num_of_images * sizeof(struct image_hdr);
3973         struct device *dev = &adapter->pdev->dev;
3974         struct flash_section_info *fsec = NULL;
3975         u32 img_offset, img_size, img_type;
3976         int status, i, filehdr_size;
3977         bool crc_match, old_fw_img;
3978         u16 img_optype;
3979         const u8 *p;
3980
3981         filehdr_size = sizeof(struct flash_file_hdr_g3);
3982         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
3983         if (!fsec) {
3984                 dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
3985                 return -EINVAL;
3986         }
3987
3988         for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
3989                 img_offset = le32_to_cpu(fsec->fsec_entry[i].offset);
3990                 img_size   = le32_to_cpu(fsec->fsec_entry[i].pad_size);
3991                 img_type   = le32_to_cpu(fsec->fsec_entry[i].type);
3992                 img_optype = be_get_img_optype(fsec->fsec_entry[i]);
3993                 old_fw_img = fsec->fsec_entry[i].optype == 0xFFFF;
3994
3995                 if (img_optype == 0xFFFF)
3996                         continue;
3997                 /* Don't bother verifying CRC if an old FW image is being
3998                  * flashed
3999                  */
4000                 if (old_fw_img)
4001                         goto flash;
4002
4003                 status = be_check_flash_crc(adapter, fw->data, img_offset,
4004                                             img_size, filehdr_size +
4005                                             img_hdrs_size, img_optype,
4006                                             &crc_match);
4007                 /* The current FW image on the card does not recognize the new
4008                  * FLASH op_type. The FW download is partially complete.
4009                  * Reboot the server now to enable FW image to recognize the
4010                  * new FLASH op_type. To complete the remaining process,
4011                  * download the same FW again after the reboot.
4012                  */
4013                 if (base_status(status) == MCC_STATUS_ILLEGAL_REQUEST ||
4014                     base_status(status) == MCC_STATUS_ILLEGAL_FIELD) {
4015                         dev_err(dev, "Flash incomplete. Reset the server\n");
4016                         dev_err(dev, "Download FW image again after reset\n");
4017                         return -EAGAIN;
4018                 } else if (status) {
4019                         dev_err(dev, "Could not get CRC for 0x%x region\n",
4020                                 img_optype);
4021                         return -EFAULT;
4022                 }
4023
4024                 if (crc_match)
4025                         continue;
4026
4027 flash:
4028                 p = fw->data + filehdr_size + img_offset + img_hdrs_size;
4029                 if (p + img_size > fw->data + fw->size)
4030                         return -1;
4031
4032                 status = be_flash(adapter, p, flash_cmd, img_optype, img_size);
4033                 /* For old FW images ignore ILLEGAL_FIELD error or errors on
4034                  * UFI_DIR region
4035                  */
4036                 if (old_fw_img &&
4037                     (base_status(status) == MCC_STATUS_ILLEGAL_FIELD ||
4038                      (img_optype == OPTYPE_UFI_DIR &&
4039                       base_status(status) == MCC_STATUS_FAILED))) {
4040                         continue;
4041                 } else if (status) {
4042                         dev_err(dev, "Flashing section type 0x%x failed\n",
4043                                 img_type);
4044                         return -EFAULT;
4045                 }
4046         }
4047         return 0;
4048 }
4049
4050 static int lancer_fw_download(struct be_adapter *adapter,
4051                               const struct firmware *fw)
4052 {
4053 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
4054 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
4055         struct be_dma_mem flash_cmd;
4056         const u8 *data_ptr = NULL;
4057         u8 *dest_image_ptr = NULL;
4058         size_t image_size = 0;
4059         u32 chunk_size = 0;
4060         u32 data_written = 0;
4061         u32 offset = 0;
4062         int status = 0;
4063         u8 add_status = 0;
4064         u8 change_status;
4065
4066         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
4067                 dev_err(&adapter->pdev->dev,
4068                         "FW Image not properly aligned. "
4069                         "Length must be 4 byte aligned.\n");
4070                 status = -EINVAL;
4071                 goto lancer_fw_exit;
4072         }
4073
4074         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
4075                                 + LANCER_FW_DOWNLOAD_CHUNK;
4076         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4077                                           &flash_cmd.dma, GFP_KERNEL);
4078         if (!flash_cmd.va) {
4079                 status = -ENOMEM;
4080                 goto lancer_fw_exit;
4081         }
4082
4083         dest_image_ptr = flash_cmd.va +
4084                                 sizeof(struct lancer_cmd_req_write_object);
4085         image_size = fw->size;
4086         data_ptr = fw->data;
4087
4088         while (image_size) {
4089                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
4090
4091                 /* Copy the image chunk content. */
4092                 memcpy(dest_image_ptr, data_ptr, chunk_size);
4093
4094                 status = lancer_cmd_write_object(adapter, &flash_cmd,
4095                                                  chunk_size, offset,
4096                                                  LANCER_FW_DOWNLOAD_LOCATION,
4097                                                  &data_written, &change_status,
4098                                                  &add_status);
4099                 if (status)
4100                         break;
4101
4102                 offset += data_written;
4103                 data_ptr += data_written;
4104                 image_size -= data_written;
4105         }
4106
4107         if (!status) {
4108                 /* Commit the FW written */
4109                 status = lancer_cmd_write_object(adapter, &flash_cmd,
4110                                                  0, offset,
4111                                                  LANCER_FW_DOWNLOAD_LOCATION,
4112                                                  &data_written, &change_status,
4113                                                  &add_status);
4114         }
4115
4116         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4117                           flash_cmd.dma);
4118         if (status) {
4119                 dev_err(&adapter->pdev->dev,
4120                         "Firmware load error. "
4121                         "Status code: 0x%x Additional Status: 0x%x\n",
4122                         status, add_status);
4123                 goto lancer_fw_exit;
4124         }
4125
4126         if (change_status == LANCER_FW_RESET_NEEDED) {
4127                 dev_info(&adapter->pdev->dev,
4128                          "Resetting adapter to activate new FW\n");
4129                 status = lancer_physdev_ctrl(adapter,
4130                                              PHYSDEV_CONTROL_FW_RESET_MASK);
4131                 if (status) {
4132                         dev_err(&adapter->pdev->dev,
4133                                 "Adapter busy for FW reset.\n"
4134                                 "New FW will not be active.\n");
4135                         goto lancer_fw_exit;
4136                 }
4137         } else if (change_status != LANCER_NO_RESET_NEEDED) {
4138                 dev_err(&adapter->pdev->dev,
4139                         "System reboot required for new FW to be active\n");
4140         }
4141
4142         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4143 lancer_fw_exit:
4144         return status;
4145 }
4146
4147 #define UFI_TYPE2               2
4148 #define UFI_TYPE3               3
4149 #define UFI_TYPE3R              10
4150 #define UFI_TYPE4               4
4151 static int be_get_ufi_type(struct be_adapter *adapter,
4152                            struct flash_file_hdr_g3 *fhdr)
4153 {
4154         if (!fhdr)
4155                 goto be_get_ufi_exit;
4156
4157         if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
4158                 return UFI_TYPE4;
4159         else if (BE3_chip(adapter) && fhdr->build[0] == '3') {
4160                 if (fhdr->asic_type_rev == 0x10)
4161                         return UFI_TYPE3R;
4162                 else
4163                         return UFI_TYPE3;
4164         } else if (BE2_chip(adapter) && fhdr->build[0] == '2')
4165                 return UFI_TYPE2;
4166
4167 be_get_ufi_exit:
4168         dev_err(&adapter->pdev->dev,
4169                 "UFI and Interface are not compatible for flashing\n");
4170         return -1;
4171 }
4172
4173 static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
4174 {
4175         struct flash_file_hdr_g3 *fhdr3;
4176         struct image_hdr *img_hdr_ptr = NULL;
4177         struct be_dma_mem flash_cmd;
4178         const u8 *p;
4179         int status = 0, i = 0, num_imgs = 0, ufi_type = 0;
4180
4181         flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
4182         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
4183                                           &flash_cmd.dma, GFP_KERNEL);
4184         if (!flash_cmd.va) {
4185                 status = -ENOMEM;
4186                 goto be_fw_exit;
4187         }
4188
4189         p = fw->data;
4190         fhdr3 = (struct flash_file_hdr_g3 *)p;
4191
4192         ufi_type = be_get_ufi_type(adapter, fhdr3);
4193
4194         num_imgs = le32_to_cpu(fhdr3->num_imgs);
4195         for (i = 0; i < num_imgs; i++) {
4196                 img_hdr_ptr = (struct image_hdr *)(fw->data +
4197                                 (sizeof(struct flash_file_hdr_g3) +
4198                                  i * sizeof(struct image_hdr)));
4199                 if (le32_to_cpu(img_hdr_ptr->imageid) == 1) {
4200                         switch (ufi_type) {
4201                         case UFI_TYPE4:
4202                                 status = be_flash_skyhawk(adapter, fw,
4203                                                           &flash_cmd, num_imgs);
4204                                 break;
4205                         case UFI_TYPE3R:
4206                                 status = be_flash_BEx(adapter, fw, &flash_cmd,
4207                                                       num_imgs);
4208                                 break;
4209                         case UFI_TYPE3:
4210                                 /* Do not flash this ufi on BE3-R cards */
4211                                 if (adapter->asic_rev < 0x10)
4212                                         status = be_flash_BEx(adapter, fw,
4213                                                               &flash_cmd,
4214                                                               num_imgs);
4215                                 else {
4216                                         status = -EINVAL;
4217                                         dev_err(&adapter->pdev->dev,
4218                                                 "Can't load BE3 UFI on BE3R\n");
4219                                 }
4220                         }
4221                 }
4222         }
4223
4224         if (ufi_type == UFI_TYPE2)
4225                 status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
4226         else if (ufi_type == -1)
4227                 status = -EINVAL;
4228
4229         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
4230                           flash_cmd.dma);
4231         if (status) {
4232                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
4233                 goto be_fw_exit;
4234         }
4235
4236         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
4237
4238 be_fw_exit:
4239         return status;
4240 }
4241
4242 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
4243 {
4244         const struct firmware *fw;
4245         int status;
4246
4247         if (!netif_running(adapter->netdev)) {
4248                 dev_err(&adapter->pdev->dev,
4249                         "Firmware load not allowed (interface is down)\n");
4250                 return -ENETDOWN;
4251         }
4252
4253         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
4254         if (status)
4255                 goto fw_exit;
4256
4257         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
4258
4259         if (lancer_chip(adapter))
4260                 status = lancer_fw_download(adapter, fw);
4261         else
4262                 status = be_fw_download(adapter, fw);
4263
4264         if (!status)
4265                 be_cmd_get_fw_ver(adapter);
4266
4267 fw_exit:
4268         release_firmware(fw);
4269         return status;
4270 }
4271
4272 static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4273 {
4274         struct be_adapter *adapter = netdev_priv(dev);
4275         struct nlattr *attr, *br_spec;
4276         int rem;
4277         int status = 0;
4278         u16 mode = 0;
4279
4280         if (!sriov_enabled(adapter))
4281                 return -EOPNOTSUPP;
4282
4283         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4284
4285         nla_for_each_nested(attr, br_spec, rem) {
4286                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4287                         continue;
4288
4289                 mode = nla_get_u16(attr);
4290                 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4291                         return -EINVAL;
4292
4293                 status = be_cmd_set_hsw_config(adapter, 0, 0,
4294                                                adapter->if_handle,
4295                                                mode == BRIDGE_MODE_VEPA ?
4296                                                PORT_FWD_TYPE_VEPA :
4297                                                PORT_FWD_TYPE_VEB);
4298                 if (status)
4299                         goto err;
4300
4301                 dev_info(&adapter->pdev->dev, "enabled switch mode: %s\n",
4302                          mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4303
4304                 return status;
4305         }
4306 err:
4307         dev_err(&adapter->pdev->dev, "Failed to set switch mode %s\n",
4308                 mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
4309
4310         return status;
4311 }
4312
4313 static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
4314                                  struct net_device *dev, u32 filter_mask)
4315 {
4316         struct be_adapter *adapter = netdev_priv(dev);
4317         int status = 0;
4318         u8 hsw_mode;
4319
4320         if (!sriov_enabled(adapter))
4321                 return 0;
4322
4323         /* BE and Lancer chips support VEB mode only */
4324         if (BEx_chip(adapter) || lancer_chip(adapter)) {
4325                 hsw_mode = PORT_FWD_TYPE_VEB;
4326         } else {
4327                 status = be_cmd_get_hsw_config(adapter, NULL, 0,
4328                                                adapter->if_handle, &hsw_mode);
4329                 if (status)
4330                         return 0;
4331         }
4332
4333         return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
4334                                        hsw_mode == PORT_FWD_TYPE_VEPA ?
4335                                        BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB);
4336 }
4337
4338 #ifdef CONFIG_BE2NET_VXLAN
4339 static void be_add_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4340                               __be16 port)
4341 {
4342         struct be_adapter *adapter = netdev_priv(netdev);
4343         struct device *dev = &adapter->pdev->dev;
4344         int status;
4345
4346         if (lancer_chip(adapter) || BEx_chip(adapter))
4347                 return;
4348
4349         if (adapter->flags & BE_FLAGS_VXLAN_OFFLOADS) {
4350                 dev_warn(dev, "Cannot add UDP port %d for VxLAN offloads\n",
4351                          be16_to_cpu(port));
4352                 dev_info(dev,
4353                          "Only one UDP port supported for VxLAN offloads\n");
4354                 return;
4355         }
4356
4357         status = be_cmd_manage_iface(adapter, adapter->if_handle,
4358                                      OP_CONVERT_NORMAL_TO_TUNNEL);
4359         if (status) {
4360                 dev_warn(dev, "Failed to convert normal interface to tunnel\n");
4361                 goto err;
4362         }
4363
4364         status = be_cmd_set_vxlan_port(adapter, port);
4365         if (status) {
4366                 dev_warn(dev, "Failed to add VxLAN port\n");
4367                 goto err;
4368         }
4369         adapter->flags |= BE_FLAGS_VXLAN_OFFLOADS;
4370         adapter->vxlan_port = port;
4371
4372         dev_info(dev, "Enabled VxLAN offloads for UDP port %d\n",
4373                  be16_to_cpu(port));
4374         return;
4375 err:
4376         be_disable_vxlan_offloads(adapter);
4377         return;
4378 }
4379
4380 static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4381                               __be16 port)
4382 {
4383         struct be_adapter *adapter = netdev_priv(netdev);
4384
4385         if (lancer_chip(adapter) || BEx_chip(adapter))
4386                 return;
4387
4388         if (adapter->vxlan_port != port)
4389                 return;
4390
4391         be_disable_vxlan_offloads(adapter);
4392
4393         dev_info(&adapter->pdev->dev,
4394                  "Disabled VxLAN offloads for UDP port %d\n",
4395                  be16_to_cpu(port));
4396 }
4397 #endif
4398
4399 static const struct net_device_ops be_netdev_ops = {
4400         .ndo_open               = be_open,
4401         .ndo_stop               = be_close,
4402         .ndo_start_xmit         = be_xmit,
4403         .ndo_set_rx_mode        = be_set_rx_mode,
4404         .ndo_set_mac_address    = be_mac_addr_set,
4405         .ndo_change_mtu         = be_change_mtu,
4406         .ndo_get_stats64        = be_get_stats64,
4407         .ndo_validate_addr      = eth_validate_addr,
4408         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
4409         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
4410         .ndo_set_vf_mac         = be_set_vf_mac,
4411         .ndo_set_vf_vlan        = be_set_vf_vlan,
4412         .ndo_set_vf_rate        = be_set_vf_tx_rate,
4413         .ndo_get_vf_config      = be_get_vf_config,
4414         .ndo_set_vf_link_state  = be_set_vf_link_state,
4415 #ifdef CONFIG_NET_POLL_CONTROLLER
4416         .ndo_poll_controller    = be_netpoll,
4417 #endif
4418         .ndo_bridge_setlink     = be_ndo_bridge_setlink,
4419         .ndo_bridge_getlink     = be_ndo_bridge_getlink,
4420 #ifdef CONFIG_NET_RX_BUSY_POLL
4421         .ndo_busy_poll          = be_busy_poll,
4422 #endif
4423 #ifdef CONFIG_BE2NET_VXLAN
4424         .ndo_add_vxlan_port     = be_add_vxlan_port,
4425         .ndo_del_vxlan_port     = be_del_vxlan_port,
4426 #endif
4427 };
4428
4429 static void be_netdev_init(struct net_device *netdev)
4430 {
4431         struct be_adapter *adapter = netdev_priv(netdev);
4432
4433         if (skyhawk_chip(adapter)) {
4434                 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
4435                                            NETIF_F_TSO | NETIF_F_TSO6 |
4436                                            NETIF_F_GSO_UDP_TUNNEL;
4437                 netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
4438         }
4439         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4440                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
4441                 NETIF_F_HW_VLAN_CTAG_TX;
4442         if (be_multi_rxq(adapter))
4443                 netdev->hw_features |= NETIF_F_RXHASH;
4444
4445         netdev->features |= netdev->hw_features |
4446                 NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
4447
4448         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
4449                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
4450
4451         netdev->priv_flags |= IFF_UNICAST_FLT;
4452
4453         netdev->flags |= IFF_MULTICAST;
4454
4455         netif_set_gso_max_size(netdev, 65535 - ETH_HLEN);
4456
4457         netdev->netdev_ops = &be_netdev_ops;
4458
4459         netdev->ethtool_ops = &be_ethtool_ops;
4460 }
4461
4462 static void be_unmap_pci_bars(struct be_adapter *adapter)
4463 {
4464         if (adapter->csr)
4465                 pci_iounmap(adapter->pdev, adapter->csr);
4466         if (adapter->db)
4467                 pci_iounmap(adapter->pdev, adapter->db);
4468 }
4469
4470 static int db_bar(struct be_adapter *adapter)
4471 {
4472         if (lancer_chip(adapter) || !be_physfn(adapter))
4473                 return 0;
4474         else
4475                 return 4;
4476 }
4477
4478 static int be_roce_map_pci_bars(struct be_adapter *adapter)
4479 {
4480         if (skyhawk_chip(adapter)) {
4481                 adapter->roce_db.size = 4096;
4482                 adapter->roce_db.io_addr = pci_resource_start(adapter->pdev,
4483                                                               db_bar(adapter));
4484                 adapter->roce_db.total_size = pci_resource_len(adapter->pdev,
4485                                                                db_bar(adapter));
4486         }
4487         return 0;
4488 }
4489
4490 static int be_map_pci_bars(struct be_adapter *adapter)
4491 {
4492         u8 __iomem *addr;
4493
4494         if (BEx_chip(adapter) && be_physfn(adapter)) {
4495                 adapter->csr = pci_iomap(adapter->pdev, 2, 0);
4496                 if (!adapter->csr)
4497                         return -ENOMEM;
4498         }
4499
4500         addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
4501         if (!addr)
4502                 goto pci_map_err;
4503         adapter->db = addr;
4504
4505         be_roce_map_pci_bars(adapter);
4506         return 0;
4507
4508 pci_map_err:
4509         be_unmap_pci_bars(adapter);
4510         return -ENOMEM;
4511 }
4512
4513 static void be_ctrl_cleanup(struct be_adapter *adapter)
4514 {
4515         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
4516
4517         be_unmap_pci_bars(adapter);
4518
4519         if (mem->va)
4520                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4521                                   mem->dma);
4522
4523         mem = &adapter->rx_filter;
4524         if (mem->va)
4525                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
4526                                   mem->dma);
4527 }
4528
4529 static int be_ctrl_init(struct be_adapter *adapter)
4530 {
4531         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
4532         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
4533         struct be_dma_mem *rx_filter = &adapter->rx_filter;
4534         u32 sli_intf;
4535         int status;
4536
4537         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
4538         adapter->sli_family = (sli_intf & SLI_INTF_FAMILY_MASK) >>
4539                                  SLI_INTF_FAMILY_SHIFT;
4540         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
4541
4542         status = be_map_pci_bars(adapter);
4543         if (status)
4544                 goto done;
4545
4546         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
4547         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
4548                                                 mbox_mem_alloc->size,
4549                                                 &mbox_mem_alloc->dma,
4550                                                 GFP_KERNEL);
4551         if (!mbox_mem_alloc->va) {
4552                 status = -ENOMEM;
4553                 goto unmap_pci_bars;
4554         }
4555         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
4556         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
4557         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
4558         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
4559
4560         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
4561         rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
4562                                             rx_filter->size, &rx_filter->dma,
4563                                             GFP_KERNEL);
4564         if (!rx_filter->va) {
4565                 status = -ENOMEM;
4566                 goto free_mbox;
4567         }
4568
4569         mutex_init(&adapter->mbox_lock);
4570         spin_lock_init(&adapter->mcc_lock);
4571         spin_lock_init(&adapter->mcc_cq_lock);
4572
4573         init_completion(&adapter->et_cmd_compl);
4574         pci_save_state(adapter->pdev);
4575         return 0;
4576
4577 free_mbox:
4578         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
4579                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
4580
4581 unmap_pci_bars:
4582         be_unmap_pci_bars(adapter);
4583
4584 done:
4585         return status;
4586 }
4587
4588 static void be_stats_cleanup(struct be_adapter *adapter)
4589 {
4590         struct be_dma_mem *cmd = &adapter->stats_cmd;
4591
4592         if (cmd->va)
4593                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
4594                                   cmd->va, cmd->dma);
4595 }
4596
4597 static int be_stats_init(struct be_adapter *adapter)
4598 {
4599         struct be_dma_mem *cmd = &adapter->stats_cmd;
4600
4601         if (lancer_chip(adapter))
4602                 cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
4603         else if (BE2_chip(adapter))
4604                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
4605         else if (BE3_chip(adapter))
4606                 cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
4607         else
4608                 /* ALL non-BE ASICs */
4609                 cmd->size = sizeof(struct be_cmd_req_get_stats_v2);
4610
4611         cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
4612                                       GFP_KERNEL);
4613         if (!cmd->va)
4614                 return -ENOMEM;
4615         return 0;
4616 }
4617
4618 static void be_remove(struct pci_dev *pdev)
4619 {
4620         struct be_adapter *adapter = pci_get_drvdata(pdev);
4621
4622         if (!adapter)
4623                 return;
4624
4625         be_roce_dev_remove(adapter);
4626         be_intr_set(adapter, false);
4627
4628         cancel_delayed_work_sync(&adapter->func_recovery_work);
4629
4630         unregister_netdev(adapter->netdev);
4631
4632         be_clear(adapter);
4633
4634         /* tell fw we're done with firing cmds */
4635         be_cmd_fw_clean(adapter);
4636
4637         be_stats_cleanup(adapter);
4638
4639         be_ctrl_cleanup(adapter);
4640
4641         pci_disable_pcie_error_reporting(pdev);
4642
4643         pci_release_regions(pdev);
4644         pci_disable_device(pdev);
4645
4646         free_netdev(adapter->netdev);
4647 }
4648
4649 static int be_get_initial_config(struct be_adapter *adapter)
4650 {
4651         int status, level;
4652
4653         status = be_cmd_get_cntl_attributes(adapter);
4654         if (status)
4655                 return status;
4656
4657         /* Must be a power of 2 or else MODULO will BUG_ON */
4658         adapter->be_get_temp_freq = 64;
4659
4660         if (BEx_chip(adapter)) {
4661                 level = be_cmd_get_fw_log_level(adapter);
4662                 adapter->msg_enable =
4663                         level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
4664         }
4665
4666         adapter->cfg_num_qs = netif_get_num_default_rss_queues();
4667         return 0;
4668 }
4669
4670 static int lancer_recover_func(struct be_adapter *adapter)
4671 {
4672         struct device *dev = &adapter->pdev->dev;
4673         int status;
4674
4675         status = lancer_test_and_set_rdy_state(adapter);
4676         if (status)
4677                 goto err;
4678
4679         if (netif_running(adapter->netdev))
4680                 be_close(adapter->netdev);
4681
4682         be_clear(adapter);
4683
4684         be_clear_all_error(adapter);
4685
4686         status = be_setup(adapter);
4687         if (status)
4688                 goto err;
4689
4690         if (netif_running(adapter->netdev)) {
4691                 status = be_open(adapter->netdev);
4692                 if (status)
4693                         goto err;
4694         }
4695
4696         dev_err(dev, "Adapter recovery successful\n");
4697         return 0;
4698 err:
4699         if (status == -EAGAIN)
4700                 dev_err(dev, "Waiting for resource provisioning\n");
4701         else
4702                 dev_err(dev, "Adapter recovery failed\n");
4703
4704         return status;
4705 }
4706
4707 static void be_func_recovery_task(struct work_struct *work)
4708 {
4709         struct be_adapter *adapter =
4710                 container_of(work, struct be_adapter,  func_recovery_work.work);
4711         int status = 0;
4712
4713         be_detect_error(adapter);
4714
4715         if (adapter->hw_error && lancer_chip(adapter)) {
4716
4717                 rtnl_lock();
4718                 netif_device_detach(adapter->netdev);
4719                 rtnl_unlock();
4720
4721                 status = lancer_recover_func(adapter);
4722                 if (!status)
4723                         netif_device_attach(adapter->netdev);
4724         }
4725
4726         /* In Lancer, for all errors other than provisioning error (-EAGAIN),
4727          * no need to attempt further recovery.
4728          */
4729         if (!status || status == -EAGAIN)
4730                 schedule_delayed_work(&adapter->func_recovery_work,
4731                                       msecs_to_jiffies(1000));
4732 }
4733
4734 static void be_worker(struct work_struct *work)
4735 {
4736         struct be_adapter *adapter =
4737                 container_of(work, struct be_adapter, work.work);
4738         struct be_rx_obj *rxo;
4739         int i;
4740
4741         /* when interrupts are not yet enabled, just reap any pending
4742         * mcc completions */
4743         if (!netif_running(adapter->netdev)) {
4744                 local_bh_disable();
4745                 be_process_mcc(adapter);
4746                 local_bh_enable();
4747                 goto reschedule;
4748         }
4749
4750         if (!adapter->stats_cmd_sent) {
4751                 if (lancer_chip(adapter))
4752                         lancer_cmd_get_pport_stats(adapter,
4753                                                 &adapter->stats_cmd);
4754                 else
4755                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
4756         }
4757
4758         if (be_physfn(adapter) &&
4759             MODULO(adapter->work_counter, adapter->be_get_temp_freq) == 0)
4760                 be_cmd_get_die_temperature(adapter);
4761
4762         for_all_rx_queues(adapter, rxo, i) {
4763                 /* Replenish RX-queues starved due to memory
4764                  * allocation failures.
4765                  */
4766                 if (rxo->rx_post_starved)
4767                         be_post_rx_frags(rxo, GFP_KERNEL);
4768         }
4769
4770         be_eqd_update(adapter);
4771
4772 reschedule:
4773         adapter->work_counter++;
4774         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
4775 }
4776
4777 /* If any VFs are already enabled don't FLR the PF */
4778 static bool be_reset_required(struct be_adapter *adapter)
4779 {
4780         return pci_num_vf(adapter->pdev) ? false : true;
4781 }
4782
4783 static char *mc_name(struct be_adapter *adapter)
4784 {
4785         char *str = ""; /* default */
4786
4787         switch (adapter->mc_type) {
4788         case UMC:
4789                 str = "UMC";
4790                 break;
4791         case FLEX10:
4792                 str = "FLEX10";
4793                 break;
4794         case vNIC1:
4795                 str = "vNIC-1";
4796                 break;
4797         case nPAR:
4798                 str = "nPAR";
4799                 break;
4800         case UFP:
4801                 str = "UFP";
4802                 break;
4803         case vNIC2:
4804                 str = "vNIC-2";
4805                 break;
4806         default:
4807                 str = "";
4808         }
4809
4810         return str;
4811 }
4812
4813 static inline char *func_name(struct be_adapter *adapter)
4814 {
4815         return be_physfn(adapter) ? "PF" : "VF";
4816 }
4817
4818 static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
4819 {
4820         int status = 0;
4821         struct be_adapter *adapter;
4822         struct net_device *netdev;
4823         char port_name;
4824
4825         status = pci_enable_device(pdev);
4826         if (status)
4827                 goto do_none;
4828
4829         status = pci_request_regions(pdev, DRV_NAME);
4830         if (status)
4831                 goto disable_dev;
4832         pci_set_master(pdev);
4833
4834         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
4835         if (!netdev) {
4836                 status = -ENOMEM;
4837                 goto rel_reg;
4838         }
4839         adapter = netdev_priv(netdev);
4840         adapter->pdev = pdev;
4841         pci_set_drvdata(pdev, adapter);
4842         adapter->netdev = netdev;
4843         SET_NETDEV_DEV(netdev, &pdev->dev);
4844
4845         status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
4846         if (!status) {
4847                 netdev->features |= NETIF_F_HIGHDMA;
4848         } else {
4849                 status = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
4850                 if (status) {
4851                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
4852                         goto free_netdev;
4853                 }
4854         }
4855
4856         if (be_physfn(adapter)) {
4857                 status = pci_enable_pcie_error_reporting(pdev);
4858                 if (!status)
4859                         dev_info(&pdev->dev, "PCIe error reporting enabled\n");
4860         }
4861
4862         status = be_ctrl_init(adapter);
4863         if (status)
4864                 goto free_netdev;
4865
4866         /* sync up with fw's ready state */
4867         if (be_physfn(adapter)) {
4868                 status = be_fw_wait_ready(adapter);
4869                 if (status)
4870                         goto ctrl_clean;
4871         }
4872
4873         if (be_reset_required(adapter)) {
4874                 status = be_cmd_reset_function(adapter);
4875                 if (status)
4876                         goto ctrl_clean;
4877
4878                 /* Wait for interrupts to quiesce after an FLR */
4879                 msleep(100);
4880         }
4881
4882         /* Allow interrupts for other ULPs running on NIC function */
4883         be_intr_set(adapter, true);
4884
4885         /* tell fw we're ready to fire cmds */
4886         status = be_cmd_fw_init(adapter);
4887         if (status)
4888                 goto ctrl_clean;
4889
4890         status = be_stats_init(adapter);
4891         if (status)
4892                 goto ctrl_clean;
4893
4894         status = be_get_initial_config(adapter);
4895         if (status)
4896                 goto stats_clean;
4897
4898         INIT_DELAYED_WORK(&adapter->work, be_worker);
4899         INIT_DELAYED_WORK(&adapter->func_recovery_work, be_func_recovery_task);
4900         adapter->rx_fc = adapter->tx_fc = true;
4901
4902         status = be_setup(adapter);
4903         if (status)
4904                 goto stats_clean;
4905
4906         be_netdev_init(netdev);
4907         status = register_netdev(netdev);
4908         if (status != 0)
4909                 goto unsetup;
4910
4911         be_roce_dev_add(adapter);
4912
4913         schedule_delayed_work(&adapter->func_recovery_work,
4914                               msecs_to_jiffies(1000));
4915
4916         be_cmd_query_port_name(adapter, &port_name);
4917
4918         dev_info(&pdev->dev, "%s: %s %s port %c\n", nic_name(pdev),
4919                  func_name(adapter), mc_name(adapter), port_name);
4920
4921         return 0;
4922
4923 unsetup:
4924         be_clear(adapter);
4925 stats_clean:
4926         be_stats_cleanup(adapter);
4927 ctrl_clean:
4928         be_ctrl_cleanup(adapter);
4929 free_netdev:
4930         free_netdev(netdev);
4931 rel_reg:
4932         pci_release_regions(pdev);
4933 disable_dev:
4934         pci_disable_device(pdev);
4935 do_none:
4936         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
4937         return status;
4938 }
4939
4940 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
4941 {
4942         struct be_adapter *adapter = pci_get_drvdata(pdev);
4943         struct net_device *netdev =  adapter->netdev;
4944
4945         if (adapter->wol_en)
4946                 be_setup_wol(adapter, true);
4947
4948         be_intr_set(adapter, false);
4949         cancel_delayed_work_sync(&adapter->func_recovery_work);
4950
4951         netif_device_detach(netdev);
4952         if (netif_running(netdev)) {
4953                 rtnl_lock();
4954                 be_close(netdev);
4955                 rtnl_unlock();
4956         }
4957         be_clear(adapter);
4958
4959         pci_save_state(pdev);
4960         pci_disable_device(pdev);
4961         pci_set_power_state(pdev, pci_choose_state(pdev, state));
4962         return 0;
4963 }
4964
4965 static int be_resume(struct pci_dev *pdev)
4966 {
4967         int status = 0;
4968         struct be_adapter *adapter = pci_get_drvdata(pdev);
4969         struct net_device *netdev =  adapter->netdev;
4970
4971         netif_device_detach(netdev);
4972
4973         status = pci_enable_device(pdev);
4974         if (status)
4975                 return status;
4976
4977         pci_set_power_state(pdev, PCI_D0);
4978         pci_restore_state(pdev);
4979
4980         status = be_fw_wait_ready(adapter);
4981         if (status)
4982                 return status;
4983
4984         be_intr_set(adapter, true);
4985         /* tell fw we're ready to fire cmds */
4986         status = be_cmd_fw_init(adapter);
4987         if (status)
4988                 return status;
4989
4990         be_setup(adapter);
4991         if (netif_running(netdev)) {
4992                 rtnl_lock();
4993                 be_open(netdev);
4994                 rtnl_unlock();
4995         }
4996
4997         schedule_delayed_work(&adapter->func_recovery_work,
4998                               msecs_to_jiffies(1000));
4999         netif_device_attach(netdev);
5000
5001         if (adapter->wol_en)
5002                 be_setup_wol(adapter, false);
5003
5004         return 0;
5005 }
5006
5007 /*
5008  * An FLR will stop BE from DMAing any data.
5009  */
5010 static void be_shutdown(struct pci_dev *pdev)
5011 {
5012         struct be_adapter *adapter = pci_get_drvdata(pdev);
5013
5014         if (!adapter)
5015                 return;
5016
5017         cancel_delayed_work_sync(&adapter->work);
5018         cancel_delayed_work_sync(&adapter->func_recovery_work);
5019
5020         netif_device_detach(adapter->netdev);
5021
5022         be_cmd_reset_function(adapter);
5023
5024         pci_disable_device(pdev);
5025 }
5026
5027 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
5028                                             pci_channel_state_t state)
5029 {
5030         struct be_adapter *adapter = pci_get_drvdata(pdev);
5031         struct net_device *netdev =  adapter->netdev;
5032
5033         dev_err(&adapter->pdev->dev, "EEH error detected\n");
5034
5035         if (!adapter->eeh_error) {
5036                 adapter->eeh_error = true;
5037
5038                 cancel_delayed_work_sync(&adapter->func_recovery_work);
5039
5040                 rtnl_lock();
5041                 netif_device_detach(netdev);
5042                 if (netif_running(netdev))
5043                         be_close(netdev);
5044                 rtnl_unlock();
5045
5046                 be_clear(adapter);
5047         }
5048
5049         if (state == pci_channel_io_perm_failure)
5050                 return PCI_ERS_RESULT_DISCONNECT;
5051
5052         pci_disable_device(pdev);
5053
5054         /* The error could cause the FW to trigger a flash debug dump.
5055          * Resetting the card while flash dump is in progress
5056          * can cause it not to recover; wait for it to finish.
5057          * Wait only for first function as it is needed only once per
5058          * adapter.
5059          */
5060         if (pdev->devfn == 0)
5061                 ssleep(30);
5062
5063         return PCI_ERS_RESULT_NEED_RESET;
5064 }
5065
5066 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
5067 {
5068         struct be_adapter *adapter = pci_get_drvdata(pdev);
5069         int status;
5070
5071         dev_info(&adapter->pdev->dev, "EEH reset\n");
5072
5073         status = pci_enable_device(pdev);
5074         if (status)
5075                 return PCI_ERS_RESULT_DISCONNECT;
5076
5077         pci_set_master(pdev);
5078         pci_set_power_state(pdev, PCI_D0);
5079         pci_restore_state(pdev);
5080
5081         /* Check if card is ok and fw is ready */
5082         dev_info(&adapter->pdev->dev,
5083                  "Waiting for FW to be ready after EEH reset\n");
5084         status = be_fw_wait_ready(adapter);
5085         if (status)
5086                 return PCI_ERS_RESULT_DISCONNECT;
5087
5088         pci_cleanup_aer_uncorrect_error_status(pdev);
5089         be_clear_all_error(adapter);
5090         return PCI_ERS_RESULT_RECOVERED;
5091 }
5092
5093 static void be_eeh_resume(struct pci_dev *pdev)
5094 {
5095         int status = 0;
5096         struct be_adapter *adapter = pci_get_drvdata(pdev);
5097         struct net_device *netdev =  adapter->netdev;
5098
5099         dev_info(&adapter->pdev->dev, "EEH resume\n");
5100
5101         pci_save_state(pdev);
5102
5103         status = be_cmd_reset_function(adapter);
5104         if (status)
5105                 goto err;
5106
5107         /* On some BE3 FW versions, after a HW reset,
5108          * interrupts will remain disabled for each function.
5109          * So, explicitly enable interrupts
5110          */
5111         be_intr_set(adapter, true);
5112
5113         /* tell fw we're ready to fire cmds */
5114         status = be_cmd_fw_init(adapter);
5115         if (status)
5116                 goto err;
5117
5118         status = be_setup(adapter);
5119         if (status)
5120                 goto err;
5121
5122         if (netif_running(netdev)) {
5123                 status = be_open(netdev);
5124                 if (status)
5125                         goto err;
5126         }
5127
5128         schedule_delayed_work(&adapter->func_recovery_work,
5129                               msecs_to_jiffies(1000));
5130         netif_device_attach(netdev);
5131         return;
5132 err:
5133         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
5134 }
5135
5136 static const struct pci_error_handlers be_eeh_handlers = {
5137         .error_detected = be_eeh_err_detected,
5138         .slot_reset = be_eeh_reset,
5139         .resume = be_eeh_resume,
5140 };
5141
5142 static struct pci_driver be_driver = {
5143         .name = DRV_NAME,
5144         .id_table = be_dev_ids,
5145         .probe = be_probe,
5146         .remove = be_remove,
5147         .suspend = be_suspend,
5148         .resume = be_resume,
5149         .shutdown = be_shutdown,
5150         .err_handler = &be_eeh_handlers
5151 };
5152
5153 static int __init be_init_module(void)
5154 {
5155         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
5156             rx_frag_size != 2048) {
5157                 printk(KERN_WARNING DRV_NAME
5158                         " : Module param rx_frag_size must be 2048/4096/8192."
5159                         " Using 2048\n");
5160                 rx_frag_size = 2048;
5161         }
5162
5163         return pci_register_driver(&be_driver);
5164 }
5165 module_init(be_init_module);
5166
5167 static void __exit be_exit_module(void)
5168 {
5169         pci_unregister_driver(&be_driver);
5170 }
5171 module_exit(be_exit_module);