/* drivers/net/ethernet/emulex/benet/be_main.c */
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static unsigned int num_vfs;
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static ushort rx_frag_size = 2048;
module_param(rx_frag_size, ushort, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);

/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};

/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va) {
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
                mem->va = NULL;
        }
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

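/* Doorbell notify helpers: each encodes the ring id and the count of entries
 * posted (RQ/TXQ) or popped (EQ/CQ) into a single doorbell register write.
 * The wmb() in the RQ/TXQ variants makes the posted descriptors visible to
 * the HW before the doorbell is rung.
 */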
static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

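/* ndo_set_mac_address handler. The new MAC is programmed via pmac_add
 * *before* the old pmac entry is deleted, so the interface is never left
 * without a valid filter if the add fails.
 */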
static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id[0];

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac,
                                MAC_ADDRESS_TYPE_NETWORK, false,
                                adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id[0], 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_mismatch_drops =
                                        port_stats->rx_address_mismatch_drops +
                                        port_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_mismatch_drops = port_stats->rx_address_mismatch_drops;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_mismatch_drops =
                                        pport_stats->rx_address_mismatch_drops +
                                        pport_stats->rx_vlan_mismatch_drops;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

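/* Accumulate a 16-bit HW counter (which wraps at 65535) into a 32-bit SW
 * counter: if the new sample is smaller than the low word of the accumulator,
 * the HW counter has wrapped, so add one full 16-bit period.
 */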
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        if (lancer_chip(adapter))
                goto done;

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* below erx HW counter can actually wrap around after
                 * 65535. Driver accumulates a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
done:
        return;
}

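/* ndo_get_stats64 handler: aggregates the per-queue SW counters (using the
 * u64_stats sync to get consistent 64-bit snapshots on 32-bit hosts) and
 * folds in the HW error counters parsed into adapter->drv_stats.
 */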
static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f; it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u8 link_status)
{
        struct net_device *netdev = adapter->netdev;

        if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
                netif_carrier_off(netdev);
                adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
        }

        if ((link_status & LINK_STATUS_MASK) == LINK_UP)
                netif_carrier_on(netdev);
        else
                netif_carrier_off(netdev);
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}

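/* Fill a WRB with a fragment's DMA address (split into hi/lo 32-bit words)
 * and length.
 */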
static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}

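/* Build the header WRB that precedes the data WRBs of a TX request: sets the
 * LSO/checksum-offload bits, the vlan tag (if any), and the total WRB count
 * and byte length of the request.
 */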
static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

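/* ndo_start_xmit handler: maps the skb into WRBs on the selected TX queue,
 * stops the queue if it may not have room for a max-fragmented skb, and then
 * rings the TX doorbell.
 */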
static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        /* For vlan tagged pkts, BE
         * 1) calculates checksum even when CSO is not requested
         * 2) calculates checksum wrongly for padded pkt less than
         * 60 bytes long.
         * As a workaround disable TX vlan offloading in such cases.
         */
        if (unlikely(vlan_tx_tag_present(skb) &&
                     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto tx_drop;

                skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
                if (unlikely(!skb))
                        goto tx_drop;

                skb->vlan_tci = 0;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter)
{
        u16 vids[BE_NUM_VLANS_SUPPORTED];
        u16 num = 0, i;
        int status = 0;

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added > adapter->max_vlans)
                goto set_vlan_promisc;

        /* Construct VLAN Table to give to HW */
        for (i = 0; i < VLAN_N_VID; i++)
                if (adapter->vlan_tag[i])
                        vids[num++] = cpu_to_le16(i);

        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    vids, num, 1, 0);

        /* Set to VLAN promisc mode as setting VLAN filter failed */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted VLAN HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW VLAN filtering.\n");
                goto set_vlan_promisc;
        }

        return status;

set_vlan_promisc:
        status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                    NULL, 0, 1, 1);
        return status;
}

static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added++;
        else
                adapter->vlan_tag[vid] = 0;
ret:
        return status;
}

static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!be_physfn(adapter)) {
                status = -EINVAL;
                goto ret;
        }

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                status = be_vid_config(adapter);

        if (!status)
                adapter->vlans_added--;
        else
                adapter->vlan_tag[vid] = 1;
ret:
        return status;
}

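/* ndo_set_rx_mode handler: programs promiscuous, all-multi, unicast-list and
 * multicast filters to match the netdev flags, falling back to a more
 * permissive mode when the HW filter tables are exhausted.
 */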
static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        if (netdev_uc_count(netdev) != adapter->uc_macs) {
                struct netdev_hw_addr *ha;
                int i = 1; /* First slot is claimed by the Primary MAC */

                for (; adapter->uc_macs > 0; adapter->uc_macs--, i++) {
                        be_cmd_pmac_del(adapter, adapter->if_handle,
                                        adapter->pmac_id[i], 0);
                }

                if (netdev_uc_count(netdev) > adapter->max_pmac_cnt) {
                        be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                        adapter->promiscuous = true;
                        goto done;
                }

                netdev_for_each_uc_addr(ha, adapter->netdev) {
                        adapter->uc_macs++; /* First slot is for Primary MAC */
                        be_cmd_pmac_add(adapter, (u8 *)ha->addr,
                                        adapter->if_handle,
                                        &adapter->pmac_id[adapter->uc_macs], 0);
                }
        }

        status = be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);

        /* Set to MCAST promisc mode if setting MULTICAST address fails */
        if (status) {
                dev_info(&adapter->pdev->dev, "Exhausted multicast HW filters.\n");
                dev_info(&adapter->pdev->dev, "Disabling HW multicast filtering.\n");
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
        }
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
        int status;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
                                         vf_cfg->pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
                                         &vf_cfg->pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = vf_cfg->tx_rate;
        vi->vlan = vf_cfg->vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs || vlan > 4095)
                return -EINVAL;

        if (vlan) {
                if (adapter->vf_cfg[vf].vlan_tag != vlan) {
                        /* If this is new value, program it. Else skip. */
                        adapter->vf_cfg[vf].vlan_tag = vlan;

                        status = be_cmd_set_hsw_config(adapter, vlan,
                                vf + 1, adapter->vf_cfg[vf].if_handle);
                }
        } else {
                /* Reset Transparent Vlan Tagging. */
                adapter->vf_cfg[vf].vlan_tag = 0;
                vlan = adapter->vf_cfg[vf].def_vid;
                status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
                        adapter->vf_cfg[vf].if_handle);
        }

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!sriov_enabled(adapter))
                return -EPERM;

        if (vf >= adapter->num_vfs)
                return -EINVAL;

        if (rate < 100 || rate > 10000) {
                dev_err(&adapter->pdev->dev,
                        "tx rate must be between 100 and 10000 Mbps\n");
                return -EINVAL;
        }

        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_err(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        else
                adapter->vf_cfg[vf].tx_rate = rate;
        return status;
}

static int be_find_vfs(struct be_adapter *adapter, int vf_state)
{
        struct pci_dev *dev, *pdev = adapter->pdev;
        int vfs = 0, assigned_vfs = 0, pos, vf_fn;
        u16 offset, stride;

        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_OFFSET, &offset);
        pci_read_config_word(pdev, pos + PCI_SRIOV_VF_STRIDE, &stride);

        dev = pci_get_device(pdev->vendor, PCI_ANY_ID, NULL);
        while (dev) {
                vf_fn = (pdev->devfn + offset + stride * vfs) & 0xFFFF;
                if (dev->is_virtfn && dev->devfn == vf_fn) {
                        vfs++;
                        if (dev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)
                                assigned_vfs++;
                }
                dev = pci_get_device(pdev->vendor, PCI_ANY_ID, dev);
        }
        return (vf_state == ASSIGNED) ? assigned_vfs : vfs;
}

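/* Adaptive interrupt coalescing (AIC): once a second, derive a new EQ delay
 * from the measured RX packet rate, clamp it to [min_eqd, max_eqd] and
 * program it only if it changed.
 */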
static void be_eqd_update(struct be_adapter *adapter, struct be_eq_obj *eqo)
{
        struct be_rx_stats *stats = rx_stats(&adapter->rx_obj[eqo->idx]);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!eqo->enable_aic) {
                eqd = eqo->eqd;
                goto modify_eqd;
        }

        if (eqo->idx >= adapter->num_rx_qs)
                return;

        stats = rx_stats(&adapter->rx_obj[eqo->idx]);

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = (stats->rx_pps / 110000) << 3;
        eqd = min(eqd, eqo->max_eqd);
        eqd = max(eqd, eqo->min_eqd);
        if (eqd < 10)
                eqd = 0;

modify_eqd:
        if (eqd != eqo->cur_eqd) {
                be_cmd_modify_eqd(adapter, eqo->q.id, eqd);
                eqo->cur_eqd = eqd;
        }
}

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

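/* Return the page_info for the given RX frag index; unmap the backing page
 * when its last fragment is being consumed.
 */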
static struct be_rx_page_info *get_rx_page_info(struct be_rx_obj *rxo,
                                                u16 frag_idx)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_rx_obj *rxo, struct sk_buff *skb,
                             struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_rx_obj *rxo,
                                struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_RX_SKB_ALLOC_SIZE);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        skb_fill_rx_data(rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
void be_rx_compl_process_gro(struct be_rx_obj *rxo, struct napi_struct *napi,
                             struct be_rx_compl_info *rxcp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(napi);
        if (!skb) {
                be_rx_compl_discard(rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        skb_record_rx_queue(skb, rxo - &adapter->rx_obj[0]);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(napi);
}

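/* Extract the fields of a v1 (BE3 native) RX completion into the
 * version-independent be_rx_compl_info; a v0 variant follows below.
 */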
static void be_parse_rx_compl_v1(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_eth_rx_compl *compl,
                                 struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
1399                 AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1400         if (rxcp->vlanf) {
1401                 rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1402                                           compl);
1403                 rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1404                                                compl);
1405         }
1406         rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1407 }
1408
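/* Return the next valid RX completion from the CQ, or NULL if none is
 * pending. The entry is byte-swapped to CPU endianness and its valid bit
 * cleared so that it is consumed exactly once.
 */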
1409 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1410 {
1411         struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1412         struct be_rx_compl_info *rxcp = &rxo->rxcp;
1413         struct be_adapter *adapter = rxo->adapter;
1414
1415         /* For checking the valid bit it is OK to use either definition as the
1416          * valid bit is at the same position in both v0 and v1 Rx compl */
1417         if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1418                 return NULL;
1419
1420         rmb();
1421         be_dws_le_to_cpu(compl, sizeof(*compl));
1422
1423         if (adapter->be3_native)
1424                 be_parse_rx_compl_v1(compl, rxcp);
1425         else
1426                 be_parse_rx_compl_v0(compl, rxcp);
1427
1428         if (rxcp->vlanf) {
1429                 /* vlanf could be wrongly set in some cards.
1430                  * Ignore it if vtm is not set */
1431                 if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1432                         rxcp->vlanf = 0;
1433
1434                 if (!lancer_chip(adapter))
1435                         rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1436
1437                 if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1438                     !adapter->vlan_tag[rxcp->vlan_tag])
1439                         rxcp->vlanf = 0;
1440         }
1441
1442         /* As the compl has been parsed, reset it; we won't touch it again */
1443         compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1444
1445         queue_tail_inc(&rxo->cq);
1446         return rxcp;
1447 }
1448
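/* Allocate a (possibly compound) page of at least 'size' bytes. __GFP_COMP
 * is needed for multi-order allocations so the page can be refcounted as a
 * single unit (one reference per fragment handed to the RX ring).
 */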
1449 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1450 {
1451         u32 order = get_order(size);
1452
1453         if (order > 0)
1454                 gfp |= __GFP_COMP;
1455         return  alloc_pages(gfp, order);
1456 }
1457
1458 /*
1459  * Allocate a page, split it into fragments of size rx_frag_size, and post
1460  * them as receive buffers to BE
1461  */
1462 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1463 {
1464         struct be_adapter *adapter = rxo->adapter;
1465         struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1466         struct be_queue_info *rxq = &rxo->q;
1467         struct page *pagep = NULL;
1468         struct be_eth_rx_d *rxd;
1469         u64 page_dmaaddr = 0, frag_dmaaddr;
1470         u32 posted, page_offset = 0;
1471
1472         page_info = &rxo->page_info_tbl[rxq->head];
1473         for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1474                 if (!pagep) {
1475                         pagep = be_alloc_pages(adapter->big_page_size, gfp);
1476                         if (unlikely(!pagep)) {
1477                                 rx_stats(rxo)->rx_post_fail++;
1478                                 break;
1479                         }
1480                         page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1481                                                     0, adapter->big_page_size,
1482                                                     DMA_FROM_DEVICE);
1483                         page_info->page_offset = 0;
1484                 } else {
1485                         get_page(pagep);
1486                         page_info->page_offset = page_offset + rx_frag_size;
1487                 }
1488                 page_offset = page_info->page_offset;
1489                 page_info->page = pagep;
1490                 dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1491                 frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1492
1493                 rxd = queue_head_node(rxq);
1494                 rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1495                 rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1496
1497                 /* Any space left in the current big page for another frag? */
1498                 if ((page_offset + rx_frag_size + rx_frag_size) >
1499                                         adapter->big_page_size) {
1500                         pagep = NULL;
1501                         page_info->last_page_user = true;
1502                 }
1503
1504                 prev_page_info = page_info;
1505                 queue_head_inc(rxq);
1506                 page_info = &rxo->page_info_tbl[rxq->head];
1507         }
1508         if (pagep)
1509                 prev_page_info->last_page_user = true;
1510
1511         if (posted) {
1512                 atomic_add(posted, &rxq->used);
1513                 be_rxq_notify(adapter, rxq->id, posted);
1514         } else if (atomic_read(&rxq->used) == 0) {
1515                 /* Let be_worker replenish when memory is available */
1516                 rxo->rx_post_starved = true;
1517         }
1518 }
1519
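/* Return the next valid TX completion, or NULL if none is pending. As on
 * the RX path, the rmb() prevents the descriptor body from being read
 * before the valid bit.
 */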
1520 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1521 {
1522         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1523
1524         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1525                 return NULL;
1526
1527         rmb();
1528         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1529
1530         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1531
1532         queue_tail_inc(tx_cq);
1533         return txcp;
1534 }
1535
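/* Unmap and free the skb whose wrbs occupy the TXQ from tail up to
 * last_index; returns the number of wrbs freed, including the header wrb.
 */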
1536 static u16 be_tx_compl_process(struct be_adapter *adapter,
1537                 struct be_tx_obj *txo, u16 last_index)
1538 {
1539         struct be_queue_info *txq = &txo->q;
1540         struct be_eth_wrb *wrb;
1541         struct sk_buff **sent_skbs = txo->sent_skb_list;
1542         struct sk_buff *sent_skb;
1543         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1544         bool unmap_skb_hdr = true;
1545
1546         sent_skb = sent_skbs[txq->tail];
1547         BUG_ON(!sent_skb);
1548         sent_skbs[txq->tail] = NULL;
1549
1550         /* skip header wrb */
1551         queue_tail_inc(txq);
1552
1553         do {
1554                 cur_index = txq->tail;
1555                 wrb = queue_tail_node(txq);
1556                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1557                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1558                 unmap_skb_hdr = false;
1559
1560                 num_wrbs++;
1561                 queue_tail_inc(txq);
1562         } while (cur_index != last_index);
1563
1564         kfree_skb(sent_skb);
1565         return num_wrbs;
1566 }
1567
1568 /* Return the number of events in the event queue */
1569 static inline int events_get(struct be_eq_obj *eqo)
1570 {
1571         struct be_eq_entry *eqe;
1572         int num = 0;
1573
1574         do {
1575                 eqe = queue_tail_node(&eqo->q);
1576                 if (eqe->evt == 0)
1577                         break;
1578
1579                 rmb();
1580                 eqe->evt = 0;
1581                 num++;
1582                 queue_tail_inc(&eqo->q);
1583         } while (true);
1584
1585         return num;
1586 }
1587
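/* Count and ack pending events, then hand processing off to NAPI. A
 * spurious interrupt (no events) still needs the EQ re-armed.
 */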
1588 static int event_handle(struct be_eq_obj *eqo)
1589 {
1590         bool rearm = false;
1591         int num = events_get(eqo);
1592
1593         /* Deal with any spurious interrupts that come without events */
1594         if (!num)
1595                 rearm = true;
1596
1597         if (num || msix_enabled(eqo->adapter))
1598                 be_eq_notify(eqo->adapter, eqo->q.id, rearm, true, num);
1599
1600         if (num)
1601                 napi_schedule(&eqo->napi);
1602
1603         return num;
1604 }
1605
1606 /* Leaves the EQ in a disarmed state */
1607 static void be_eq_clean(struct be_eq_obj *eqo)
1608 {
1609         int num = events_get(eqo);
1610
1611         be_eq_notify(eqo->adapter, eqo->q.id, false, true, num);
1612 }
1613
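/* Drain pending RX completions and release posted-but-unused receive
 * buffers; used only while tearing down the RX queues.
 */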
1614 static void be_rx_cq_clean(struct be_rx_obj *rxo)
1615 {
1616         struct be_rx_page_info *page_info;
1617         struct be_queue_info *rxq = &rxo->q;
1618         struct be_queue_info *rx_cq = &rxo->cq;
1619         struct be_rx_compl_info *rxcp;
1620         u16 tail;
1621
1622         /* First cleanup pending rx completions */
1623         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1624                 be_rx_compl_discard(rxo, rxcp);
1625                 be_cq_notify(rxo->adapter, rx_cq->id, false, 1);
1626         }
1627
1628         /* Then free posted rx buffers that were not used */
1629         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1630         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1631                 page_info = get_rx_page_info(rxo, tail);
1632                 put_page(page_info->page);
1633                 memset(page_info, 0, sizeof(*page_info));
1634         }
1635         BUG_ON(atomic_read(&rxq->used));
1636         rxq->tail = rxq->head = 0;
1637 }
1638
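/* Reap TX completions for up to ~200ms; any wrbs still outstanding after
 * that are force-freed, as their completions will never arrive.
 */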
1639 static void be_tx_compl_clean(struct be_adapter *adapter)
1640 {
1641         struct be_tx_obj *txo;
1642         struct be_queue_info *txq;
1643         struct be_eth_tx_compl *txcp;
1644         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1645         struct sk_buff *sent_skb;
1646         bool dummy_wrb;
1647         int i, pending_txqs;
1648
1649         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1650         do {
1651                 pending_txqs = adapter->num_tx_qs;
1652
1653                 for_all_tx_queues(adapter, txo, i) {
1654                         txq = &txo->q;
1655                         while ((txcp = be_tx_compl_get(&txo->cq))) {
1656                                 end_idx =
1657                                         AMAP_GET_BITS(struct amap_eth_tx_compl,
1658                                                       wrb_index, txcp);
1659                                 num_wrbs += be_tx_compl_process(adapter, txo,
1660                                                                 end_idx);
1661                                 cmpl++;
1662                         }
1663                         if (cmpl) {
1664                                 be_cq_notify(adapter, txo->cq.id, false, cmpl);
1665                                 atomic_sub(num_wrbs, &txq->used);
1666                                 cmpl = 0;
1667                                 num_wrbs = 0;
1668                         }
1669                         if (atomic_read(&txq->used) == 0)
1670                                 pending_txqs--;
1671                 }
1672
1673                 if (pending_txqs == 0 || ++timeo > 200)
1674                         break;
1675
1676                 mdelay(1);
1677         } while (true);
1678
1679         for_all_tx_queues(adapter, txo, i) {
1680                 txq = &txo->q;
1681                 if (atomic_read(&txq->used))
1682                         dev_err(&adapter->pdev->dev, "%d pending tx-compls\n",
1683                                 atomic_read(&txq->used));
1684
1685                 /* free posted tx for which compls will never arrive */
1686                 while (atomic_read(&txq->used)) {
1687                         sent_skb = txo->sent_skb_list[txq->tail];
1688                         end_idx = txq->tail;
1689                         num_wrbs = wrb_cnt_for_skb(adapter, sent_skb,
1690                                                    &dummy_wrb);
1691                         index_adv(&end_idx, num_wrbs - 1, txq->len);
1692                         num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1693                         atomic_sub(num_wrbs, &txq->used);
1694                 }
1695         }
1696 }
1697
1698 static void be_evt_queues_destroy(struct be_adapter *adapter)
1699 {
1700         struct be_eq_obj *eqo;
1701         int i;
1702
1703         for_all_evt_queues(adapter, eqo, i) {
1704                 be_eq_clean(eqo);
1705                 if (eqo->q.created)
1706                         be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
1707                 be_queue_free(adapter, &eqo->q);
1708         }
1709 }
1710
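/* Create one event queue per interrupt vector, with adaptive interrupt
 * coalescing (AIC) enabled by default.
 */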
1711 static int be_evt_queues_create(struct be_adapter *adapter)
1712 {
1713         struct be_queue_info *eq;
1714         struct be_eq_obj *eqo;
1715         int i, rc;
1716
1717         adapter->num_evt_qs = num_irqs(adapter);
1718
1719         for_all_evt_queues(adapter, eqo, i) {
1720                 eqo->adapter = adapter;
1721                 eqo->tx_budget = BE_TX_BUDGET;
1722                 eqo->idx = i;
1723                 eqo->max_eqd = BE_MAX_EQD;
1724                 eqo->enable_aic = true;
1725
1726                 eq = &eqo->q;
1727                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1728                                         sizeof(struct be_eq_entry));
1729                 if (rc)
1730                         return rc;
1731
1732                 rc = be_cmd_eq_create(adapter, eq, eqo->cur_eqd);
1733                 if (rc)
1734                         return rc;
1735         }
1736         return 0;
1737 }
1738
1739 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1740 {
1741         struct be_queue_info *q;
1742
1743         q = &adapter->mcc_obj.q;
1744         if (q->created)
1745                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1746         be_queue_free(adapter, q);
1747
1748         q = &adapter->mcc_obj.cq;
1749         if (q->created)
1750                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1751         be_queue_free(adapter, q);
1752 }
1753
1754 /* Must be called only after TX qs are created as MCC shares TX EQ */
1755 static int be_mcc_queues_create(struct be_adapter *adapter)
1756 {
1757         struct be_queue_info *q, *cq;
1758
1759         cq = &adapter->mcc_obj.cq;
1760         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1761                         sizeof(struct be_mcc_compl)))
1762                 goto err;
1763
1764         /* Use the default EQ for MCC completions */
1765         if (be_cmd_cq_create(adapter, cq, &mcc_eqo(adapter)->q, true, 0))
1766                 goto mcc_cq_free;
1767
1768         q = &adapter->mcc_obj.q;
1769         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1770                 goto mcc_cq_destroy;
1771
1772         if (be_cmd_mccq_create(adapter, q, cq))
1773                 goto mcc_q_free;
1774
1775         return 0;
1776
1777 mcc_q_free:
1778         be_queue_free(adapter, q);
1779 mcc_cq_destroy:
1780         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1781 mcc_cq_free:
1782         be_queue_free(adapter, cq);
1783 err:
1784         return -1;
1785 }
1786
1787 static void be_tx_queues_destroy(struct be_adapter *adapter)
1788 {
1789         struct be_queue_info *q;
1790         struct be_tx_obj *txo;
1791         u8 i;
1792
1793         for_all_tx_queues(adapter, txo, i) {
1794                 q = &txo->q;
1795                 if (q->created)
1796                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1797                 be_queue_free(adapter, q);
1798
1799                 q = &txo->cq;
1800                 if (q->created)
1801                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1802                 be_queue_free(adapter, q);
1803         }
1804 }
1805
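/* A single TX queue is used when SR-IOV is wanted, in multi-channel mode,
 * on Lancer, on VFs and on GEN2 chips; otherwise MAX_TX_QS are created.
 */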
1806 static int be_num_txqs_want(struct be_adapter *adapter)
1807 {
1808         if (sriov_want(adapter) || be_is_mc(adapter) ||
1809             lancer_chip(adapter) || !be_physfn(adapter) ||
1810             adapter->generation == BE_GEN2)
1811                 return 1;
1812         else
1813                 return MAX_TX_QS;
1814 }
1815
1816 static int be_tx_cqs_create(struct be_adapter *adapter)
1817 {
1818         struct be_queue_info *cq, *eq;
1819         int status;
1820         struct be_tx_obj *txo;
1821         u8 i;
1822
1823         adapter->num_tx_qs = be_num_txqs_want(adapter);
1824         if (adapter->num_tx_qs != MAX_TX_QS) {
1825                 rtnl_lock();
1826                 netif_set_real_num_tx_queues(adapter->netdev,
1827                         adapter->num_tx_qs);
1828                 rtnl_unlock();
1829         }
1830
1831         for_all_tx_queues(adapter, txo, i) {
1832                 cq = &txo->cq;
1833                 status = be_queue_alloc(adapter, cq, TX_CQ_LEN,
1834                                         sizeof(struct be_eth_tx_compl));
1835                 if (status)
1836                         return status;
1837
1838                 /* If num_evt_qs is less than num_tx_qs, then more than
1839                  * one txq share an eq
1840                  */
1841                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1842                 status = be_cmd_cq_create(adapter, cq, eq, false, 3);
1843                 if (status)
1844                         return status;
1845         }
1846         return 0;
1847 }
1848
1849 static int be_tx_qs_create(struct be_adapter *adapter)
1850 {
1851         struct be_tx_obj *txo;
1852         int i, status;
1853
1854         for_all_tx_queues(adapter, txo, i) {
1855                 status = be_queue_alloc(adapter, &txo->q, TX_Q_LEN,
1856                                         sizeof(struct be_eth_wrb));
1857                 if (status)
1858                         return status;
1859
1860                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
1861                 if (status)
1862                         return status;
1863         }
1864
1865         return 0;
1866 }
1867
1868 static void be_rx_cqs_destroy(struct be_adapter *adapter)
1869 {
1870         struct be_queue_info *q;
1871         struct be_rx_obj *rxo;
1872         int i;
1873
1874         for_all_rx_queues(adapter, rxo, i) {
1875                 q = &rxo->cq;
1876                 if (q->created)
1877                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1878                 be_queue_free(adapter, q);
1879         }
1880 }
1881
1882 static int be_rx_cqs_create(struct be_adapter *adapter)
1883 {
1884         struct be_queue_info *eq, *cq;
1885         struct be_rx_obj *rxo;
1886         int rc, i;
1887
1888         /* We'll create as many RSS rings as there are irqs.
1889          * But when there's only one irq there's no use creating RSS rings
1890          */
1891         adapter->num_rx_qs = (num_irqs(adapter) > 1) ?
1892                                 num_irqs(adapter) + 1 : 1;
1893         if (adapter->num_rx_qs != MAX_RX_QS) {
1894                 rtnl_lock();
1895                 netif_set_real_num_rx_queues(adapter->netdev,
1896                                              adapter->num_rx_qs);
1897                 rtnl_unlock();
1898         }
1899
1900         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1901         for_all_rx_queues(adapter, rxo, i) {
1902                 rxo->adapter = adapter;
1903                 cq = &rxo->cq;
1904                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1905                                 sizeof(struct be_eth_rx_compl));
1906                 if (rc)
1907                         return rc;
1908
1909                 eq = &adapter->eq_obj[i % adapter->num_evt_qs].q;
1910                 rc = be_cmd_cq_create(adapter, cq, eq, false, 3);
1911                 if (rc)
1912                         return rc;
1913         }
1914
1915         if (adapter->num_rx_qs != MAX_RX_QS)
1916                 dev_info(&adapter->pdev->dev,
1917                         "Created only %d receive queues\n", adapter->num_rx_qs);
1918
1919         return 0;
1920 }
1921
1922 static irqreturn_t be_intx(int irq, void *dev)
1923 {
1924         struct be_adapter *adapter = dev;
1925         int num_evts;
1926
1927         /* With INTx only one EQ is used */
1928         num_evts = event_handle(&adapter->eq_obj[0]);
1929         if (num_evts)
1930                 return IRQ_HANDLED;
1931         else
1932                 return IRQ_NONE;
1933 }
1934
1935 static irqreturn_t be_msix(int irq, void *dev)
1936 {
1937         struct be_eq_obj *eqo = dev;
1938
1939         event_handle(eqo);
1940         return IRQ_HANDLED;
1941 }
1942
1943 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1944 {
1945         return rxcp->tcpf && !rxcp->err;
1946 }
1947
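/* RX side of NAPI polling: consume up to 'budget' completions, handing
 * error-free TCP packets to GRO and everything else to the regular
 * receive path, then replenish the RX ring if it has drained below the
 * refill watermark.
 */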
1948 static int be_process_rx(struct be_rx_obj *rxo, struct napi_struct *napi,
1949                         int budget)
1950 {
1951         struct be_adapter *adapter = rxo->adapter;
1952         struct be_queue_info *rx_cq = &rxo->cq;
1953         struct be_rx_compl_info *rxcp;
1954         u32 work_done;
1955
1956         for (work_done = 0; work_done < budget; work_done++) {
1957                 rxcp = be_rx_compl_get(rxo);
1958                 if (!rxcp)
1959                         break;
1960
1961                 /* Is it a flush compl that has no data */
1962                 if (unlikely(rxcp->num_rcvd == 0))
1963                         goto loop_continue;
1964
1965                 /* Discard compl with partial DMA Lancer B0 */
1966                 if (unlikely(!rxcp->pkt_size)) {
1967                         be_rx_compl_discard(rxo, rxcp);
1968                         goto loop_continue;
1969                 }
1970
1971                 /* On BE drop pkts that arrive due to imperfect filtering in
1972                  * promiscuous mode on some SKUs
1973                  */
1974                 if (unlikely(rxcp->port != adapter->port_num &&
1975                                 !lancer_chip(adapter))) {
1976                         be_rx_compl_discard(rxo, rxcp);
1977                         goto loop_continue;
1978                 }
1979
1980                 if (do_gro(rxcp))
1981                         be_rx_compl_process_gro(rxo, napi, rxcp);
1982                 else
1983                         be_rx_compl_process(rxo, rxcp);
1984 loop_continue:
1985                 be_rx_stats_update(rxo, rxcp);
1986         }
1987
1988         if (work_done) {
1989                 be_cq_notify(adapter, rx_cq->id, true, work_done);
1990
1991                 if (atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1992                         be_post_rx_frags(rxo, GFP_ATOMIC);
1993         }
1994
1995         return work_done;
1996 }
1997
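/* TX side of NAPI polling: reap up to 'budget' completions and wake the
 * netdev subqueue once at least half of the TX ring is free again.
 */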
1998 static bool be_process_tx(struct be_adapter *adapter, struct be_tx_obj *txo,
1999                           int budget, int idx)
2000 {
2001         struct be_eth_tx_compl *txcp;
2002         int num_wrbs = 0, work_done;
2003
2004         for (work_done = 0; work_done < budget; work_done++) {
2005                 txcp = be_tx_compl_get(&txo->cq);
2006                 if (!txcp)
2007                         break;
2008                 num_wrbs += be_tx_compl_process(adapter, txo,
2009                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
2010                                         wrb_index, txcp));
2011         }
2012
2013         if (work_done) {
2014                 be_cq_notify(adapter, txo->cq.id, true, work_done);
2015                 atomic_sub(num_wrbs, &txo->q.used);
2016
2017                 /* As Tx wrbs have been freed up, wake up netdev queue
2018                  * if it was stopped due to lack of tx wrbs.  */
2019                 if (__netif_subqueue_stopped(adapter->netdev, idx) &&
2020                         atomic_read(&txo->q.used) < txo->q.len / 2) {
2021                         netif_wake_subqueue(adapter->netdev, idx);
2022                 }
2023
2024                 u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2025                 tx_stats(txo)->tx_compl += work_done;
2026                 u64_stats_update_end(&tx_stats(txo)->sync_compl);
2027         }
2028         return (work_done < budget); /* Done */
2029 }
2030
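/* NAPI poll handler shared by all EQs: service every TX and RX queue
 * mapped to this EQ, process MCC completions on the MCC EQ, and re-arm
 * the EQ only when all the work fit within the budget.
 */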
2031 int be_poll(struct napi_struct *napi, int budget)
2032 {
2033         struct be_eq_obj *eqo = container_of(napi, struct be_eq_obj, napi);
2034         struct be_adapter *adapter = eqo->adapter;
2035         int max_work = 0, work, i;
2036         bool tx_done;
2037
2038         /* Process all TXQs serviced by this EQ */
2039         for (i = eqo->idx; i < adapter->num_tx_qs; i += adapter->num_evt_qs) {
2040                 tx_done = be_process_tx(adapter, &adapter->tx_obj[i],
2041                                         eqo->tx_budget, i);
2042                 if (!tx_done)
2043                         max_work = budget;
2044         }
2045
2046         /* This loop will iterate twice for EQ0 in which
2047          * completions of the last RXQ (default one) are also processed.
2048          * For other EQs the loop iterates only once.
2049          */
2050         for (i = eqo->idx; i < adapter->num_rx_qs; i += adapter->num_evt_qs) {
2051                 work = be_process_rx(&adapter->rx_obj[i], napi, budget);
2052                 max_work = max(work, max_work);
2053         }
2054
2055         if (is_mcc_eqo(eqo))
2056                 be_process_mcc(adapter);
2057
2058         if (max_work < budget) {
2059                 napi_complete(napi);
2060                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2061         } else {
2062                 /* As we'll continue in polling mode, count and clear events */
2063                 be_eq_notify(adapter, eqo->q.id, false, false, events_get(eqo));
2064         }
2065         return max_work;
2066 }
2067
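/* Detect an unrecoverable error (UE): via the SLIPORT status registers on
 * Lancer, or the UE status words in PCI config space on BE2/BE3, and log
 * which functional block(s) raised the error.
 */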
2068 void be_detect_dump_ue(struct be_adapter *adapter)
2069 {
2070         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2071         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2072         u32 i;
2073
2074         if (adapter->eeh_err || adapter->ue_detected)
2075                 return;
2076
2077         if (lancer_chip(adapter)) {
2078                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2079                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2080                         sliport_err1 = ioread32(adapter->db +
2081                                         SLIPORT_ERROR1_OFFSET);
2082                         sliport_err2 = ioread32(adapter->db +
2083                                         SLIPORT_ERROR2_OFFSET);
2084                 }
2085         } else {
2086                 pci_read_config_dword(adapter->pdev,
2087                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2088                 pci_read_config_dword(adapter->pdev,
2089                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2090                 pci_read_config_dword(adapter->pdev,
2091                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2092                 pci_read_config_dword(adapter->pdev,
2093                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2094
2095                 ue_lo = (ue_lo & (~ue_lo_mask));
2096                 ue_hi = (ue_hi & (~ue_hi_mask));
2097         }
2098
2099         if (ue_lo || ue_hi ||
2100                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2101                 adapter->ue_detected = true;
2102                 adapter->eeh_err = true;
2103                 dev_err(&adapter->pdev->dev,
2104                         "Unrecoverable error in the card\n");
2105         }
2106
2107         if (ue_lo) {
2108                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2109                         if (ue_lo & 1)
2110                                 dev_err(&adapter->pdev->dev,
2111                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2112                 }
2113         }
2114         if (ue_hi) {
2115                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2116                         if (ue_hi & 1)
2117                                 dev_err(&adapter->pdev->dev,
2118                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2119                 }
2120         }
2121
2122         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2123                 dev_err(&adapter->pdev->dev,
2124                         "sliport status 0x%x\n", sliport_status);
2125                 dev_err(&adapter->pdev->dev,
2126                         "sliport error1 0x%x\n", sliport_err1);
2127                 dev_err(&adapter->pdev->dev,
2128                         "sliport error2 0x%x\n", sliport_err2);
2129         }
2130 }
2131
2132 static void be_msix_disable(struct be_adapter *adapter)
2133 {
2134         if (msix_enabled(adapter)) {
2135                 pci_disable_msix(adapter->pdev);
2136                 adapter->num_msix_vec = 0;
2137         }
2138 }
2139
2140 static uint be_num_rss_want(struct be_adapter *adapter)
2141 {
2142         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
2143              !sriov_want(adapter) && be_physfn(adapter) &&
2144              !be_is_mc(adapter))
2145                 return (adapter->be3_native) ? BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
2146         else
2147                 return 0;
2148 }
2149
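/* Enable MSI-X with one vector per desired RSS queue (plus RoCE vectors
 * where supported). If the platform grants fewer vectors than requested,
 * retry with the number it reported as available.
 */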
2150 static void be_msix_enable(struct be_adapter *adapter)
2151 {
2152 #define BE_MIN_MSIX_VECTORS             1
2153         int i, status, num_vec, num_roce_vec = 0;
2154
2155         /* If RSS queues are not used, need a vec for default RX Q */
2156         num_vec = min(be_num_rss_want(adapter), num_online_cpus());
2157         if (be_roce_supported(adapter)) {
2158                 num_roce_vec = min_t(u32, MAX_ROCE_MSIX_VECTORS,
2159                                         (num_online_cpus() + 1));
2160                 num_roce_vec = min(num_roce_vec, MAX_ROCE_EQS);
2161                 num_vec += num_roce_vec;
2162                 num_vec = min(num_vec, MAX_MSIX_VECTORS);
2163         }
2164         num_vec = max(num_vec, BE_MIN_MSIX_VECTORS);
2165
2166         for (i = 0; i < num_vec; i++)
2167                 adapter->msix_entries[i].entry = i;
2168
2169         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2170         if (status == 0) {
2171                 goto done;
2172         } else if (status >= BE_MIN_MSIX_VECTORS) {
2173                 num_vec = status;
2174                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2175                                 num_vec) == 0)
2176                         goto done;
2177         }
2178         return;
2179 done:
2180         if (be_roce_supported(adapter)) {
2181                 if (num_vec > num_roce_vec) {
2182                         adapter->num_msix_vec = num_vec - num_roce_vec;
2183                         adapter->num_msix_roce_vec =
2184                                 num_vec - adapter->num_msix_vec;
2185                 } else {
2186                         adapter->num_msix_vec = num_vec;
2187                         adapter->num_msix_roce_vec = 0;
2188                 }
2189         } else
2190                 adapter->num_msix_vec = num_vec;
2191         return;
2192 }
2193
2194 static inline int be_msix_vec_get(struct be_adapter *adapter,
2195                                 struct be_eq_obj *eqo)
2196 {
2197         return adapter->msix_entries[eqo->idx].vector;
2198 }
2199
2200 static int be_msix_register(struct be_adapter *adapter)
2201 {
2202         struct net_device *netdev = adapter->netdev;
2203         struct be_eq_obj *eqo;
2204         int status, i, vec;
2205
2206         for_all_evt_queues(adapter, eqo, i) {
2207                 sprintf(eqo->desc, "%s-q%d", netdev->name, i);
2208                 vec = be_msix_vec_get(adapter, eqo);
2209                 status = request_irq(vec, be_msix, 0, eqo->desc, eqo);
2210                 if (status)
2211                         goto err_msix;
2212         }
2213
2214         return 0;
2215 err_msix:
2216         for (i--, eqo = &adapter->eq_obj[i]; i >= 0; i--, eqo--)
2217                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2218         dev_warn(&adapter->pdev->dev, "MSIX Request IRQ failed - err %d\n",
2219                 status);
2220         be_msix_disable(adapter);
2221         return status;
2222 }
2223
2224 static int be_irq_register(struct be_adapter *adapter)
2225 {
2226         struct net_device *netdev = adapter->netdev;
2227         int status;
2228
2229         if (msix_enabled(adapter)) {
2230                 status = be_msix_register(adapter);
2231                 if (status == 0)
2232                         goto done;
2233                 /* INTx is not supported for VF */
2234                 if (!be_physfn(adapter))
2235                         return status;
2236         }
2237
2238         /* INTx */
2239         netdev->irq = adapter->pdev->irq;
2240         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2241                         adapter);
2242         if (status) {
2243                 dev_err(&adapter->pdev->dev,
2244                         "INTx request IRQ failed - err %d\n", status);
2245                 return status;
2246         }
2247 done:
2248         adapter->isr_registered = true;
2249         return 0;
2250 }
2251
2252 static void be_irq_unregister(struct be_adapter *adapter)
2253 {
2254         struct net_device *netdev = adapter->netdev;
2255         struct be_eq_obj *eqo;
2256         int i;
2257
2258         if (!adapter->isr_registered)
2259                 return;
2260
2261         /* INTx */
2262         if (!msix_enabled(adapter)) {
2263                 free_irq(netdev->irq, adapter);
2264                 goto done;
2265         }
2266
2267         /* MSIx */
2268         for_all_evt_queues(adapter, eqo, i)
2269                 free_irq(be_msix_vec_get(adapter, eqo), eqo);
2270
2271 done:
2272         adapter->isr_registered = false;
2273 }
2274
2275 static void be_rx_qs_destroy(struct be_adapter *adapter)
2276 {
2277         struct be_queue_info *q;
2278         struct be_rx_obj *rxo;
2279         int i;
2280
2281         for_all_rx_queues(adapter, rxo, i) {
2282                 q = &rxo->q;
2283                 if (q->created) {
2284                         be_cmd_rxq_destroy(adapter, q);
2285                         /* After the rxq is invalidated, wait for a grace time
2286                          * of 1ms for all dma to end and the flush compl to
2287                          * arrive
2288                          */
2289                         mdelay(1);
2290                         be_rx_cq_clean(rxo);
2291                 }
2292                 be_queue_free(adapter, q);
2293         }
2294 }
2295
2296 static int be_close(struct net_device *netdev)
2297 {
2298         struct be_adapter *adapter = netdev_priv(netdev);
2299         struct be_eq_obj *eqo;
2300         int i;
2301
2302         be_roce_dev_close(adapter);
2303
2304         be_async_mcc_disable(adapter);
2305
2306         if (!lancer_chip(adapter))
2307                 be_intr_set(adapter, false);
2308
2309         for_all_evt_queues(adapter, eqo, i) {
2310                 napi_disable(&eqo->napi);
2311                 if (msix_enabled(adapter))
2312                         synchronize_irq(be_msix_vec_get(adapter, eqo));
2313                 else
2314                         synchronize_irq(netdev->irq);
2315                 be_eq_clean(eqo);
2316         }
2317
2318         be_irq_unregister(adapter);
2319
2320         /* Wait for all pending tx completions to arrive so that
2321          * all tx skbs are freed.
2322          */
2323         be_tx_compl_clean(adapter);
2324
2325         be_rx_qs_destroy(adapter);
2326         return 0;
2327 }
2328
2329 static int be_rx_qs_create(struct be_adapter *adapter)
2330 {
2331         struct be_rx_obj *rxo;
2332         int rc, i, j;
2333         u8 rsstable[128];
2334
2335         for_all_rx_queues(adapter, rxo, i) {
2336                 rc = be_queue_alloc(adapter, &rxo->q, RX_Q_LEN,
2337                                     sizeof(struct be_eth_rx_d));
2338                 if (rc)
2339                         return rc;
2340         }
2341
2342         /* The FW would like the default RXQ to be created first */
2343         rxo = default_rxo(adapter);
2344         rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id, rx_frag_size,
2345                                adapter->if_handle, false, &rxo->rss_id);
2346         if (rc)
2347                 return rc;
2348
2349         for_all_rss_queues(adapter, rxo, i) {
2350                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2351                                        rx_frag_size, adapter->if_handle,
2352                                        true, &rxo->rss_id);
2353                 if (rc)
2354                         return rc;
2355         }
2356
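        /* Stripe the RSS ring ids round-robin across the 128-entry
         * indirection table expected by the firmware.
         */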
2357         if (be_multi_rxq(adapter)) {
2358                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2359                         for_all_rss_queues(adapter, rxo, i) {
2360                                 if ((j + i) >= 128)
2361                                         break;
2362                                 rsstable[j + i] = rxo->rss_id;
2363                         }
2364                 }
2365                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2366                 if (rc)
2367                         return rc;
2368         }
2369
2370         /* First time posting */
2371         for_all_rx_queues(adapter, rxo, i)
2372                 be_post_rx_frags(rxo, GFP_KERNEL);
2373         return 0;
2374 }
2375
2376 static int be_open(struct net_device *netdev)
2377 {
2378         struct be_adapter *adapter = netdev_priv(netdev);
2379         struct be_eq_obj *eqo;
2380         struct be_rx_obj *rxo;
2381         struct be_tx_obj *txo;
2382         u8 link_status;
2383         int status, i;
2384
2385         status = be_rx_qs_create(adapter);
2386         if (status)
2387                 goto err;
2388
2389         be_irq_register(adapter);
2390
2391         if (!lancer_chip(adapter))
2392                 be_intr_set(adapter, true);
2393
2394         for_all_rx_queues(adapter, rxo, i)
2395                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2396
2397         for_all_tx_queues(adapter, txo, i)
2398                 be_cq_notify(adapter, txo->cq.id, true, 0);
2399
2400         be_async_mcc_enable(adapter);
2401
2402         for_all_evt_queues(adapter, eqo, i) {
2403                 napi_enable(&eqo->napi);
2404                 be_eq_notify(adapter, eqo->q.id, true, false, 0);
2405         }
2406
2407         status = be_cmd_link_status_query(adapter, NULL, NULL,
2408                                           &link_status, 0);
2409         if (!status)
2410                 be_link_status_update(adapter, link_status);
2411
2412         be_roce_dev_open(adapter);
2413         return 0;
2414 err:
2415         be_close(adapter->netdev);
2416         return -EIO;
2417 }
2418
2419 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2420 {
2421         struct be_dma_mem cmd;
2422         int status = 0;
2423         u8 mac[ETH_ALEN];
2424
2425         memset(mac, 0, ETH_ALEN);
2426
2427         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2428         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2429                                     GFP_KERNEL);
2430         if (cmd.va == NULL)
2431                 return -1;
2432         memset(cmd.va, 0, cmd.size);
2433
2434         if (enable) {
2435                 status = pci_write_config_dword(adapter->pdev,
2436                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2437                 if (status) {
2438                         dev_err(&adapter->pdev->dev,
2439                                 "Could not enable Wake-on-lan\n");
2440                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2441                                           cmd.dma);
2442                         return status;
2443                 }
2444                 status = be_cmd_enable_magic_wol(adapter,
2445                                 adapter->netdev->dev_addr, &cmd);
2446                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2447                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2448         } else {
2449                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2450                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2451                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2452         }
2453
2454         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2455         return status;
2456 }
2457
2458 /*
2459  * Generate a seed MAC address from the PF MAC Address using jhash.
2460  * MAC addresses for VFs are assigned incrementally starting from the seed.
2461  * These addresses are programmed in the ASIC by the PF and the VF driver
2462  * queries for the MAC address during its probe.
2463  */
2464 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2465 {
2466         u32 vf;
2467         int status = 0;
2468         u8 mac[ETH_ALEN];
2469         struct be_vf_cfg *vf_cfg;
2470
2471         be_vf_eth_addr_generate(adapter, mac);
2472
2473         for_all_vfs(adapter, vf_cfg, vf) {
2474                 if (lancer_chip(adapter)) {
2475                         status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2476                 } else {
2477                         status = be_cmd_pmac_add(adapter, mac,
2478                                                  vf_cfg->if_handle,
2479                                                  &vf_cfg->pmac_id, vf + 1);
2480                 }
2481
2482                 if (status)
2483                         dev_err(&adapter->pdev->dev,
2484                         "Mac address assignment failed for VF %d\n", vf);
2485                 else
2486                         memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2487
2488                 mac[5] += 1;
2489         }
2490         return status;
2491 }
2492
2493 static void be_vf_clear(struct be_adapter *adapter)
2494 {
2495         struct be_vf_cfg *vf_cfg;
2496         u32 vf;
2497
2498         if (be_find_vfs(adapter, ASSIGNED)) {
2499                 dev_warn(&adapter->pdev->dev, "VFs are assigned to VMs\n");
2500                 goto done;
2501         }
2502
2503         for_all_vfs(adapter, vf_cfg, vf) {
2504                 if (lancer_chip(adapter))
2505                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2506                 else
2507                         be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2508                                         vf_cfg->pmac_id, vf + 1);
2509
2510                 be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2511         }
2512         pci_disable_sriov(adapter->pdev);
2513 done:
2514         kfree(adapter->vf_cfg);
2515         adapter->num_vfs = 0;
2516 }
2517
2518 static int be_clear(struct be_adapter *adapter)
2519 {
2520         int i = 1;
2521
2522         if (adapter->flags & BE_FLAGS_WORKER_SCHEDULED) {
2523                 cancel_delayed_work_sync(&adapter->work);
2524                 adapter->flags &= ~BE_FLAGS_WORKER_SCHEDULED;
2525         }
2526
2527         if (sriov_enabled(adapter))
2528                 be_vf_clear(adapter);
2529
2530         for (; adapter->uc_macs > 0; adapter->uc_macs--, i++)
2531                 be_cmd_pmac_del(adapter, adapter->if_handle,
2532                         adapter->pmac_id[i], 0);
2533
2534         be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2535
2536         be_mcc_queues_destroy(adapter);
2537         be_rx_cqs_destroy(adapter);
2538         be_tx_queues_destroy(adapter);
2539         be_evt_queues_destroy(adapter);
2540
2541         /* tell fw we're done with firing cmds */
2542         be_cmd_fw_clean(adapter);
2543
2544         be_msix_disable(adapter);
2545         pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 0);
2546         return 0;
2547 }
2548
2549 static int be_vf_setup_init(struct be_adapter *adapter)
2550 {
2551         struct be_vf_cfg *vf_cfg;
2552         int vf;
2553
2554         adapter->vf_cfg = kcalloc(adapter->num_vfs, sizeof(*vf_cfg),
2555                                   GFP_KERNEL);
2556         if (!adapter->vf_cfg)
2557                 return -ENOMEM;
2558
2559         for_all_vfs(adapter, vf_cfg, vf) {
2560                 vf_cfg->if_handle = -1;
2561                 vf_cfg->pmac_id = -1;
2562         }
2563         return 0;
2564 }
2565
2566 static int be_vf_setup(struct be_adapter *adapter)
2567 {
2568         struct be_vf_cfg *vf_cfg;
2569         struct device *dev = &adapter->pdev->dev;
2570         u32 cap_flags, en_flags, vf;
2571         u16 def_vlan, lnk_speed;
2572         int status, enabled_vfs;
2573
2574         enabled_vfs = be_find_vfs(adapter, ENABLED);
2575         if (enabled_vfs) {
2576                 dev_warn(dev, "%d VFs are already enabled\n", enabled_vfs);
2577                 dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
2578                 return 0;
2579         }
2580
2581         if (num_vfs > adapter->dev_num_vfs) {
2582                 dev_warn(dev, "Device supports %d VFs and not %d\n",
2583                          adapter->dev_num_vfs, num_vfs);
2584                 num_vfs = adapter->dev_num_vfs;
2585         }
2586
2587         status = pci_enable_sriov(adapter->pdev, num_vfs);
2588         if (!status) {
2589                 adapter->num_vfs = num_vfs;
2590         } else {
2591                 /* Platform doesn't support SRIOV though device supports it */
2592                 dev_warn(dev, "SRIOV enable failed\n");
2593                 return 0;
2594         }
2595
2596         status = be_vf_setup_init(adapter);
2597         if (status)
2598                 goto err;
2599
2600         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2601                                 BE_IF_FLAGS_MULTICAST;
2602         for_all_vfs(adapter, vf_cfg, vf) {
2603                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2604                                           &vf_cfg->if_handle, NULL, vf + 1);
2605                 if (status)
2606                         goto err;
2607         }
2608
2609         if (!enabled_vfs) {
2610                 status = be_vf_eth_addr_config(adapter);
2611                 if (status)
2612                         goto err;
2613         }
2614
2615         for_all_vfs(adapter, vf_cfg, vf) {
2616                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2617                                                   NULL, vf + 1);
2618                 if (status)
2619                         goto err;
2620                 vf_cfg->tx_rate = lnk_speed * 10;
2621
2622                 status = be_cmd_get_hsw_config(adapter, &def_vlan,
2623                                 vf + 1, vf_cfg->if_handle);
2624                 if (status)
2625                         goto err;
2626                 vf_cfg->def_vid = def_vlan;
2627         }
2628         return 0;
2629 err:
2630         return status;
2631 }
2632
2633 static void be_setup_init(struct be_adapter *adapter)
2634 {
2635         adapter->vlan_prio_bmap = 0xff;
2636         adapter->phy.link_speed = -1;
2637         adapter->if_handle = -1;
2638         adapter->be3_native = false;
2639         adapter->promiscuous = false;
2640         adapter->eq_next_idx = 0;
2641         adapter->phy.forced_port_speed = -1;
2642 }
2643
2644 static int be_add_mac_from_list(struct be_adapter *adapter, u8 *mac)
2645 {
2646         u32 pmac_id;
2647         int status;
2648         bool pmac_id_active;
2649
2650         status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id_active,
2651                                                         &pmac_id, mac);
2652         if (status != 0)
2653                 goto do_none;
2654
2655         if (pmac_id_active) {
2656                 status = be_cmd_mac_addr_query(adapter, mac,
2657                                 MAC_ADDRESS_TYPE_NETWORK,
2658                                 false, adapter->if_handle, pmac_id);
2659
2660                 if (!status)
2661                         adapter->pmac_id[0] = pmac_id;
2662         } else {
2663                 status = be_cmd_pmac_add(adapter, mac,
2664                                 adapter->if_handle, &adapter->pmac_id[0], 0);
2665         }
2666 do_none:
2667         return status;
2668 }
2669
2670 /* Routine to query per function resource limits */
2671 static int be_get_config(struct be_adapter *adapter)
2672 {
2673         int pos;
2674         u16 dev_num_vfs;
2675
2676         pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_SRIOV);
2677         if (pos) {
2678                 pci_read_config_word(adapter->pdev, pos + PCI_SRIOV_TOTAL_VF,
2679                                      &dev_num_vfs);
2680                 adapter->dev_num_vfs = dev_num_vfs;
2681         }
2682         return 0;
2683 }
2684
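/* Bring the adapter to a functional state: create all queues, create the
 * interface with the appropriate capability flags, program the MAC,
 * apply VLAN and flow-control settings and, on a PF, set up any
 * requested VFs.
 */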
2685 static int be_setup(struct be_adapter *adapter)
2686 {
2687         struct net_device *netdev = adapter->netdev;
2688         struct device *dev = &adapter->pdev->dev;
2689         u32 cap_flags, en_flags;
2690         u32 tx_fc, rx_fc;
2691         int status;
2692         u8 mac[ETH_ALEN];
2693
2694         be_setup_init(adapter);
2695
2696         be_get_config(adapter);
2697
2698         be_cmd_req_native_mode(adapter);
2699
2700         be_msix_enable(adapter);
2701
2702         status = be_evt_queues_create(adapter);
2703         if (status)
2704                 goto err;
2705
2706         status = be_tx_cqs_create(adapter);
2707         if (status)
2708                 goto err;
2709
2710         status = be_rx_cqs_create(adapter);
2711         if (status)
2712                 goto err;
2713
2714         status = be_mcc_queues_create(adapter);
2715         if (status)
2716                 goto err;
2717
2718         memset(mac, 0, ETH_ALEN);
2719         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2720                         true /*permanent */, 0, 0);
2721         if (status)
2722                 return status;
2723         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2724         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2725
2726         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2727                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2728         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2729                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2730
2731         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2732                 cap_flags |= BE_IF_FLAGS_RSS;
2733                 en_flags |= BE_IF_FLAGS_RSS;
2734         }
2735         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2736                         netdev->dev_addr, &adapter->if_handle,
2737                         &adapter->pmac_id[0], 0);
2738         if (status != 0)
2739                 goto err;
2740
2741         /* The VF's permanent MAC queried from the card is incorrect.
2742          * For BEx: Query the MAC configured by the PF using if_handle.
2743          * For Lancer: Get and use mac_list to obtain the MAC address.
2744          */
2745         if (!be_physfn(adapter)) {
2746                 if (lancer_chip(adapter))
2747                         status = be_add_mac_from_list(adapter, mac);
2748                 else
2749                         status = be_cmd_mac_addr_query(adapter, mac,
2750                                         MAC_ADDRESS_TYPE_NETWORK, false,
2751                                         adapter->if_handle, 0);
2752                 if (!status) {
2753                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2754                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2755                 }
2756         }
2757
2758         status = be_tx_qs_create(adapter);
2759         if (status)
2760                 goto err;
2761
2762         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2763
2764         if (adapter->vlans_added)
2765                 be_vid_config(adapter);
2766
2767         be_set_rx_mode(adapter->netdev);
2768
2769         be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2770
2771         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc)
2772                 be_cmd_set_flow_control(adapter, adapter->tx_fc,
2773                                         adapter->rx_fc);
2774
2775         if (be_physfn(adapter) && num_vfs) {
2776                 if (adapter->dev_num_vfs)
2777                         be_vf_setup(adapter);
2778                 else
2779                         dev_warn(dev, "device doesn't support SRIOV\n");
2780         }
2781
2782         be_cmd_get_phy_info(adapter);
2783         if (be_pause_supported(adapter))
2784                 adapter->phy.fc_autoneg = 1;
2785
2786         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
2787         adapter->flags |= BE_FLAGS_WORKER_SCHEDULED;
2788
2789         pci_write_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, 1);
2790         return 0;
2791 err:
2792         be_clear(adapter);
2793         return status;
2794 }
2795
2796 #ifdef CONFIG_NET_POLL_CONTROLLER
2797 static void be_netpoll(struct net_device *netdev)
2798 {
2799         struct be_adapter *adapter = netdev_priv(netdev);
2800         struct be_eq_obj *eqo;
2801         int i;
2802
2803         for_all_evt_queues(adapter, eqo, i)
2804                 event_handle(eqo);
2805
2806         return;
2807 }
2808 #endif
2809
2810 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2811 static const char flash_cookie[2][16] = {"*** SE FLAS", "H DIRECTORY *** "};
2812
2813 static bool be_flash_redboot(struct be_adapter *adapter,
2814                         const u8 *p, u32 img_start, int image_size,
2815                         int hdr_size)
2816 {
2817         u32 crc_offset;
2818         u8 flashed_crc[4];
2819         int status;
2820
2821         crc_offset = hdr_size + img_start + image_size - 4;
2822
2823         p += crc_offset;
2824
2825         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2826                         (image_size - 4));
2827         if (status) {
2828                 dev_err(&adapter->pdev->dev,
2829                 "could not get crc from flash, not flashing redboot\n");
2830                 return false;
2831         }
2832
2833         /* Update redboot only if the CRC does not match */
2834         if (!memcmp(flashed_crc, p, 4))
2835                 return false;
2836         else
2837                 return true;
2838 }
2839
2840 static bool phy_flashing_required(struct be_adapter *adapter)
2841 {
2842         return (adapter->phy.phy_type == TN_8022 &&
2843                 adapter->phy.interface_type == PHY_TYPE_BASET_10GB);
2844 }
2845
2846 static bool is_comp_in_ufi(struct be_adapter *adapter,
2847                            struct flash_section_info *fsec, int type)
2848 {
2849         int i = 0, img_type = 0;
2850         struct flash_section_info_g2 *fsec_g2 = NULL;
2851
2852         if (adapter->generation != BE_GEN3)
2853                 fsec_g2 = (struct flash_section_info_g2 *)fsec;
2854
2855         for (i = 0; i < MAX_FLASH_COMP; i++) {
2856                 if (fsec_g2)
2857                         img_type = le32_to_cpu(fsec_g2->fsec_entry[i].type);
2858                 else
2859                         img_type = le32_to_cpu(fsec->fsec_entry[i].type);
2860
2861                 if (img_type == type)
2862                         return true;
2863         }
2864         return false;
2866 }
2867
2868 struct flash_section_info *get_fsec_info(struct be_adapter *adapter,
2869                                          int header_size,
2870                                          const struct firmware *fw)
2871 {
2872         struct flash_section_info *fsec = NULL;
2873         const u8 *p = fw->data;
2874
2875         p += header_size;
2876         while (p < (fw->data + fw->size)) {
2877                 fsec = (struct flash_section_info *)p;
2878                 if (!memcmp(flash_cookie, fsec->cookie, sizeof(flash_cookie)))
2879                         return fsec;
2880                 p += 32;
2881         }
2882         return NULL;
2883 }
2884
2885 static int be_flash_data(struct be_adapter *adapter,
2886                          const struct firmware *fw,
2887                          struct be_dma_mem *flash_cmd,
2888                          int num_of_images)
2890 {
2891         int status = 0, i, filehdr_size = 0;
2892         int img_hdrs_size = (num_of_images * sizeof(struct image_hdr));
2893         u32 total_bytes = 0, flash_op;
2894         int num_bytes;
2895         const u8 *p = fw->data;
2896         struct be_cmd_write_flashrom *req = flash_cmd->va;
2897         const struct flash_comp *pflashcomp;
2898         int num_comp, hdr_size;
2899         struct flash_section_info *fsec = NULL;
2900
2901         struct flash_comp gen3_flash_types[] = {
2902                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, OPTYPE_ISCSI_ACTIVE,
2903                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_iSCSI},
2904                 { FLASH_REDBOOT_START_g3, OPTYPE_REDBOOT,
2905                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3, IMAGE_BOOT_CODE},
2906                 { FLASH_iSCSI_BIOS_START_g3, OPTYPE_BIOS,
2907                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_ISCSI},
2908                 { FLASH_PXE_BIOS_START_g3, OPTYPE_PXE_BIOS,
2909                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_PXE},
2910                 { FLASH_FCoE_BIOS_START_g3, OPTYPE_FCOE_BIOS,
2911                         FLASH_BIOS_IMAGE_MAX_SIZE_g3, IMAGE_OPTION_ROM_FCoE},
2912                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, OPTYPE_ISCSI_BACKUP,
2913                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_iSCSI},
2914                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, OPTYPE_FCOE_FW_ACTIVE,
2915                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_FCoE},
2916                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, OPTYPE_FCOE_FW_BACKUP,
2917                         FLASH_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_BACKUP_FCoE},
2918                 { FLASH_NCSI_START_g3, OPTYPE_NCSI_FW,
2919                         FLASH_NCSI_IMAGE_MAX_SIZE_g3, IMAGE_NCSI},
2920                 { FLASH_PHY_FW_START_g3, OPTYPE_PHY_FW,
2921                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3, IMAGE_FIRMWARE_PHY}
2922         };
2923
2924         struct flash_comp gen2_flash_types[] = {
2925                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, OPTYPE_ISCSI_ACTIVE,
2926                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_iSCSI},
2927                 { FLASH_REDBOOT_START_g2, OPTYPE_REDBOOT,
2928                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2, IMAGE_BOOT_CODE},
2929                 { FLASH_iSCSI_BIOS_START_g2, OPTYPE_BIOS,
2930                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_ISCSI},
2931                 { FLASH_PXE_BIOS_START_g2, OPTYPE_PXE_BIOS,
2932                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_PXE},
2933                 { FLASH_FCoE_BIOS_START_g2, OPTYPE_FCOE_BIOS,
2934                         FLASH_BIOS_IMAGE_MAX_SIZE_g2, IMAGE_OPTION_ROM_FCoE},
2935                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, OPTYPE_ISCSI_BACKUP,
2936                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_iSCSI},
2937                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, OPTYPE_FCOE_FW_ACTIVE,
2938                         FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_FCoE},
2939                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, OPTYPE_FCOE_FW_BACKUP,
2940                          FLASH_IMAGE_MAX_SIZE_g2, IMAGE_FIRMWARE_BACKUP_FCoE}
2941         };
2942
2943         if (adapter->generation == BE_GEN3) {
2944                 pflashcomp = gen3_flash_types;
2945                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2946                 num_comp = ARRAY_SIZE(gen3_flash_types);
2947         } else {
2948                 pflashcomp = gen2_flash_types;
2949                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2950                 num_comp = ARRAY_SIZE(gen2_flash_types);
2951         }
2952         /* Get flash section info */
2953         fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
2954         if (!fsec) {
2955                 dev_err(&adapter->pdev->dev,
2956                         "Invalid Cookie. UFI corrupted?\n");
2957                 return -1;
2958         }
2959         for (i = 0; i < num_comp; i++) {
2960                 if (!is_comp_in_ufi(adapter, fsec, pflashcomp[i].img_type))
2961                         continue;
2962
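                /* flash the NCSI component only when the adapter FW is at
                 * least 3.102.148.0 (byte-wise version string compare)
                 */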
2963                 if ((pflashcomp[i].optype == OPTYPE_NCSI_FW) &&
2964                     memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2965                         continue;
2966
2967                 if (pflashcomp[i].optype == OPTYPE_PHY_FW &&
2968                     !phy_flashing_required(adapter))
2969                         continue;
2971
2972                 hdr_size = filehdr_size + img_hdrs_size;
2974
2975                 if ((pflashcomp[i].optype == OPTYPE_REDBOOT) &&
2976                     (!be_flash_redboot(adapter, fw->data, pflashcomp[i].offset,
2977                                        pflashcomp[i].size, hdr_size)))
2978                         continue;
2979
2980                 /* Flash the component */
2981                 p = fw->data;
2982                 p += filehdr_size + pflashcomp[i].offset + img_hdrs_size;
2983                 if (p + pflashcomp[i].size > fw->data + fw->size)
2984                         return -1;
2985                 total_bytes = pflashcomp[i].size;
2986                 while (total_bytes) {
2987                         num_bytes = min_t(u32, total_bytes, 32 * 1024);
2991                         total_bytes -= num_bytes;
2992                         if (!total_bytes) {
2993                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2994                                         flash_op = FLASHROM_OPER_PHY_FLASH;
2995                                 else
2996                                         flash_op = FLASHROM_OPER_FLASH;
2997                         } else {
2998                                 if (pflashcomp[i].optype == OPTYPE_PHY_FW)
2999                                         flash_op = FLASHROM_OPER_PHY_SAVE;
3000                                 else
3001                                         flash_op = FLASHROM_OPER_SAVE;
3002                         }
3003                         memcpy(req->params.data_buf, p, num_bytes);
3004                         p += num_bytes;
3005                         status = be_cmd_write_flashrom(adapter, flash_cmd,
3006                                 pflashcomp[i].optype, flash_op, num_bytes);
3007                         if (status) {
3008                                 if (status == ILLEGAL_IOCTL_REQ &&
3009                                     pflashcomp[i].optype == OPTYPE_PHY_FW)
3010                                         break;
3012                                 dev_err(&adapter->pdev->dev,
3013                                         "cmd to write to flash rom failed.\n");
3014                                 return -1;
3015                         }
3016                 }
3017         }
3018         return 0;
3019 }
3020
3021 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
3022 {
3023         if (fhdr == NULL)
3024                 return 0;
3025         if (fhdr->build[0] == '3')
3026                 return BE_GEN3;
3027         else if (fhdr->build[0] == '2')
3028                 return BE_GEN2;
3029         else
3030                 return 0;
3031 }
3032
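/* Download a Lancer FW image: stream it to the "/prg" flash object in
 * 32KB chunks, then issue a zero-length write at the final offset to
 * commit the downloaded image.
 */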
3033 static int lancer_fw_download(struct be_adapter *adapter,
3034                                 const struct firmware *fw)
3035 {
3036 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
3037 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
3038         struct be_dma_mem flash_cmd;
3039         const u8 *data_ptr = NULL;
3040         u8 *dest_image_ptr = NULL;
3041         size_t image_size = 0;
3042         u32 chunk_size = 0;
3043         u32 data_written = 0;
3044         u32 offset = 0;
3045         int status = 0;
3046         u8 add_status = 0;
3047
3048         if (!IS_ALIGNED(fw->size, sizeof(u32))) {
3049                 dev_err(&adapter->pdev->dev,
3050                         "FW image length must be 4-byte aligned\n");
3052                 status = -EINVAL;
3053                 goto lancer_fw_exit;
3054         }
3055
3056         flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
3057                                 + LANCER_FW_DOWNLOAD_CHUNK;
3058         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3059                                                 &flash_cmd.dma, GFP_KERNEL);
3060         if (!flash_cmd.va) {
3061                 status = -ENOMEM;
3062                 dev_err(&adapter->pdev->dev,
3063                         "Memory allocation failure while flashing\n");
3064                 goto lancer_fw_exit;
3065         }
3066
3067         dest_image_ptr = flash_cmd.va +
3068                                 sizeof(struct lancer_cmd_req_write_object);
3069         image_size = fw->size;
3070         data_ptr = fw->data;
3071
3072         while (image_size) {
3073                 chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
3074
3075                 /* Copy the image chunk content. */
3076                 memcpy(dest_image_ptr, data_ptr, chunk_size);
3077
3078                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3079                                 chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
3080                                 &data_written, &add_status);
3081
3082                 if (status)
3083                         break;
3084
3085                 offset += data_written;
3086                 data_ptr += data_written;
3087                 image_size -= data_written;
3088         }
3089
3090         if (!status) {
3091                 /* Commit the FW written */
3092                 status = lancer_cmd_write_object(adapter, &flash_cmd,
3093                                         0, offset, LANCER_FW_DOWNLOAD_LOCATION,
3094                                         &data_written, &add_status);
3095         }
3096
3097         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3098                                 flash_cmd.dma);
3099         if (status) {
3100                 dev_err(&adapter->pdev->dev,
3101                         "Firmware load error. "
3102                         "Status code: 0x%x Additional Status: 0x%x\n",
3103                         status, add_status);
3104                 goto lancer_fw_exit;
3105         }
3106
3107         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3108 lancer_fw_exit:
3109         return status;
3110 }
3111
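/* Flash a BE2/BE3 UFI file after checking that the UFI generation in the
 * file header matches the adapter generation. A gen3 UFI may carry
 * several images; only those with imageid 1 are flashed.
 */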
3112 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3113 {
3114         struct flash_file_hdr_g2 *fhdr;
3115         struct flash_file_hdr_g3 *fhdr3;
3116         struct image_hdr *img_hdr_ptr = NULL;
3117         struct be_dma_mem flash_cmd;
3118         const u8 *p;
3119         int status = 0, i = 0, num_imgs = 0;
3120
3121         p = fw->data;
3122         fhdr = (struct flash_file_hdr_g2 *) p;
3123
3124         flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3125         flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3126                                           &flash_cmd.dma, GFP_KERNEL);
3127         if (!flash_cmd.va) {
3128                 status = -ENOMEM;
3129                 dev_err(&adapter->pdev->dev,
3130                         "Memory allocation failure while flashing\n");
3131                 goto be_fw_exit;
3132         }
3133
3134         if ((adapter->generation == BE_GEN3) &&
3135                         (get_ufigen_type(fhdr) == BE_GEN3)) {
3136                 fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3137                 num_imgs = le32_to_cpu(fhdr3->num_imgs);
3138                 for (i = 0; i < num_imgs; i++) {
3139                         img_hdr_ptr = (struct image_hdr *) (fw->data +
3140                                         (sizeof(struct flash_file_hdr_g3) +
3141                                          i * sizeof(struct image_hdr)));
3142                         if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3143                                 status = be_flash_data(adapter, fw, &flash_cmd,
3144                                                         num_imgs);
3145                 }
3146         } else if ((adapter->generation == BE_GEN2) &&
3147                         (get_ufigen_type(fhdr) == BE_GEN2)) {
3148                 status = be_flash_data(adapter, fw, &flash_cmd, 0);
3149         } else {
3150                 dev_err(&adapter->pdev->dev,
3151                         "UFI and Interface are not compatible for flashing\n");
3152                 status = -1;
3153         }
3154
3155         dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3156                           flash_cmd.dma);
3157         if (status) {
3158                 dev_err(&adapter->pdev->dev, "Firmware load error\n");
3159                 goto be_fw_exit;
3160         }
3161
3162         dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3163
3164 be_fw_exit:
3165         return status;
3166 }
3167
3168 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3169 {
3170         const struct firmware *fw;
3171         int status;
3172
3173         if (!netif_running(adapter->netdev)) {
3174                 dev_err(&adapter->pdev->dev,
3175                         "Firmware load not allowed (interface is down)\n");
3176                 return -1;
3177         }
3178
3179         status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3180         if (status)
3181                 goto fw_exit;
3182
3183         dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3184
3185         if (lancer_chip(adapter))
3186                 status = lancer_fw_download(adapter, fw);
3187         else
3188                 status = be_fw_download(adapter, fw);
3189
3190 fw_exit:
3191         release_firmware(fw);
3192         return status;
3193 }
3194
3195 static const struct net_device_ops be_netdev_ops = {
3196         .ndo_open               = be_open,
3197         .ndo_stop               = be_close,
3198         .ndo_start_xmit         = be_xmit,
3199         .ndo_set_rx_mode        = be_set_rx_mode,
3200         .ndo_set_mac_address    = be_mac_addr_set,
3201         .ndo_change_mtu         = be_change_mtu,
3202         .ndo_get_stats64        = be_get_stats64,
3203         .ndo_validate_addr      = eth_validate_addr,
3204         .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
3205         .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
3206         .ndo_set_vf_mac         = be_set_vf_mac,
3207         .ndo_set_vf_vlan        = be_set_vf_vlan,
3208         .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
3209         .ndo_get_vf_config      = be_get_vf_config,
3210 #ifdef CONFIG_NET_POLL_CONTROLLER
3211         .ndo_poll_controller    = be_netpoll,
3212 #endif
3213 };
3214
3215 static void be_netdev_init(struct net_device *netdev)
3216 {
3217         struct be_adapter *adapter = netdev_priv(netdev);
3218         struct be_eq_obj *eqo;
3219         int i;
3220
3221         netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3222                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3223                 NETIF_F_HW_VLAN_TX;
3224         if (be_multi_rxq(adapter))
3225                 netdev->hw_features |= NETIF_F_RXHASH;
3226
3227         netdev->features |= netdev->hw_features |
3228                 NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3229
3230         netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3231                 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3232
3233         netdev->priv_flags |= IFF_UNICAST_FLT;
3234
3235         netdev->flags |= IFF_MULTICAST;
3236
3237         netif_set_gso_max_size(netdev, 65535);
3238
3239         netdev->netdev_ops = &be_netdev_ops;
3240
3241         SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3242
3243         for_all_evt_queues(adapter, eqo, i)
3244                 netif_napi_add(netdev, &eqo->napi, be_poll, BE_NAPI_WEIGHT);
3245 }
3246
3247 static void be_unmap_pci_bars(struct be_adapter *adapter)
3248 {
3249         if (adapter->csr)
3250                 iounmap(adapter->csr);
3251         if (adapter->db)
3252                 iounmap(adapter->db);
3253         if (adapter->roce_db.base)
3254                 pci_iounmap(adapter->pdev, adapter->roce_db.base);
3255 }
3256
3257 static int lancer_roce_map_pci_bars(struct be_adapter *adapter)
3258 {
3259         struct pci_dev *pdev = adapter->pdev;
3260         u8 __iomem *addr;
3261
3262         addr = pci_iomap(pdev, 2, 0);
3263         if (addr == NULL)
3264                 return -ENOMEM;
3265
3266         adapter->roce_db.base = addr;
3267         adapter->roce_db.io_addr = pci_resource_start(pdev, 2);
3268         adapter->roce_db.size = 8192;
3269         adapter->roce_db.total_size = pci_resource_len(pdev, 2);
3270         return 0;
3271 }
3272
3273 static int be_map_pci_bars(struct be_adapter *adapter)
3274 {
3275         u8 __iomem *addr;
3276         int db_reg;
3277
3278         if (lancer_chip(adapter)) {
3279                 if (be_type_2_3(adapter)) {
3280                         addr = ioremap_nocache(
3281                                         pci_resource_start(adapter->pdev, 0),
3282                                         pci_resource_len(adapter->pdev, 0));
3283                         if (addr == NULL)
3284                                 return -ENOMEM;
3285                         adapter->db = addr;
3286                 }
3287                 if (adapter->if_type == SLI_INTF_TYPE_3) {
3288                         if (lancer_roce_map_pci_bars(adapter))
3289                                 goto pci_map_err;
3290                 }
3291                 return 0;
3292         }
3293
3294         if (be_physfn(adapter)) {
3295                 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3296                                 pci_resource_len(adapter->pdev, 2));
3297                 if (addr == NULL)
3298                         return -ENOMEM;
3299                 adapter->csr = addr;
3300         }
3301
3302         if (adapter->generation == BE_GEN2)
3303                 db_reg = 4;
3304         else
3305                 db_reg = be_physfn(adapter) ? 4 : 0;
3310         addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3311                                 pci_resource_len(adapter->pdev, db_reg));
3312         if (addr == NULL)
3313                 goto pci_map_err;
3314         adapter->db = addr;
3315         if (adapter->sli_family == SKYHAWK_SLI_FAMILY) {
3316                 adapter->roce_db.size = 4096;
3317                 adapter->roce_db.io_addr =
3318                                 pci_resource_start(adapter->pdev, db_reg);
3319                 adapter->roce_db.total_size =
3320                                 pci_resource_len(adapter->pdev, db_reg);
3321         }
3322         return 0;
3323 pci_map_err:
3324         be_unmap_pci_bars(adapter);
3325         return -ENOMEM;
3326 }
3327
3328 static void be_ctrl_cleanup(struct be_adapter *adapter)
3329 {
3330         struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3331
3332         be_unmap_pci_bars(adapter);
3333
3334         if (mem->va)
3335                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3336                                   mem->dma);
3337
3338         mem = &adapter->rx_filter;
3339         if (mem->va)
3340                 dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3341                                   mem->dma);
3342 }
3343
3344 static int be_ctrl_init(struct be_adapter *adapter)
3345 {
3346         struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3347         struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3348         struct be_dma_mem *rx_filter = &adapter->rx_filter;
3349         int status;
3350
3351         status = be_map_pci_bars(adapter);
3352         if (status)
3353                 goto done;
3354
3355         mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3356         mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3357                                                 mbox_mem_alloc->size,
3358                                                 &mbox_mem_alloc->dma,
3359                                                 GFP_KERNEL);
3360         if (!mbox_mem_alloc->va) {
3361                 status = -ENOMEM;
3362                 goto unmap_pci_bars;
3363         }
3364         mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3365         mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3366         mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3367         memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3368
3369         rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3370         rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3371                                         &rx_filter->dma, GFP_KERNEL);
3372         if (rx_filter->va == NULL) {
3373                 status = -ENOMEM;
3374                 goto free_mbox;
3375         }
3376         memset(rx_filter->va, 0, rx_filter->size);
3377
3378         mutex_init(&adapter->mbox_lock);
3379         spin_lock_init(&adapter->mcc_lock);
3380         spin_lock_init(&adapter->mcc_cq_lock);
3381
3382         init_completion(&adapter->flash_compl);
3383         pci_save_state(adapter->pdev);
3384         return 0;
3385
3386 free_mbox:
3387         dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3388                           mbox_mem_alloc->va, mbox_mem_alloc->dma);
3389
3390 unmap_pci_bars:
3391         be_unmap_pci_bars(adapter);
3392
3393 done:
3394         return status;
3395 }
3396
3397 static void be_stats_cleanup(struct be_adapter *adapter)
3398 {
3399         struct be_dma_mem *cmd = &adapter->stats_cmd;
3400
3401         if (cmd->va)
3402                 dma_free_coherent(&adapter->pdev->dev, cmd->size,
3403                                   cmd->va, cmd->dma);
3404 }
3405
3406 static int be_stats_init(struct be_adapter *adapter)
3407 {
3408         struct be_dma_mem *cmd = &adapter->stats_cmd;
3409
3410         if (adapter->generation == BE_GEN2) {
3411                 cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3412         } else {
3413                 if (lancer_chip(adapter))
3414                         cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3415                 else
3416                         cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3417         }
3418         cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3419                                      GFP_KERNEL);
3420         if (cmd->va == NULL)
3421                 return -1;
3422         memset(cmd->va, 0, cmd->size);
3423         return 0;
3424 }
3425
3426 static void __devexit be_remove(struct pci_dev *pdev)
3427 {
3428         struct be_adapter *adapter = pci_get_drvdata(pdev);
3429
3430         if (!adapter)
3431                 return;
3432
3433         be_roce_dev_remove(adapter);
3434
3435         unregister_netdev(adapter->netdev);
3436
3437         be_clear(adapter);
3438
3439         be_stats_cleanup(adapter);
3440
3441         be_ctrl_cleanup(adapter);
3442
3443         pci_set_drvdata(pdev, NULL);
3444         pci_release_regions(pdev);
3445         pci_disable_device(pdev);
3446
3447         free_netdev(adapter->netdev);
3448 }
3449
3450 bool be_is_wol_supported(struct be_adapter *adapter)
3451 {
3452         return (adapter->wol_cap & BE_WOL_CAP) &&
3453                 !be_is_wol_excluded(adapter);
3454 }
3455
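/* Query the FW's extended FAT capabilities and return the debug level
 * configured for UART tracing (0 if the query fails).
 */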
3456 u32 be_get_fw_log_level(struct be_adapter *adapter)
3457 {
3458         struct be_dma_mem extfat_cmd;
3459         struct be_fat_conf_params *cfgs;
3460         int status;
3461         u32 level = 0;
3462         int j;
3463
3464         memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
3465         extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
3466         extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
3467                                              &extfat_cmd.dma);
3468
3469         if (!extfat_cmd.va) {
3470                 dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
3471                         __func__);
3472                 goto err;
3473         }
3474
3475         status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd);
3476         if (!status) {
3477                 cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
3478                                                 sizeof(struct be_cmd_resp_hdr));
3479                 for (j = 0; j < cfgs->module[0].num_modes; j++) {
3480                         if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
3481                                 level = cfgs->module[0].trace_lvl[j].dbg_lvl;
3482                 }
3483         }
3484         pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
3485                             extfat_cmd.dma);
3486 err:
3487         return level;
3488 }

3489 static int be_get_initial_config(struct be_adapter *adapter)
3490 {
3491         int status;
3492         u32 level;
3493
3494         status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3495                         &adapter->function_mode, &adapter->function_caps);
3496         if (status)
3497                 return status;
3498
3499         if (adapter->function_mode & FLEX10_MODE)
3500                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/8;
3501         else
3502                 adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3503
3504         if (be_physfn(adapter))
3505                 adapter->max_pmac_cnt = BE_UC_PMAC_COUNT;
3506         else
3507                 adapter->max_pmac_cnt = BE_VF_UC_PMAC_COUNT;
3508
3509         /* primary mac needs 1 pmac entry */
3510         adapter->pmac_id = kcalloc(adapter->max_pmac_cnt + 1,
3511                                   sizeof(u32), GFP_KERNEL);
3512         if (!adapter->pmac_id)
3513                 return -ENOMEM;
3514
3515         status = be_cmd_get_cntl_attributes(adapter);
3516         if (status)
3517                 return status;
3518
3519         status = be_cmd_get_acpi_wol_cap(adapter);
3520         if (status) {
3521                 /* in case of a failure to get wol capabilities
3522                  * check the exclusion list to determine WOL capability */
3523                 if (!be_is_wol_excluded(adapter))
3524                         adapter->wol_cap |= BE_WOL_CAP;
3525         }
3526
3527         if (be_is_wol_supported(adapter))
3528                 adapter->wol = true;
3529
3530         level = be_get_fw_log_level(adapter);
3531         adapter->msg_enable = level <= FW_LOG_LEVEL_DEFAULT ? NETIF_MSG_HW : 0;
3532
3533         return 0;
3534 }
3535
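/* Derive the adapter generation (BE_GEN2/BE_GEN3) from the PCI device id;
 * for the newer device ids, validate the SLI_INTF register and record the
 * SLI family and interface type. Also notes whether the function is a VF.
 */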
3536 static int be_dev_type_check(struct be_adapter *adapter)
3537 {
3538         struct pci_dev *pdev = adapter->pdev;
3539         u32 sli_intf = 0;
3540
3541         switch (pdev->device) {
3542         case BE_DEVICE_ID1:
3543         case OC_DEVICE_ID1:
3544                 adapter->generation = BE_GEN2;
3545                 break;
3546         case BE_DEVICE_ID2:
3547         case OC_DEVICE_ID2:
3548                 adapter->generation = BE_GEN3;
3549                 break;
3550         case OC_DEVICE_ID3:
3551         case OC_DEVICE_ID4:
3552                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3553                 adapter->if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3554                                                 SLI_INTF_IF_TYPE_SHIFT;
3557                 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3558                         !be_type_2_3(adapter)) {
3559                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3560                         return -EINVAL;
3561                 }
3562                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3563                                          SLI_INTF_FAMILY_SHIFT);
3564                 adapter->generation = BE_GEN3;
3565                 break;
3566         case OC_DEVICE_ID5:
3567                 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3568                 if ((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) {
3569                         dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3570                         return -EINVAL;
3571                 }
3572                 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3573                                          SLI_INTF_FAMILY_SHIFT);
3574                 adapter->generation = BE_GEN3;
3575                 break;
3576         default:
3577                 adapter->generation = 0;
3578         }
3579
3580         pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3581         adapter->virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
3582         return 0;
3583 }
3584
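/* Poll the SLIPORT status register, for up to 30 seconds, until the
 * ready bit is set.
 */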
3585 static int lancer_wait_ready(struct be_adapter *adapter)
3586 {
3587 #define SLIPORT_READY_TIMEOUT 30
3588         u32 sliport_status;
3589         int status = 0, i;
3590
3591         for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3592                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3593                 if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3594                         break;
3595
3596                 msleep(1000);
3597         }
3598
3599         if (i == SLIPORT_READY_TIMEOUT)
3600                 status = -1;
3601
3602         return status;
3603 }
3604
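/* If the SLIPORT reports an error state that requires a reset, request a
 * port re-initialization through the control register and wait for the
 * port to come back ready.
 */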
3605 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3606 {
3607         int status;
3608         u32 sliport_status, err, reset_needed;

3609         status = lancer_wait_ready(adapter);
3610         if (!status) {
3611                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3612                 err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3613                 reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3614                 if (err && reset_needed) {
3615                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3616                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3617
3618                         /* check adapter has corrected the error */
3619                         status = lancer_wait_ready(adapter);
3620                         sliport_status = ioread32(adapter->db +
3621                                                         SLIPORT_STATUS_OFFSET);
3622                         sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3623                                                 SLIPORT_STATUS_RN_MASK);
3624                         if (status || sliport_status)
3625                                 status = -1;
3626                 } else if (err || reset_needed) {
3627                         status = -1;
3628                 }
3629         }
3630         return status;
3631 }
3632
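/* Check the SLIPORT status for an error state and, if one is seen, try a
 * full recovery: reset the port, tear the adapter down with be_clear()
 * and rebuild it with be_setup()/be_open().
 */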
3633 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3634 {
3635         int status;
3636         u32 sliport_status;
3637
3638         if (adapter->eeh_err || adapter->ue_detected)
3639                 return;
3640
3641         sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3642
3643         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3644                 dev_err(&adapter->pdev->dev,
3645                                 "Adapter in error state. Trying to recover.\n");
3647
3648                 status = lancer_test_and_set_rdy_state(adapter);
3649                 if (status)
3650                         goto err;
3651
3652                 netif_device_detach(adapter->netdev);
3653
3654                 if (netif_running(adapter->netdev))
3655                         be_close(adapter->netdev);
3656
3657                 be_clear(adapter);
3658
3659                 adapter->fw_timeout = false;
3660
3661                 status = be_setup(adapter);
3662                 if (status)
3663                         goto err;
3664
3665                 if (netif_running(adapter->netdev)) {
3666                         status = be_open(adapter->netdev);
3667                         if (status)
3668                                 goto err;
3669                 }
3670
3671                 netif_device_attach(adapter->netdev);
3672
3673                 dev_info(&adapter->pdev->dev,
3674                                 "Adapter error recovery succeeded\n");
3675         }
3676         return;
3677 err:
3678         dev_err(&adapter->pdev->dev,
3679                         "Adapter error recovery failed\n");
3680 }
3681
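/* Housekeeping run every second: error detection/recovery, FW stats
 * refresh, re-posting of RX buffers to queues that ran dry, and adaptive
 * update of the EQ interrupt-delay values.
 */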
3682 static void be_worker(struct work_struct *work)
3683 {
3684         struct be_adapter *adapter =
3685                 container_of(work, struct be_adapter, work.work);
3686         struct be_rx_obj *rxo;
3687         struct be_eq_obj *eqo;
3688         int i;
3689
3690         if (lancer_chip(adapter))
3691                 lancer_test_and_recover_fn_err(adapter);
3692
3693         be_detect_dump_ue(adapter);
3694
3695         /* when interrupts are not yet enabled, just reap any pending
3696          * mcc completions */
3697         if (!netif_running(adapter->netdev)) {
3698                 be_process_mcc(adapter);
3699                 goto reschedule;
3700         }
3701
3702         if (!adapter->stats_cmd_sent) {
3703                 if (lancer_chip(adapter))
3704                         lancer_cmd_get_pport_stats(adapter,
3705                                                 &adapter->stats_cmd);
3706                 else
3707                         be_cmd_get_stats(adapter, &adapter->stats_cmd);
3708         }
3709
3710         for_all_rx_queues(adapter, rxo, i) {
3711                 if (rxo->rx_post_starved) {
3712                         rxo->rx_post_starved = false;
3713                         be_post_rx_frags(rxo, GFP_KERNEL);
3714                 }
3715         }
3716
3717         for_all_evt_queues(adapter, eqo, i)
3718                 be_eqd_update(adapter, eqo);
3719
3720 reschedule:
3721         adapter->work_counter++;
3722         schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3723 }
3724
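/* be_setup() writes 1 to the scratchpad register when it completes, so a
 * non-zero value at probe time means the function was left configured
 * (e.g. by a previous driver load) and needs a function reset first.
 */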
3725 static bool be_reset_required(struct be_adapter *adapter)
3726 {
3727         u32 reg;
3728
3729         pci_read_config_dword(adapter->pdev, PCICFG_CUST_SCRATCHPAD_CSR, &reg);
3730         return reg;
3731 }
3732
3733 static int __devinit be_probe(struct pci_dev *pdev,
3734                         const struct pci_device_id *pdev_id)
3735 {
3736         int status = 0;
3737         struct be_adapter *adapter;
3738         struct net_device *netdev;
3739
3740         status = pci_enable_device(pdev);
3741         if (status)
3742                 goto do_none;
3743
3744         status = pci_request_regions(pdev, DRV_NAME);
3745         if (status)
3746                 goto disable_dev;
3747         pci_set_master(pdev);
3748
3749         netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
3750         if (netdev == NULL) {
3751                 status = -ENOMEM;
3752                 goto rel_reg;
3753         }
3754         adapter = netdev_priv(netdev);
3755         adapter->pdev = pdev;
3756         pci_set_drvdata(pdev, adapter);
3757
3758         status = be_dev_type_check(adapter);
3759         if (status)
3760                 goto free_netdev;
3761
3762         adapter->netdev = netdev;
3763         SET_NETDEV_DEV(netdev, &pdev->dev);
3764
3765         status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3766         if (!status) {
3767                 netdev->features |= NETIF_F_HIGHDMA;
3768         } else {
3769                 status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3770                 if (status) {
3771                         dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3772                         goto free_netdev;
3773                 }
3774         }
3775
3776         status = be_ctrl_init(adapter);
3777         if (status)
3778                 goto free_netdev;
3779
3780         if (lancer_chip(adapter)) {
3781                 status = lancer_wait_ready(adapter);
3782                 if (!status) {
3783                         iowrite32(SLI_PORT_CONTROL_IP_MASK,
3784                                         adapter->db + SLIPORT_CONTROL_OFFSET);
3785                         status = lancer_test_and_set_rdy_state(adapter);
3786                 }
3787                 if (status) {
3788                         dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
3789                         goto ctrl_clean;
3790                 }
3791         }
3792
3793         /* sync up with fw's ready state */
3794         if (be_physfn(adapter)) {
3795                 status = be_cmd_POST(adapter);
3796                 if (status)
3797                         goto ctrl_clean;
3798         }
3799
3800         /* tell fw we're ready to fire cmds */
3801         status = be_cmd_fw_init(adapter);
3802         if (status)
3803                 goto ctrl_clean;
3804
3805         if (be_reset_required(adapter)) {
3806                 status = be_cmd_reset_function(adapter);
3807                 if (status)
3808                         goto ctrl_clean;
3809         }
3810
3811         /* The INTR bit may be set in the card when probed by a kdump kernel
3812          * after a crash.
3813          */
3814         if (!lancer_chip(adapter))
3815                 be_intr_set(adapter, false);
3816
3817         status = be_stats_init(adapter);
3818         if (status)
3819                 goto ctrl_clean;
3820
3821         status = be_get_initial_config(adapter);
3822         if (status)
3823                 goto stats_clean;
3824
3825         INIT_DELAYED_WORK(&adapter->work, be_worker);
3826         adapter->rx_fc = adapter->tx_fc = true;
3827
3828         status = be_setup(adapter);
3829         if (status)
3830                 goto msix_disable;
3831
3832         be_netdev_init(netdev);
3833         status = register_netdev(netdev);
3834         if (status != 0)
3835                 goto unsetup;
3836
3837         be_roce_dev_add(adapter);
3838
3839         dev_info(&pdev->dev, "%s: %s port %d\n", netdev->name, nic_name(pdev),
3840                 adapter->port_num);
3841
3842         return 0;
3843
3844 unsetup:
3845         be_clear(adapter);
3846 msix_disable:
3847         be_msix_disable(adapter);
3848 stats_clean:
3849         be_stats_cleanup(adapter);
3850 ctrl_clean:
3851         be_ctrl_cleanup(adapter);
3852 free_netdev:
3853         free_netdev(netdev);
3854         pci_set_drvdata(pdev, NULL);
3855 rel_reg:
3856         pci_release_regions(pdev);
3857 disable_dev:
3858         pci_disable_device(pdev);
3859 do_none:
3860         dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3861         return status;
3862 }
3863
3864 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3865 {
3866         struct be_adapter *adapter = pci_get_drvdata(pdev);
3867         struct net_device *netdev =  adapter->netdev;
3868
3869         if (adapter->wol)
3870                 be_setup_wol(adapter, true);
3871
3872         netif_device_detach(netdev);
3873         if (netif_running(netdev)) {
3874                 rtnl_lock();
3875                 be_close(netdev);
3876                 rtnl_unlock();
3877         }
3878         be_clear(adapter);
3879
3880         pci_save_state(pdev);
3881         pci_disable_device(pdev);
3882         pci_set_power_state(pdev, pci_choose_state(pdev, state));
3883         return 0;
3884 }
3885
3886 static int be_resume(struct pci_dev *pdev)
3887 {
3888         int status = 0;
3889         struct be_adapter *adapter = pci_get_drvdata(pdev);
3890         struct net_device *netdev =  adapter->netdev;
3891
3892         netif_device_detach(netdev);
3893
3894         status = pci_enable_device(pdev);
3895         if (status)
3896                 return status;
3897
3898         pci_set_power_state(pdev, PCI_D0);
3899         pci_restore_state(pdev);
3900
3901         /* tell fw we're ready to fire cmds */
3902         status = be_cmd_fw_init(adapter);
3903         if (status)
3904                 return status;
3905
3906         be_setup(adapter);
3907         if (netif_running(netdev)) {
3908                 rtnl_lock();
3909                 be_open(netdev);
3910                 rtnl_unlock();
3911         }
3912         netif_device_attach(netdev);
3913
3914         if (adapter->wol)
3915                 be_setup_wol(adapter, false);
3916
3917         return 0;
3918 }
3919
3920 /*
3921  * An FLR will stop BE from DMAing any data.
3922  */
3923 static void be_shutdown(struct pci_dev *pdev)
3924 {
3925         struct be_adapter *adapter = pci_get_drvdata(pdev);
3926
3927         if (!adapter)
3928                 return;
3929
3930         cancel_delayed_work_sync(&adapter->work);
3931
3932         netif_device_detach(adapter->netdev);
3933
3934         if (adapter->wol)
3935                 be_setup_wol(adapter, true);
3936
3937         be_cmd_reset_function(adapter);
3938
3939         pci_disable_device(pdev);
3940 }
3941
3942 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3943                                 pci_channel_state_t state)
3944 {
3945         struct be_adapter *adapter = pci_get_drvdata(pdev);
3946         struct net_device *netdev =  adapter->netdev;
3947
3948         dev_err(&adapter->pdev->dev, "EEH error detected\n");
3949
3950         adapter->eeh_err = true;
3951
3952         netif_device_detach(netdev);
3953
3954         if (netif_running(netdev)) {
3955                 rtnl_lock();
3956                 be_close(netdev);
3957                 rtnl_unlock();
3958         }
3959         be_clear(adapter);
3960
3961         if (state == pci_channel_io_perm_failure)
3962                 return PCI_ERS_RESULT_DISCONNECT;
3963
3964         pci_disable_device(pdev);
3965
3966         /* The error could cause the FW to trigger a flash debug dump.
3967          * Resetting the card while flash dump is in progress
3968          * can cause it not to recover; wait for it to finish
3969          */
3970         ssleep(30);
3971         return PCI_ERS_RESULT_NEED_RESET;
3972 }
3973
3974 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3975 {
3976         struct be_adapter *adapter = pci_get_drvdata(pdev);
3977         int status;
3978
3979         dev_info(&adapter->pdev->dev, "EEH reset\n");
3980         adapter->eeh_err = false;
3981         adapter->ue_detected = false;
3982         adapter->fw_timeout = false;
3983
3984         status = pci_enable_device(pdev);
3985         if (status)
3986                 return PCI_ERS_RESULT_DISCONNECT;
3987
3988         pci_set_master(pdev);
3989         pci_set_power_state(pdev, PCI_D0);
3990         pci_restore_state(pdev);
3991
3992         /* Check if card is ok and fw is ready */
3993         status = be_cmd_POST(adapter);
3994         if (status)
3995                 return PCI_ERS_RESULT_DISCONNECT;
3996
3997         return PCI_ERS_RESULT_RECOVERED;
3998 }
3999
4000 static void be_eeh_resume(struct pci_dev *pdev)
4001 {
4002         int status = 0;
4003         struct be_adapter *adapter = pci_get_drvdata(pdev);
4004         struct net_device *netdev =  adapter->netdev;
4005
4006         dev_info(&adapter->pdev->dev, "EEH resume\n");
4007
4008         pci_save_state(pdev);
4009
4010         /* tell fw we're ready to fire cmds */
4011         status = be_cmd_fw_init(adapter);
4012         if (status)
4013                 goto err;
4014
4015         status = be_setup(adapter);
4016         if (status)
4017                 goto err;
4018
4019         if (netif_running(netdev)) {
4020                 status = be_open(netdev);
4021                 if (status)
4022                         goto err;
4023         }
4024         netif_device_attach(netdev);
4025         return;
4026 err:
4027         dev_err(&adapter->pdev->dev, "EEH resume failed\n");
4028 }
4029
4030 static struct pci_error_handlers be_eeh_handlers = {
4031         .error_detected = be_eeh_err_detected,
4032         .slot_reset = be_eeh_reset,
4033         .resume = be_eeh_resume,
4034 };
4035
4036 static struct pci_driver be_driver = {
4037         .name = DRV_NAME,
4038         .id_table = be_dev_ids,
4039         .probe = be_probe,
4040         .remove = be_remove,
4041         .suspend = be_suspend,
4042         .resume = be_resume,
4043         .shutdown = be_shutdown,
4044         .err_handler = &be_eeh_handlers
4045 };
4046
4047 static int __init be_init_module(void)
4048 {
4049         if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
4050             rx_frag_size != 2048) {
4051                 printk(KERN_WARNING DRV_NAME
4052                         " : Module param rx_frag_size must be 2048/4096/8192."
4053                         " Using 2048\n");
4054                 rx_frag_size = 2048;
4055         }
4056
4057         return pci_register_driver(&be_driver);
4058 }
4059 module_init(be_init_module);
4060
4061 static void __exit be_exit_module(void)
4062 {
4063         pci_unregister_driver(&be_driver);
4064 }
4065 module_exit(be_exit_module);