ed1d695b1777cc5c1fe014c3d57a51e7dee4bf9b
[firefly-linux-kernel-4.4.55.git] / drivers / net / bnx2x / bnx2x_cmn.c
1 /* bnx2x_cmn.c: Broadcom Everest network driver.
2  *
3  * Copyright (c) 2007-2011 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10  * Written by: Eliezer Tamir
11  * Based on code from Michael Chan's bnx2 driver
12  * UDP CSUM errata workaround by Arik Gendelman
13  * Slowpath and fastpath rework by Vladislav Zolotarov
14  * Statistics and Link management by Yitchak Gertner
15  *
16  */
17
18 #include <linux/etherdevice.h>
19 #include <linux/if_vlan.h>
20 #include <linux/interrupt.h>
21 #include <linux/ip.h>
22 #include <net/ipv6.h>
23 #include <net/ip6_checksum.h>
24 #include <linux/firmware.h>
25 #include <linux/prefetch.h>
26 #include "bnx2x_cmn.h"
27
28 #include "bnx2x_init.h"
29
30 static int bnx2x_setup_irqs(struct bnx2x *bp);
31
32 /**
33  * bnx2x_bz_fp - zero content of the fastpath structure.
34  *
35  * @bp:         driver handle
36  * @index:      fastpath index to be zeroed
37  *
38  * Makes sure the contents of bp->fp[index].napi are kept
39  * intact.
40  */
41 static inline void bnx2x_bz_fp(struct bnx2x *bp, int index)
42 {
43         struct bnx2x_fastpath *fp = &bp->fp[index];
44         struct napi_struct orig_napi = fp->napi;
45         /* bzero bnx2x_fastpath contents */
46         memset(fp, 0, sizeof(*fp));
47
48         /* Restore the NAPI object as it has been already initialized */
49         fp->napi = orig_napi;
50 }
51
52 /**
53  * bnx2x_move_fp - move content of the fastpath structure.
54  *
55  * @bp:         driver handle
56  * @from:       source FP index
57  * @to:         destination FP index
58  *
59  * Makes sure the contents of bp->fp[to].napi are kept
60  * intact.
61  */
62 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
63 {
64         struct bnx2x_fastpath *from_fp = &bp->fp[from];
65         struct bnx2x_fastpath *to_fp = &bp->fp[to];
66         struct napi_struct orig_napi = to_fp->napi;
67         /* Move bnx2x_fastpath contents */
68         memcpy(to_fp, from_fp, sizeof(*to_fp));
69         to_fp->index = to;
70
71         /* Restore the NAPI object as it has been already initialized */
72         to_fp->napi = orig_napi;
73 }
74
75 /* free skb in the packet ring at pos idx
76  * return idx of last bd freed
77  */
78 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
79                              u16 idx)
80 {
81         struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
82         struct eth_tx_start_bd *tx_start_bd;
83         struct eth_tx_bd *tx_data_bd;
84         struct sk_buff *skb = tx_buf->skb;
85         u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
86         int nbd;
87
88         /* prefetch skb end pointer to speed up dev_kfree_skb() */
89         prefetch(&skb->end);
90
91         DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
92            idx, tx_buf, skb);
93
94         /* unmap first bd */
95         DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
96         tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
97         dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
98                          BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);
99
100         nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
101 #ifdef BNX2X_STOP_ON_ERROR
102         if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
103                 BNX2X_ERR("BAD nbd!\n");
104                 bnx2x_panic();
105         }
106 #endif
107         new_cons = nbd + tx_buf->first_bd;
108
109         /* Get the next bd */
110         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
111
112         /* Skip a parse bd... */
113         --nbd;
114         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
115
116         /* ...and the TSO split header bd since they have no mapping */
117         if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
118                 --nbd;
119                 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
120         }
121
122         /* now free frags */
123         while (nbd > 0) {
124
125                 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
126                 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
127                 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
128                                BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
129                 if (--nbd)
130                         bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
131         }
132
133         /* release skb */
134         WARN_ON(!skb);
135         dev_kfree_skb_any(skb);
136         tx_buf->first_bd = 0;
137         tx_buf->skb = NULL;
138
139         return new_cons;
140 }
141
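/* bnx2x_tx_int - process TX completions for a fastpath queue.
 *
 * Walks the packets completed between the driver's sw consumer and the hw
 * consumer taken from the status block, frees their buffers via
 * bnx2x_free_tx_pkt() and, if the netdev TX queue was stopped, re-wakes it
 * once enough BDs are available again.
 */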
142 int bnx2x_tx_int(struct bnx2x_fastpath *fp)
143 {
144         struct bnx2x *bp = fp->bp;
145         struct netdev_queue *txq;
146         u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
147
148 #ifdef BNX2X_STOP_ON_ERROR
149         if (unlikely(bp->panic))
150                 return -1;
151 #endif
152
153         txq = netdev_get_tx_queue(bp->dev, fp->index);
154         hw_cons = le16_to_cpu(*fp->tx_cons_sb);
155         sw_cons = fp->tx_pkt_cons;
156
157         while (sw_cons != hw_cons) {
158                 u16 pkt_cons;
159
160                 pkt_cons = TX_BD(sw_cons);
161
162                 DP(NETIF_MSG_TX_DONE, "queue[%d]: hw_cons %u  sw_cons %u "
163                                       " pkt_cons %u\n",
164                    fp->index, hw_cons, sw_cons, pkt_cons);
165
166                 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
167                 sw_cons++;
168         }
169
170         fp->tx_pkt_cons = sw_cons;
171         fp->tx_bd_cons = bd_cons;
172
173         /* Need to make the tx_bd_cons update visible to start_xmit()
174          * before checking for netif_tx_queue_stopped().  Without the
175          * memory barrier, there is a small possibility that
176          * start_xmit() will miss it and cause the queue to be stopped
177          * forever.
178          */
179         smp_mb();
180
181         if (unlikely(netif_tx_queue_stopped(txq))) {
182                 /* Taking tx_lock() is needed to prevent re-enabling the queue
183                  * while it's empty. This could happen if rx_action() gets
184                  * suspended in bnx2x_tx_int() after the condition before
185                  * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
186                  *
187                  * stops the queue->sees fresh tx_bd_cons->releases the queue->
188                  * sends some packets consuming the whole queue again->
189                  * stops the queue
190                  */
191
192                 __netif_tx_lock(txq, smp_processor_id());
193
194                 if ((netif_tx_queue_stopped(txq)) &&
195                     (bp->state == BNX2X_STATE_OPEN) &&
196                     (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
197                         netif_tx_wake_queue(txq);
198
199                 __netif_tx_unlock(txq);
200         }
201         return 0;
202 }
203
204 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
205                                              u16 idx)
206 {
207         u16 last_max = fp->last_max_sge;
208
209         if (SUB_S16(idx, last_max) > 0)
210                 fp->last_max_sge = idx;
211 }
212
213 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
214                                   struct eth_fast_path_rx_cqe *fp_cqe)
215 {
216         struct bnx2x *bp = fp->bp;
217         u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
218                                      le16_to_cpu(fp_cqe->len_on_bd)) >>
219                       SGE_PAGE_SHIFT;
220         u16 last_max, last_elem, first_elem;
221         u16 delta = 0;
222         u16 i;
223
224         if (!sge_len)
225                 return;
226
227         /* First mark all used pages */
228         for (i = 0; i < sge_len; i++)
229                 SGE_MASK_CLEAR_BIT(fp,
230                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[i])));
231
232         DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
233            sge_len - 1, le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
234
235         /* Here we assume that the last SGE index is the biggest */
236         prefetch((void *)(fp->sge_mask));
237         bnx2x_update_last_max_sge(fp,
238                 le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[sge_len - 1]));
239
240         last_max = RX_SGE(fp->last_max_sge);
241         last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
242         first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
243
244         /* If ring is not full */
245         if (last_elem + 1 != first_elem)
246                 last_elem++;
247
248         /* Now update the prod */
249         for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
250                 if (likely(fp->sge_mask[i]))
251                         break;
252
253                 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
254                 delta += RX_SGE_MASK_ELEM_SZ;
255         }
256
257         if (delta > 0) {
258                 fp->rx_sge_prod += delta;
259                 /* clear page-end entries */
260                 bnx2x_clear_sge_mask_next_elems(fp);
261         }
262
263         DP(NETIF_MSG_RX_STATUS,
264            "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
265            fp->last_max_sge, fp->rx_sge_prod);
266 }
267
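/* bnx2x_tpa_start - open a TPA aggregation on @queue.
 *
 * The empty skb parked in the per-queue TPA pool is mapped and placed at the
 * producer slot, while the skb at the consumer slot (holding the first part
 * of the aggregation) is moved into the pool, still mapped, until
 * bnx2x_tpa_stop() completes the aggregation.
 */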
268 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
269                             struct sk_buff *skb, u16 cons, u16 prod)
270 {
271         struct bnx2x *bp = fp->bp;
272         struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
273         struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
274         struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
275         dma_addr_t mapping;
276
277         /* move empty skb from pool to prod and map it */
278         prod_rx_buf->skb = fp->tpa_pool[queue].skb;
279         mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
280                                  fp->rx_buf_size, DMA_FROM_DEVICE);
281         dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
282
283         /* move partial skb from cons to pool (don't unmap yet) */
284         fp->tpa_pool[queue] = *cons_rx_buf;
285
286         /* mark bin state as start - print error if current state != stop */
287         if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
288                 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
289
290         fp->tpa_state[queue] = BNX2X_TPA_START;
291
292         /* point prod_bd to new skb */
293         prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
294         prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
295
296 #ifdef BNX2X_STOP_ON_ERROR
297         fp->tpa_queue_used |= (1 << queue);
298 #ifdef _ASM_GENERIC_INT_L64_H
299         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
300 #else
301         DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
302 #endif
303            fp->tpa_queue_used);
304 #endif
305 }
306
307 /* Timestamp option length allowed for TPA aggregation:
308  *
309  *              nop nop kind length echo val
310  */
311 #define TPA_TSTAMP_OPT_LEN      12
312 /**
313  * bnx2x_set_lro_mss - calculate the approximate value of the MSS
314  *
315  * @bp:                 driver handle
316  * @parsing_flags:      parsing flags from the START CQE
317  * @len_on_bd:          total length of the first packet for the
318  *                      aggregation.
319  *
320  * Returns the approximate value of the MSS for this aggregation,
321  * calculated from its first packet.
322  */
323 static inline u16 bnx2x_set_lro_mss(struct bnx2x *bp, u16 parsing_flags,
324                                     u16 len_on_bd)
325 {
326         /* A TPA aggregation won't have IP options or TCP options
327          * other than the timestamp.
328          */
329         u16 hdrs_len = ETH_HLEN + sizeof(struct iphdr) + sizeof(struct tcphdr);
330
331
332         /* Check if there was a TCP timestamp; if there was, it will
333          * always be 12 bytes long: nop nop kind length echo val.
334          *
335          * Otherwise FW would close the aggregation.
336          */
337         if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
338                 hdrs_len += TPA_TSTAMP_OPT_LEN;
339
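        /* Example: with a standard 1500-byte MTU the first frame carries
         * len_on_bd = 1514 bytes (1500 + ETH_HLEN), so with timestamps
         * hdrs_len = 14 + 20 + 20 + 12 = 66 and the estimated MSS is 1448,
         * the usual TCP MSS when timestamp options are in use.
         */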
340         return len_on_bd - hdrs_len;
341 }
342
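/* bnx2x_fill_frag_skb - attach the SGL pages of an aggregation to @skb.
 *
 * Walks the SGEs listed in the CQE, replaces each consumed page with a
 * freshly allocated one and adds the old page as an skb fragment. Also sets
 * gso_size from bnx2x_set_lro_mss() so the stack can re-segment the
 * aggregated packet (needed for forwarding support).
 */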
343 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
344                                struct sk_buff *skb,
345                                struct eth_fast_path_rx_cqe *fp_cqe,
346                                u16 cqe_idx, u16 parsing_flags)
347 {
348         struct sw_rx_page *rx_pg, old_rx_pg;
349         u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
350         u32 i, frag_len, frag_size, pages;
351         int err;
352         int j;
353
354         frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
355         pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
356
357         /* This is needed in order to enable forwarding support */
358         if (frag_size)
359                 skb_shinfo(skb)->gso_size = bnx2x_set_lro_mss(bp, parsing_flags,
360                                                               len_on_bd);
361
362 #ifdef BNX2X_STOP_ON_ERROR
363         if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
364                 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
365                           pages, cqe_idx);
366                 BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
367                           fp_cqe->pkt_len, len_on_bd);
368                 bnx2x_panic();
369                 return -EINVAL;
370         }
371 #endif
372
373         /* Run through the SGL and compose the fragmented skb */
374         for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
375                 u16 sge_idx =
376                         RX_SGE(le16_to_cpu(fp_cqe->sgl_or_raw_data.sgl[j]));
377
378                 /* FW gives the indices of the SGE as if the ring is an array
379                    (meaning that "next" element will consume 2 indices) */
380                 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
381                 rx_pg = &fp->rx_page_ring[sge_idx];
382                 old_rx_pg = *rx_pg;
383
384                 /* If we fail to allocate a substitute page, we simply stop
385                    where we are and drop the whole packet */
386                 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
387                 if (unlikely(err)) {
388                         fp->eth_q_stats.rx_skb_alloc_failed++;
389                         return err;
390                 }
391
392                 /* Unmap the page as we are going to pass it to the stack */
393                 dma_unmap_page(&bp->pdev->dev,
394                                dma_unmap_addr(&old_rx_pg, mapping),
395                                SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
396
397                 /* Add one frag and update the appropriate fields in the skb */
398                 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
399
400                 skb->data_len += frag_len;
401                 skb->truesize += frag_len;
402                 skb->len += frag_len;
403
404                 frag_size -= frag_len;
405         }
406
407         return 0;
408 }
409
410 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
411                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
412                            u16 cqe_idx)
413 {
414         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
415         struct sk_buff *skb = rx_buf->skb;
416         /* alloc new skb */
417         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, fp->rx_buf_size);
418
419         /* Unmap skb in the pool anyway, as we are going to change
420            pool entry status to BNX2X_TPA_STOP even if new skb allocation
421            fails. */
422         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
423                          fp->rx_buf_size, DMA_FROM_DEVICE);
424
425         if (likely(new_skb)) {
426                 /* fix ip xsum and give it to the stack */
427                 /* (no need to map the new skb) */
428                 u16 parsing_flags =
429                         le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags);
430
431                 prefetch(skb);
432                 prefetch(((char *)(skb)) + L1_CACHE_BYTES);
433
434 #ifdef BNX2X_STOP_ON_ERROR
435                 if (pad + len > fp->rx_buf_size) {
436                         BNX2X_ERR("skb_put is about to fail...  "
437                                   "pad %d  len %d  rx_buf_size %d\n",
438                                   pad, len, fp->rx_buf_size);
439                         bnx2x_panic();
440                         return;
441                 }
442 #endif
443
444                 skb_reserve(skb, pad);
445                 skb_put(skb, len);
446
447                 skb->protocol = eth_type_trans(skb, bp->dev);
448                 skb->ip_summed = CHECKSUM_UNNECESSARY;
449
450                 {
451                         struct iphdr *iph;
452
453                         iph = (struct iphdr *)skb->data;
454                         iph->check = 0;
455                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
456                 }
457
458                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
459                                          &cqe->fast_path_cqe, cqe_idx,
460                                          parsing_flags)) {
461                         if (parsing_flags & PARSING_FLAGS_VLAN)
462                                 __vlan_hwaccel_put_tag(skb,
463                                                  le16_to_cpu(cqe->fast_path_cqe.
464                                                              vlan_tag));
465                         napi_gro_receive(&fp->napi, skb);
466                 } else {
467                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
468                            " - dropping packet!\n");
469                         dev_kfree_skb_any(skb);
470                 }
471
472
473                 /* put new skb in bin */
474                 fp->tpa_pool[queue].skb = new_skb;
475
476         } else {
477                 /* else drop the packet and keep the buffer in the bin */
478                 DP(NETIF_MSG_RX_STATUS,
479                    "Failed to allocate new skb - dropping packet!\n");
480                 fp->eth_q_stats.rx_skb_alloc_failed++;
481         }
482
483         fp->tpa_state[queue] = BNX2X_TPA_STOP;
484 }
485
486 /* Set Toeplitz hash value in the skb using the value from the
487  * CQE (calculated by HW).
488  */
489 static inline void bnx2x_set_skb_rxhash(struct bnx2x *bp, union eth_rx_cqe *cqe,
490                                         struct sk_buff *skb)
491 {
492         /* Set Toeplitz hash from CQE */
493         if ((bp->dev->features & NETIF_F_RXHASH) &&
494             (cqe->fast_path_cqe.status_flags &
495              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
496                 skb->rxhash =
497                 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result);
498 }
499
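/* bnx2x_rx_int - process up to @budget RX completions for a fastpath queue.
 *
 * Consumes CQEs between the sw and hw completion consumers: slowpath CQEs
 * are handed to bnx2x_sp_event(), TPA start/stop CQEs update the per-queue
 * aggregation state, and regular packets are passed up with
 * napi_gro_receive() after checksum/VLAN/rxhash handling. Finally the RX BD,
 * CQ and SGE producers are published to the chip.
 */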
500 int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
501 {
502         struct bnx2x *bp = fp->bp;
503         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
504         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
505         int rx_pkt = 0;
506
507 #ifdef BNX2X_STOP_ON_ERROR
508         if (unlikely(bp->panic))
509                 return 0;
510 #endif
511
512         /* The CQ "next element" is the same size as a regular element,
513            that's why it's ok here */
514         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
515         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
516                 hw_comp_cons++;
517
518         bd_cons = fp->rx_bd_cons;
519         bd_prod = fp->rx_bd_prod;
520         bd_prod_fw = bd_prod;
521         sw_comp_cons = fp->rx_comp_cons;
522         sw_comp_prod = fp->rx_comp_prod;
523
524         /* Memory barrier necessary as speculative reads of the rx
525          * buffer can be ahead of the index in the status block
526          */
527         rmb();
528
529         DP(NETIF_MSG_RX_STATUS,
530            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
531            fp->index, hw_comp_cons, sw_comp_cons);
532
533         while (sw_comp_cons != hw_comp_cons) {
534                 struct sw_rx_bd *rx_buf = NULL;
535                 struct sk_buff *skb;
536                 union eth_rx_cqe *cqe;
537                 u8 cqe_fp_flags;
538                 u16 len, pad;
539
540                 comp_ring_cons = RCQ_BD(sw_comp_cons);
541                 bd_prod = RX_BD(bd_prod);
542                 bd_cons = RX_BD(bd_cons);
543
544                 /* Prefetch the page containing the BD descriptor
545                    at the producer's index. It will be needed when a new skb is
546                    allocated */
547                 prefetch((void *)(PAGE_ALIGN((unsigned long)
548                                              (&fp->rx_desc_ring[bd_prod])) -
549                                   PAGE_SIZE + 1));
550
551                 cqe = &fp->rx_comp_ring[comp_ring_cons];
552                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
553
554                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
555                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
556                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
557                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
558                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
559                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
560
561                 /* is this a slowpath msg? */
562                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
563                         bnx2x_sp_event(fp, cqe);
564                         goto next_cqe;
565
566                 /* this is an rx packet */
567                 } else {
568                         rx_buf = &fp->rx_buf_ring[bd_cons];
569                         skb = rx_buf->skb;
570                         prefetch(skb);
571                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
572                         pad = cqe->fast_path_cqe.placement_offset;
573
574                         /* - If CQE is marked both TPA_START and TPA_END it is
575                          *   a non-TPA CQE.
576                          * - An FP CQE will always have the TPA_START and/or
577                          *   TPA_STOP flags set.
578                          */
579                         if ((!fp->disable_tpa) &&
580                             (TPA_TYPE(cqe_fp_flags) !=
581                                         (TPA_TYPE_START | TPA_TYPE_END))) {
582                                 u16 queue = cqe->fast_path_cqe.queue_index;
583
584                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
585                                         DP(NETIF_MSG_RX_STATUS,
586                                            "calling tpa_start on queue %d\n",
587                                            queue);
588
589                                         bnx2x_tpa_start(fp, queue, skb,
590                                                         bd_cons, bd_prod);
591
592                                         /* Set Toeplitz hash for an LRO skb */
593                                         bnx2x_set_skb_rxhash(bp, cqe, skb);
594
595                                         goto next_rx;
596                                 } else { /* TPA_STOP */
597                                         DP(NETIF_MSG_RX_STATUS,
598                                            "calling tpa_stop on queue %d\n",
599                                            queue);
600
601                                         if (!BNX2X_RX_SUM_FIX(cqe))
602                                                 BNX2X_ERR("STOP on non-TCP "
603                                                           "data\n");
604
605                                         /* This is the size of the linear data
606                                            on this skb */
607                                         len = le16_to_cpu(cqe->fast_path_cqe.
608                                                                 len_on_bd);
609                                         bnx2x_tpa_stop(bp, fp, queue, pad,
610                                                     len, cqe, comp_ring_cons);
611 #ifdef BNX2X_STOP_ON_ERROR
612                                         if (bp->panic)
613                                                 return 0;
614 #endif
615
616                                         bnx2x_update_sge_prod(fp,
617                                                         &cqe->fast_path_cqe);
618                                         goto next_cqe;
619                                 }
620                         }
621
622                         dma_sync_single_for_device(&bp->pdev->dev,
623                                         dma_unmap_addr(rx_buf, mapping),
624                                                    pad + RX_COPY_THRESH,
625                                                    DMA_FROM_DEVICE);
626                         prefetch(((char *)(skb)) + L1_CACHE_BYTES);
627
628                         /* is this an error packet? */
629                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
630                                 DP(NETIF_MSG_RX_ERR,
631                                    "ERROR  flags %x  rx packet %u\n",
632                                    cqe_fp_flags, sw_comp_cons);
633                                 fp->eth_q_stats.rx_err_discard_pkt++;
634                                 goto reuse_rx;
635                         }
636
637                         /* Since we don't have a jumbo ring,
638                          * copy small packets if mtu > 1500
639                          */
640                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
641                             (len <= RX_COPY_THRESH)) {
642                                 struct sk_buff *new_skb;
643
644                                 new_skb = netdev_alloc_skb(bp->dev,
645                                                            len + pad);
646                                 if (new_skb == NULL) {
647                                         DP(NETIF_MSG_RX_ERR,
648                                            "ERROR  packet dropped "
649                                            "because of alloc failure\n");
650                                         fp->eth_q_stats.rx_skb_alloc_failed++;
651                                         goto reuse_rx;
652                                 }
653
654                                 /* aligned copy */
655                                 skb_copy_from_linear_data_offset(skb, pad,
656                                                     new_skb->data + pad, len);
657                                 skb_reserve(new_skb, pad);
658                                 skb_put(new_skb, len);
659
660                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
661
662                                 skb = new_skb;
663
664                         } else
665                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
666                                 dma_unmap_single(&bp->pdev->dev,
667                                         dma_unmap_addr(rx_buf, mapping),
668                                                  fp->rx_buf_size,
669                                                  DMA_FROM_DEVICE);
670                                 skb_reserve(skb, pad);
671                                 skb_put(skb, len);
672
673                         } else {
674                                 DP(NETIF_MSG_RX_ERR,
675                                    "ERROR  packet dropped because "
676                                    "of alloc failure\n");
677                                 fp->eth_q_stats.rx_skb_alloc_failed++;
678 reuse_rx:
679                                 bnx2x_reuse_rx_skb(fp, bd_cons, bd_prod);
680                                 goto next_rx;
681                         }
682
683                         skb->protocol = eth_type_trans(skb, bp->dev);
684
685                         /* Set Toeplitz hash for a non-LRO skb */
686                         bnx2x_set_skb_rxhash(bp, cqe, skb);
687
688                         skb_checksum_none_assert(skb);
689
690                         if (bp->dev->features & NETIF_F_RXCSUM) {
691                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
692                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
693                                 else
694                                         fp->eth_q_stats.hw_csum_err++;
695                         }
696                 }
697
698                 skb_record_rx_queue(skb, fp->index);
699
700                 if (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
701                      PARSING_FLAGS_VLAN)
702                         __vlan_hwaccel_put_tag(skb,
703                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
704                 napi_gro_receive(&fp->napi, skb);
705
706
707 next_rx:
708                 rx_buf->skb = NULL;
709
710                 bd_cons = NEXT_RX_IDX(bd_cons);
711                 bd_prod = NEXT_RX_IDX(bd_prod);
712                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
713                 rx_pkt++;
714 next_cqe:
715                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
716                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
717
718                 if (rx_pkt == budget)
719                         break;
720         } /* while */
721
722         fp->rx_bd_cons = bd_cons;
723         fp->rx_bd_prod = bd_prod_fw;
724         fp->rx_comp_cons = sw_comp_cons;
725         fp->rx_comp_prod = sw_comp_prod;
726
727         /* Update producers */
728         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
729                              fp->rx_sge_prod);
730
731         fp->rx_pkt += rx_pkt;
732         fp->rx_calls++;
733
734         return rx_pkt;
735 }
736
737 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
738 {
739         struct bnx2x_fastpath *fp = fp_cookie;
740         struct bnx2x *bp = fp->bp;
741
742         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB "
743                          "[fp %d fw_sd %d igusb %d]\n",
744            fp->index, fp->fw_sb_id, fp->igu_sb_id);
745         bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
746
747 #ifdef BNX2X_STOP_ON_ERROR
748         if (unlikely(bp->panic))
749                 return IRQ_HANDLED;
750 #endif
751
752         /* Handle Rx and Tx according to MSI-X vector */
753         prefetch(fp->rx_cons_sb);
754         prefetch(fp->tx_cons_sb);
755         prefetch(&fp->sb_running_index[SM_RX_ID]);
756         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
757
758         return IRQ_HANDLED;
759 }
760
761 /* HW Lock for shared dual port PHYs */
762 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
763 {
764         mutex_lock(&bp->port.phy_mutex);
765
766         if (bp->port.need_hw_lock)
767                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
768 }
769
770 void bnx2x_release_phy_lock(struct bnx2x *bp)
771 {
772         if (bp->port.need_hw_lock)
773                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
774
775         mutex_unlock(&bp->port.phy_mutex);
776 }
777
778 /* calculates MF speed according to the current line speed and MF configuration */
779 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
780 {
781         u16 line_speed = bp->link_vars.line_speed;
782         if (IS_MF(bp)) {
783                 u16 maxCfg = bnx2x_extract_max_cfg(bp,
784                                                    bp->mf_config[BP_VN(bp)]);
785
786                 /* Calculate the current MAX line speed limit for the MF
787                  * devices
788                  */
789                 if (IS_MF_SI(bp))
790                         line_speed = (line_speed * maxCfg) / 100;
791                 else { /* SD mode */
792                         u16 vn_max_rate = maxCfg * 100;
793
794                         if (vn_max_rate < line_speed)
795                                 line_speed = vn_max_rate;
796                 }
797         }
798
799         return line_speed;
800 }
801
802 /**
803  * bnx2x_fill_report_data - fill link report data to report
804  *
805  * @bp:         driver handle
806  * @data:       link state to update
807  *
808  * It uses non-atomic bit operations because it is called under the mutex.
809  */
810 static inline void bnx2x_fill_report_data(struct bnx2x *bp,
811                                           struct bnx2x_link_report_data *data)
812 {
813         u16 line_speed = bnx2x_get_mf_speed(bp);
814
815         memset(data, 0, sizeof(*data));
816
817         /* Fill the report data: effective line speed */
818         data->line_speed = line_speed;
819
820         /* Link is down */
821         if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
822                 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
823                           &data->link_report_flags);
824
825         /* Full DUPLEX */
826         if (bp->link_vars.duplex == DUPLEX_FULL)
827                 __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
828
829         /* Rx Flow Control is ON */
830         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
831                 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
832
833         /* Tx Flow Control is ON */
834         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
835                 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
836 }
837
838 /**
839  * bnx2x_link_report - report link status to OS.
840  *
841  * @bp:         driver handle
842  *
843  * Calls __bnx2x_link_report() under the same locking scheme
844  * as the link/PHY state managing code to ensure consistent link
845  * reporting.
846  */
847
848 void bnx2x_link_report(struct bnx2x *bp)
849 {
850         bnx2x_acquire_phy_lock(bp);
851         __bnx2x_link_report(bp);
852         bnx2x_release_phy_lock(bp);
853 }
854
855 /**
856  * __bnx2x_link_report - report link status to OS.
857  *
858  * @bp:         driver handle
859  *
860  * Non-atomic implementation.
861  * Should be called under the phy_lock.
862  */
863 void __bnx2x_link_report(struct bnx2x *bp)
864 {
865         struct bnx2x_link_report_data cur_data;
866
867         /* reread mf_cfg */
868         if (!CHIP_IS_E1(bp))
869                 bnx2x_read_mf_cfg(bp);
870
871         /* Read the current link report info */
872         bnx2x_fill_report_data(bp, &cur_data);
873
874         /* Don't report link down or exactly the same link status twice */
875         if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
876             (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
877                       &bp->last_reported_link.link_report_flags) &&
878              test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
879                       &cur_data.link_report_flags)))
880                 return;
881
882         bp->link_cnt++;
883
884         /* We are going to report new link parameters now -
885          * remember the current data for next time.
886          */
887         memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
888
889         if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
890                      &cur_data.link_report_flags)) {
891                 netif_carrier_off(bp->dev);
892                 netdev_err(bp->dev, "NIC Link is Down\n");
893                 return;
894         } else {
895                 netif_carrier_on(bp->dev);
896                 netdev_info(bp->dev, "NIC Link is Up, ");
897                 pr_cont("%d Mbps ", cur_data.line_speed);
898
899                 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
900                                        &cur_data.link_report_flags))
901                         pr_cont("full duplex");
902                 else
903                         pr_cont("half duplex");
904
905                 /* Handle the FC at the end so that only these flags could
906                  * possibly be set. This way we can easily check whether any FC
907                  * is enabled.
908                  */
909                 if (cur_data.link_report_flags) {
910                         if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
911                                      &cur_data.link_report_flags)) {
912                                 pr_cont(", receive ");
913                                 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
914                                      &cur_data.link_report_flags))
915                                         pr_cont("& transmit ");
916                         } else {
917                                 pr_cont(", transmit ");
918                         }
919                         pr_cont("flow control ON");
920                 }
921                 pr_cont("\n");
922         }
923 }
924
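/* bnx2x_init_rx_rings - prepare the RX rings for traffic.
 *
 * For every RX queue this fills the per-aggregation TPA skb pool and the SGE
 * ring (disabling TPA on a queue if an allocation fails), then publishes the
 * initial BD/CQ/SGE producers to the chip. On non-E2 chips the queue-0
 * completion ring address is also written to the USTORM memory workaround
 * location for this function.
 */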
925 void bnx2x_init_rx_rings(struct bnx2x *bp)
926 {
927         int func = BP_FUNC(bp);
928         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
929                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
930         u16 ring_prod;
931         int i, j;
932
933         /* Allocate TPA resources */
934         for_each_rx_queue(bp, j) {
935                 struct bnx2x_fastpath *fp = &bp->fp[j];
936
937                 DP(NETIF_MSG_IFUP,
938                    "mtu %d  rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
939
940                 if (!fp->disable_tpa) {
941                         /* Fill the per-aggregation pool */
942                         for (i = 0; i < max_agg_queues; i++) {
943                                 fp->tpa_pool[i].skb =
944                                    netdev_alloc_skb(bp->dev, fp->rx_buf_size);
945                                 if (!fp->tpa_pool[i].skb) {
946                                         BNX2X_ERR("Failed to allocate TPA "
947                                                   "skb pool for queue[%d] - "
948                                                   "disabling TPA on this "
949                                                   "queue!\n", j);
950                                         bnx2x_free_tpa_pool(bp, fp, i);
951                                         fp->disable_tpa = 1;
952                                         break;
953                                 }
954                                 dma_unmap_addr_set((struct sw_rx_bd *)
955                                                         &fp->tpa_pool[i],
956                                                    mapping, 0);
957                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
958                         }
959
960                         /* "next page" elements initialization */
961                         bnx2x_set_next_page_sgl(fp);
962
963                         /* set SGEs bit mask */
964                         bnx2x_init_sge_ring_bit_mask(fp);
965
966                         /* Allocate SGEs and initialize the ring elements */
967                         for (i = 0, ring_prod = 0;
968                              i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
969
970                                 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
971                                         BNX2X_ERR("was only able to allocate "
972                                                   "%d rx sges\n", i);
973                                         BNX2X_ERR("disabling TPA for"
974                                                   " queue[%d]\n", j);
975                                         /* Cleanup already allocated elements */
976                                         bnx2x_free_rx_sge_range(bp,
977                                                                 fp, ring_prod);
978                                         bnx2x_free_tpa_pool(bp,
979                                                             fp, max_agg_queues);
980                                         fp->disable_tpa = 1;
981                                         ring_prod = 0;
982                                         break;
983                                 }
984                                 ring_prod = NEXT_SGE_IDX(ring_prod);
985                         }
986
987                         fp->rx_sge_prod = ring_prod;
988                 }
989         }
990
991         for_each_rx_queue(bp, j) {
992                 struct bnx2x_fastpath *fp = &bp->fp[j];
993
994                 fp->rx_bd_cons = 0;
995
996                 /* Activate BD ring */
997                 /* Warning!
998                  * This will generate an interrupt (to the TSTORM);
999                  * it must only be done after the chip is initialized.
1000                  */
1001                 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1002                                      fp->rx_sge_prod);
1003
1004                 if (j != 0)
1005                         continue;
1006
1007                 if (!CHIP_IS_E2(bp)) {
1008                         REG_WR(bp, BAR_USTRORM_INTMEM +
1009                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1010                                U64_LO(fp->rx_comp_mapping));
1011                         REG_WR(bp, BAR_USTRORM_INTMEM +
1012                                USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1013                                U64_HI(fp->rx_comp_mapping));
1014                 }
1015         }
1016 }
1017
1018 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1019 {
1020         int i;
1021
1022         for_each_tx_queue(bp, i) {
1023                 struct bnx2x_fastpath *fp = &bp->fp[i];
1024
1025                 u16 bd_cons = fp->tx_bd_cons;
1026                 u16 sw_prod = fp->tx_pkt_prod;
1027                 u16 sw_cons = fp->tx_pkt_cons;
1028
1029                 while (sw_cons != sw_prod) {
1030                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
1031                         sw_cons++;
1032                 }
1033         }
1034 }
1035
1036 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1037 {
1038         struct bnx2x *bp = fp->bp;
1039         int i;
1040
1041         /* ring wasn't allocated */
1042         if (fp->rx_buf_ring == NULL)
1043                 return;
1044
1045         for (i = 0; i < NUM_RX_BD; i++) {
1046                 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1047                 struct sk_buff *skb = rx_buf->skb;
1048
1049                 if (skb == NULL)
1050                         continue;
1051
1052                 dma_unmap_single(&bp->pdev->dev,
1053                                  dma_unmap_addr(rx_buf, mapping),
1054                                  fp->rx_buf_size, DMA_FROM_DEVICE);
1055
1056                 rx_buf->skb = NULL;
1057                 dev_kfree_skb(skb);
1058         }
1059 }
1060
1061 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1062 {
1063         int j;
1064
1065         for_each_rx_queue(bp, j) {
1066                 struct bnx2x_fastpath *fp = &bp->fp[j];
1067
1068                 bnx2x_free_rx_bds(fp);
1069
1070                 if (!fp->disable_tpa)
1071                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
1072                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
1073                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
1074         }
1075 }
1076
1077 void bnx2x_free_skbs(struct bnx2x *bp)
1078 {
1079         bnx2x_free_tx_skbs(bp);
1080         bnx2x_free_rx_skbs(bp);
1081 }
1082
1083 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1084 {
1085         /* load old values */
1086         u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1087
1088         if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1089                 /* leave all but MAX value */
1090                 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1091
1092                 /* set new MAX value */
1093                 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1094                                 & FUNC_MF_CFG_MAX_BW_MASK;
1095
1096                 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1097         }
1098 }
1099
1100 /**
1101  * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1102  *
1103  * @bp:         driver handle
1104  * @nvecs:      number of vectors to be released
1105  */
1106 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1107 {
1108         int i, offset = 0;
1109
1110         if (nvecs == offset)
1111                 return;
1112         free_irq(bp->msix_table[offset].vector, bp->dev);
1113         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1114            bp->msix_table[offset].vector);
1115         offset++;
1116 #ifdef BCM_CNIC
1117         if (nvecs == offset)
1118                 return;
1119         offset++;
1120 #endif
1121
1122         for_each_eth_queue(bp, i) {
1123                 if (nvecs == offset)
1124                         return;
1125                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d "
1126                    "irq\n", i, bp->msix_table[offset].vector);
1127
1128                 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1129         }
1130 }
1131
1132 void bnx2x_free_irq(struct bnx2x *bp)
1133 {
1134         if (bp->flags & USING_MSIX_FLAG)
1135                 bnx2x_free_msix_irqs(bp, BNX2X_NUM_ETH_QUEUES(bp) +
1136                                      CNIC_CONTEXT_USE + 1);
1137         else if (bp->flags & USING_MSI_FLAG)
1138                 free_irq(bp->pdev->irq, bp->dev);
1139         else
1140                 free_irq(bp->pdev->irq, bp->dev);
1141 }
1142
1143 int bnx2x_enable_msix(struct bnx2x *bp)
1144 {
1145         int msix_vec = 0, i, rc, req_cnt;
1146
1147         bp->msix_table[msix_vec].entry = msix_vec;
1148         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n",
1149            bp->msix_table[0].entry);
1150         msix_vec++;
1151
1152 #ifdef BCM_CNIC
1153         bp->msix_table[msix_vec].entry = msix_vec;
1154         DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d (CNIC)\n",
1155            bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
1156         msix_vec++;
1157 #endif
1158         for_each_eth_queue(bp, i) {
1159                 bp->msix_table[msix_vec].entry = msix_vec;
1160                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
1161                    "(fastpath #%u)\n", msix_vec, msix_vec, i);
1162                 msix_vec++;
1163         }
1164
1165         req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1166
1167         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1168
1169         /*
1170          * reconfigure number of tx/rx queues according to available
1171          * MSI-X vectors
1172          */
1173         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
1174                 /* how many fewer vectors will we have? */
1175                 int diff = req_cnt - rc;
1176
1177                 DP(NETIF_MSG_IFUP,
1178                    "Trying to use less MSI-X vectors: %d\n", rc);
1179
1180                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
1181
1182                 if (rc) {
1183                         DP(NETIF_MSG_IFUP,
1184                            "MSI-X is not attainable  rc %d\n", rc);
1185                         return rc;
1186                 }
1187                 /*
1188                  * decrease number of queues by number of unallocated entries
1189                  */
1190                 bp->num_queues -= diff;
1191
1192                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
1193                                   bp->num_queues);
1194         } else if (rc) {
1195                 /* fall back to INTx if not enough memory */
1196                 if (rc == -ENOMEM)
1197                         bp->flags |= DISABLE_MSI_FLAG;
1198                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
1199                 return rc;
1200         }
1201
1202         bp->flags |= USING_MSIX_FLAG;
1203
1204         return 0;
1205 }
1206
1207 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1208 {
1209         int i, rc, offset = 0;
1210
1211         rc = request_irq(bp->msix_table[offset++].vector,
1212                          bnx2x_msix_sp_int, 0,
1213                          bp->dev->name, bp->dev);
1214         if (rc) {
1215                 BNX2X_ERR("request sp irq failed\n");
1216                 return -EBUSY;
1217         }
1218
1219 #ifdef BCM_CNIC
1220         offset++;
1221 #endif
1222         for_each_eth_queue(bp, i) {
1223                 struct bnx2x_fastpath *fp = &bp->fp[i];
1224                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1225                          bp->dev->name, i);
1226
1227                 rc = request_irq(bp->msix_table[offset].vector,
1228                                  bnx2x_msix_fp_int, 0, fp->name, fp);
1229                 if (rc) {
1230                         BNX2X_ERR("request fp #%d irq (%d) failed  rc %d\n", i,
1231                               bp->msix_table[offset].vector, rc);
1232                         bnx2x_free_msix_irqs(bp, offset);
1233                         return -EBUSY;
1234                 }
1235
1236                 offset++;
1237                 fp->state = BNX2X_FP_STATE_IRQ;
1238         }
1239
1240         i = BNX2X_NUM_ETH_QUEUES(bp);
1241         offset = 1 + CNIC_CONTEXT_USE;
1242         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
1243                " ... fp[%d] %d\n",
1244                bp->msix_table[0].vector,
1245                0, bp->msix_table[offset].vector,
1246                i - 1, bp->msix_table[offset + i - 1].vector);
1247
1248         return 0;
1249 }
1250
1251 int bnx2x_enable_msi(struct bnx2x *bp)
1252 {
1253         int rc;
1254
1255         rc = pci_enable_msi(bp->pdev);
1256         if (rc) {
1257                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
1258                 return -1;
1259         }
1260         bp->flags |= USING_MSI_FLAG;
1261
1262         return 0;
1263 }
1264
1265 static int bnx2x_req_irq(struct bnx2x *bp)
1266 {
1267         unsigned long flags;
1268         int rc;
1269
1270         if (bp->flags & USING_MSI_FLAG)
1271                 flags = 0;
1272         else
1273                 flags = IRQF_SHARED;
1274
1275         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
1276                          bp->dev->name, bp->dev);
1277         if (!rc)
1278                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
1279
1280         return rc;
1281 }
1282
1283 static void bnx2x_napi_enable(struct bnx2x *bp)
1284 {
1285         int i;
1286
1287         for_each_napi_queue(bp, i)
1288                 napi_enable(&bnx2x_fp(bp, i, napi));
1289 }
1290
1291 static void bnx2x_napi_disable(struct bnx2x *bp)
1292 {
1293         int i;
1294
1295         for_each_napi_queue(bp, i)
1296                 napi_disable(&bnx2x_fp(bp, i, napi));
1297 }
1298
1299 void bnx2x_netif_start(struct bnx2x *bp)
1300 {
1301         if (netif_running(bp->dev)) {
1302                 bnx2x_napi_enable(bp);
1303                 bnx2x_int_enable(bp);
1304                 if (bp->state == BNX2X_STATE_OPEN)
1305                         netif_tx_wake_all_queues(bp->dev);
1306         }
1307 }
1308
1309 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1310 {
1311         bnx2x_int_disable_sync(bp, disable_hw);
1312         bnx2x_napi_disable(bp);
1313         netif_tx_disable(bp->dev);
1314 }
1315
1316 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1317 {
1318 #ifdef BCM_CNIC
1319         struct bnx2x *bp = netdev_priv(dev);
1320         if (NO_FCOE(bp))
1321                 return skb_tx_hash(dev, skb);
1322         else {
1323                 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1324                 u16 ether_type = ntohs(hdr->h_proto);
1325
1326                 /* Skip VLAN tag if present */
1327                 if (ether_type == ETH_P_8021Q) {
1328                         struct vlan_ethhdr *vhdr =
1329                                 (struct vlan_ethhdr *)skb->data;
1330
1331                         ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1332                 }
1333
1334                 /* If ethertype is FCoE or FIP - use FCoE ring */
1335                 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1336                         return bnx2x_fcoe(bp, index);
1337         }
1338 #endif
1339         /* Select a non-FCoE queue:  if FCoE is enabled, exclude the FCoE L2 ring
1340          */
1341         return __skb_tx_hash(dev, skb,
1342                         dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1343 }
1344
1345 void bnx2x_set_num_queues(struct bnx2x *bp)
1346 {
1347         switch (bp->multi_mode) {
1348         case ETH_RSS_MODE_DISABLED:
1349                 bp->num_queues = 1;
1350                 break;
1351         case ETH_RSS_MODE_REGULAR:
1352                 bp->num_queues = bnx2x_calc_num_queues(bp);
1353                 break;
1354
1355         default:
1356                 bp->num_queues = 1;
1357                 break;
1358         }
1359
1360         /* Add special queues */
1361         bp->num_queues += NONE_ETH_CONTEXT_USE;
1362 }
1363
1364 #ifdef BCM_CNIC
1365 static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1366 {
1367         if (!NO_FCOE(bp)) {
1368                 if (!IS_MF_SD(bp))
1369                         bnx2x_set_fip_eth_mac_addr(bp, 1);
1370                 bnx2x_set_all_enode_macs(bp, 1);
1371                 bp->flags |= FCOE_MACS_SET;
1372         }
1373 }
1374 #endif
1375
1376 static void bnx2x_release_firmware(struct bnx2x *bp)
1377 {
1378         kfree(bp->init_ops_offsets);
1379         kfree(bp->init_ops);
1380         kfree(bp->init_data);
1381         release_firmware(bp->firmware);
1382 }
1383
1384 static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1385 {
1386         int rc, num = bp->num_queues;
1387
1388 #ifdef BCM_CNIC
1389         if (NO_FCOE(bp))
1390                 num -= FCOE_CONTEXT_USE;
1391
1392 #endif
1393         netif_set_real_num_tx_queues(bp->dev, num);
1394         rc = netif_set_real_num_rx_queues(bp->dev, num);
1395         return rc;
1396 }
1397
1398 static inline void bnx2x_set_rx_buf_size(struct bnx2x *bp)
1399 {
1400         int i;
1401
1402         for_each_queue(bp, i) {
1403                 struct bnx2x_fastpath *fp = &bp->fp[i];
1404
1405                 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
1406                 if (IS_FCOE_IDX(i))
1407                         /*
1408                          * Although there are no IP frames expected to arrive on
1409                          * this ring, we still want to add an
1410                          * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
1411                          * overrun attack.
1412                          */
1413                         fp->rx_buf_size =
1414                                 BNX2X_FCOE_MINI_JUMBO_MTU + ETH_OVREHEAD +
1415                                 BNX2X_RX_ALIGN + IP_HEADER_ALIGNMENT_PADDING;
1416                 else
1417                         fp->rx_buf_size =
1418                                 bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN +
1419                                 IP_HEADER_ALIGNMENT_PADDING;
1420         }
1421 }
1422
1423 /* must be called with rtnl_lock */
1424 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1425 {
1426         u32 load_code;
1427         int i, rc;
1428
1429         /* Set init arrays */
1430         rc = bnx2x_init_firmware(bp);
1431         if (rc) {
1432                 BNX2X_ERR("Error loading firmware\n");
1433                 return rc;
1434         }
1435
1436 #ifdef BNX2X_STOP_ON_ERROR
1437         if (unlikely(bp->panic))
1438                 return -EPERM;
1439 #endif
1440
1441         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
1442
1443         /* Set the initial link reported state to link down */
1444         bnx2x_acquire_phy_lock(bp);
1445         memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
1446         __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1447                 &bp->last_reported_link.link_report_flags);
1448         bnx2x_release_phy_lock(bp);
1449
1450         /* must be called before memory allocation and HW init */
1451         bnx2x_ilt_set_info(bp);
1452
1453         /* zero the fastpath structures, preserving invariants such as napi
1454          * which are allocated only once
1455          */
1456         for_each_queue(bp, i)
1457                 bnx2x_bz_fp(bp, i);
1458
1459         /* Set the receive queues buffer size */
1460         bnx2x_set_rx_buf_size(bp);
1461
1462         for_each_queue(bp, i)
1463                 bnx2x_fp(bp, i, disable_tpa) =
1464                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
1465
1466 #ifdef BCM_CNIC
1467         /* We don't want TPA on FCoE L2 ring */
1468         bnx2x_fcoe(bp, disable_tpa) = 1;
1469 #endif
1470
1471         if (bnx2x_alloc_mem(bp))
1472                 return -ENOMEM;
1473
1474         /* Since bnx2x_alloc_mem() may update bp->num_queues,
1475          * bnx2x_set_real_num_queues() must always be called
1476          * after it.
1477          */
1478         rc = bnx2x_set_real_num_queues(bp);
1479         if (rc) {
1480                 BNX2X_ERR("Unable to set real_num_queues\n");
1481                 goto load_error0;
1482         }
1483
1484         bnx2x_napi_enable(bp);
1485
1486         /* Send the LOAD_REQUEST command to the MCP.
1487            The response indicates the type of LOAD command:
1488            if this is the first port to be initialized,
1489            the common blocks must be initialized as well; otherwise not.
1490         */
1491         if (!BP_NOMCP(bp)) {
1492                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, 0);
1493                 if (!load_code) {
1494                         BNX2X_ERR("MCP response failure, aborting\n");
1495                         rc = -EBUSY;
1496                         goto load_error1;
1497                 }
1498                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
1499                         rc = -EBUSY; /* other port in diagnostic mode */
1500                         goto load_error1;
1501                 }
1502
1503         } else {
1504                 int path = BP_PATH(bp);
1505                 int port = BP_PORT(bp);
1506
1507                 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d]      %d, %d, %d\n",
1508                    path, load_count[path][0], load_count[path][1],
1509                    load_count[path][2]);
1510                 load_count[path][0]++;
1511                 load_count[path][1 + port]++;
1512                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d]  %d, %d, %d\n",
1513                    path, load_count[path][0], load_count[path][1],
1514                    load_count[path][2]);
1515                 if (load_count[path][0] == 1)
1516                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
1517                 else if (load_count[path][1 + port] == 1)
1518                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
1519                 else
1520                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
1521         }
1522
1523         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1524             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
1525             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
1526                 bp->port.pmf = 1;
1527         else
1528                 bp->port.pmf = 0;
1529         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1530
1531         /* Initialize HW */
1532         rc = bnx2x_init_hw(bp, load_code);
1533         if (rc) {
1534                 BNX2X_ERR("HW init failed, aborting\n");
1535                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1536                 goto load_error2;
1537         }
1538
1539         /* Connect to IRQs */
1540         rc = bnx2x_setup_irqs(bp);
1541         if (rc) {
1542                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1543                 goto load_error2;
1544         }
1545
1546         /* Setup NIC internals and enable interrupts */
1547         bnx2x_nic_init(bp, load_code);
1548
1549         if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
1550             (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
1551             (bp->common.shmem2_base))
1552                 SHMEM2_WR(bp, dcc_support,
1553                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
1554                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
1555
1556         /* Send LOAD_DONE command to MCP */
1557         if (!BP_NOMCP(bp)) {
1558                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
1559                 if (!load_code) {
1560                         BNX2X_ERR("MCP response failure, aborting\n");
1561                         rc = -EBUSY;
1562                         goto load_error3;
1563                 }
1564         }
1565
1566         bnx2x_dcbx_init(bp);
1567
1568         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1569
1570         rc = bnx2x_func_start(bp);
1571         if (rc) {
1572                 BNX2X_ERR("Function start failed!\n");
1573 #ifndef BNX2X_STOP_ON_ERROR
1574                 goto load_error3;
1575 #else
1576                 bp->panic = 1;
1577                 return -EBUSY;
1578 #endif
1579         }
1580
1581         rc = bnx2x_setup_client(bp, &bp->fp[0], 1 /* Leading */);
1582         if (rc) {
1583                 BNX2X_ERR("Setup leading failed!\n");
1584 #ifndef BNX2X_STOP_ON_ERROR
1585                 goto load_error3;
1586 #else
1587                 bp->panic = 1;
1588                 return -EBUSY;
1589 #endif
1590         }
1591
1592         if (!CHIP_IS_E1(bp) &&
1593             (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED)) {
1594                 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
1595                 bp->flags |= MF_FUNC_DIS;
1596         }
1597
1598 #ifdef BCM_CNIC
1599         /* Enable Timer scan */
1600         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
1601 #endif
1602
1603         for_each_nondefault_queue(bp, i) {
1604                 rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
1605                 if (rc)
1606 #ifdef BCM_CNIC
1607                         goto load_error4;
1608 #else
1609                         goto load_error3;
1610 #endif
1611         }
1612
1613         /* Now that the clients are configured we are ready to work */
1614         bp->state = BNX2X_STATE_OPEN;
1615
1616 #ifdef BCM_CNIC
1617         bnx2x_set_fcoe_eth_macs(bp);
1618 #endif
1619
1620         bnx2x_set_eth_mac(bp, 1);
1621
1622         /* Clear MC configuration */
1623         if (CHIP_IS_E1(bp))
1624                 bnx2x_invalidate_e1_mc_list(bp);
1625         else
1626                 bnx2x_invalidate_e1h_mc_list(bp);
1627
1628         /* Clear UC lists configuration */
1629         bnx2x_invalidate_uc_list(bp);
1630
1631         if (bp->pending_max) {
1632                 bnx2x_update_max_mf_config(bp, bp->pending_max);
1633                 bp->pending_max = 0;
1634         }
1635
1636         if (bp->port.pmf)
1637                 bnx2x_initial_phy_init(bp, load_mode);
1638
1639         /* Initialize Rx filtering */
1640         bnx2x_set_rx_mode(bp->dev);
1641
1642         /* Start fast path */
1643         switch (load_mode) {
1644         case LOAD_NORMAL:
1645                 /* Tx queues should only be re-enabled */
1646                 netif_tx_wake_all_queues(bp->dev);
1647                 /* Initialize the receive filter. */
1648                 break;
1649
1650         case LOAD_OPEN:
1651                 netif_tx_start_all_queues(bp->dev);
1652                 smp_mb__after_clear_bit();
1653                 break;
1654
1655         case LOAD_DIAG:
1656                 bp->state = BNX2X_STATE_DIAG;
1657                 break;
1658
1659         default:
1660                 break;
1661         }
1662
1663         if (!bp->port.pmf)
1664                 bnx2x__link_status_update(bp);
1665
1666         /* start the timer */
1667         mod_timer(&bp->timer, jiffies + bp->current_interval);
1668
1669 #ifdef BCM_CNIC
1670         bnx2x_setup_cnic_irq_info(bp);
1671         if (bp->state == BNX2X_STATE_OPEN)
1672                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
1673 #endif
1674         bnx2x_inc_load_cnt(bp);
1675
1676         bnx2x_release_firmware(bp);
1677
1678         return 0;
1679
1680 #ifdef BCM_CNIC
1681 load_error4:
1682         /* Disable Timer scan */
1683         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
1684 #endif
1685 load_error3:
1686         bnx2x_int_disable_sync(bp, 1);
1687
1688         /* Free SKBs, SGEs, TPA pool and driver internals */
1689         bnx2x_free_skbs(bp);
1690         for_each_rx_queue(bp, i)
1691                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1692
1693         /* Release IRQs */
1694         bnx2x_free_irq(bp);
1695 load_error2:
1696         if (!BP_NOMCP(bp)) {
1697                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
1698                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
1699         }
1700
1701         bp->port.pmf = 0;
1702 load_error1:
1703         bnx2x_napi_disable(bp);
1704 load_error0:
1705         bnx2x_free_mem(bp);
1706
1707         bnx2x_release_firmware(bp);
1708
1709         return rc;
1710 }
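/* Illustrative sketch (not part of the driver): bnx2x_nic_load() and
 * bnx2x_nic_unload() must run under rtnl_lock, as noted above. A caller that
 * is not already on an rtnl-protected path (e.g. not inside ndo_open) would
 * look roughly like this hypothetical helper.
 */
static int example_restart_nic(struct bnx2x *bp)
{
        int rc;

        rtnl_lock();
        bnx2x_nic_unload(bp, UNLOAD_NORMAL);
        rc = bnx2x_nic_load(bp, LOAD_NORMAL);
        rtnl_unlock();

        return rc;
}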
1711
1712 /* must be called with rtnl_lock */
1713 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1714 {
1715         int i;
1716
1717         if (bp->state == BNX2X_STATE_CLOSED) {
1718                 /* Interface has been removed - nothing to recover */
1719                 bp->recovery_state = BNX2X_RECOVERY_DONE;
1720                 bp->is_leader = 0;
1721                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
1722                 smp_wmb();
1723
1724                 return -EINVAL;
1725         }
1726
1727 #ifdef BCM_CNIC
1728         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
1729 #endif
1730         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
1731
1732         /* Set "drop all" */
1733         bp->rx_mode = BNX2X_RX_MODE_NONE;
1734         bnx2x_set_storm_rx_mode(bp);
1735
1736         /* Stop Tx */
1737         bnx2x_tx_disable(bp);
1738
1739         del_timer_sync(&bp->timer);
1740
1741         SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
1742                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
1743
1744         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1745
1746         /* Cleanup the chip if needed */
1747         if (unload_mode != UNLOAD_RECOVERY)
1748                 bnx2x_chip_cleanup(bp, unload_mode);
1749         else {
1750                 /* Disable HW interrupts, NAPI and Tx */
1751                 bnx2x_netif_stop(bp, 1);
1752
1753                 /* Release IRQs */
1754                 bnx2x_free_irq(bp);
1755         }
1756
1757         bp->port.pmf = 0;
1758
1759         /* Free SKBs, SGEs, TPA pool and driver internals */
1760         bnx2x_free_skbs(bp);
1761         for_each_rx_queue(bp, i)
1762                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1763
1764         bnx2x_free_mem(bp);
1765
1766         bp->state = BNX2X_STATE_CLOSED;
1767
1768         /* The last driver must disable the "close the gate" functionality if
1769          * there is no parity attention or "process kill" pending.
1770          */
1771         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
1772             bnx2x_reset_is_done(bp))
1773                 bnx2x_disable_close_the_gate(bp);
1774
1775         /* Reset the MCP mailbox sequence if recovery is ongoing */
1776         if (unload_mode == UNLOAD_RECOVERY)
1777                 bp->fw_seq = 0;
1778
1779         return 0;
1780 }
1781
1782 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
1783 {
1784         u16 pmcsr;
1785
1786         /* If there is no power capability, silently succeed */
1787         if (!bp->pm_cap) {
1788                 DP(NETIF_MSG_HW, "No power capability. Breaking.\n");
1789                 return 0;
1790         }
1791
1792         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
1793
1794         switch (state) {
1795         case PCI_D0:
1796                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1797                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
1798                                        PCI_PM_CTRL_PME_STATUS));
1799
1800                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
1801                         /* delay required during transition out of D3hot */
1802                         msleep(20);
1803                 break;
1804
1805         case PCI_D3hot:
1806                 /* If there are other clients above, don't
1807                    shut down the power */
1808                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
1809                         return 0;
1810                 /* Don't shut down the power for emulation and FPGA */
1811                 if (CHIP_REV_IS_SLOW(bp))
1812                         return 0;
1813
1814                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
1815                 pmcsr |= 3;
1816
1817                 if (bp->wol)
1818                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
1819
1820                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
1821                                       pmcsr);
1822
1823                 /* No more memory access after this point until
1824                 * device is brought back to D0.
1825                 */
1826                 break;
1827
1828         default:
1829                 return -EINVAL;
1830         }
1831         return 0;
1832 }
1833
1834 /*
1835  * net_device service functions
1836  */
1837 int bnx2x_poll(struct napi_struct *napi, int budget)
1838 {
1839         int work_done = 0;
1840         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
1841                                                  napi);
1842         struct bnx2x *bp = fp->bp;
1843
1844         while (1) {
1845 #ifdef BNX2X_STOP_ON_ERROR
1846                 if (unlikely(bp->panic)) {
1847                         napi_complete(napi);
1848                         return 0;
1849                 }
1850 #endif
1851
1852                 if (bnx2x_has_tx_work(fp))
1853                         bnx2x_tx_int(fp);
1854
1855                 if (bnx2x_has_rx_work(fp)) {
1856                         work_done += bnx2x_rx_int(fp, budget - work_done);
1857
1858                         /* must not complete if we consumed full budget */
1859                         if (work_done >= budget)
1860                                 break;
1861                 }
1862
1863                 /* Fall out from the NAPI loop if needed */
1864                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1865 #ifdef BCM_CNIC
1866                         /* No need to update SB for FCoE L2 ring as long as
1867                          * it's connected to the default SB and the SB
1868                          * has been updated when NAPI was scheduled.
1869                          */
1870                         if (IS_FCOE_FP(fp)) {
1871                                 napi_complete(napi);
1872                                 break;
1873                         }
1874 #endif
1875
1876                         bnx2x_update_fpsb_idx(fp);
1877                         /* bnx2x_has_rx_work() reads the status block,
1878                          * thus we need to ensure that status block indices
1879                          * have been actually read (bnx2x_update_fpsb_idx)
1880                          * prior to this check (bnx2x_has_rx_work) so that
1881                          * we won't write the "newer" value of the status block
1882                          * to IGU (if there was a DMA right after
1883                          * bnx2x_has_rx_work and if there is no rmb, the memory
1884                          * reading (bnx2x_update_fpsb_idx) may be postponed
1885                          * to right before bnx2x_ack_sb). In this case there
1886                          * will never be another interrupt until there is
1887                          * another update of the status block, while there
1888                          * is still unhandled work.
1889                          */
1890                         rmb();
1891
1892                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1893                                 napi_complete(napi);
1894                                 /* Re-enable interrupts */
1895                                 DP(NETIF_MSG_HW,
1896                                    "Update index to %d\n", fp->fp_hc_idx);
1897                                 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
1898                                              le16_to_cpu(fp->fp_hc_idx),
1899                                              IGU_INT_ENABLE, 1);
1900                                 break;
1901                         }
1902                 }
1903         }
1904
1905         return work_done;
1906 }
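/* Illustrative sketch (not part of the driver): the completion pattern that
 * bnx2x_poll() follows above - refresh the local status-block indices, issue
 * a read barrier, then re-check for work before re-enabling interrupts, so an
 * update racing with napi_complete() is never lost. The example_* names below
 * are hypothetical stand-ins, not driver or kernel symbols.
 */
struct example_poll_ctx {
        struct napi_struct *napi;
        int (*has_work)(void *priv);            /* hypothetical callback */
        void (*refresh_sb)(void *priv);         /* hypothetical callback */
        void (*enable_irq)(void *priv);         /* hypothetical callback */
        void *priv;
};

static void example_complete_poll(struct example_poll_ctx *ctx)
{
        ctx->refresh_sb(ctx->priv);     /* read the SB indices first ...   */
        rmb();                          /* ... and order them vs. re-check */
        if (!ctx->has_work(ctx->priv)) {
                napi_complete(ctx->napi);
                ctx->enable_irq(ctx->priv);
        }
}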
1907
1908 /* We split the first BD into a headers BD and data BDs
1909  * to ease the pain of our fellow microcode engineers;
1910  * we use one mapping for both BDs.
1911  * So far this has only been observed to happen
1912  * in Other Operating Systems(TM).
1913  */
1914 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
1915                                    struct bnx2x_fastpath *fp,
1916                                    struct sw_tx_bd *tx_buf,
1917                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
1918                                    u16 bd_prod, int nbd)
1919 {
1920         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
1921         struct eth_tx_bd *d_tx_bd;
1922         dma_addr_t mapping;
1923         int old_len = le16_to_cpu(h_tx_bd->nbytes);
1924
1925         /* first fix first BD */
1926         h_tx_bd->nbd = cpu_to_le16(nbd);
1927         h_tx_bd->nbytes = cpu_to_le16(hlen);
1928
1929         DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
1930            "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
1931            h_tx_bd->addr_lo, h_tx_bd->nbd);
1932
1933         /* now get a new data BD
1934          * (after the pbd) and fill it */
1935         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
1936         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
1937
1938         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
1939                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
1940
1941         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1942         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1943         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
1944
1945         /* this marks the BD as one that has no individual mapping */
1946         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
1947
1948         DP(NETIF_MSG_TX_QUEUED,
1949            "TSO split data size is %d (%x:%x)\n",
1950            d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
1951
1952         /* update tx_bd */
1953         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
1954
1955         return bd_prod;
1956 }
1957
1958 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
1959 {
1960         if (fix > 0)
1961                 csum = (u16) ~csum_fold(csum_sub(csum,
1962                                 csum_partial(t_header - fix, fix, 0)));
1963
1964         else if (fix < 0)
1965                 csum = (u16) ~csum_fold(csum_add(csum,
1966                                 csum_partial(t_header, -fix, 0)));
1967
1968         return swab16(csum);
1969 }
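/* Illustrative sketch (not part of the driver): folding a 32-bit one's
 * complement accumulator down to the 16-bit checksum value that
 * bnx2x_csum_fix() adjusts above. Plain C, independent of the kernel
 * csum_fold()/csum_partial() helpers.
 */
static u16 example_csum_fold(u32 sum)
{
        /* add the carries back in until the value fits in 16 bits */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);

        return (u16)~sum;
}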
1970
1971 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1972 {
1973         u32 rc;
1974
1975         if (skb->ip_summed != CHECKSUM_PARTIAL)
1976                 rc = XMIT_PLAIN;
1977
1978         else {
1979                 if (vlan_get_protocol(skb) == htons(ETH_P_IPV6)) {
1980                         rc = XMIT_CSUM_V6;
1981                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
1982                                 rc |= XMIT_CSUM_TCP;
1983
1984                 } else {
1985                         rc = XMIT_CSUM_V4;
1986                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
1987                                 rc |= XMIT_CSUM_TCP;
1988                 }
1989         }
1990
1991         if (skb_is_gso_v6(skb))
1992                 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1993         else if (skb_is_gso(skb))
1994                 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1995
1996         return rc;
1997 }
1998
1999 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2000 /* Check whether the packet requires linearization (i.e. it is too fragmented).
2001    There is no need to check fragmentation if the page size > 8K (there will be
2002    no violation of the FW restrictions). */
2003 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
2004                              u32 xmit_type)
2005 {
2006         int to_copy = 0;
2007         int hlen = 0;
2008         int first_bd_sz = 0;
2009
2010         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
2011         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
2012
2013                 if (xmit_type & XMIT_GSO) {
2014                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
2015                         /* Check if LSO packet needs to be copied:
2016                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
2017                         int wnd_size = MAX_FETCH_BD - 3;
2018                         /* Number of windows to check */
2019                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
2020                         int wnd_idx = 0;
2021                         int frag_idx = 0;
2022                         u32 wnd_sum = 0;
2023
2024                         /* Headers length */
2025                         hlen = (int)(skb_transport_header(skb) - skb->data) +
2026                                 tcp_hdrlen(skb);
2027
2028                         /* Amount of data (w/o headers) on linear part of SKB*/
2029                         first_bd_sz = skb_headlen(skb) - hlen;
2030
2031                         wnd_sum  = first_bd_sz;
2032
2033                         /* Calculate the first sum - it's special */
2034                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
2035                                 wnd_sum +=
2036                                         skb_shinfo(skb)->frags[frag_idx].size;
2037
2038                         /* If there was data on linear skb data - check it */
2039                         if (first_bd_sz > 0) {
2040                                 if (unlikely(wnd_sum < lso_mss)) {
2041                                         to_copy = 1;
2042                                         goto exit_lbl;
2043                                 }
2044
2045                                 wnd_sum -= first_bd_sz;
2046                         }
2047
2048                         /* Others are easier: run through the frag list and
2049                            check all windows */
2050                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
2051                                 wnd_sum +=
2052                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
2053
2054                                 if (unlikely(wnd_sum < lso_mss)) {
2055                                         to_copy = 1;
2056                                         break;
2057                                 }
2058                                 wnd_sum -=
2059                                         skb_shinfo(skb)->frags[wnd_idx].size;
2060                         }
2061                 } else {
2062                         /* in the non-LSO case a too fragmented packet must
2063                            always be linearized */
2064                         to_copy = 1;
2065                 }
2066         }
2067
2068 exit_lbl:
2069         if (unlikely(to_copy))
2070                 DP(NETIF_MSG_TX_QUEUED,
2071                    "Linearization IS REQUIRED for %s packet. "
2072                    "num_frags %d  hlen %d  first_bd_sz %d\n",
2073                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
2074                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
2075
2076         return to_copy;
2077 }
2078 #endif
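/* Illustrative sketch (not part of the driver): the sliding-window rule that
 * bnx2x_pkt_req_lin() enforces above - every window of wnd_size consecutive
 * descriptors must carry at least one full MSS, otherwise the skb has to be
 * linearized. The straightforward O(n * w) form below trades speed for
 * clarity; the frag sizes are hypothetical inputs.
 */
static int example_needs_linearization(const u32 *frag_sz, int nfrags,
                                       int wnd_size, u32 mss)
{
        int i, j;

        for (i = 0; i + wnd_size <= nfrags; i++) {
                u32 wnd_sum = 0;

                for (j = i; j < i + wnd_size; j++)
                        wnd_sum += frag_sz[j];
                if (wnd_sum < mss)
                        return 1;       /* too fragmented for the FW */
        }
        return 0;
}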
2079
2080 static inline void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
2081                                         u32 xmit_type)
2082 {
2083         *parsing_data |= (skb_shinfo(skb)->gso_size <<
2084                               ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
2085                               ETH_TX_PARSE_BD_E2_LSO_MSS;
2086         if ((xmit_type & XMIT_GSO_V6) &&
2087             (ipv6_hdr(skb)->nexthdr == NEXTHDR_IPV6))
2088                 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
2089 }
2090
2091 /**
2092  * bnx2x_set_pbd_gso - update PBD in GSO case.
2093  *
2094  * @skb:        packet skb
2095  * @pbd:        parse BD
2096  * @xmit_type:  xmit flags
2097  */
2098 static inline void bnx2x_set_pbd_gso(struct sk_buff *skb,
2099                                      struct eth_tx_parse_bd_e1x *pbd,
2100                                      u32 xmit_type)
2101 {
2102         pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2103         pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
2104         pbd->tcp_flags = pbd_tcp_flags(skb);
2105
2106         if (xmit_type & XMIT_GSO_V4) {
2107                 pbd->ip_id = swab16(ip_hdr(skb)->id);
2108                 pbd->tcp_pseudo_csum =
2109                         swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
2110                                                   ip_hdr(skb)->daddr,
2111                                                   0, IPPROTO_TCP, 0));
2112
2113         } else
2114                 pbd->tcp_pseudo_csum =
2115                         swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2116                                                 &ipv6_hdr(skb)->daddr,
2117                                                 0, IPPROTO_TCP, 0));
2118
2119         pbd->global_data |= ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN;
2120 }
2121
2122 /**
2123  * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
2124  *
2125  * @bp:                 driver handle
2126  * @skb:                packet skb
2127  * @parsing_data:       data to be updated
2128  * @xmit_type:          xmit flags
2129  *
2130  * 57712 related
2131  */
2132 static inline  u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
2133         u32 *parsing_data, u32 xmit_type)
2134 {
2135         *parsing_data |=
2136                         ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
2137                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W_SHIFT) &
2138                         ETH_TX_PARSE_BD_E2_TCP_HDR_START_OFFSET_W;
2139
2140         if (xmit_type & XMIT_CSUM_TCP) {
2141                 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
2142                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
2143                         ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
2144
2145                 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
2146         } else
2147                 /* We support checksum offload for TCP and UDP only.
2148                  * No need to pass the UDP header length - it's a constant.
2149                  */
2150                 return skb_transport_header(skb) +
2151                                 sizeof(struct udphdr) - skb->data;
2152 }
2153
2154 static inline void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2155         struct eth_tx_start_bd *tx_start_bd, u32 xmit_type)
2156 {
2157
2158         tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
2159
2160         if (xmit_type & XMIT_CSUM_V4)
2161                 tx_start_bd->bd_flags.as_bitfield |=
2162                                         ETH_TX_BD_FLAGS_IP_CSUM;
2163         else
2164                 tx_start_bd->bd_flags.as_bitfield |=
2165                                         ETH_TX_BD_FLAGS_IPV6;
2166
2167         if (!(xmit_type & XMIT_CSUM_TCP))
2168                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
2169
2170 }
2171
2172 /**
2173  * bnx2x_set_pbd_csum - update PBD with checksum and return header length
2174  *
2175  * @bp:         driver handle
2176  * @skb:        packet skb
2177  * @pbd:        parse BD to be updated
2178  * @xmit_type:  xmit flags
2179  */
2180 static inline u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
2181         struct eth_tx_parse_bd_e1x *pbd,
2182         u32 xmit_type)
2183 {
2184         u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
2185
2186         /* for now NS flag is not used in Linux */
2187         pbd->global_data =
2188                 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
2189                          ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
2190
2191         pbd->ip_hlen_w = (skb_transport_header(skb) -
2192                         skb_network_header(skb)) >> 1;
2193
2194         hlen += pbd->ip_hlen_w;
2195
2196         /* We support checksum offload for TCP and UDP only */
2197         if (xmit_type & XMIT_CSUM_TCP)
2198                 hlen += tcp_hdrlen(skb) / 2;
2199         else
2200                 hlen += sizeof(struct udphdr) / 2;
2201
2202         pbd->total_hlen_w = cpu_to_le16(hlen);
2203         hlen = hlen*2;
2204
2205         if (xmit_type & XMIT_CSUM_TCP) {
2206                 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
2207
2208         } else {
2209                 s8 fix = SKB_CS_OFF(skb); /* signed! */
2210
2211                 DP(NETIF_MSG_TX_QUEUED,
2212                    "hlen %d  fix %d  csum before fix %x\n",
2213                    le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
2214
2215                 /* HW bug: fixup the CSUM */
2216                 pbd->tcp_pseudo_csum =
2217                         bnx2x_csum_fix(skb_transport_header(skb),
2218                                        SKB_CS(skb), fix);
2219
2220                 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
2221                    pbd->tcp_pseudo_csum);
2222         }
2223
2224         return hlen;
2225 }
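/* Illustrative sketch (not part of the driver): the parse BD tracks header
 * lengths in 16-bit words, as bnx2x_set_pbd_csum() does above. The header
 * sizes here are hypothetical example values.
 */
static u16 example_total_hlen_w(u16 mac_hlen, u16 ip_hlen, u16 l4_hlen)
{
        /* e.g. 14 + 20 + 20 header bytes -> 54 bytes -> 27 words */
        return (mac_hlen + ip_hlen + l4_hlen) / 2;
}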
2226
2227 /* called with netif_tx_lock
2228  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
2229  * netif_wake_queue()
2230  */
2231 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
2232 {
2233         struct bnx2x *bp = netdev_priv(dev);
2234         struct bnx2x_fastpath *fp;
2235         struct netdev_queue *txq;
2236         struct sw_tx_bd *tx_buf;
2237         struct eth_tx_start_bd *tx_start_bd;
2238         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
2239         struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
2240         struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
2241         u32 pbd_e2_parsing_data = 0;
2242         u16 pkt_prod, bd_prod;
2243         int nbd, fp_index;
2244         dma_addr_t mapping;
2245         u32 xmit_type = bnx2x_xmit_type(bp, skb);
2246         int i;
2247         u8 hlen = 0;
2248         __le16 pkt_size = 0;
2249         struct ethhdr *eth;
2250         u8 mac_type = UNICAST_ADDRESS;
2251
2252 #ifdef BNX2X_STOP_ON_ERROR
2253         if (unlikely(bp->panic))
2254                 return NETDEV_TX_BUSY;
2255 #endif
2256
2257         fp_index = skb_get_queue_mapping(skb);
2258         txq = netdev_get_tx_queue(dev, fp_index);
2259
2260         fp = &bp->fp[fp_index];
2261
2262         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
2263                 fp->eth_q_stats.driver_xoff++;
2264                 netif_tx_stop_queue(txq);
2265                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
2266                 return NETDEV_TX_BUSY;
2267         }
2268
2269         DP(NETIF_MSG_TX_QUEUED, "queue[%d]: SKB: summed %x  protocol %x  "
2270                                 "protocol(%x,%x) gso type %x  xmit_type %x\n",
2271            fp_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
2272            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
2273
2274         eth = (struct ethhdr *)skb->data;
2275
2276         /* set flag according to packet type (UNICAST_ADDRESS is default)*/
2277         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
2278                 if (is_broadcast_ether_addr(eth->h_dest))
2279                         mac_type = BROADCAST_ADDRESS;
2280                 else
2281                         mac_type = MULTICAST_ADDRESS;
2282         }
2283
2284 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
2285         /* First, check if we need to linearize the skb (due to FW
2286            restrictions). No need to check fragmentation if the page size > 8K
2287            (there will be no violation of the FW restrictions) */
2288         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
2289                 /* Statistics of linearization */
2290                 bp->lin_cnt++;
2291                 if (skb_linearize(skb) != 0) {
2292                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
2293                            "silently dropping this SKB\n");
2294                         dev_kfree_skb_any(skb);
2295                         return NETDEV_TX_OK;
2296                 }
2297         }
2298 #endif
2299
2300         /*
2301         Please read carefully. First we use one BD which we mark as the start,
2302         then we have a parsing info BD (used for TSO or checksum),
2303         and only then we have the rest of the TSO BDs.
2304         (Don't forget to mark the last one as last,
2305         and to unmap only AFTER you write to the BD ...)
2306         And above all, all PBD sizes are in words - NOT DWORDS!
2307         */
2308
2309         pkt_prod = fp->tx_pkt_prod++;
2310         bd_prod = TX_BD(fp->tx_bd_prod);
2311
2312         /* get a tx_buf and first BD */
2313         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
2314         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
2315
2316         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
2317         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_ETH_ADDR_TYPE,
2318                  mac_type);
2319
2320         /* header nbd */
2321         SET_FLAG(tx_start_bd->general_data, ETH_TX_START_BD_HDR_NBDS, 1);
2322
2323         /* remember the first BD of the packet */
2324         tx_buf->first_bd = fp->tx_bd_prod;
2325         tx_buf->skb = skb;
2326         tx_buf->flags = 0;
2327
2328         DP(NETIF_MSG_TX_QUEUED,
2329            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
2330            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
2331
2332         if (vlan_tx_tag_present(skb)) {
2333                 tx_start_bd->vlan_or_ethertype =
2334                     cpu_to_le16(vlan_tx_tag_get(skb));
2335                 tx_start_bd->bd_flags.as_bitfield |=
2336                     (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
2337         } else
2338                 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
2339
2340         /* turn on parsing and get a BD */
2341         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2342
2343         if (xmit_type & XMIT_CSUM)
2344                 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
2345
2346         if (CHIP_IS_E2(bp)) {
2347                 pbd_e2 = &fp->tx_desc_ring[bd_prod].parse_bd_e2;
2348                 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
2349                 /* Set PBD in checksum offload case */
2350                 if (xmit_type & XMIT_CSUM)
2351                         hlen = bnx2x_set_pbd_csum_e2(bp, skb,
2352                                                      &pbd_e2_parsing_data,
2353                                                      xmit_type);
2354         } else {
2355                 pbd_e1x = &fp->tx_desc_ring[bd_prod].parse_bd_e1x;
2356                 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
2357                 /* Set PBD in checksum offload case */
2358                 if (xmit_type & XMIT_CSUM)
2359                         hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
2360
2361         }
2362
2363         /* Map skb linear data for DMA */
2364         mapping = dma_map_single(&bp->pdev->dev, skb->data,
2365                                  skb_headlen(skb), DMA_TO_DEVICE);
2366
2367         /* Setup the data pointer of the first BD of the packet */
2368         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2369         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2370         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
2371         tx_start_bd->nbd = cpu_to_le16(nbd);
2372         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
2373         pkt_size = tx_start_bd->nbytes;
2374
2375         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
2376            "  nbytes %d  flags %x  vlan %x\n",
2377            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
2378            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
2379            tx_start_bd->bd_flags.as_bitfield,
2380            le16_to_cpu(tx_start_bd->vlan_or_ethertype));
2381
2382         if (xmit_type & XMIT_GSO) {
2383
2384                 DP(NETIF_MSG_TX_QUEUED,
2385                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
2386                    skb->len, hlen, skb_headlen(skb),
2387                    skb_shinfo(skb)->gso_size);
2388
2389                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
2390
2391                 if (unlikely(skb_headlen(skb) > hlen))
2392                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
2393                                                  hlen, bd_prod, ++nbd);
2394                 if (CHIP_IS_E2(bp))
2395                         bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
2396                                              xmit_type);
2397                 else
2398                         bnx2x_set_pbd_gso(skb, pbd_e1x, xmit_type);
2399         }
2400
2401         /* Set the PBD's parsing_data field if not zero
2402          * (for the chips newer than 57711).
2403          */
2404         if (pbd_e2_parsing_data)
2405                 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
2406
2407         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
2408
2409         /* Handle fragmented skb */
2410         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2411                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2412
2413                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2414                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2415                 if (total_pkt_bd == NULL)
2416                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
2417
2418                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
2419                                        frag->page_offset,
2420                                        frag->size, DMA_TO_DEVICE);
2421
2422                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
2423                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
2424                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
2425                 le16_add_cpu(&pkt_size, frag->size);
2426
2427                 DP(NETIF_MSG_TX_QUEUED,
2428                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
2429                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
2430                    le16_to_cpu(tx_data_bd->nbytes));
2431         }
2432
2433         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
2434
2435         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
2436
2437         /* now send a tx doorbell, counting the next BD
2438          * if the packet contains or ends with it
2439          */
2440         if (TX_BD_POFF(bd_prod) < nbd)
2441                 nbd++;
2442
2443         if (total_pkt_bd != NULL)
2444                 total_pkt_bd->total_pkt_bytes = pkt_size;
2445
2446         if (pbd_e1x)
2447                 DP(NETIF_MSG_TX_QUEUED,
2448                    "PBD (E1X) @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
2449                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
2450                    pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
2451                    pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
2452                    pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
2453                     le16_to_cpu(pbd_e1x->total_hlen_w));
2454         if (pbd_e2)
2455                 DP(NETIF_MSG_TX_QUEUED,
2456                    "PBD (E2) @%p  dst %x %x %x src %x %x %x parsing_data %x\n",
2457                    pbd_e2, pbd_e2->dst_mac_addr_hi, pbd_e2->dst_mac_addr_mid,
2458                    pbd_e2->dst_mac_addr_lo, pbd_e2->src_mac_addr_hi,
2459                    pbd_e2->src_mac_addr_mid, pbd_e2->src_mac_addr_lo,
2460                    pbd_e2->parsing_data);
2461         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
2462
2463         /*
2464          * Make sure that the BD data is updated before updating the producer
2465          * since FW might read the BD right after the producer is updated.
2466          * This is only applicable for weak-ordered memory model archs such
2467          * as IA-64. The following barrier is also mandatory since FW will
2468          * assumes packets must have BDs.
2469          */
2470         wmb();
2471
2472         fp->tx_db.data.prod += nbd;
2473         barrier();
2474
2475         DOORBELL(bp, fp->cid, fp->tx_db.raw);
2476
2477         mmiowb();
2478
2479         fp->tx_bd_prod += nbd;
2480
2481         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
2482                 netif_tx_stop_queue(txq);
2483
2484                 /* the paired memory barrier is in bnx2x_tx_int(); we have to keep
2485                  * the ordering of set_bit() in netif_tx_stop_queue() and the read
2486                  * of fp->tx_bd_cons */
2487                 smp_mb();
2488
2489                 fp->eth_q_stats.driver_xoff++;
2490                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
2491                         netif_tx_wake_queue(txq);
2492         }
2493         fp->tx_pkt++;
2494
2495         return NETDEV_TX_OK;
2496 }
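/* Illustrative sketch (not part of the driver): the ordering rule applied at
 * the end of bnx2x_start_xmit() - every BD write must be visible before the
 * producer/doorbell write that lets the FW fetch the BDs. The structure and
 * register layout below are hypothetical.
 */
struct example_tx_ring {
        u16 prod;                       /* software producer index */
        void __iomem *doorbell;         /* hypothetical doorbell register */
};

static void example_ring_doorbell(struct example_tx_ring *ring, int nbd)
{
        wmb();                          /* BDs before the producer update,
                                         * needed on weakly ordered archs */
        ring->prod += nbd;
        writew(ring->prod, ring->doorbell);
        mmiowb();                       /* keep MMIO ordered vs. later locks */
}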
2497
2498 /* called with rtnl_lock */
2499 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
2500 {
2501         struct sockaddr *addr = p;
2502         struct bnx2x *bp = netdev_priv(dev);
2503
2504         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
2505                 return -EINVAL;
2506
2507         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2508         if (netif_running(dev))
2509                 bnx2x_set_eth_mac(bp, 1);
2510
2511         return 0;
2512 }
2513
2514 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
2515 {
2516         union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
2517         struct bnx2x_fastpath *fp = &bp->fp[fp_index];
2518
2519         /* Common */
2520 #ifdef BCM_CNIC
2521         if (IS_FCOE_IDX(fp_index)) {
2522                 memset(sb, 0, sizeof(union host_hc_status_block));
2523                 fp->status_blk_mapping = 0;
2524
2525         } else {
2526 #endif
2527                 /* status blocks */
2528                 if (CHIP_IS_E2(bp))
2529                         BNX2X_PCI_FREE(sb->e2_sb,
2530                                        bnx2x_fp(bp, fp_index,
2531                                                 status_blk_mapping),
2532                                        sizeof(struct host_hc_status_block_e2));
2533                 else
2534                         BNX2X_PCI_FREE(sb->e1x_sb,
2535                                        bnx2x_fp(bp, fp_index,
2536                                                 status_blk_mapping),
2537                                        sizeof(struct host_hc_status_block_e1x));
2538 #ifdef BCM_CNIC
2539         }
2540 #endif
2541         /* Rx */
2542         if (!skip_rx_queue(bp, fp_index)) {
2543                 bnx2x_free_rx_bds(fp);
2544
2545                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2546                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
2547                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
2548                                bnx2x_fp(bp, fp_index, rx_desc_mapping),
2549                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
2550
2551                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
2552                                bnx2x_fp(bp, fp_index, rx_comp_mapping),
2553                                sizeof(struct eth_fast_path_rx_cqe) *
2554                                NUM_RCQ_BD);
2555
2556                 /* SGE ring */
2557                 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
2558                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
2559                                bnx2x_fp(bp, fp_index, rx_sge_mapping),
2560                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2561         }
2562
2563         /* Tx */
2564         if (!skip_tx_queue(bp, fp_index)) {
2565                 /* fastpath tx rings: tx_buf tx_desc */
2566                 BNX2X_FREE(bnx2x_fp(bp, fp_index, tx_buf_ring));
2567                 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, tx_desc_ring),
2568                                bnx2x_fp(bp, fp_index, tx_desc_mapping),
2569                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2570         }
2571         /* end of fastpath */
2572 }
2573
2574 void bnx2x_free_fp_mem(struct bnx2x *bp)
2575 {
2576         int i;
2577         for_each_queue(bp, i)
2578                 bnx2x_free_fp_mem_at(bp, i);
2579 }
2580
2581 static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
2582 {
2583         union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
2584         if (CHIP_IS_E2(bp)) {
2585                 bnx2x_fp(bp, index, sb_index_values) =
2586                         (__le16 *)status_blk.e2_sb->sb.index_values;
2587                 bnx2x_fp(bp, index, sb_running_index) =
2588                         (__le16 *)status_blk.e2_sb->sb.running_index;
2589         } else {
2590                 bnx2x_fp(bp, index, sb_index_values) =
2591                         (__le16 *)status_blk.e1x_sb->sb.index_values;
2592                 bnx2x_fp(bp, index, sb_running_index) =
2593                         (__le16 *)status_blk.e1x_sb->sb.running_index;
2594         }
2595 }
2596
2597 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
2598 {
2599         union host_hc_status_block *sb;
2600         struct bnx2x_fastpath *fp = &bp->fp[index];
2601         int ring_size = 0;
2602
2603         /* if rx_ring_size specified - use it */
2604         int rx_ring_size = bp->rx_ring_size ? bp->rx_ring_size :
2605                            MAX_RX_AVAIL/bp->num_queues;
2606
2607         /* allocate at least number of buffers required by FW */
2608         rx_ring_size = max_t(int, fp->disable_tpa ? MIN_RX_SIZE_NONTPA :
2609                                                     MIN_RX_SIZE_TPA,
2610                                   rx_ring_size);
2611
2612         bnx2x_fp(bp, index, bp) = bp;
2613         bnx2x_fp(bp, index, index) = index;
2614
2615         /* Common */
2616         sb = &bnx2x_fp(bp, index, status_blk);
2617 #ifdef BCM_CNIC
2618         if (!IS_FCOE_IDX(index)) {
2619 #endif
2620                 /* status blocks */
2621                 if (CHIP_IS_E2(bp))
2622                         BNX2X_PCI_ALLOC(sb->e2_sb,
2623                                 &bnx2x_fp(bp, index, status_blk_mapping),
2624                                 sizeof(struct host_hc_status_block_e2));
2625                 else
2626                         BNX2X_PCI_ALLOC(sb->e1x_sb,
2627                                 &bnx2x_fp(bp, index, status_blk_mapping),
2628                             sizeof(struct host_hc_status_block_e1x));
2629 #ifdef BCM_CNIC
2630         }
2631 #endif
2632
2633         /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
2634          * set shortcuts for it.
2635          */
2636         if (!IS_FCOE_IDX(index))
2637                 set_sb_shortcuts(bp, index);
2638
2639         /* Tx */
2640         if (!skip_tx_queue(bp, index)) {
2641                 /* fastpath tx rings: tx_buf tx_desc */
2642                 BNX2X_ALLOC(bnx2x_fp(bp, index, tx_buf_ring),
2643                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
2644                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, tx_desc_ring),
2645                                 &bnx2x_fp(bp, index, tx_desc_mapping),
2646                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
2647         }
2648
2649         /* Rx */
2650         if (!skip_rx_queue(bp, index)) {
2651                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
2652                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_buf_ring),
2653                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
2654                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_desc_ring),
2655                                 &bnx2x_fp(bp, index, rx_desc_mapping),
2656                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
2657
2658                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_comp_ring),
2659                                 &bnx2x_fp(bp, index, rx_comp_mapping),
2660                                 sizeof(struct eth_fast_path_rx_cqe) *
2661                                 NUM_RCQ_BD);
2662
2663                 /* SGE ring */
2664                 BNX2X_ALLOC(bnx2x_fp(bp, index, rx_page_ring),
2665                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
2666                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, index, rx_sge_ring),
2667                                 &bnx2x_fp(bp, index, rx_sge_mapping),
2668                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
2669                 /* RX BD ring */
2670                 bnx2x_set_next_page_rx_bd(fp);
2671
2672                 /* CQ ring */
2673                 bnx2x_set_next_page_rx_cq(fp);
2674
2675                 /* BDs */
2676                 ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
2677                 if (ring_size < rx_ring_size)
2678                         goto alloc_mem_err;
2679         }
2680
2681         return 0;
2682
2683 /* handles low memory cases */
2684 alloc_mem_err:
2685         BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
2686                                                 index, ring_size);
2687         /* The FW will drop all packets if the queue is not big enough;
2688          * in that case we disable the queue.
2689          * The minimum size is different for TPA and non-TPA queues.
2690          */
2691         if (ring_size < (fp->disable_tpa ?
2692                                 MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
2693                         /* release memory allocated for this queue */
2694                         bnx2x_free_fp_mem_at(bp, index);
2695                         return -ENOMEM;
2696         }
2697         return 0;
2698 }
2699
2700 int bnx2x_alloc_fp_mem(struct bnx2x *bp)
2701 {
2702         int i;
2703
2704         /**
2705          * 1. Allocate FP for leading - fatal if error
2706          * 2. {CNIC} Allocate FCoE FP - fatal if error
2707          * 3. Allocate RSS - fix number of queues if error
2708          */
2709
2710         /* leading */
2711         if (bnx2x_alloc_fp_mem_at(bp, 0))
2712                 return -ENOMEM;
2713 #ifdef BCM_CNIC
2714         if (!NO_FCOE(bp))
2715                 /* FCoE */
2716                 if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX))
2717                         /* we will fail the load process instead of marking
2718                          * NO_FCOE_FLAG
2719                          */
2720                         return -ENOMEM;
2721 #endif
2722         /* RSS */
2723         for_each_nondefault_eth_queue(bp, i)
2724                 if (bnx2x_alloc_fp_mem_at(bp, i))
2725                         break;
2726
2727         /* handle memory failures */
2728         if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
2729                 int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;
2730
2731                 WARN_ON(delta < 0);
2732 #ifdef BCM_CNIC
2733                 /**
2734                  * move the non-ETH FPs next to the last ETH FP;
2735                  * this must be done in that order:
2736                  * FCOE_IDX < FWD_IDX < OOO_IDX
2737                  */
2738
2739                 /* move FCoE fp */
2740                 bnx2x_move_fp(bp, FCOE_IDX, FCOE_IDX - delta);
2741 #endif
2742                 bp->num_queues -= delta;
2743                 BNX2X_ERR("Adjusted num of queues from %d to %d\n",
2744                           bp->num_queues + delta, bp->num_queues);
2745         }
2746
2747         return 0;
2748 }
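/* Illustrative sketch (not part of the driver): the fallback policy used by
 * bnx2x_alloc_fp_mem() above - the leading (and FCoE) queues are mandatory,
 * while the RSS queue count is simply trimmed to whatever could be allocated.
 * The callback and its argument are hypothetical.
 */
static int example_alloc_rss_queues(int requested,
                                    int (*alloc_one)(int index))
{
        int i;

        for (i = 0; i < requested; i++)
                if (alloc_one(i))
                        break;          /* keep what was allocated so far */

        return i;                       /* new, possibly reduced, count */
}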
2749
2750 static int bnx2x_setup_irqs(struct bnx2x *bp)
2751 {
2752         int rc = 0;
2753         if (bp->flags & USING_MSIX_FLAG) {
2754                 rc = bnx2x_req_msix_irqs(bp);
2755                 if (rc)
2756                         return rc;
2757         } else {
2758                 bnx2x_ack_int(bp);
2759                 rc = bnx2x_req_irq(bp);
2760                 if (rc) {
2761                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
2762                         return rc;
2763                 }
2764                 if (bp->flags & USING_MSI_FLAG) {
2765                         bp->dev->irq = bp->pdev->irq;
2766                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
2767                                bp->pdev->irq);
2768                 }
2769         }
2770
2771         return 0;
2772 }
2773
2774 void bnx2x_free_mem_bp(struct bnx2x *bp)
2775 {
2776         kfree(bp->fp);
2777         kfree(bp->msix_table);
2778         kfree(bp->ilt);
2779 }
2780
2781 int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2782 {
2783         struct bnx2x_fastpath *fp;
2784         struct msix_entry *tbl;
2785         struct bnx2x_ilt *ilt;
2786
2787         /* fp array */
2788         fp = kzalloc(L2_FP_COUNT(bp->l2_cid_count)*sizeof(*fp), GFP_KERNEL);
2789         if (!fp)
2790                 goto alloc_err;
2791         bp->fp = fp;
2792
2793         /* msix table */
2794         tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
2795                                   GFP_KERNEL);
2796         if (!tbl)
2797                 goto alloc_err;
2798         bp->msix_table = tbl;
2799
2800         /* ilt */
2801         ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
2802         if (!ilt)
2803                 goto alloc_err;
2804         bp->ilt = ilt;
2805
2806         return 0;
2807 alloc_err:
2808         bnx2x_free_mem_bp(bp);
2809         return -ENOMEM;
2810
2811 }
2812
2813 int bnx2x_reload_if_running(struct net_device *dev)
2814 {
2815         struct bnx2x *bp = netdev_priv(dev);
2816
2817         if (unlikely(!netif_running(dev)))
2818                 return 0;
2819
2820         bnx2x_nic_unload(bp, UNLOAD_NORMAL);
2821         return bnx2x_nic_load(bp, LOAD_NORMAL);
2822 }
2823
2824 int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
2825 {
2826         u32 sel_phy_idx = 0;
2827         if (bp->link_params.num_phys <= 1)
2828                 return INT_PHY;
2829
2830         if (bp->link_vars.link_up) {
2831                 sel_phy_idx = EXT_PHY1;
2832                 /* In case link is SERDES, check if the EXT_PHY2 is the one */
2833                 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
2834                     (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
2835                         sel_phy_idx = EXT_PHY2;
2836         } else {
2837
2838                 switch (bnx2x_phy_selection(&bp->link_params)) {
2839                 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
2840                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
2841                 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
2842                        sel_phy_idx = EXT_PHY1;
2843                        break;
2844                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
2845                 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
2846                         sel_phy_idx = EXT_PHY2;
2847                         break;
2848                 }
2849         }
2850
2851         return sel_phy_idx;
2852 }
2853
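/**
 * bnx2x_get_link_cfg_idx - return the link config index of the active PHY.
 *
 * @bp:         driver handle
 *
 * Maps the currently active PHY to its link configuration index,
 * undoing the PHY swap when swapping is enabled.
 */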
2854 int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
2855 {
2856         u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);
2857         /*
2858          * The selected active PHY is always the one after swapping (in case
2859          * PHY swapping is enabled), so when swapping is enabled we need to
2860          * reverse the configuration.
2861          */
2862
2863         if (bp->link_params.multi_phy_config &
2864             PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
2865                 if (sel_phy_idx == EXT_PHY1)
2866                         sel_phy_idx = EXT_PHY2;
2867                 else if (sel_phy_idx == EXT_PHY2)
2868                         sel_phy_idx = EXT_PHY1;
2869         }
2870         return LINK_CONFIG_IDX(sel_phy_idx);
2871 }
2872
2873 /* called with rtnl_lock */
2874 int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
2875 {
2876         struct bnx2x *bp = netdev_priv(dev);
2877
2878         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
2879                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
2880                 return -EAGAIN;
2881         }
2882
2883         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
2884             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
2885                 return -EINVAL;
2886
2887         /* This does not race with packet allocation
2888          * because the actual alloc size is
2889          * only updated as part of load
2890          */
2891         dev->mtu = new_mtu;
2892
2893         return bnx2x_reload_if_running(dev);
2894 }
2895
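/**
 * bnx2x_fix_features - adjust the requested netdev features.
 *
 * @dev:        net device
 * @features:   requested features
 *
 * Clears LRO when Rx checksum offload is not requested or TPA is
 * disabled.
 */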
2896 u32 bnx2x_fix_features(struct net_device *dev, u32 features)
2897 {
2898         struct bnx2x *bp = netdev_priv(dev);
2899
2900         /* TPA requires Rx CSUM offloading */
2901         if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa)
2902                 features &= ~NETIF_F_LRO;
2903
2904         return features;
2905 }
2906
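/**
 * bnx2x_set_features - apply the requested netdev features.
 *
 * @dev:        net device
 * @features:   requested features
 *
 * Translates the LRO and loopback feature bits into the TPA flag and
 * the link loopback mode, and reloads the NIC when the configuration
 * changed and no parity recovery is in progress.
 */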
2907 int bnx2x_set_features(struct net_device *dev, u32 features)
2908 {
2909         struct bnx2x *bp = netdev_priv(dev);
2910         u32 flags = bp->flags;
2911         bool bnx2x_reload = false;
2912
2913         if (features & NETIF_F_LRO)
2914                 flags |= TPA_ENABLE_FLAG;
2915         else
2916                 flags &= ~TPA_ENABLE_FLAG;
2917
2918         if (features & NETIF_F_LOOPBACK) {
2919                 if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
2920                         bp->link_params.loopback_mode = LOOPBACK_BMAC;
2921                         bnx2x_reload = true;
2922                 }
2923         } else {
2924                 if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
2925                         bp->link_params.loopback_mode = LOOPBACK_NONE;
2926                         bnx2x_reload = true;
2927                 }
2928         }
2929
2930         if (flags ^ bp->flags) {
2931                 bp->flags = flags;
2932                 bnx2x_reload = true;
2933         }
2934
2935         if (bnx2x_reload) {
2936                 if (bp->recovery_state == BNX2X_RECOVERY_DONE)
2937                         return bnx2x_reload_if_running(dev);
2938                 /* else: bnx2x_nic_load() will be called at end of recovery */
2939         }
2940
2941         return 0;
2942 }
2943
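/**
 * bnx2x_tx_timeout - netdev Tx timeout handler.
 *
 * @dev:        net device
 *
 * Schedules the reset task so the netif can be shut down gracefully;
 * when BNX2X_STOP_ON_ERROR is defined, bnx2x_panic() is called first.
 */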
2944 void bnx2x_tx_timeout(struct net_device *dev)
2945 {
2946         struct bnx2x *bp = netdev_priv(dev);
2947
2948 #ifdef BNX2X_STOP_ON_ERROR
2949         if (!bp->panic)
2950                 bnx2x_panic();
2951 #endif
2952         /* This allows the netif to be shut down gracefully before resetting */
2953         schedule_delayed_work(&bp->reset_task, 0);
2954 }
2955
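/**
 * bnx2x_suspend - PCI suspend handler.
 *
 * @pdev:       PCI device
 * @state:      target power state
 *
 * Saves the PCI state and, if the interface is running, detaches the
 * netdev, unloads the NIC and moves the device to the requested power
 * state.
 */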
2956 int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
2957 {
2958         struct net_device *dev = pci_get_drvdata(pdev);
2959         struct bnx2x *bp;
2960
2961         if (!dev) {
2962                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2963                 return -ENODEV;
2964         }
2965         bp = netdev_priv(dev);
2966
2967         rtnl_lock();
2968
2969         pci_save_state(pdev);
2970
2971         if (!netif_running(dev)) {
2972                 rtnl_unlock();
2973                 return 0;
2974         }
2975
2976         netif_device_detach(dev);
2977
2978         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
2979
2980         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
2981
2982         rtnl_unlock();
2983
2984         return 0;
2985 }
2986
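/**
 * bnx2x_resume - PCI resume handler.
 *
 * @pdev:       PCI device
 *
 * Restores the PCI state, brings the device back to D0, reattaches the
 * netdev and reloads the NIC with a cleared FW sequence number. Returns
 * -EAGAIN while parity error recovery is in progress.
 */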
2987 int bnx2x_resume(struct pci_dev *pdev)
2988 {
2989         struct net_device *dev = pci_get_drvdata(pdev);
2990         struct bnx2x *bp;
2991         int rc;
2992
2993         if (!dev) {
2994                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
2995                 return -ENODEV;
2996         }
2997         bp = netdev_priv(dev);
2998
2999         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
3000                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
3001                 return -EAGAIN;
3002         }
3003
3004         rtnl_lock();
3005
3006         pci_restore_state(pdev);
3007
3008         if (!netif_running(dev)) {
3009                 rtnl_unlock();
3010                 return 0;
3011         }
3012
3013         bnx2x_set_power_state(bp, PCI_D0);
3014         netif_device_attach(dev);
3015
3016         /* Since the chip was reset, clear the FW sequence number */
3017         bp->fw_seq = 0;
3018         rc = bnx2x_nic_load(bp, LOAD_OPEN);
3019
3020         rtnl_unlock();
3021
3022         return rc;
3023 }