1 /* bnx2x_cmn.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2013 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Ariel Elior <ariel.elior@qlogic.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20 #include <linux/etherdevice.h>
21 #include <linux/if_vlan.h>
22 #include <linux/interrupt.h>
26 #include <net/ip6_checksum.h>
27 #include <net/busy_poll.h>
28 #include <linux/prefetch.h>
29 #include "bnx2x_cmn.h"
30 #include "bnx2x_init.h"
33 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp);
34 static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp);
35 static int bnx2x_alloc_fp_mem(struct bnx2x *bp);
36 static int bnx2x_poll(struct napi_struct *napi, int budget);
38 static void bnx2x_add_all_napi_cnic(struct bnx2x *bp)
42 /* Add NAPI objects */
43 for_each_rx_queue_cnic(bp, i) {
44 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
45 bnx2x_poll, NAPI_POLL_WEIGHT);
46 napi_hash_add(&bnx2x_fp(bp, i, napi));
50 static void bnx2x_add_all_napi(struct bnx2x *bp)
54 /* Add NAPI objects */
55 for_each_eth_queue(bp, i) {
56 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
57 bnx2x_poll, NAPI_POLL_WEIGHT);
58 napi_hash_add(&bnx2x_fp(bp, i, napi));
62 static int bnx2x_calc_num_queues(struct bnx2x *bp)
64 int nq = bnx2x_num_queues ? : netif_get_num_default_rss_queues();
66 /* Reduce memory usage in kdump environment by using only one queue */
70 nq = clamp(nq, 1, BNX2X_MAX_QUEUES(bp));
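/* Illustrative note (not from the original source): with hypothetical
 * values bnx2x_num_queues == 0, netif_get_num_default_rss_queues() == 8
 * and BNX2X_MAX_QUEUES(bp) == 4, the GNU "?:" picks the RSS default and
 * clamp() bounds it:
 *
 *	int nq = 0 ? : 8;	// nq = 8
 *	nq = clamp(nq, 1, 4);	// nq = 4
 */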
75 * bnx2x_move_fp - move content of the fastpath structure.
78 * @from: source FP index
79 * @to: destination FP index
81 * Makes sure the contents of the bp->fp[to].napi are kept
82 * intact. This is done by first copying the napi struct from
83 * the target to the source, and then mem copying the entire
84 * source onto the target. Update txdata pointers and related
85 * content.
87 static inline void bnx2x_move_fp(struct bnx2x *bp, int from, int to)
89 struct bnx2x_fastpath *from_fp = &bp->fp[from];
90 struct bnx2x_fastpath *to_fp = &bp->fp[to];
91 struct bnx2x_sp_objs *from_sp_objs = &bp->sp_objs[from];
92 struct bnx2x_sp_objs *to_sp_objs = &bp->sp_objs[to];
93 struct bnx2x_fp_stats *from_fp_stats = &bp->fp_stats[from];
94 struct bnx2x_fp_stats *to_fp_stats = &bp->fp_stats[to];
95 int old_max_eth_txqs, new_max_eth_txqs;
96 int old_txdata_index = 0, new_txdata_index = 0;
97 struct bnx2x_agg_info *old_tpa_info = to_fp->tpa_info;
99 /* Copy the NAPI object as it has been already initialized */
100 from_fp->napi = to_fp->napi;
102 /* Move bnx2x_fastpath contents */
103 memcpy(to_fp, from_fp, sizeof(*to_fp));
106 /* Retain the tpa_info of the original `to' version as we don't want
107 * 2 FPs to contain the same tpa_info pointer.
109 to_fp->tpa_info = old_tpa_info;
111 /* move sp_objs contents as well, as their indices match fp ones */
112 memcpy(to_sp_objs, from_sp_objs, sizeof(*to_sp_objs));
114 /* move fp_stats contents as well, as their indices match fp ones */
115 memcpy(to_fp_stats, from_fp_stats, sizeof(*to_fp_stats));
117 /* Update txdata pointers in fp and move txdata content accordingly:
118 * Each fp consumes 'max_cos' txdata structures, so the index should be
119 * decremented by max_cos x delta.
122 old_max_eth_txqs = BNX2X_NUM_ETH_QUEUES(bp) * (bp)->max_cos;
123 new_max_eth_txqs = (BNX2X_NUM_ETH_QUEUES(bp) - from + to) *
125 if (from == FCOE_IDX(bp)) {
126 old_txdata_index = old_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
127 new_txdata_index = new_max_eth_txqs + FCOE_TXQ_IDX_OFFSET;
130 memcpy(&bp->bnx2x_txq[new_txdata_index],
131 &bp->bnx2x_txq[old_txdata_index],
132 sizeof(struct bnx2x_fp_txdata));
133 to_fp->txdata_ptr[0] = &bp->bnx2x_txq[new_txdata_index];
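/* Worked example with hypothetical numbers: for max_cos == 2,
 * BNX2X_NUM_ETH_QUEUES(bp) == 8, from == 8 (FCoE) and to == 6,
 * old_max_eth_txqs == 8 * 2 == 16 and new_max_eth_txqs == (8 - 8 + 6) * 2
 * == 12, so the FCoE txdata moves from 16 + FCOE_TXQ_IDX_OFFSET down to
 * 12 + FCOE_TXQ_IDX_OFFSET, i.e. by max_cos times the index delta.
 */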
137 * bnx2x_fill_fw_str - Fill buffer with FW version string.
140 * @buf: character buffer to fill with the fw name
141 * @buf_len: length of the above buffer
144 void bnx2x_fill_fw_str(struct bnx2x *bp, char *buf, size_t buf_len)
147 u8 phy_fw_ver[PHY_FW_VER_LEN];
149 phy_fw_ver[0] = '\0';
150 bnx2x_get_ext_phy_fw_version(&bp->link_params,
151 phy_fw_ver, PHY_FW_VER_LEN);
152 strlcpy(buf, bp->fw_ver, buf_len);
153 snprintf(buf + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
155 (bp->common.bc_ver & 0xff0000) >> 16,
156 (bp->common.bc_ver & 0xff00) >> 8,
157 (bp->common.bc_ver & 0xff),
158 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
160 bnx2x_vf_fill_fw_str(bp, buf, buf_len);
165 * bnx2x_shrink_eth_fp - guarantees fastpath structures stay intact
168 * @delta: number of eth queues which were not allocated
170 static void bnx2x_shrink_eth_fp(struct bnx2x *bp, int delta)
172 int i, cos, old_eth_num = BNX2X_NUM_ETH_QUEUES(bp);
174 /* Queue pointer cannot be re-set on an fp-basis, as moving pointer
175 * backward along the array could cause memory to be overwritten
177 for (cos = 1; cos < bp->max_cos; cos++) {
178 for (i = 0; i < old_eth_num - delta; i++) {
179 struct bnx2x_fastpath *fp = &bp->fp[i];
180 int new_idx = cos * (old_eth_num - delta) + i;
182 memcpy(&bp->bnx2x_txq[new_idx], fp->txdata_ptr[cos],
183 sizeof(struct bnx2x_fp_txdata));
184 fp->txdata_ptr[cos] = &bp->bnx2x_txq[new_idx];
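/* Worked example with hypothetical numbers: shrinking from old_eth_num ==
 * 4 to 3 queues (delta == 1) with max_cos == 2 repacks the cos == 1
 * txdata of queue i == 0 from old index 1 * 4 + 0 == 4 down to
 * new_idx == 1 * (4 - 1) + 0 == 3, keeping bnx2x_txq[] contiguous.
 */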
189 int bnx2x_load_count[2][3] = { {0} }; /* per-path: 0-common, 1-port0, 2-port1 */
191 /* free skb in the packet ring at pos idx
192 * return idx of last bd freed
194 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
195 u16 idx, unsigned int *pkts_compl,
196 unsigned int *bytes_compl)
198 struct sw_tx_bd *tx_buf = &txdata->tx_buf_ring[idx];
199 struct eth_tx_start_bd *tx_start_bd;
200 struct eth_tx_bd *tx_data_bd;
201 struct sk_buff *skb = tx_buf->skb;
202 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
204 u16 split_bd_len = 0;
206 /* prefetch skb end pointer to speed up dev_kfree_skb() */
209 DP(NETIF_MSG_TX_DONE, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n",
210 txdata->txq_index, idx, tx_buf, skb);
212 tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd;
214 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
215 #ifdef BNX2X_STOP_ON_ERROR
216 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
217 BNX2X_ERR("BAD nbd!\n");
221 new_cons = nbd + tx_buf->first_bd;
223 /* Get the next bd */
224 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
226 /* Skip a parse bd... */
228 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
230 if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
231 /* Skip second parse bd... */
233 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
236 /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
237 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
238 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
239 split_bd_len = BD_UNMAP_LEN(tx_data_bd);
241 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
245 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
246 BD_UNMAP_LEN(tx_start_bd) + split_bd_len,
252 tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
253 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
254 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
256 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
263 (*bytes_compl) += skb->len;
266 dev_kfree_skb_any(skb);
267 tx_buf->first_bd = 0;
273 int bnx2x_tx_int(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata)
275 struct netdev_queue *txq;
276 u16 hw_cons, sw_cons, bd_cons = txdata->tx_bd_cons;
277 unsigned int pkts_compl = 0, bytes_compl = 0;
279 #ifdef BNX2X_STOP_ON_ERROR
280 if (unlikely(bp->panic))
284 txq = netdev_get_tx_queue(bp->dev, txdata->txq_index);
285 hw_cons = le16_to_cpu(*txdata->tx_cons_sb);
286 sw_cons = txdata->tx_pkt_cons;
288 while (sw_cons != hw_cons) {
291 pkt_cons = TX_BD(sw_cons);
293 DP(NETIF_MSG_TX_DONE,
294 "queue[%d]: hw_cons %u sw_cons %u pkt_cons %u\n",
295 txdata->txq_index, hw_cons, sw_cons, pkt_cons);
297 bd_cons = bnx2x_free_tx_pkt(bp, txdata, pkt_cons,
298 &pkts_compl, &bytes_compl);
303 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
305 txdata->tx_pkt_cons = sw_cons;
306 txdata->tx_bd_cons = bd_cons;
308 /* Need to make the tx_bd_cons update visible to start_xmit()
309 * before checking for netif_tx_queue_stopped(). Without the
310 * memory barrier, there is a small possibility that
311 * start_xmit() will miss it and cause the queue to be stopped
313 * On the other hand we need an rmb() here to ensure the proper
314 * ordering of bit testing in the following
315 * netif_tx_queue_stopped(txq) call.
319 if (unlikely(netif_tx_queue_stopped(txq))) {
320 /* Taking tx_lock() is needed to prevent re-enabling the queue
321 * while it's empty. This could have happened if rx_action() gets
322 * suspended in bnx2x_tx_int() after the condition before
323 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
325 * stops the queue->sees fresh tx_bd_cons->releases the queue->
326 * sends some packets consuming the whole queue again->
330 __netif_tx_lock(txq, smp_processor_id());
332 if ((netif_tx_queue_stopped(txq)) &&
333 (bp->state == BNX2X_STATE_OPEN) &&
334 (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT))
335 netif_tx_wake_queue(txq);
337 __netif_tx_unlock(txq);
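/* Rough sketch of the two-sided barrier protocol assumed above (the
 * producer side lives in bnx2x_start_xmit(); details abridged):
 *
 *	producer (xmit)			consumer (tx_int)
 *	netif_tx_stop_queue(txq);	txdata->tx_bd_cons = bd_cons;
 *	smp_mb();			smp_mb();
 *	re-check tx avail, wake		re-check stopped + avail, wake
 *
 * Each side publishes its update before re-reading the other's state, so
 * at least one of them observes the final values and wakes the queue.
 */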
342 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
345 u16 last_max = fp->last_max_sge;
347 if (SUB_S16(idx, last_max) > 0)
348 fp->last_max_sge = idx;
351 static inline void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
353 struct eth_end_agg_rx_cqe *cqe)
355 struct bnx2x *bp = fp->bp;
356 u16 last_max, last_elem, first_elem;
363 /* First mark all used pages */
364 for (i = 0; i < sge_len; i++)
365 BIT_VEC64_CLEAR_BIT(fp->sge_mask,
366 RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[i])));
368 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
369 sge_len - 1, le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
371 /* Here we assume that the last SGE index is the biggest */
372 prefetch((void *)(fp->sge_mask));
373 bnx2x_update_last_max_sge(fp,
374 le16_to_cpu(cqe->sgl_or_raw_data.sgl[sge_len - 1]));
376 last_max = RX_SGE(fp->last_max_sge);
377 last_elem = last_max >> BIT_VEC64_ELEM_SHIFT;
378 first_elem = RX_SGE(fp->rx_sge_prod) >> BIT_VEC64_ELEM_SHIFT;
380 /* If ring is not full */
381 if (last_elem + 1 != first_elem)
384 /* Now update the prod */
385 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
386 if (likely(fp->sge_mask[i]))
389 fp->sge_mask[i] = BIT_VEC64_ELEM_ONE_MASK;
390 delta += BIT_VEC64_ELEM_SZ;
394 fp->rx_sge_prod += delta;
395 /* clear page-end entries */
396 bnx2x_clear_sge_mask_next_elems(fp);
399 DP(NETIF_MSG_RX_STATUS,
400 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
401 fp->last_max_sge, fp->rx_sge_prod);
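/* Illustration with hypothetical values: with 64-bit mask elements, a
 * last_max of 200 and an rx_sge_prod of 70 give
 *
 *	last_elem  = 200 >> BIT_VEC64_ELEM_SHIFT;	// element 3
 *	first_elem =  70 >> BIT_VEC64_ELEM_SHIFT;	// element 1
 *
 * so the loop above walks elements 1 and 2, re-arming each fully used
 * mask and growing delta by BIT_VEC64_ELEM_SZ SGEs per element.
 */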
404 /* Get Toeplitz hash value in the skb using the value from the
405 * CQE (calculated by HW).
407 static u32 bnx2x_get_rxhash(const struct bnx2x *bp,
408 const struct eth_fast_path_rx_cqe *cqe,
409 enum pkt_hash_types *rxhash_type)
411 /* Get Toeplitz hash from CQE */
412 if ((bp->dev->features & NETIF_F_RXHASH) &&
413 (cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG)) {
414 enum eth_rss_hash_type htype;
416 htype = cqe->status_flags & ETH_FAST_PATH_RX_CQE_RSS_HASH_TYPE;
417 *rxhash_type = ((htype == TCP_IPV4_HASH_TYPE) ||
418 (htype == TCP_IPV6_HASH_TYPE)) ?
419 PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3;
421 return le32_to_cpu(cqe->rss_hash_result);
423 *rxhash_type = PKT_HASH_TYPE_NONE;
427 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
429 struct eth_fast_path_rx_cqe *cqe)
431 struct bnx2x *bp = fp->bp;
432 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
433 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
434 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
436 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[queue];
437 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
439 /* print error if current state != stop */
440 if (tpa_info->tpa_state != BNX2X_TPA_STOP)
441 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
443 /* Try to map an empty data buffer from the aggregation info */
444 mapping = dma_map_single(&bp->pdev->dev,
445 first_buf->data + NET_SKB_PAD,
446 fp->rx_buf_size, DMA_FROM_DEVICE);
448 * ...if it fails - move the skb from the consumer to the producer
449 * and set the current aggregation state as ERROR to drop it
450 * when TPA_STOP arrives.
453 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
454 /* Move the BD from the consumer to the producer */
455 bnx2x_reuse_rx_data(fp, cons, prod);
456 tpa_info->tpa_state = BNX2X_TPA_ERROR;
460 /* move empty data from pool to prod */
461 prod_rx_buf->data = first_buf->data;
462 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
463 /* point prod_bd to new data */
464 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
465 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
467 /* move partial skb from cons to pool (don't unmap yet) */
468 *first_buf = *cons_rx_buf;
470 /* mark bin state as START */
471 tpa_info->parsing_flags =
472 le16_to_cpu(cqe->pars_flags.flags);
473 tpa_info->vlan_tag = le16_to_cpu(cqe->vlan_tag);
474 tpa_info->tpa_state = BNX2X_TPA_START;
475 tpa_info->len_on_bd = le16_to_cpu(cqe->len_on_bd);
476 tpa_info->placement_offset = cqe->placement_offset;
477 tpa_info->rxhash = bnx2x_get_rxhash(bp, cqe, &tpa_info->rxhash_type);
478 if (fp->mode == TPA_MODE_GRO) {
479 u16 gro_size = le16_to_cpu(cqe->pkt_len_or_gro_seg_len);
480 tpa_info->full_page = SGE_PAGES / gro_size * gro_size;
481 tpa_info->gro_size = gro_size;
484 #ifdef BNX2X_STOP_ON_ERROR
485 fp->tpa_queue_used |= (1 << queue);
486 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
491 /* Timestamp option length allowed for TPA aggregation:
493 * nop nop kind length echo val
495 #define TPA_TSTAMP_OPT_LEN 12
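/* Layout sketch: the 12 bytes are the padded TCP timestamp option,
 * i.e. 01 01 (two nops), 08 (kind), 0a (length), a 4-byte TSval and a
 * 4-byte TSecr.
 */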
497 * bnx2x_set_gro_params - compute GRO values
500 * @parsing_flags: parsing flags from the START CQE
501 * @len_on_bd: total length of the first packet for the
503 * @pkt_len: length of all segments
505 * Approximates the MSS for this aggregation using
506 * its first packet.
507 * Computes the number of aggregated segments and the gso_type.
509 static void bnx2x_set_gro_params(struct sk_buff *skb, u16 parsing_flags,
510 u16 len_on_bd, unsigned int pkt_len,
511 u16 num_of_coalesced_segs)
513 /* TPA aggregation won't have either IP options or TCP options
514 * other than timestamp or IPv6 extension headers.
516 u16 hdrs_len = ETH_HLEN + sizeof(struct tcphdr);
518 if (GET_FLAG(parsing_flags, PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) ==
519 PRS_FLAG_OVERETH_IPV6) {
520 hdrs_len += sizeof(struct ipv6hdr);
521 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
523 hdrs_len += sizeof(struct iphdr);
524 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
527 /* Check if there was a TCP timestamp; if there was, it will
528 * always be 12 bytes long: nop nop kind length echo val.
530 * Otherwise FW would close the aggregation.
532 if (parsing_flags & PARSING_FLAGS_TIME_STAMP_EXIST_FLAG)
533 hdrs_len += TPA_TSTAMP_OPT_LEN;
535 skb_shinfo(skb)->gso_size = len_on_bd - hdrs_len;
537 /* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
538 * to skb_shinfo(skb)->gso_segs
540 NAPI_GRO_CB(skb)->count = num_of_coalesced_segs;
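/* Worked example with hypothetical numbers: an IPv4 aggregation without
 * timestamps and len_on_bd == 1514 gives hdrs_len = 14 (ETH_HLEN) +
 * 20 (iphdr) + 20 (tcphdr) == 54, hence gso_size == 1514 - 54 == 1460,
 * the classic Ethernet TCP MSS.
 */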
543 static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
544 u16 index, gfp_t gfp_mask)
546 struct page *page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
547 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
548 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
551 if (unlikely(page == NULL)) {
552 BNX2X_ERR("Can't alloc sge\n");
556 mapping = dma_map_page(&bp->pdev->dev, page, 0,
557 SGE_PAGES, DMA_FROM_DEVICE);
558 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
559 __free_pages(page, PAGES_PER_SGE_SHIFT);
560 BNX2X_ERR("Can't map sge\n");
565 dma_unmap_addr_set(sw_buf, mapping, mapping);
567 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
568 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
573 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
574 struct bnx2x_agg_info *tpa_info,
577 struct eth_end_agg_rx_cqe *cqe,
580 struct sw_rx_page *rx_pg, old_rx_pg;
581 u32 i, frag_len, frag_size;
582 int err, j, frag_id = 0;
583 u16 len_on_bd = tpa_info->len_on_bd;
584 u16 full_page = 0, gro_size = 0;
586 frag_size = le16_to_cpu(cqe->pkt_len) - len_on_bd;
588 if (fp->mode == TPA_MODE_GRO) {
589 gro_size = tpa_info->gro_size;
590 full_page = tpa_info->full_page;
593 /* This is needed in order to enable forwarding support */
595 bnx2x_set_gro_params(skb, tpa_info->parsing_flags, len_on_bd,
596 le16_to_cpu(cqe->pkt_len),
597 le16_to_cpu(cqe->num_of_coalesced_segs));
599 #ifdef BNX2X_STOP_ON_ERROR
600 if (pages > min_t(u32, 8, MAX_SKB_FRAGS) * SGE_PAGES) {
601 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
603 BNX2X_ERR("cqe->pkt_len = %d\n", cqe->pkt_len);
609 /* Run through the SGL and compose the fragmented skb */
610 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
611 u16 sge_idx = RX_SGE(le16_to_cpu(cqe->sgl_or_raw_data.sgl[j]));
613 /* FW gives the indices of the SGE as if the ring is an array
614 (meaning that "next" element will consume 2 indices) */
615 if (fp->mode == TPA_MODE_GRO)
616 frag_len = min_t(u32, frag_size, (u32)full_page);
618 frag_len = min_t(u32, frag_size, (u32)SGE_PAGES);
620 rx_pg = &fp->rx_page_ring[sge_idx];
623 /* If we fail to allocate a substitute page, we simply stop
624 where we are and drop the whole packet */
625 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx, GFP_ATOMIC);
627 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
631 /* Unmap the page as we're going to pass it to the stack */
632 dma_unmap_page(&bp->pdev->dev,
633 dma_unmap_addr(&old_rx_pg, mapping),
634 SGE_PAGES, DMA_FROM_DEVICE);
635 /* Add one frag and update the appropriate fields in the skb */
636 if (fp->mode == TPA_MODE_LRO)
637 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
641 for (rem = frag_len; rem > 0; rem -= gro_size) {
642 int len = rem > gro_size ? gro_size : rem;
643 skb_fill_page_desc(skb, frag_id++,
644 old_rx_pg.page, offset, len);
646 get_page(old_rx_pg.page);
651 skb->data_len += frag_len;
652 skb->truesize += SGE_PAGES;
653 skb->len += frag_len;
655 frag_size -= frag_len;
661 static void bnx2x_frag_free(const struct bnx2x_fastpath *fp, void *data)
663 if (fp->rx_frag_size)
664 put_page(virt_to_head_page(data));
669 static void *bnx2x_frag_alloc(const struct bnx2x_fastpath *fp, gfp_t gfp_mask)
671 if (fp->rx_frag_size) {
672 /* GFP_KERNEL allocations are used only during initialization */
673 if (unlikely(gfp_mask & __GFP_WAIT))
674 return (void *)__get_free_page(gfp_mask);
676 return netdev_alloc_frag(fp->rx_frag_size);
679 return kmalloc(fp->rx_buf_size + NET_SKB_PAD, gfp_mask);
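/* Illustrative note: a non-zero rx_frag_size lets several RX buffers
 * share one page via the frag allocator, e.g. (hypothetical size):
 *
 *	buf0 = netdev_alloc_frag(2048);	// first half of a fresh page
 *	buf1 = netdev_alloc_frag(2048);	// may land in the same page
 *
 * while rx_frag_size == 0 (jumbo MTU) routes allocations to kmalloc().
 */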
683 static void bnx2x_gro_ip_csum(struct bnx2x *bp, struct sk_buff *skb)
685 const struct iphdr *iph = ip_hdr(skb);
688 skb_set_transport_header(skb, sizeof(struct iphdr));
691 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
692 iph->saddr, iph->daddr, 0);
695 static void bnx2x_gro_ipv6_csum(struct bnx2x *bp, struct sk_buff *skb)
697 struct ipv6hdr *iph = ipv6_hdr(skb);
700 skb_set_transport_header(skb, sizeof(struct ipv6hdr));
703 th->check = ~tcp_v6_check(skb->len - skb_transport_offset(skb),
704 &iph->saddr, &iph->daddr, 0);
707 static void bnx2x_gro_csum(struct bnx2x *bp, struct sk_buff *skb,
708 void (*gro_func)(struct bnx2x*, struct sk_buff*))
710 skb_set_network_header(skb, 0);
712 tcp_gro_complete(skb);
716 static void bnx2x_gro_receive(struct bnx2x *bp, struct bnx2x_fastpath *fp,
720 if (skb_shinfo(skb)->gso_size) {
721 switch (be16_to_cpu(skb->protocol)) {
723 bnx2x_gro_csum(bp, skb, bnx2x_gro_ip_csum);
726 bnx2x_gro_csum(bp, skb, bnx2x_gro_ipv6_csum);
729 BNX2X_ERR("Error: FW GRO supports only IPv4/IPv6, not 0x%04x\n",
730 be16_to_cpu(skb->protocol));
734 skb_record_rx_queue(skb, fp->rx_queue);
735 napi_gro_receive(&fp->napi, skb);
738 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
739 struct bnx2x_agg_info *tpa_info,
741 struct eth_end_agg_rx_cqe *cqe,
744 struct sw_rx_bd *rx_buf = &tpa_info->first_buf;
745 u8 pad = tpa_info->placement_offset;
746 u16 len = tpa_info->len_on_bd;
747 struct sk_buff *skb = NULL;
748 u8 *new_data, *data = rx_buf->data;
749 u8 old_tpa_state = tpa_info->tpa_state;
751 tpa_info->tpa_state = BNX2X_TPA_STOP;
753 /* If there was an error during the handling of the TPA_START -
754 * drop this aggregation.
756 if (old_tpa_state == BNX2X_TPA_ERROR)
759 /* Try to allocate the new data */
760 new_data = bnx2x_frag_alloc(fp, GFP_ATOMIC);
761 /* Unmap skb in the pool anyway, as we are going to change
762 pool entry status to BNX2X_TPA_STOP even if new skb allocation
763 fails. */
764 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
765 fp->rx_buf_size, DMA_FROM_DEVICE);
766 if (likely(new_data))
767 skb = build_skb(data, fp->rx_frag_size);
770 #ifdef BNX2X_STOP_ON_ERROR
771 if (pad + len > fp->rx_buf_size) {
772 BNX2X_ERR("skb_put is about to fail... pad %d len %d rx_buf_size %d\n",
773 pad, len, fp->rx_buf_size);
779 skb_reserve(skb, pad + NET_SKB_PAD);
781 skb_set_hash(skb, tpa_info->rxhash, tpa_info->rxhash_type);
783 skb->protocol = eth_type_trans(skb, bp->dev);
784 skb->ip_summed = CHECKSUM_UNNECESSARY;
786 if (!bnx2x_fill_frag_skb(bp, fp, tpa_info, pages,
787 skb, cqe, cqe_idx)) {
788 if (tpa_info->parsing_flags & PARSING_FLAGS_VLAN)
789 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tpa_info->vlan_tag);
790 bnx2x_gro_receive(bp, fp, skb);
792 DP(NETIF_MSG_RX_STATUS,
793 "Failed to allocate new pages - dropping packet!\n");
794 dev_kfree_skb_any(skb);
797 /* put new data in bin */
798 rx_buf->data = new_data;
803 bnx2x_frag_free(fp, new_data);
805 /* drop the packet and keep the buffer in the bin */
806 DP(NETIF_MSG_RX_STATUS,
807 "Failed to allocate or map a new skb - dropping packet!\n");
808 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed++;
811 static int bnx2x_alloc_rx_data(struct bnx2x *bp, struct bnx2x_fastpath *fp,
812 u16 index, gfp_t gfp_mask)
815 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
816 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
819 data = bnx2x_frag_alloc(fp, gfp_mask);
820 if (unlikely(data == NULL))
823 mapping = dma_map_single(&bp->pdev->dev, data + NET_SKB_PAD,
826 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
827 bnx2x_frag_free(fp, data);
828 BNX2X_ERR("Can't map rx data\n");
833 dma_unmap_addr_set(rx_buf, mapping, mapping);
835 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
836 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
842 void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
843 struct bnx2x_fastpath *fp,
844 struct bnx2x_eth_q_stats *qstats)
846 /* Do nothing if no L4 csum validation was done.
847 * We do not check whether IP csum was validated. For IPv4 we assume
848 * that if the card got as far as validating the L4 csum, it also
849 * validated the IP csum. IPv6 has no IP csum.
851 if (cqe->fast_path_cqe.status_flags &
852 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG)
855 /* If L4 validation was done, check if an error was found. */
857 if (cqe->fast_path_cqe.type_error_flags &
858 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
859 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
860 qstats->hw_csum_err++;
862 skb->ip_summed = CHECKSUM_UNNECESSARY;
865 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
867 struct bnx2x *bp = fp->bp;
868 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
869 u16 sw_comp_cons, sw_comp_prod;
871 union eth_rx_cqe *cqe;
872 struct eth_fast_path_rx_cqe *cqe_fp;
874 #ifdef BNX2X_STOP_ON_ERROR
875 if (unlikely(bp->panic))
881 bd_cons = fp->rx_bd_cons;
882 bd_prod = fp->rx_bd_prod;
883 bd_prod_fw = bd_prod;
884 sw_comp_cons = fp->rx_comp_cons;
885 sw_comp_prod = fp->rx_comp_prod;
887 comp_ring_cons = RCQ_BD(sw_comp_cons);
888 cqe = &fp->rx_comp_ring[comp_ring_cons];
889 cqe_fp = &cqe->fast_path_cqe;
891 DP(NETIF_MSG_RX_STATUS,
892 "queue[%d]: sw_comp_cons %u\n", fp->index, sw_comp_cons);
894 while (BNX2X_IS_CQE_COMPLETED(cqe_fp)) {
895 struct sw_rx_bd *rx_buf = NULL;
898 enum eth_rx_cqe_type cqe_fp_type;
902 enum pkt_hash_types rxhash_type;
904 #ifdef BNX2X_STOP_ON_ERROR
905 if (unlikely(bp->panic))
909 bd_prod = RX_BD(bd_prod);
910 bd_cons = RX_BD(bd_cons);
912 /* A rmb() is required to ensure that the CQE is not read
913 * before it is written by the adapter DMA. PCI ordering
914 * rules will make sure the other fields are written before
915 * the marker at the end of struct eth_fast_path_rx_cqe
916 * but without rmb() a weakly ordered processor can process
917 * stale data. Without the barrier TPA state-machine might
918 * enter an inconsistent state and the kernel stack might be
919 * provided with an incorrect packet description - these lead
920 * to various kernel crashes.
924 cqe_fp_flags = cqe_fp->type_error_flags;
925 cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
927 DP(NETIF_MSG_RX_STATUS,
928 "CQE type %x err %x status %x queue %x vlan %x len %u\n",
929 CQE_TYPE(cqe_fp_flags),
930 cqe_fp_flags, cqe_fp->status_flags,
931 le32_to_cpu(cqe_fp->rss_hash_result),
932 le16_to_cpu(cqe_fp->vlan_tag),
933 le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len));
935 /* is this a slowpath msg? */
936 if (unlikely(CQE_TYPE_SLOW(cqe_fp_type))) {
937 bnx2x_sp_event(fp, cqe);
941 rx_buf = &fp->rx_buf_ring[bd_cons];
944 if (!CQE_TYPE_FAST(cqe_fp_type)) {
945 struct bnx2x_agg_info *tpa_info;
946 u16 frag_size, pages;
947 #ifdef BNX2X_STOP_ON_ERROR
949 if (fp->disable_tpa &&
950 (CQE_TYPE_START(cqe_fp_type) ||
951 CQE_TYPE_STOP(cqe_fp_type)))
952 BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
953 CQE_TYPE(cqe_fp_type));
956 if (CQE_TYPE_START(cqe_fp_type)) {
957 u16 queue = cqe_fp->queue_index;
958 DP(NETIF_MSG_RX_STATUS,
959 "calling tpa_start on queue %d\n",
962 bnx2x_tpa_start(fp, queue,
968 queue = cqe->end_agg_cqe.queue_index;
969 tpa_info = &fp->tpa_info[queue];
970 DP(NETIF_MSG_RX_STATUS,
971 "calling tpa_stop on queue %d\n",
974 frag_size = le16_to_cpu(cqe->end_agg_cqe.pkt_len) -
977 if (fp->mode == TPA_MODE_GRO)
978 pages = (frag_size + tpa_info->full_page - 1) /
981 pages = SGE_PAGE_ALIGN(frag_size) >>
984 bnx2x_tpa_stop(bp, fp, tpa_info, pages,
985 &cqe->end_agg_cqe, comp_ring_cons);
986 #ifdef BNX2X_STOP_ON_ERROR
991 bnx2x_update_sge_prod(fp, pages, &cqe->end_agg_cqe);
995 len = le16_to_cpu(cqe_fp->pkt_len_or_gro_seg_len);
996 pad = cqe_fp->placement_offset;
997 dma_sync_single_for_cpu(&bp->pdev->dev,
998 dma_unmap_addr(rx_buf, mapping),
999 pad + RX_COPY_THRESH,
1002 prefetch(data + pad); /* speedup eth_type_trans() */
1003 /* is this an error packet? */
1004 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1005 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1006 "ERROR flags %x rx packet %u\n",
1007 cqe_fp_flags, sw_comp_cons);
1008 bnx2x_fp_qstats(bp, fp)->rx_err_discard_pkt++;
1012 /* Since we don't have a jumbo ring,
1013 * copy small packets if mtu > 1500
1015 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1016 (len <= RX_COPY_THRESH)) {
1017 skb = netdev_alloc_skb_ip_align(bp->dev, len);
1019 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1020 "ERROR packet dropped because of alloc failure\n");
1021 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1024 memcpy(skb->data, data + pad, len);
1025 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1027 if (likely(bnx2x_alloc_rx_data(bp, fp, bd_prod,
1028 GFP_ATOMIC) == 0)) {
1029 dma_unmap_single(&bp->pdev->dev,
1030 dma_unmap_addr(rx_buf, mapping),
1033 skb = build_skb(data, fp->rx_frag_size);
1034 if (unlikely(!skb)) {
1035 bnx2x_frag_free(fp, data);
1036 bnx2x_fp_qstats(bp, fp)->
1037 rx_skb_alloc_failed++;
1040 skb_reserve(skb, pad);
1042 DP(NETIF_MSG_RX_ERR | NETIF_MSG_RX_STATUS,
1043 "ERROR packet dropped because of alloc failure\n");
1044 bnx2x_fp_qstats(bp, fp)->rx_skb_alloc_failed++;
1046 bnx2x_reuse_rx_data(fp, bd_cons, bd_prod);
1052 skb->protocol = eth_type_trans(skb, bp->dev);
1054 /* Set Toeplitz hash for a non-LRO skb */
1055 rxhash = bnx2x_get_rxhash(bp, cqe_fp, &rxhash_type);
1056 skb_set_hash(skb, rxhash, rxhash_type);
1058 skb_checksum_none_assert(skb);
1060 if (bp->dev->features & NETIF_F_RXCSUM)
1061 bnx2x_csum_validate(skb, cqe, fp,
1062 bnx2x_fp_qstats(bp, fp));
1064 skb_record_rx_queue(skb, fp->rx_queue);
1066 if (le16_to_cpu(cqe_fp->pars_flags.flags) &
1068 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1069 le16_to_cpu(cqe_fp->vlan_tag));
1071 skb_mark_napi_id(skb, &fp->napi);
1073 if (bnx2x_fp_ll_polling(fp))
1074 netif_receive_skb(skb);
1076 napi_gro_receive(&fp->napi, skb);
1078 rx_buf->data = NULL;
1080 bd_cons = NEXT_RX_IDX(bd_cons);
1081 bd_prod = NEXT_RX_IDX(bd_prod);
1082 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1085 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1086 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1088 /* mark CQE as free */
1089 BNX2X_SEED_CQE(cqe_fp);
1091 if (rx_pkt == budget)
1094 comp_ring_cons = RCQ_BD(sw_comp_cons);
1095 cqe = &fp->rx_comp_ring[comp_ring_cons];
1096 cqe_fp = &cqe->fast_path_cqe;
1099 fp->rx_bd_cons = bd_cons;
1100 fp->rx_bd_prod = bd_prod_fw;
1101 fp->rx_comp_cons = sw_comp_cons;
1102 fp->rx_comp_prod = sw_comp_prod;
1104 /* Update producers */
1105 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1108 fp->rx_pkt += rx_pkt;
1114 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1116 struct bnx2x_fastpath *fp = fp_cookie;
1117 struct bnx2x *bp = fp->bp;
1121 "got an MSI-X interrupt on IDX:SB [fp %d fw_sd %d igusb %d]\n",
1122 fp->index, fp->fw_sb_id, fp->igu_sb_id);
1124 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1126 #ifdef BNX2X_STOP_ON_ERROR
1127 if (unlikely(bp->panic))
1131 /* Handle Rx and Tx according to MSI-X vector */
1132 for_each_cos_in_tx_queue(fp, cos)
1133 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
1135 prefetch(&fp->sb_running_index[SM_RX_ID]);
1136 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1141 /* HW Lock for shared dual port PHYs */
1142 void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1144 mutex_lock(&bp->port.phy_mutex);
1146 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1149 void bnx2x_release_phy_lock(struct bnx2x *bp)
1151 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1153 mutex_unlock(&bp->port.phy_mutex);
1156 /* calculates MF speed according to current line speed and MF configuration */
1157 u16 bnx2x_get_mf_speed(struct bnx2x *bp)
1159 u16 line_speed = bp->link_vars.line_speed;
1161 u16 maxCfg = bnx2x_extract_max_cfg(bp,
1162 bp->mf_config[BP_VN(bp)]);
1164 /* Calculate the current MAX line speed limit for the MF
1168 line_speed = (line_speed * maxCfg) / 100;
1169 else { /* SD mode */
1170 u16 vn_max_rate = maxCfg * 100;
1172 if (vn_max_rate < line_speed)
1173 line_speed = vn_max_rate;
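/* Worked example with hypothetical numbers: on a 10000 Mbps link with
 * maxCfg == 25, SI mode reports 10000 * 25 / 100 == 2500 Mbps, and SD
 * mode caps line_speed at vn_max_rate == 25 * 100 == 2500 Mbps.
 */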
1181 * bnx2x_fill_report_data - fill link report data to report
1183 * @bp: driver handle
1184 * @data: link state to update
1186 * Uses non-atomic bit operations because it is called under the mutex.
1188 static void bnx2x_fill_report_data(struct bnx2x *bp,
1189 struct bnx2x_link_report_data *data)
1191 memset(data, 0, sizeof(*data));
1194 /* Fill the report data: effective line speed */
1195 data->line_speed = bnx2x_get_mf_speed(bp);
1198 if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
1199 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1200 &data->link_report_flags);
1202 if (!BNX2X_NUM_ETH_QUEUES(bp))
1203 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1204 &data->link_report_flags);
1207 if (bp->link_vars.duplex == DUPLEX_FULL)
1208 __set_bit(BNX2X_LINK_REPORT_FD,
1209 &data->link_report_flags);
1211 /* Rx Flow Control is ON */
1212 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
1213 __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1214 &data->link_report_flags);
1216 /* Tx Flow Control is ON */
1217 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1218 __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1219 &data->link_report_flags);
1221 *data = bp->vf_link_vars;
1226 * bnx2x_link_report - report link status to OS.
1228 * @bp: driver handle
1230 * Calls the __bnx2x_link_report() under the same locking scheme
1231 * as the link/PHY state managing code to ensure a consistent link
1235 void bnx2x_link_report(struct bnx2x *bp)
1237 bnx2x_acquire_phy_lock(bp);
1238 __bnx2x_link_report(bp);
1239 bnx2x_release_phy_lock(bp);
1243 * __bnx2x_link_report - report link status to OS.
1245 * @bp: driver handle
1247 * Non-atomic implementation.
1248 * Should be called under the phy_lock.
1250 void __bnx2x_link_report(struct bnx2x *bp)
1252 struct bnx2x_link_report_data cur_data;
1255 if (IS_PF(bp) && !CHIP_IS_E1(bp))
1256 bnx2x_read_mf_cfg(bp);
1258 /* Read the current link report info */
1259 bnx2x_fill_report_data(bp, &cur_data);
1261 /* Don't report link down or exactly the same link status twice */
1262 if (!memcmp(&cur_data, &bp->last_reported_link, sizeof(cur_data)) ||
1263 (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1264 &bp->last_reported_link.link_report_flags) &&
1265 test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1266 &cur_data.link_report_flags)))
1271 /* We are going to report new link parameters now -
1272 * remember the current data for the next time.
1274 memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
1276 /* propagate status to VFs */
1278 bnx2x_iov_link_update(bp);
1280 if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
1281 &cur_data.link_report_flags)) {
1282 netif_carrier_off(bp->dev);
1283 netdev_err(bp->dev, "NIC Link is Down\n");
1289 netif_carrier_on(bp->dev);
1291 if (test_and_clear_bit(BNX2X_LINK_REPORT_FD,
1292 &cur_data.link_report_flags))
1297 /* Handle the FC at the end so that only these flags could still be
1298 * set. This way we may easily check if there is no FC
1301 if (cur_data.link_report_flags) {
1302 if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
1303 &cur_data.link_report_flags)) {
1304 if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
1305 &cur_data.link_report_flags))
1306 flow = "ON - receive & transmit";
1308 flow = "ON - receive";
1310 flow = "ON - transmit";
1315 netdev_info(bp->dev, "NIC Link is Up, %d Mbps %s duplex, Flow control: %s\n",
1316 cur_data.line_speed, duplex, flow);
1320 static void bnx2x_set_next_page_sgl(struct bnx2x_fastpath *fp)
1324 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1325 struct eth_rx_sge *sge;
1327 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
1329 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
1330 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1333 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
1334 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
1338 static void bnx2x_free_tpa_pool(struct bnx2x *bp,
1339 struct bnx2x_fastpath *fp, int last)
1343 for (i = 0; i < last; i++) {
1344 struct bnx2x_agg_info *tpa_info = &fp->tpa_info[i];
1345 struct sw_rx_bd *first_buf = &tpa_info->first_buf;
1346 u8 *data = first_buf->data;
1349 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
1352 if (tpa_info->tpa_state == BNX2X_TPA_START)
1353 dma_unmap_single(&bp->pdev->dev,
1354 dma_unmap_addr(first_buf, mapping),
1355 fp->rx_buf_size, DMA_FROM_DEVICE);
1356 bnx2x_frag_free(fp, data);
1357 first_buf->data = NULL;
1361 void bnx2x_init_rx_rings_cnic(struct bnx2x *bp)
1365 for_each_rx_queue_cnic(bp, j) {
1366 struct bnx2x_fastpath *fp = &bp->fp[j];
1370 /* Activate BD ring */
1372 * this will generate an interrupt (to the TSTORM)
1373 * must only be done after chip is initialized
1375 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1380 void bnx2x_init_rx_rings(struct bnx2x *bp)
1382 int func = BP_FUNC(bp);
1386 /* Allocate TPA resources */
1387 for_each_eth_queue(bp, j) {
1388 struct bnx2x_fastpath *fp = &bp->fp[j];
1391 "mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
1393 if (!fp->disable_tpa) {
1394 /* Fill the per-aggregation pool */
1395 for (i = 0; i < MAX_AGG_QS(bp); i++) {
1396 struct bnx2x_agg_info *tpa_info =
1398 struct sw_rx_bd *first_buf =
1399 &tpa_info->first_buf;
1402 bnx2x_frag_alloc(fp, GFP_KERNEL);
1403 if (!first_buf->data) {
1404 BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
1406 bnx2x_free_tpa_pool(bp, fp, i);
1407 fp->disable_tpa = 1;
1410 dma_unmap_addr_set(first_buf, mapping, 0);
1411 tpa_info->tpa_state = BNX2X_TPA_STOP;
1414 /* "next page" elements initialization */
1415 bnx2x_set_next_page_sgl(fp);
1417 /* set SGEs bit mask */
1418 bnx2x_init_sge_ring_bit_mask(fp);
1420 /* Allocate SGEs and initialize the ring elements */
1421 for (i = 0, ring_prod = 0;
1422 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
1424 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod,
1426 BNX2X_ERR("was only able to allocate %d rx sges\n",
1428 BNX2X_ERR("disabling TPA for queue[%d]\n",
1430 /* Cleanup already allocated elements */
1431 bnx2x_free_rx_sge_range(bp, fp,
1433 bnx2x_free_tpa_pool(bp, fp,
1435 fp->disable_tpa = 1;
1439 ring_prod = NEXT_SGE_IDX(ring_prod);
1442 fp->rx_sge_prod = ring_prod;
1446 for_each_eth_queue(bp, j) {
1447 struct bnx2x_fastpath *fp = &bp->fp[j];
1451 /* Activate BD ring */
1453 * this will generate an interrupt (to the TSTORM)
1454 * must only be done after chip is initialized
1456 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
1462 if (CHIP_IS_E1(bp)) {
1463 REG_WR(bp, BAR_USTRORM_INTMEM +
1464 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
1465 U64_LO(fp->rx_comp_mapping));
1466 REG_WR(bp, BAR_USTRORM_INTMEM +
1467 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
1468 U64_HI(fp->rx_comp_mapping));
1473 static void bnx2x_free_tx_skbs_queue(struct bnx2x_fastpath *fp)
1476 struct bnx2x *bp = fp->bp;
1478 for_each_cos_in_tx_queue(fp, cos) {
1479 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
1480 unsigned pkts_compl = 0, bytes_compl = 0;
1482 u16 sw_prod = txdata->tx_pkt_prod;
1483 u16 sw_cons = txdata->tx_pkt_cons;
1485 while (sw_cons != sw_prod) {
1486 bnx2x_free_tx_pkt(bp, txdata, TX_BD(sw_cons),
1487 &pkts_compl, &bytes_compl);
1491 netdev_tx_reset_queue(
1492 netdev_get_tx_queue(bp->dev,
1493 txdata->txq_index));
1497 static void bnx2x_free_tx_skbs_cnic(struct bnx2x *bp)
1501 for_each_tx_queue_cnic(bp, i) {
1502 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1506 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
1510 for_each_eth_queue(bp, i) {
1511 bnx2x_free_tx_skbs_queue(&bp->fp[i]);
1515 static void bnx2x_free_rx_bds(struct bnx2x_fastpath *fp)
1517 struct bnx2x *bp = fp->bp;
1520 /* ring wasn't allocated */
1521 if (fp->rx_buf_ring == NULL)
1524 for (i = 0; i < NUM_RX_BD; i++) {
1525 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
1526 u8 *data = rx_buf->data;
1530 dma_unmap_single(&bp->pdev->dev,
1531 dma_unmap_addr(rx_buf, mapping),
1532 fp->rx_buf_size, DMA_FROM_DEVICE);
1534 rx_buf->data = NULL;
1535 bnx2x_frag_free(fp, data);
1539 static void bnx2x_free_rx_skbs_cnic(struct bnx2x *bp)
1543 for_each_rx_queue_cnic(bp, j) {
1544 bnx2x_free_rx_bds(&bp->fp[j]);
1548 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
1552 for_each_eth_queue(bp, j) {
1553 struct bnx2x_fastpath *fp = &bp->fp[j];
1555 bnx2x_free_rx_bds(fp);
1557 if (!fp->disable_tpa)
1558 bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
1562 static void bnx2x_free_skbs_cnic(struct bnx2x *bp)
1564 bnx2x_free_tx_skbs_cnic(bp);
1565 bnx2x_free_rx_skbs_cnic(bp);
1568 void bnx2x_free_skbs(struct bnx2x *bp)
1570 bnx2x_free_tx_skbs(bp);
1571 bnx2x_free_rx_skbs(bp);
1574 void bnx2x_update_max_mf_config(struct bnx2x *bp, u32 value)
1576 /* load old values */
1577 u32 mf_cfg = bp->mf_config[BP_VN(bp)];
1579 if (value != bnx2x_extract_max_cfg(bp, mf_cfg)) {
1580 /* leave all but MAX value */
1581 mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK;
1583 /* set new MAX value */
1584 mf_cfg |= (value << FUNC_MF_CFG_MAX_BW_SHIFT)
1585 & FUNC_MF_CFG_MAX_BW_MASK;
1587 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, mf_cfg);
1592 * bnx2x_free_msix_irqs - free previously requested MSI-X IRQ vectors
1594 * @bp: driver handle
1595 * @nvecs: number of vectors to be released
1597 static void bnx2x_free_msix_irqs(struct bnx2x *bp, int nvecs)
1601 if (nvecs == offset)
1604 /* VFs don't have a default SB */
1606 free_irq(bp->msix_table[offset].vector, bp->dev);
1607 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
1608 bp->msix_table[offset].vector);
1612 if (CNIC_SUPPORT(bp)) {
1613 if (nvecs == offset)
1618 for_each_eth_queue(bp, i) {
1619 if (nvecs == offset)
1621 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq\n",
1622 i, bp->msix_table[offset].vector);
1624 free_irq(bp->msix_table[offset++].vector, &bp->fp[i]);
1628 void bnx2x_free_irq(struct bnx2x *bp)
1630 if (bp->flags & USING_MSIX_FLAG &&
1631 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1632 int nvecs = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_SUPPORT(bp);
1634 /* vfs don't have a default status block */
1638 bnx2x_free_msix_irqs(bp, nvecs);
1640 free_irq(bp->dev->irq, bp->dev);
1644 int bnx2x_enable_msix(struct bnx2x *bp)
1646 int msix_vec = 0, i, rc;
1648 /* VFs don't have a default status block */
1650 bp->msix_table[msix_vec].entry = msix_vec;
1651 BNX2X_DEV_INFO("msix_table[0].entry = %d (slowpath)\n",
1652 bp->msix_table[0].entry);
1656 /* CNIC requires an MSI-X vector for itself */
1657 if (CNIC_SUPPORT(bp)) {
1658 bp->msix_table[msix_vec].entry = msix_vec;
1659 BNX2X_DEV_INFO("msix_table[%d].entry = %d (CNIC)\n",
1660 msix_vec, bp->msix_table[msix_vec].entry);
1664 /* We need separate vectors for ETH queues only (not FCoE) */
1665 for_each_eth_queue(bp, i) {
1666 bp->msix_table[msix_vec].entry = msix_vec;
1667 BNX2X_DEV_INFO("msix_table[%d].entry = %d (fastpath #%u)\n",
1668 msix_vec, msix_vec, i);
1672 DP(BNX2X_MSG_SP, "about to request enable msix with %d vectors\n",
1675 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0],
1676 BNX2X_MIN_MSIX_VEC_CNT(bp), msix_vec);
1678 * reconfigure number of tx/rx queues according to available
1681 if (rc == -ENOSPC) {
1682 /* Get by with single vector */
1683 rc = pci_enable_msix_range(bp->pdev, &bp->msix_table[0], 1, 1);
1685 BNX2X_DEV_INFO("Single MSI-X is not attainable rc %d\n",
1690 BNX2X_DEV_INFO("Using single MSI-X vector\n");
1691 bp->flags |= USING_SINGLE_MSIX_FLAG;
1693 BNX2X_DEV_INFO("set number of queues to 1\n");
1694 bp->num_ethernet_queues = 1;
1695 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1696 } else if (rc < 0) {
1697 BNX2X_DEV_INFO("MSI-X is not attainable rc %d\n", rc);
1699 } else if (rc < msix_vec) {
1700 /* how many fewer vectors will we have? */
1701 int diff = msix_vec - rc;
1703 BNX2X_DEV_INFO("Trying to use less MSI-X vectors: %d\n", rc);
1706 * decrease number of queues by number of unallocated entries
1708 bp->num_ethernet_queues -= diff;
1709 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1711 BNX2X_DEV_INFO("New queue configuration set: %d\n",
1715 bp->flags |= USING_MSIX_FLAG;
1720 /* fall back to INTx if not enough memory */
1722 bp->flags |= DISABLE_MSI_FLAG;
1727 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1729 int i, rc, offset = 0;
1731 /* no default status block for vf */
1733 rc = request_irq(bp->msix_table[offset++].vector,
1734 bnx2x_msix_sp_int, 0,
1735 bp->dev->name, bp->dev);
1737 BNX2X_ERR("request sp irq failed\n");
1742 if (CNIC_SUPPORT(bp))
1745 for_each_eth_queue(bp, i) {
1746 struct bnx2x_fastpath *fp = &bp->fp[i];
1747 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1750 rc = request_irq(bp->msix_table[offset].vector,
1751 bnx2x_msix_fp_int, 0, fp->name, fp);
1753 BNX2X_ERR("request fp #%d irq (%d) failed rc %d\n", i,
1754 bp->msix_table[offset].vector, rc);
1755 bnx2x_free_msix_irqs(bp, offset);
1762 i = BNX2X_NUM_ETH_QUEUES(bp);
1764 offset = 1 + CNIC_SUPPORT(bp);
1765 netdev_info(bp->dev,
1766 "using MSI-X IRQs: sp %d fp[%d] %d ... fp[%d] %d\n",
1767 bp->msix_table[0].vector,
1768 0, bp->msix_table[offset].vector,
1769 i - 1, bp->msix_table[offset + i - 1].vector);
1771 offset = CNIC_SUPPORT(bp);
1772 netdev_info(bp->dev,
1773 "using MSI-X IRQs: fp[%d] %d ... fp[%d] %d\n",
1774 0, bp->msix_table[offset].vector,
1775 i - 1, bp->msix_table[offset + i - 1].vector);
1780 int bnx2x_enable_msi(struct bnx2x *bp)
1784 rc = pci_enable_msi(bp->pdev);
1786 BNX2X_DEV_INFO("MSI is not attainable\n");
1789 bp->flags |= USING_MSI_FLAG;
1794 static int bnx2x_req_irq(struct bnx2x *bp)
1796 unsigned long flags;
1799 if (bp->flags & (USING_MSI_FLAG | USING_MSIX_FLAG))
1802 flags = IRQF_SHARED;
1804 if (bp->flags & USING_MSIX_FLAG)
1805 irq = bp->msix_table[0].vector;
1807 irq = bp->pdev->irq;
1809 return request_irq(irq, bnx2x_interrupt, flags, bp->dev->name, bp->dev);
1812 static int bnx2x_setup_irqs(struct bnx2x *bp)
1815 if (bp->flags & USING_MSIX_FLAG &&
1816 !(bp->flags & USING_SINGLE_MSIX_FLAG)) {
1817 rc = bnx2x_req_msix_irqs(bp);
1821 rc = bnx2x_req_irq(bp);
1823 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
1826 if (bp->flags & USING_MSI_FLAG) {
1827 bp->dev->irq = bp->pdev->irq;
1828 netdev_info(bp->dev, "using MSI IRQ %d\n",
1831 if (bp->flags & USING_MSIX_FLAG) {
1832 bp->dev->irq = bp->msix_table[0].vector;
1833 netdev_info(bp->dev, "using MSIX IRQ %d\n",
1841 static void bnx2x_napi_enable_cnic(struct bnx2x *bp)
1845 for_each_rx_queue_cnic(bp, i) {
1846 bnx2x_fp_init_lock(&bp->fp[i]);
1847 napi_enable(&bnx2x_fp(bp, i, napi));
1851 static void bnx2x_napi_enable(struct bnx2x *bp)
1855 for_each_eth_queue(bp, i) {
1856 bnx2x_fp_init_lock(&bp->fp[i]);
1857 napi_enable(&bnx2x_fp(bp, i, napi));
1861 static void bnx2x_napi_disable_cnic(struct bnx2x *bp)
1865 for_each_rx_queue_cnic(bp, i) {
1866 napi_disable(&bnx2x_fp(bp, i, napi));
1867 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1868 usleep_range(1000, 2000);
1872 static void bnx2x_napi_disable(struct bnx2x *bp)
1876 for_each_eth_queue(bp, i) {
1877 napi_disable(&bnx2x_fp(bp, i, napi));
1878 while (!bnx2x_fp_ll_disable(&bp->fp[i]))
1879 usleep_range(1000, 2000);
1883 void bnx2x_netif_start(struct bnx2x *bp)
1885 if (netif_running(bp->dev)) {
1886 bnx2x_napi_enable(bp);
1887 if (CNIC_LOADED(bp))
1888 bnx2x_napi_enable_cnic(bp);
1889 bnx2x_int_enable(bp);
1890 if (bp->state == BNX2X_STATE_OPEN)
1891 netif_tx_wake_all_queues(bp->dev);
1895 void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1897 bnx2x_int_disable_sync(bp, disable_hw);
1898 bnx2x_napi_disable(bp);
1899 if (CNIC_LOADED(bp))
1900 bnx2x_napi_disable_cnic(bp);
1903 u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
1904 void *accel_priv, select_queue_fallback_t fallback)
1906 struct bnx2x *bp = netdev_priv(dev);
1908 if (CNIC_LOADED(bp) && !NO_FCOE(bp)) {
1909 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1910 u16 ether_type = ntohs(hdr->h_proto);
1912 /* Skip VLAN tag if present */
1913 if (ether_type == ETH_P_8021Q) {
1914 struct vlan_ethhdr *vhdr =
1915 (struct vlan_ethhdr *)skb->data;
1917 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1920 /* If ethertype is FCoE or FIP - use FCoE ring */
1921 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1922 return bnx2x_fcoe_tx(bp, txq_index);
1925 /* select a non-FCoE queue */
1926 return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
1929 void bnx2x_set_num_queues(struct bnx2x *bp)
1932 bp->num_ethernet_queues = bnx2x_calc_num_queues(bp);
1934 /* override in STORAGE SD modes */
1935 if (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))
1936 bp->num_ethernet_queues = 1;
1938 /* Add special queues */
1939 bp->num_cnic_queues = CNIC_SUPPORT(bp); /* For FCOE */
1940 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
1942 BNX2X_DEV_INFO("set number of queues to %d\n", bp->num_queues);
1946 * bnx2x_set_real_num_queues - configure netdev->real_num_[tx,rx]_queues
1948 * @bp: Driver handle
1950 * We currently support at most 16 Tx queues for each CoS, thus we will
1951 * allocate a multiple of 16 for ETH L2 rings according to the value of the
1954 * If there is an FCoE L2 queue, the appropriate Tx queue will have the next
1955 * index after all ETH L2 indices.
1957 * If the actual number of Tx queues (for each CoS) is less than 16, then there
1958 * will be holes at the end of each group of 16 ETH L2 indices (0..15,
1959 * 16..31,...) with indices that are not coupled with any real Tx queue.
1961 * The proper configuration of skb->queue_mapping is handled by
1962 * bnx2x_select_queue() and __skb_tx_hash().
1964 * bnx2x_setup_tc() takes care of the proper TC mappings so that __skb_tx_hash()
1965 * will return a proper Tx index if TC is enabled (netdev->num_tc > 0).
1967 static int bnx2x_set_real_num_queues(struct bnx2x *bp, int include_cnic)
1971 tx = BNX2X_NUM_ETH_QUEUES(bp) * bp->max_cos;
1972 rx = BNX2X_NUM_ETH_QUEUES(bp);
1974 /* account for fcoe queue */
1975 if (include_cnic && !NO_FCOE(bp)) {
1980 rc = netif_set_real_num_tx_queues(bp->dev, tx);
1982 BNX2X_ERR("Failed to set real number of Tx queues: %d\n", rc);
1985 rc = netif_set_real_num_rx_queues(bp->dev, rx);
1987 BNX2X_ERR("Failed to set real number of Rx queues: %d\n", rc);
1991 DP(NETIF_MSG_IFUP, "Setting real num queues to (tx, rx) (%d, %d)\n",
1997 static void bnx2x_set_rx_buf_size(struct bnx2x *bp)
2001 for_each_queue(bp, i) {
2002 struct bnx2x_fastpath *fp = &bp->fp[i];
2005 /* Always use a mini-jumbo MTU for the FCoE L2 ring */
2008 * Although there are no IP frames expected to arrive on
2009 * this ring, we still want to add an
2010 * IP_HEADER_ALIGNMENT_PADDING to prevent a buffer
2013 mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2016 fp->rx_buf_size = BNX2X_FW_RX_ALIGN_START +
2017 IP_HEADER_ALIGNMENT_PADDING +
2020 BNX2X_FW_RX_ALIGN_END;
2021 /* Note : rx_buf_size doesn't take into account NET_SKB_PAD */
2022 if (fp->rx_buf_size + NET_SKB_PAD <= PAGE_SIZE)
2023 fp->rx_frag_size = fp->rx_buf_size + NET_SKB_PAD;
2025 fp->rx_frag_size = 0;
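/* Illustrative note: with a common 1500-byte MTU the resulting
 * rx_buf_size plus NET_SKB_PAD fits in a 4096-byte page, so rx_frag_size
 * stays non-zero and bnx2x_frag_alloc() serves page fragments; a jumbo
 * MTU overflows the page and falls back to kmalloc().
 */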
2029 static int bnx2x_init_rss(struct bnx2x *bp)
2032 u8 num_eth_queues = BNX2X_NUM_ETH_QUEUES(bp);
2034 /* Prepare the initial contents for the indirection table if RSS is
2037 for (i = 0; i < sizeof(bp->rss_conf_obj.ind_table); i++)
2038 bp->rss_conf_obj.ind_table[i] =
2040 ethtool_rxfh_indir_default(i, num_eth_queues);
2043 * For 57710 and 57711 SEARCHER configuration (rss_keys) is
2044 * per-port, so if explicit configuration is needed, do it only
2047 * For 57712 and newer on the other hand it's a per-function
2050 return bnx2x_config_rss_eth(bp, bp->port.pmf || !CHIP_IS_E1x(bp));
2053 int bnx2x_rss(struct bnx2x *bp, struct bnx2x_rss_config_obj *rss_obj,
2054 bool config_hash, bool enable)
2056 struct bnx2x_config_rss_params params = {NULL};
2058 /* Although RSS is meaningless when there is a single HW queue we
2059 * still need it enabled in order to have HW Rx hash generated.
2061 * if (!is_eth_multi(bp))
2062 * bp->multi_mode = ETH_RSS_MODE_DISABLED;
2065 params.rss_obj = rss_obj;
2067 __set_bit(RAMROD_COMP_WAIT, &params.ramrod_flags);
2070 __set_bit(BNX2X_RSS_MODE_REGULAR, &params.rss_flags);
2072 /* RSS configuration */
2073 __set_bit(BNX2X_RSS_IPV4, &params.rss_flags);
2074 __set_bit(BNX2X_RSS_IPV4_TCP, &params.rss_flags);
2075 __set_bit(BNX2X_RSS_IPV6, &params.rss_flags);
2076 __set_bit(BNX2X_RSS_IPV6_TCP, &params.rss_flags);
2077 if (rss_obj->udp_rss_v4)
2078 __set_bit(BNX2X_RSS_IPV4_UDP, &params.rss_flags);
2079 if (rss_obj->udp_rss_v6)
2080 __set_bit(BNX2X_RSS_IPV6_UDP, &params.rss_flags);
2082 __set_bit(BNX2X_RSS_MODE_DISABLED, &params.rss_flags);
2086 params.rss_result_mask = MULTI_MASK;
2088 memcpy(params.ind_table, rss_obj->ind_table, sizeof(params.ind_table));
2092 prandom_bytes(params.rss_key, T_ETH_RSS_KEY * 4);
2093 __set_bit(BNX2X_RSS_SET_SRCH, &params.rss_flags);
2097 return bnx2x_config_rss(bp, &params);
2099 return bnx2x_vfpf_config_rss(bp, &params);
2102 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
2104 struct bnx2x_func_state_params func_params = {NULL};
2106 /* Prepare parameters for function state transitions */
2107 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
2109 func_params.f_obj = &bp->func_obj;
2110 func_params.cmd = BNX2X_F_CMD_HW_INIT;
2112 func_params.params.hw_init.load_phase = load_code;
2114 return bnx2x_func_state_change(bp, &func_params);
2118 * Cleans the objects that have internal lists, without sending
2119 * ramrods. Should be run when interrupts are disabled.
2121 void bnx2x_squeeze_objects(struct bnx2x *bp)
2124 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
2125 struct bnx2x_mcast_ramrod_params rparam = {NULL};
2126 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
2128 /***************** Cleanup MACs' object first *************************/
2130 /* Wait for completion of the requested commands */
2131 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
2132 /* Perform a dry cleanup */
2133 __set_bit(RAMROD_DRV_CLR_ONLY, &ramrod_flags);
2135 /* Clean ETH primary MAC */
2136 __set_bit(BNX2X_ETH_MAC, &vlan_mac_flags);
2137 rc = mac_obj->delete_all(bp, &bp->sp_objs->mac_obj, &vlan_mac_flags,
2140 BNX2X_ERR("Failed to clean ETH MACs: %d\n", rc);
2142 /* Cleanup UC list */
2144 __set_bit(BNX2X_UC_LIST_MAC, &vlan_mac_flags);
2145 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags,
2148 BNX2X_ERR("Failed to clean UC list MACs: %d\n", rc);
2150 /***************** Now clean mcast object *****************************/
2151 rparam.mcast_obj = &bp->mcast_obj;
2152 __set_bit(RAMROD_DRV_CLR_ONLY, &rparam.ramrod_flags);
2154 /* Add a DEL command... - Since we're doing a driver cleanup only,
2155 * we take a lock surrounding both the initial send and the CONTs,
2156 * as we don't want a true completion to disrupt us in the middle.
2158 netif_addr_lock_bh(bp->dev);
2159 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
2161 BNX2X_ERR("Failed to add a new DEL command to a multi-cast object: %d\n",
2164 /* ...and wait until all pending commands are cleared */
2165 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2168 BNX2X_ERR("Failed to clean multi-cast object: %d\n",
2170 netif_addr_unlock_bh(bp->dev);
2174 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
2176 netif_addr_unlock_bh(bp->dev);
2179 #ifndef BNX2X_STOP_ON_ERROR
2180 #define LOAD_ERROR_EXIT(bp, label) \
2182 (bp)->state = BNX2X_STATE_ERROR; \
2186 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2188 bp->cnic_loaded = false; \
2191 #else /*BNX2X_STOP_ON_ERROR*/
2192 #define LOAD_ERROR_EXIT(bp, label) \
2194 (bp)->state = BNX2X_STATE_ERROR; \
2198 #define LOAD_ERROR_EXIT_CNIC(bp, label) \
2200 bp->cnic_loaded = false; \
2204 #endif /*BNX2X_STOP_ON_ERROR*/
2206 static void bnx2x_free_fw_stats_mem(struct bnx2x *bp)
2208 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
2209 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2213 static int bnx2x_alloc_fw_stats_mem(struct bnx2x *bp)
2215 int num_groups, vf_headroom = 0;
2216 int is_fcoe_stats = NO_FCOE(bp) ? 0 : 1;
2218 /* number of queues for statistics is number of eth queues + FCoE */
2219 u8 num_queue_stats = BNX2X_NUM_ETH_QUEUES(bp) + is_fcoe_stats;
2221 /* Total number of FW statistics requests =
2222 * 1 for port stats + 1 for PF stats + potential 2 for FCoE (fcoe proper
2223 * and fcoe l2 queue) stats + num of queues (which includes another 1
2224 * for fcoe l2 queue if applicable)
2226 bp->fw_stats_num = 2 + is_fcoe_stats + num_queue_stats;
2228 /* vf stats appear in the request list, but their data is allocated by
2229 * the VFs themselves. We don't include them in the bp->fw_stats_num as
2230 * it is used to determine where to place the vf stats queries in the
2234 vf_headroom = bnx2x_vf_headroom(bp);
2236 /* Request is built from stats_query_header and an array of
2237 * stats_query_cmd_group each of which contains
2238 * STATS_QUERY_CMD_COUNT rules. The real number of requests is
2239 * configured in the stats_query_header.
2242 (((bp->fw_stats_num + vf_headroom) / STATS_QUERY_CMD_COUNT) +
2243 (((bp->fw_stats_num + vf_headroom) % STATS_QUERY_CMD_COUNT) ?
2246 DP(BNX2X_MSG_SP, "stats fw_stats_num %d, vf headroom %d, num_groups %d\n",
2247 bp->fw_stats_num, vf_headroom, num_groups);
2248 bp->fw_stats_req_sz = sizeof(struct stats_query_header) +
2249 num_groups * sizeof(struct stats_query_cmd_group);
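/* Equivalent round-up formulation (illustrative, hypothetical counts):
 * with fw_stats_num + vf_headroom == 18 and STATS_QUERY_CMD_COUNT == 16,
 *
 *	num_groups = DIV_ROUND_UP(18, 16);	// 2 groups
 */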
2251 /* Data for statistics requests + stats_counter
2252 * stats_counter holds per-STORM counters that are incremented
2253 * when STORM has finished with the current request.
2254 * memory for FCoE offloaded statistics is counted anyway,
2255 * even if they will not be sent.
2256 * VF stats are not accounted for here as the data of VF stats is stored
2257 * in memory allocated by the VF, not here.
2259 bp->fw_stats_data_sz = sizeof(struct per_port_stats) +
2260 sizeof(struct per_pf_stats) +
2261 sizeof(struct fcoe_statistics_params) +
2262 sizeof(struct per_queue_stats) * num_queue_stats +
2263 sizeof(struct stats_counter);
2265 bp->fw_stats = BNX2X_PCI_ALLOC(&bp->fw_stats_mapping,
2266 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
2271 bp->fw_stats_req = (struct bnx2x_fw_stats_req *)bp->fw_stats;
2272 bp->fw_stats_req_mapping = bp->fw_stats_mapping;
2273 bp->fw_stats_data = (struct bnx2x_fw_stats_data *)
2274 ((u8 *)bp->fw_stats + bp->fw_stats_req_sz);
2275 bp->fw_stats_data_mapping = bp->fw_stats_mapping +
2276 bp->fw_stats_req_sz;
2278 DP(BNX2X_MSG_SP, "statistics request base address set to %x %x\n",
2279 U64_HI(bp->fw_stats_req_mapping),
2280 U64_LO(bp->fw_stats_req_mapping));
2281 DP(BNX2X_MSG_SP, "statistics data base address set to %x %x\n",
2282 U64_HI(bp->fw_stats_data_mapping),
2283 U64_LO(bp->fw_stats_data_mapping));
2287 bnx2x_free_fw_stats_mem(bp);
2288 BNX2X_ERR("Can't allocate FW stats memory\n");
2292 /* send load request to mcp and analyze response */
2293 static int bnx2x_nic_load_request(struct bnx2x *bp, u32 *load_code)
2299 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
2300 DRV_MSG_SEQ_NUMBER_MASK);
2301 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
2303 /* Get current FW pulse sequence */
2304 bp->fw_drv_pulse_wr_seq =
2305 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb) &
2306 DRV_PULSE_SEQ_MASK);
2307 BNX2X_DEV_INFO("drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
2309 param = DRV_MSG_CODE_LOAD_REQ_WITH_LFA;
2311 if (IS_MF_SD(bp) && bnx2x_port_after_undi(bp))
2312 param |= DRV_MSG_CODE_LOAD_REQ_FORCE_LFA;
2315 (*load_code) = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ, param);
2317 /* if mcp fails to respond we must abort */
2318 if (!(*load_code)) {
2319 BNX2X_ERR("MCP response failure, aborting\n");
	/* If MCP refused (e.g. the other port is in diagnostic mode) we must abort */
2326 if ((*load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED) {
2327 BNX2X_ERR("MCP refused load request, aborting\n");
/* Check whether another PF has already loaded FW to the chip. In
 * virtualized environments a PF from another VM may have already
 * initialized the device, including loading FW.
 */
2337 int bnx2x_compare_fw_ver(struct bnx2x *bp, u32 load_code, bool print_err)
2339 /* is another pf loaded on this engine? */
2340 if (load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP &&
2341 load_code != FW_MSG_CODE_DRV_LOAD_COMMON) {
2342 /* build my FW version dword */
2343 u32 my_fw = (BCM_5710_FW_MAJOR_VERSION) +
2344 (BCM_5710_FW_MINOR_VERSION << 8) +
2345 (BCM_5710_FW_REVISION_VERSION << 16) +
2346 (BCM_5710_FW_ENGINEERING_VERSION << 24);
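		/* e.g. a hypothetical FW version 7.13.1.0 would encode as
		 * 0x00010d07: major in the low byte, then minor, revision and
		 * engineering version in successively higher bytes.
		 */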
2348 /* read loaded FW from chip */
2349 u32 loaded_fw = REG_RD(bp, XSEM_REG_PRAM);
2351 DP(BNX2X_MSG_SP, "loaded fw %x, my fw %x\n",
2354 /* abort nic load if version mismatch */
2355 if (my_fw != loaded_fw) {
			BNX2X_ERR("bnx2x with FW %x was already loaded, which mismatches my %x FW. Aborting\n",
			BNX2X_DEV_INFO("bnx2x with FW %x was already loaded, which mismatches my %x FW, possibly due to MF UNDI\n",
2368 /* returns the "mcp load_code" according to global load_count array */
2369 static int bnx2x_nic_load_no_mcp(struct bnx2x *bp, int port)
2371 int path = BP_PATH(bp);
2373 DP(NETIF_MSG_IFUP, "NO MCP - load counts[%d] %d, %d, %d\n",
2374 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2375 bnx2x_load_count[path][2]);
2376 bnx2x_load_count[path][0]++;
2377 bnx2x_load_count[path][1 + port]++;
2378 DP(NETIF_MSG_IFUP, "NO MCP - new load counts[%d] %d, %d, %d\n",
2379 path, bnx2x_load_count[path][0], bnx2x_load_count[path][1],
2380 bnx2x_load_count[path][2]);
2381 if (bnx2x_load_count[path][0] == 1)
2382 return FW_MSG_CODE_DRV_LOAD_COMMON;
2383 else if (bnx2x_load_count[path][1 + port] == 1)
2384 return FW_MSG_CODE_DRV_LOAD_PORT;
2386 return FW_MSG_CODE_DRV_LOAD_FUNCTION;
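/* Example (assumed counters): the first function to load on a path moves
 * the counts from {0, 0, 0} to {1, 1, 0} and gets COMMON; the first
 * function on the path's other port gets PORT; every later one gets
 * FUNCTION.
 */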
2389 /* mark PMF if applicable */
2390 static void bnx2x_nic_load_pmf(struct bnx2x *bp, u32 load_code)
2392 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2393 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
2394 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) {
2396 /* We need the barrier to ensure the ordering between the
2397 * writing to bp->port.pmf here and reading it from the
2398 * bnx2x_periodic_task().
2405 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2408 static void bnx2x_nic_load_afex_dcc(struct bnx2x *bp, int load_code)
2410 if (((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
2411 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP)) &&
2412 (bp->common.shmem2_base)) {
2413 if (SHMEM2_HAS(bp, dcc_support))
2414 SHMEM2_WR(bp, dcc_support,
2415 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
2416 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
2417 if (SHMEM2_HAS(bp, afex_driver_support))
2418 SHMEM2_WR(bp, afex_driver_support,
2419 SHMEM_AFEX_SUPPORTED_VERSION_ONE);
2422 /* Set AFEX default VLAN tag to an invalid value */
2423 bp->afex_def_vlan_tag = -1;
2427 * bnx2x_bz_fp - zero content of the fastpath structure.
2429 * @bp: driver handle
2430 * @index: fastpath index to be zeroed
 * Makes sure the contents of bp->fp[index].napi are kept intact.
2435 static void bnx2x_bz_fp(struct bnx2x *bp, int index)
2437 struct bnx2x_fastpath *fp = &bp->fp[index];
2439 struct napi_struct orig_napi = fp->napi;
2440 struct bnx2x_agg_info *orig_tpa_info = fp->tpa_info;
2442 /* bzero bnx2x_fastpath contents */
2444 memset(fp->tpa_info, 0, ETH_MAX_AGGREGATION_QUEUES_E1H_E2 *
2445 sizeof(struct bnx2x_agg_info));
2446 memset(fp, 0, sizeof(*fp));
2448 /* Restore the NAPI object as it has been already initialized */
2449 fp->napi = orig_napi;
2450 fp->tpa_info = orig_tpa_info;
2454 fp->max_cos = bp->max_cos;
2456 /* Special queues support only one CoS */
2459 /* Init txdata pointers */
2461 fp->txdata_ptr[0] = &bp->bnx2x_txq[FCOE_TXQ_IDX(bp)];
2463 for_each_cos_in_tx_queue(fp, cos)
2464 fp->txdata_ptr[cos] = &bp->bnx2x_txq[cos *
2465 BNX2X_NUM_ETH_QUEUES(bp) + index];
2467 /* set the tpa flag for each queue. The tpa flag determines the queue
2468 * minimal size so it must be set prior to queue memory allocation
2470 fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
2471 (bp->flags & GRO_ENABLE_FLAG &&
2472 bnx2x_mtu_allows_gro(bp->dev->mtu)));
2473 if (bp->flags & TPA_ENABLE_FLAG)
2474 fp->mode = TPA_MODE_LRO;
2475 else if (bp->flags & GRO_ENABLE_FLAG)
2476 fp->mode = TPA_MODE_GRO;
2478 /* We don't want TPA on an FCoE L2 ring */
2480 fp->disable_tpa = 1;
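	/* Summary of the aggregation decision above (sketch):
	 *   TPA_ENABLE_FLAG set                     -> TPA_MODE_LRO
	 *   GRO_ENABLE_FLAG set and MTU allows GRO  -> TPA_MODE_GRO
	 *   neither, or an FCoE L2 ring             -> TPA disabled
	 */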
2483 int bnx2x_load_cnic(struct bnx2x *bp)
2485 int i, rc, port = BP_PORT(bp);
2487 DP(NETIF_MSG_IFUP, "Starting CNIC-related load\n");
2489 mutex_init(&bp->cnic_mutex);
2492 rc = bnx2x_alloc_mem_cnic(bp);
2494 BNX2X_ERR("Unable to allocate bp memory for cnic\n");
2495 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2499 rc = bnx2x_alloc_fp_mem_cnic(bp);
2501 BNX2X_ERR("Unable to allocate memory for cnic fps\n");
2502 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2505 /* Update the number of queues with the cnic queues */
2506 rc = bnx2x_set_real_num_queues(bp, 1);
2508 BNX2X_ERR("Unable to set real_num_queues including cnic\n");
2509 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic0);
2512 /* Add all CNIC NAPI objects */
2513 bnx2x_add_all_napi_cnic(bp);
2514 DP(NETIF_MSG_IFUP, "cnic napi added\n");
2515 bnx2x_napi_enable_cnic(bp);
2517 rc = bnx2x_init_hw_func_cnic(bp);
2519 LOAD_ERROR_EXIT_CNIC(bp, load_error_cnic1);
2521 bnx2x_nic_init_cnic(bp);
2524 /* Enable Timer scan */
2525 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 1);
2527 /* setup cnic queues */
2528 for_each_cnic_queue(bp, i) {
2529 rc = bnx2x_setup_queue(bp, &bp->fp[i], 0);
2531 BNX2X_ERR("Queue setup failed\n");
2532 LOAD_ERROR_EXIT(bp, load_error_cnic2);
2537 /* Initialize Rx filter. */
2538 bnx2x_set_rx_mode_inner(bp);
2540 /* re-read iscsi info */
2541 bnx2x_get_iscsi_info(bp);
2542 bnx2x_setup_cnic_irq_info(bp);
2543 bnx2x_setup_cnic_info(bp);
2544 bp->cnic_loaded = true;
2545 if (bp->state == BNX2X_STATE_OPEN)
2546 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
2548 DP(NETIF_MSG_IFUP, "Ending successfully CNIC-related load\n");
2552 #ifndef BNX2X_STOP_ON_ERROR
2554 /* Disable Timer scan */
2555 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
2558 bnx2x_napi_disable_cnic(bp);
2559 /* Update the number of queues without the cnic queues */
2560 if (bnx2x_set_real_num_queues(bp, 0))
2561 BNX2X_ERR("Unable to set real_num_queues not including cnic\n");
2563 BNX2X_ERR("CNIC-related load failed\n");
2564 bnx2x_free_fp_mem_cnic(bp);
2565 bnx2x_free_mem_cnic(bp);
2567 #endif /* ! BNX2X_STOP_ON_ERROR */
2570 /* must be called with rtnl_lock */
2571 int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
2573 int port = BP_PORT(bp);
2574 int i, rc = 0, load_code = 0;
2576 DP(NETIF_MSG_IFUP, "Starting NIC load\n");
2578 "CNIC is %s\n", CNIC_ENABLED(bp) ? "enabled" : "disabled");
2580 #ifdef BNX2X_STOP_ON_ERROR
2581 if (unlikely(bp->panic)) {
2582 BNX2X_ERR("Can't load NIC when there is panic\n");
2587 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
2589 /* zero the structure w/o any lock, before SP handler is initialized */
2590 memset(&bp->last_reported_link, 0, sizeof(bp->last_reported_link));
2591 __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
2592 &bp->last_reported_link.link_report_flags);
2595 /* must be called before memory allocation and HW init */
2596 bnx2x_ilt_set_info(bp);
 * Zero fastpath structures while preserving invariants that are allocated
 * only once: the napi struct, fp index, max_cos and the bp pointer.
 * Also set fp->disable_tpa and txdata_ptr.
2603 DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
2604 for_each_queue(bp, i)
2606 memset(bp->bnx2x_txq, 0, (BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS +
2607 bp->num_cnic_queues) *
2608 sizeof(struct bnx2x_fp_txdata));
2610 bp->fcoe_init = false;
2612 /* Set the receive queues buffer size */
2613 bnx2x_set_rx_buf_size(bp);
2616 rc = bnx2x_alloc_mem(bp);
2618 BNX2X_ERR("Unable to allocate bp memory\n");
	/* Needs to be done after alloc mem, since it's self-adjusting to the
	 * amount of memory available for RSS queues.
	 */
2626 rc = bnx2x_alloc_fp_mem(bp);
2628 BNX2X_ERR("Unable to allocate memory for fps\n");
2629 LOAD_ERROR_EXIT(bp, load_error0);
	/* Allocate memory for FW statistics */
2633 if (bnx2x_alloc_fw_stats_mem(bp))
2634 LOAD_ERROR_EXIT(bp, load_error0);
2636 /* request pf to initialize status blocks */
2638 rc = bnx2x_vfpf_init(bp);
2640 LOAD_ERROR_EXIT(bp, load_error0);
	/* Since bnx2x_alloc_mem() may update bp->num_queues,
	 * bnx2x_set_real_num_queues() must always come after it.
	 * At this stage cnic queues are not counted.
	 */
2647 rc = bnx2x_set_real_num_queues(bp, 0);
2649 BNX2X_ERR("Unable to set real_num_queues\n");
2650 LOAD_ERROR_EXIT(bp, load_error0);
	/* Configure multi-CoS mappings in the kernel.
	 * This configuration may be overridden by a multi-class queueing
	 * discipline or by a DCBX negotiation result.
	 */
2657 bnx2x_setup_tc(bp->dev, bp->max_cos);
2659 /* Add all NAPI objects */
2660 bnx2x_add_all_napi(bp);
2661 DP(NETIF_MSG_IFUP, "napi added\n");
2662 bnx2x_napi_enable(bp);
2665 /* set pf load just before approaching the MCP */
2666 bnx2x_set_pf_load(bp);
2668 /* if mcp exists send load request and analyze response */
2669 if (!BP_NOMCP(bp)) {
2670 /* attempt to load pf */
2671 rc = bnx2x_nic_load_request(bp, &load_code);
2673 LOAD_ERROR_EXIT(bp, load_error1);
2675 /* what did mcp say? */
2676 rc = bnx2x_compare_fw_ver(bp, load_code, true);
2678 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2679 LOAD_ERROR_EXIT(bp, load_error2);
2682 load_code = bnx2x_nic_load_no_mcp(bp, port);
2685 /* mark pmf if applicable */
2686 bnx2x_nic_load_pmf(bp, load_code);
2688 /* Init Function state controlling object */
2689 bnx2x__init_func_obj(bp);
2692 rc = bnx2x_init_hw(bp, load_code);
2694 BNX2X_ERR("HW init failed, aborting\n");
2695 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2696 LOAD_ERROR_EXIT(bp, load_error2);
2700 bnx2x_pre_irq_nic_init(bp);
2702 /* Connect to IRQs */
2703 rc = bnx2x_setup_irqs(bp);
2705 BNX2X_ERR("setup irqs failed\n");
2707 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2708 LOAD_ERROR_EXIT(bp, load_error2);
2711 /* Init per-function objects */
2713 /* Setup NIC internals and enable interrupts */
2714 bnx2x_post_irq_nic_init(bp, load_code);
2716 bnx2x_init_bp_objs(bp);
2717 bnx2x_iov_nic_init(bp);
2719 /* Set AFEX default VLAN tag to an invalid value */
2720 bp->afex_def_vlan_tag = -1;
2721 bnx2x_nic_load_afex_dcc(bp, load_code);
2722 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
2723 rc = bnx2x_func_start(bp);
2725 BNX2X_ERR("Function start failed!\n");
2726 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
2728 LOAD_ERROR_EXIT(bp, load_error3);
2731 /* Send LOAD_DONE command to MCP */
2732 if (!BP_NOMCP(bp)) {
2733 load_code = bnx2x_fw_command(bp,
2734 DRV_MSG_CODE_LOAD_DONE, 0);
2736 BNX2X_ERR("MCP response failure, aborting\n");
2738 LOAD_ERROR_EXIT(bp, load_error3);
2742 /* initialize FW coalescing state machines in RAM */
2743 bnx2x_update_coalesce(bp);
2746 /* setup the leading queue */
2747 rc = bnx2x_setup_leading(bp);
2749 BNX2X_ERR("Setup leading failed!\n");
2750 LOAD_ERROR_EXIT(bp, load_error3);
2753 /* set up the rest of the queues */
2754 for_each_nondefault_eth_queue(bp, i) {
2756 rc = bnx2x_setup_queue(bp, &bp->fp[i], false);
2758 rc = bnx2x_vfpf_setup_q(bp, &bp->fp[i], false);
2760 BNX2X_ERR("Queue %d setup failed\n", i);
2761 LOAD_ERROR_EXIT(bp, load_error3);
2766 rc = bnx2x_init_rss(bp);
2768 BNX2X_ERR("PF RSS init failed\n");
2769 LOAD_ERROR_EXIT(bp, load_error3);
	/* Now that clients are configured we are ready to work */
2773 bp->state = BNX2X_STATE_OPEN;
2775 /* Configure a ucast MAC */
2777 rc = bnx2x_set_eth_mac(bp, true);
2779 rc = bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr, bp->fp->index,
2782 BNX2X_ERR("Setting Ethernet MAC failed\n");
2783 LOAD_ERROR_EXIT(bp, load_error3);
2786 if (IS_PF(bp) && bp->pending_max) {
2787 bnx2x_update_max_mf_config(bp, bp->pending_max);
2788 bp->pending_max = 0;
2792 rc = bnx2x_initial_phy_init(bp, load_mode);
2794 LOAD_ERROR_EXIT(bp, load_error3);
2796 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_BOOT_FROM_SAN;
2798 /* Start fast path */
2800 /* Initialize Rx filter. */
2801 bnx2x_set_rx_mode_inner(bp);
2804 switch (load_mode) {
		/* Tx queues should only be re-enabled */
2807 netif_tx_wake_all_queues(bp->dev);
2811 netif_tx_start_all_queues(bp->dev);
2812 smp_mb__after_atomic();
2816 case LOAD_LOOPBACK_EXT:
2817 bp->state = BNX2X_STATE_DIAG;
2825 bnx2x_update_drv_flags(bp, 1 << DRV_FLAGS_PORT_MASK, 0);
2827 bnx2x__link_status_update(bp);
2829 /* start the timer */
2830 mod_timer(&bp->timer, jiffies + bp->current_interval);
2832 if (CNIC_ENABLED(bp))
2833 bnx2x_load_cnic(bp);
2836 bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_GET_DRV_VERSION, 0);
2838 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2839 /* mark driver is loaded in shmem2 */
2841 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2842 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2843 val | DRV_FLAGS_CAPABILITIES_LOADED_SUPPORTED |
2844 DRV_FLAGS_CAPABILITIES_LOADED_L2);
2847 /* Wait for all pending SP commands to complete */
2848 if (IS_PF(bp) && !bnx2x_wait_sp_comp(bp, ~0x0UL)) {
2849 BNX2X_ERR("Timeout waiting for SP elements to complete\n");
2850 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
2854 /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */
2855 if (bp->port.pmf && (bp->state != BNX2X_STATE_DIAG))
2856 bnx2x_dcbx_init(bp, false);
	DP(NETIF_MSG_IFUP, "NIC load completed successfully\n");
2862 #ifndef BNX2X_STOP_ON_ERROR
2865 bnx2x_int_disable_sync(bp, 1);
2867 /* Clean queueable objects */
2868 bnx2x_squeeze_objects(bp);
2871 /* Free SKBs, SGEs, TPA pool and driver internals */
2872 bnx2x_free_skbs(bp);
2873 for_each_rx_queue(bp, i)
2874 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2879 if (IS_PF(bp) && !BP_NOMCP(bp)) {
2880 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
2881 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
2886 bnx2x_napi_disable(bp);
2887 bnx2x_del_all_napi(bp);
2889 /* clear pf_load status, as it was already set */
2891 bnx2x_clear_pf_load(bp);
2893 bnx2x_free_fw_stats_mem(bp);
2894 bnx2x_free_fp_mem(bp);
2898 #endif /* ! BNX2X_STOP_ON_ERROR */
2901 int bnx2x_drain_tx_queues(struct bnx2x *bp)
2905 /* Wait until tx fastpath tasks complete */
2906 for_each_tx_queue(bp, i) {
2907 struct bnx2x_fastpath *fp = &bp->fp[i];
2909 for_each_cos_in_tx_queue(fp, cos)
2910 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
2917 /* must be called with rtnl_lock */
2918 int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode, bool keep_link)
2921 bool global = false;
	DP(NETIF_MSG_IFDOWN, "Starting NIC unload\n");
2925 /* mark driver is unloaded in shmem2 */
2926 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
2928 val = SHMEM2_RD(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
2929 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
2930 val & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
2933 if (IS_PF(bp) && bp->recovery_state != BNX2X_RECOVERY_DONE &&
2934 (bp->state == BNX2X_STATE_CLOSED ||
2935 bp->state == BNX2X_STATE_ERROR)) {
2936 /* We can get here if the driver has been unloaded
2937 * during parity error recovery and is either waiting for a
2938 * leader to complete or for other functions to unload and
2939 * then ifdown has been issued. In this case we want to
		 * unload and let other functions complete a recovery
2943 bp->recovery_state = BNX2X_RECOVERY_DONE;
2945 bnx2x_release_leader_lock(bp);
2948 DP(NETIF_MSG_IFDOWN, "Releasing a leadership...\n");
2949 BNX2X_ERR("Can't unload in closed or error state\n");
	/* Nothing to do during unload if the previous bnx2x_nic_load()
	 * has not completed successfully - all resources are released.
	 *
	 * We can get here only after an unsuccessful ndo_* callback, during
	 * which the dev->IFF_UP flag is still on.
	 */
2959 if (bp->state == BNX2X_STATE_CLOSED || bp->state == BNX2X_STATE_ERROR)
	/* It's important to set bp->state to a value different from
2963 * BNX2X_STATE_OPEN and only then stop the Tx. Otherwise bnx2x_tx_int()
2964 * may restart the Tx from the NAPI context (see bnx2x_tx_int()).
2966 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
2969 /* indicate to VFs that the PF is going down */
2970 bnx2x_iov_channel_down(bp);
2972 if (CNIC_LOADED(bp))
2973 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
2976 bnx2x_tx_disable(bp);
2977 netdev_reset_tc(bp->dev);
2979 bp->rx_mode = BNX2X_RX_MODE_NONE;
2981 del_timer_sync(&bp->timer);
2984 /* Set ALWAYS_ALIVE bit in shmem */
2985 bp->fw_drv_pulse_wr_seq |= DRV_PULSE_ALWAYS_ALIVE;
2986 bnx2x_drv_pulse(bp);
2987 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2988 bnx2x_save_statistics(bp);
2991 /* wait till consumers catch up with producers in all queues */
2992 bnx2x_drain_tx_queues(bp);
	/* if VF, indicate to the PF that this function is going down (the PF
	 * will delete sp elements and clear initializations)
	 */
2998 bnx2x_vfpf_close_vf(bp);
2999 else if (unload_mode != UNLOAD_RECOVERY)
3000 /* if this is a normal/close unload need to clean up chip*/
3001 bnx2x_chip_cleanup(bp, unload_mode, keep_link);
3003 /* Send the UNLOAD_REQUEST to the MCP */
3004 bnx2x_send_unload_req(bp, unload_mode);
	/* Prevent transactions to the host from the functions on the
	 * engine that doesn't reset global blocks in case of global
	 * attention once global blocks are reset and gates are opened
	 * (the engine whose leader will perform the recovery last).
	 */
3012 if (!CHIP_IS_E1x(bp))
3013 bnx2x_pf_disable(bp);
3015 /* Disable HW interrupts, NAPI */
3016 bnx2x_netif_stop(bp, 1);
3017 /* Delete all NAPI objects */
3018 bnx2x_del_all_napi(bp);
3019 if (CNIC_LOADED(bp))
3020 bnx2x_del_all_napi_cnic(bp);
3024 /* Report UNLOAD_DONE to MCP */
3025 bnx2x_send_unload_done(bp, false);
3029 * At this stage no more interrupts will arrive so we may safely clean
3030 * the queueable objects here in case they failed to get cleaned so far.
3033 bnx2x_squeeze_objects(bp);
3035 /* There should be no more pending SP commands at this stage */
3040 /* clear pending work in rtnl task */
3041 bp->sp_rtnl_state = 0;
3044 /* Free SKBs, SGEs, TPA pool and driver internals */
3045 bnx2x_free_skbs(bp);
3046 if (CNIC_LOADED(bp))
3047 bnx2x_free_skbs_cnic(bp);
3048 for_each_rx_queue(bp, i)
3049 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
3051 bnx2x_free_fp_mem(bp);
3052 if (CNIC_LOADED(bp))
3053 bnx2x_free_fp_mem_cnic(bp);
3056 if (CNIC_LOADED(bp))
3057 bnx2x_free_mem_cnic(bp);
3061 bp->state = BNX2X_STATE_CLOSED;
3062 bp->cnic_loaded = false;
3064 /* Clear driver version indication in shmem */
3066 bnx2x_update_mng_version(bp);
3068 /* Check if there are pending parity attentions. If there are - set
3069 * RECOVERY_IN_PROGRESS.
3071 if (IS_PF(bp) && bnx2x_chk_parity_attn(bp, &global, false)) {
3072 bnx2x_set_reset_in_progress(bp);
3074 /* Set RESET_IS_GLOBAL if needed */
3076 bnx2x_set_reset_global(bp);
	/* The last driver must disable the "close the gate" functionality if
	 * there is no parity attention or "process kill" pending.
	 */
3083 !bnx2x_clear_pf_load(bp) &&
3084 bnx2x_reset_is_done(bp, BP_PATH(bp)))
3085 bnx2x_disable_close_the_gate(bp);
	DP(NETIF_MSG_IFDOWN, "Ending NIC unload\n");
3092 int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
3096 /* If there is no power capability, silently succeed */
3097 if (!bp->pdev->pm_cap) {
3098 BNX2X_DEV_INFO("No power capability. Breaking.\n");
3102 pci_read_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL, &pmcsr);
3106 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3107 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3108 PCI_PM_CTRL_PME_STATUS));
3110 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3111 /* delay required during transition out of D3hot */
		/* If there are other clients above, don't shut down the power */
3118 if (atomic_read(&bp->pdev->enable_cnt) != 1)
3120 /* Don't shut down the power for emulation and FPGA */
3121 if (CHIP_REV_IS_SLOW(bp))
3124 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3128 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3130 pci_write_config_word(bp->pdev, bp->pdev->pm_cap + PCI_PM_CTRL,
3133 /* No more memory access after this point until
3134 * device is brought back to D0.
3139 dev_err(&bp->pdev->dev, "Can't support state = %d\n", state);
/*
 * net_device service functions
 */
3148 static int bnx2x_poll(struct napi_struct *napi, int budget)
3152 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3154 struct bnx2x *bp = fp->bp;
3157 #ifdef BNX2X_STOP_ON_ERROR
3158 if (unlikely(bp->panic)) {
3159 napi_complete(napi);
3163 if (!bnx2x_fp_lock_napi(fp))
3166 for_each_cos_in_tx_queue(fp, cos)
3167 if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
3168 bnx2x_tx_int(bp, fp->txdata_ptr[cos]);
3170 if (bnx2x_has_rx_work(fp)) {
3171 work_done += bnx2x_rx_int(fp, budget - work_done);
3173 /* must not complete if we consumed full budget */
3174 if (work_done >= budget) {
3175 bnx2x_fp_unlock_napi(fp);
3180 /* Fall out from the NAPI loop if needed */
3181 if (!bnx2x_fp_unlock_napi(fp) &&
3182 !(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3184 /* No need to update SB for FCoE L2 ring as long as
3185 * it's connected to the default SB and the SB
3186 * has been updated when NAPI was scheduled.
3188 if (IS_FCOE_FP(fp)) {
3189 napi_complete(napi);
3192 bnx2x_update_fpsb_idx(fp);
3193 /* bnx2x_has_rx_work() reads the status block,
3194 * thus we need to ensure that status block indices
3195 * have been actually read (bnx2x_update_fpsb_idx)
3196 * prior to this check (bnx2x_has_rx_work) so that
3197 * we won't write the "newer" value of the status block
3198 * to IGU (if there was a DMA right after
3199 * bnx2x_has_rx_work and if there is no rmb, the memory
3200 * reading (bnx2x_update_fpsb_idx) may be postponed
3201 * to right before bnx2x_ack_sb). In this case there
3202 * will never be another interrupt until there is
3203 * another update of the status block, while there
3204 * is still unhandled work.
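		 *
		 * Sketch of the ordering being enforced (illustrative):
		 *   1. bnx2x_update_fpsb_idx()  - read the status block indices
		 *   2. rmb()                    - order the reads
		 *   3. has_rx_work/has_tx_work  - re-check for new work
		 *   4. bnx2x_ack_sb()           - ack the SB / re-enable interrupts
		 * Reordering step 1 past step 3 could ack a final status block
		 * update that is never processed.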
3208 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
3209 napi_complete(napi);
3210 /* Re-enable interrupts */
3211 DP(NETIF_MSG_RX_STATUS,
3212 "Update index to %d\n", fp->fp_hc_idx);
3213 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID,
3214 le16_to_cpu(fp->fp_hc_idx),
3224 #ifdef CONFIG_NET_RX_BUSY_POLL
3225 /* must be called with local_bh_disable()d */
3226 int bnx2x_low_latency_recv(struct napi_struct *napi)
3228 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
3230 struct bnx2x *bp = fp->bp;
3233 if ((bp->state == BNX2X_STATE_CLOSED) ||
3234 (bp->state == BNX2X_STATE_ERROR) ||
3235 (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
3236 return LL_FLUSH_FAILED;
3238 if (!bnx2x_fp_lock_poll(fp))
3239 return LL_FLUSH_BUSY;
3241 if (bnx2x_has_rx_work(fp))
3242 found = bnx2x_rx_int(fp, 4);
3244 bnx2x_fp_unlock_poll(fp);
/* We split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 */
3254 static u16 bnx2x_tx_split(struct bnx2x *bp,
3255 struct bnx2x_fp_txdata *txdata,
3256 struct sw_tx_bd *tx_buf,
3257 struct eth_tx_start_bd **tx_bd, u16 hlen,
3260 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
3261 struct eth_tx_bd *d_tx_bd;
3263 int old_len = le16_to_cpu(h_tx_bd->nbytes);
3265 /* first fix first BD */
3266 h_tx_bd->nbytes = cpu_to_le16(hlen);
3268 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d (%x:%x)\n",
3269 h_tx_bd->nbytes, h_tx_bd->addr_hi, h_tx_bd->addr_lo);
3271 /* now get a new data BD
3272 * (after the pbd) and fill it */
3273 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3274 d_tx_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
3276 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
3277 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
3279 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3280 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3281 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
3283 /* this marks the BD as one that has no individual mapping */
3284 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
3286 DP(NETIF_MSG_TX_QUEUED,
3287 "TSO split data size is %d (%x:%x)\n",
3288 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
3291 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
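/* Resulting BD chain for a split packet (sketch):
 *   [start BD: hlen bytes @ mapping] -> [data BD: old_len - hlen bytes
 *   @ mapping + hlen] -> fragment BDs ...
 * Both BDs reference the same DMA mapping; BNX2X_TSO_SPLIT_BD tells the
 * free path not to unmap the data BD separately.
 */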
3296 #define bswab32(b32) ((__force __le32) swab32((__force __u32) (b32)))
3297 #define bswab16(b16) ((__force __le16) swab16((__force __u16) (b16)))
3298 static __le16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
3300 __sum16 tsum = (__force __sum16) csum;
3303 tsum = ~csum_fold(csum_sub((__force __wsum) csum,
3304 csum_partial(t_header - fix, fix, 0)));
3307 tsum = ~csum_fold(csum_add((__force __wsum) csum,
3308 csum_partial(t_header, -fix, 0)));
3310 return bswab16(tsum);
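/* Sketch of the fixup above: with fix > 0 the HW checksum covered 'fix'
 * extra bytes before t_header, so their partial sum is subtracted; with
 * fix < 0 it missed '-fix' bytes, which are added back. The result is
 * byte-swapped into the big-endian order the parse BD expects.
 */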
3313 static u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
3319 if (skb->ip_summed != CHECKSUM_PARTIAL)
3322 protocol = vlan_get_protocol(skb);
3323 if (protocol == htons(ETH_P_IPV6)) {
3325 prot = ipv6_hdr(skb)->nexthdr;
3328 prot = ip_hdr(skb)->protocol;
3331 if (!CHIP_IS_E1x(bp) && skb->encapsulation) {
3332 if (inner_ip_hdr(skb)->version == 6) {
3333 rc |= XMIT_CSUM_ENC_V6;
3334 if (inner_ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
3335 rc |= XMIT_CSUM_TCP;
3337 rc |= XMIT_CSUM_ENC_V4;
3338 if (inner_ip_hdr(skb)->protocol == IPPROTO_TCP)
3339 rc |= XMIT_CSUM_TCP;
3342 if (prot == IPPROTO_TCP)
3343 rc |= XMIT_CSUM_TCP;
3345 if (skb_is_gso(skb)) {
3346 if (skb_is_gso_v6(skb)) {
3347 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP);
3348 if (rc & XMIT_CSUM_ENC)
3349 rc |= XMIT_GSO_ENC_V6;
3351 rc |= (XMIT_GSO_V4 | XMIT_CSUM_TCP);
3352 if (rc & XMIT_CSUM_ENC)
3353 rc |= XMIT_GSO_ENC_V4;
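	/* Illustrative composition (hypothetical skb): a tunneled TCP/IPv4 GSO
	 * packet would accumulate XMIT_CSUM_ENC_V4 | XMIT_CSUM_TCP from the
	 * checksum checks above and then XMIT_GSO_V4 | XMIT_GSO_ENC_V4 from
	 * the GSO checks.
	 */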
3360 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* Check if the packet requires linearization (packet is too fragmented).
 * No need to check fragmentation if page size > 8K (there will be no
 * violation of FW restrictions).
 */
3364 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
3369 int first_bd_sz = 0;
3371 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
3372 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
3374 if (xmit_type & XMIT_GSO) {
3375 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
		/* Check if LSO packet needs to be copied:
		 * 3 = 1 (for headers BD) + 2 (for PBD and last BD)
		 */
3378 int wnd_size = MAX_FETCH_BD - 3;
3379 /* Number of windows to check */
3380 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
3385 /* Headers length */
3386 hlen = (int)(skb_transport_header(skb) - skb->data) +
		/* Amount of data (w/o headers) on the linear part of the SKB */
3390 first_bd_sz = skb_headlen(skb) - hlen;
3392 wnd_sum = first_bd_sz;
3394 /* Calculate the first sum - it's special */
3395 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
3397 skb_frag_size(&skb_shinfo(skb)->frags[frag_idx]);
		/* If there was data in the linear part of the skb - check it */
3400 if (first_bd_sz > 0) {
3401 if (unlikely(wnd_sum < lso_mss)) {
3406 wnd_sum -= first_bd_sz;
		/* Others are easier: run through the frag list and
		 * check all windows
		 */
3411 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
3413 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1]);
3415 if (unlikely(wnd_sum < lso_mss)) {
3420 skb_frag_size(&skb_shinfo(skb)->frags[wnd_idx]);
		/* in the non-LSO case a too fragmented packet should always be linearized */
3430 if (unlikely(to_copy))
3431 DP(NETIF_MSG_TX_QUEUED,
3432 "Linearization IS REQUIRED for %s packet. num_frags %d hlen %d first_bd_sz %d\n",
3433 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
3434 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
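/* Worked example (assumed geometry): with MAX_FETCH_BD = 13 the window
 * size is 10 BDs; if any 10 consecutive BDs of an LSO skb sum to less
 * than one MSS, the FW fetch rules would be violated, so the sliding
 * window checks above request linearization.
 */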
3440 static void bnx2x_set_pbd_gso_e2(struct sk_buff *skb, u32 *parsing_data,
3443 struct ipv6hdr *ipv6;
3445 *parsing_data |= (skb_shinfo(skb)->gso_size <<
3446 ETH_TX_PARSE_BD_E2_LSO_MSS_SHIFT) &
3447 ETH_TX_PARSE_BD_E2_LSO_MSS;
3449 if (xmit_type & XMIT_GSO_ENC_V6)
3450 ipv6 = inner_ipv6_hdr(skb);
3451 else if (xmit_type & XMIT_GSO_V6)
3452 ipv6 = ipv6_hdr(skb);
3456 if (ipv6 && ipv6->nexthdr == NEXTHDR_IPV6)
3457 *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR;
3461 * bnx2x_set_pbd_gso - update PBD in GSO case.
3465 * @xmit_type: xmit flags
3467 static void bnx2x_set_pbd_gso(struct sk_buff *skb,
3468 struct eth_tx_parse_bd_e1x *pbd,
3469 struct eth_tx_start_bd *tx_start_bd,
3472 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
3473 pbd->tcp_send_seq = bswab32(tcp_hdr(skb)->seq);
3474 pbd->tcp_flags = pbd_tcp_flags(tcp_hdr(skb));
3476 if (xmit_type & XMIT_GSO_V4) {
3477 pbd->ip_id = bswab16(ip_hdr(skb)->id);
3478 pbd->tcp_pseudo_csum =
3479 bswab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
3481 0, IPPROTO_TCP, 0));
3483 /* GSO on 57710/57711 needs FW to calculate IP checksum */
3484 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
3486 pbd->tcp_pseudo_csum =
3487 bswab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
3488 &ipv6_hdr(skb)->daddr,
3489 0, IPPROTO_TCP, 0));
3493 cpu_to_le16(ETH_TX_PARSE_BD_E1X_PSEUDO_CS_WITHOUT_LEN);
3497 * bnx2x_set_pbd_csum_enc - update PBD with checksum and return header length
3499 * @bp: driver handle
3501 * @parsing_data: data to be updated
3502 * @xmit_type: xmit flags
3504 * 57712/578xx related, when skb has encapsulation
3506 static u8 bnx2x_set_pbd_csum_enc(struct bnx2x *bp, struct sk_buff *skb,
3507 u32 *parsing_data, u32 xmit_type)
3510 ((((u8 *)skb_inner_transport_header(skb) - skb->data) >> 1) <<
3511 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3512 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3514 if (xmit_type & XMIT_CSUM_TCP) {
3515 *parsing_data |= ((inner_tcp_hdrlen(skb) / 4) <<
3516 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3517 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3519 return skb_inner_transport_header(skb) +
3520 inner_tcp_hdrlen(skb) - skb->data;
3523 /* We support checksum offload for TCP and UDP only.
3524 * No need to pass the UDP header length - it's a constant.
3526 return skb_inner_transport_header(skb) +
3527 sizeof(struct udphdr) - skb->data;
3531 * bnx2x_set_pbd_csum_e2 - update PBD with checksum and return header length
3533 * @bp: driver handle
3535 * @parsing_data: data to be updated
3536 * @xmit_type: xmit flags
3538 * 57712/578xx related
3540 static u8 bnx2x_set_pbd_csum_e2(struct bnx2x *bp, struct sk_buff *skb,
3541 u32 *parsing_data, u32 xmit_type)
3544 ((((u8 *)skb_transport_header(skb) - skb->data) >> 1) <<
3545 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W_SHIFT) &
3546 ETH_TX_PARSE_BD_E2_L4_HDR_START_OFFSET_W;
3548 if (xmit_type & XMIT_CSUM_TCP) {
3549 *parsing_data |= ((tcp_hdrlen(skb) / 4) <<
3550 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW_SHIFT) &
3551 ETH_TX_PARSE_BD_E2_TCP_HDR_LENGTH_DW;
3553 return skb_transport_header(skb) + tcp_hdrlen(skb) - skb->data;
3555 /* We support checksum offload for TCP and UDP only.
3556 * No need to pass the UDP header length - it's a constant.
3558 return skb_transport_header(skb) + sizeof(struct udphdr) - skb->data;
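/* e.g. (assumed plain UDP/IPv4 frame): the value returned above is
 * 14 + 20 + 8 = 42 bytes, while the L4 start offset stored in
 * parsing_data is counted in 16-bit words (and the TCP header length,
 * when present, in dwords).
 */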
3561 /* set FW indication according to inner or outer protocols if tunneled */
3562 static void bnx2x_set_sbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3563 struct eth_tx_start_bd *tx_start_bd,
3566 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
3568 if (xmit_type & (XMIT_CSUM_ENC_V6 | XMIT_CSUM_V6))
3569 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
3571 if (!(xmit_type & XMIT_CSUM_TCP))
3572 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IS_UDP;
3576 * bnx2x_set_pbd_csum - update PBD with checksum and return header length
3578 * @bp: driver handle
3580 * @pbd: parse BD to be updated
3581 * @xmit_type: xmit flags
3583 static u8 bnx2x_set_pbd_csum(struct bnx2x *bp, struct sk_buff *skb,
3584 struct eth_tx_parse_bd_e1x *pbd,
3587 u8 hlen = (skb_network_header(skb) - skb->data) >> 1;
3589 /* for now NS flag is not used in Linux */
3592 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3593 ETH_TX_PARSE_BD_E1X_LLC_SNAP_EN_SHIFT));
3595 pbd->ip_hlen_w = (skb_transport_header(skb) -
3596 skb_network_header(skb)) >> 1;
3598 hlen += pbd->ip_hlen_w;
3600 /* We support checksum offload for TCP and UDP only */
3601 if (xmit_type & XMIT_CSUM_TCP)
3602 hlen += tcp_hdrlen(skb) / 2;
3604 hlen += sizeof(struct udphdr) / 2;
3606 pbd->total_hlen_w = cpu_to_le16(hlen);
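	/* e.g. (assumed frame, no options): 14-byte Ethernet + 20-byte IPv4 +
	 * 20-byte TCP gives hlen = 7 + 10 + 10 = 27, since every length in
	 * this PBD is counted in 16-bit words.
	 */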
3609 if (xmit_type & XMIT_CSUM_TCP) {
3610 pbd->tcp_pseudo_csum = bswab16(tcp_hdr(skb)->check);
3613 s8 fix = SKB_CS_OFF(skb); /* signed! */
3615 DP(NETIF_MSG_TX_QUEUED,
3616 "hlen %d fix %d csum before fix %x\n",
3617 le16_to_cpu(pbd->total_hlen_w), fix, SKB_CS(skb));
3619 /* HW bug: fixup the CSUM */
3620 pbd->tcp_pseudo_csum =
3621 bnx2x_csum_fix(skb_transport_header(skb),
3624 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
3625 pbd->tcp_pseudo_csum);
3631 static void bnx2x_update_pbds_gso_enc(struct sk_buff *skb,
3632 struct eth_tx_parse_bd_e2 *pbd_e2,
3633 struct eth_tx_parse_2nd_bd *pbd2,
3638 u8 outerip_off, outerip_len = 0;
3640 /* from outer IP to transport */
3641 hlen_w = (skb_inner_transport_header(skb) -
3642 skb_network_header(skb)) >> 1;
3645 hlen_w += inner_tcp_hdrlen(skb) >> 1;
3647 pbd2->fw_ip_hdr_to_payload_w = hlen_w;
3649 /* outer IP header info */
3650 if (xmit_type & XMIT_CSUM_V4) {
3651 struct iphdr *iph = ip_hdr(skb);
3652 u32 csum = (__force u32)(~iph->check) -
3653 (__force u32)iph->tot_len -
3654 (__force u32)iph->frag_off;
3656 pbd2->fw_ip_csum_wo_len_flags_frag =
3657 bswab16(csum_fold((__force __wsum)csum));
3659 pbd2->fw_ip_hdr_to_payload_w =
3660 hlen_w - ((sizeof(struct ipv6hdr)) >> 1);
3663 pbd2->tcp_send_seq = bswab32(inner_tcp_hdr(skb)->seq);
3665 pbd2->tcp_flags = pbd_tcp_flags(inner_tcp_hdr(skb));
3667 if (xmit_type & XMIT_GSO_V4) {
3668 pbd2->hw_ip_id = bswab16(inner_ip_hdr(skb)->id);
3670 pbd_e2->data.tunnel_data.pseudo_csum =
3671 bswab16(~csum_tcpudp_magic(
3672 inner_ip_hdr(skb)->saddr,
3673 inner_ip_hdr(skb)->daddr,
3674 0, IPPROTO_TCP, 0));
3676 outerip_len = ip_hdr(skb)->ihl << 1;
3678 pbd_e2->data.tunnel_data.pseudo_csum =
3679 bswab16(~csum_ipv6_magic(
3680 &inner_ipv6_hdr(skb)->saddr,
3681 &inner_ipv6_hdr(skb)->daddr,
3682 0, IPPROTO_TCP, 0));
3685 outerip_off = (skb_network_header(skb) - skb->data) >> 1;
3689 (!!(xmit_type & XMIT_CSUM_V6) <<
3690 ETH_TX_PARSE_2ND_BD_IP_HDR_TYPE_OUTER_SHIFT) |
3692 ETH_TX_PARSE_2ND_BD_IP_HDR_LEN_OUTER_W_SHIFT) |
3693 ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
3694 ETH_TX_PARSE_2ND_BD_LLC_SNAP_EN_SHIFT);
3696 if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
3697 SET_FLAG(*global_data, ETH_TX_PARSE_2ND_BD_TUNNEL_UDP_EXIST, 1);
3698 pbd2->tunnel_udp_hdr_start_w = skb_transport_offset(skb) >> 1;
3702 /* called with netif_tx_lock
3703 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
3704 * netif_wake_queue()
3706 netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
3708 struct bnx2x *bp = netdev_priv(dev);
3710 struct netdev_queue *txq;
3711 struct bnx2x_fp_txdata *txdata;
3712 struct sw_tx_bd *tx_buf;
3713 struct eth_tx_start_bd *tx_start_bd, *first_bd;
3714 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
3715 struct eth_tx_parse_bd_e1x *pbd_e1x = NULL;
3716 struct eth_tx_parse_bd_e2 *pbd_e2 = NULL;
3717 struct eth_tx_parse_2nd_bd *pbd2 = NULL;
3718 u32 pbd_e2_parsing_data = 0;
3719 u16 pkt_prod, bd_prod;
3722 u32 xmit_type = bnx2x_xmit_type(bp, skb);
3725 __le16 pkt_size = 0;
3727 u8 mac_type = UNICAST_ADDRESS;
3729 #ifdef BNX2X_STOP_ON_ERROR
3730 if (unlikely(bp->panic))
3731 return NETDEV_TX_BUSY;
3734 txq_index = skb_get_queue_mapping(skb);
3735 txq = netdev_get_tx_queue(dev, txq_index);
3737 BUG_ON(txq_index >= MAX_ETH_TXQ_IDX(bp) + (CNIC_LOADED(bp) ? 1 : 0));
3739 txdata = &bp->bnx2x_txq[txq_index];
3741 /* enable this debug print to view the transmission queue being used
3742 DP(NETIF_MSG_TX_QUEUED, "indices: txq %d, fp %d, txdata %d\n",
3743 txq_index, fp_index, txdata_index); */
3745 /* enable this debug print to view the transmission details
3746 DP(NETIF_MSG_TX_QUEUED,
3747 "transmitting packet cid %d fp index %d txdata_index %d tx_data ptr %p fp pointer %p\n",
3748 txdata->cid, fp_index, txdata_index, txdata, fp); */
3750 if (unlikely(bnx2x_tx_avail(bp, txdata) <
3751 skb_shinfo(skb)->nr_frags +
3753 NEXT_CNT_PER_TX_PKT(MAX_BDS_PER_TX_PKT))) {
3754 /* Handle special storage cases separately */
3755 if (txdata->tx_ring_size == 0) {
3756 struct bnx2x_eth_q_stats *q_stats =
3757 bnx2x_fp_qstats(bp, txdata->parent_fp);
3758 q_stats->driver_filtered_tx_pkt++;
3760 return NETDEV_TX_OK;
3762 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
3763 netif_tx_stop_queue(txq);
3764 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
3766 return NETDEV_TX_BUSY;
3769 DP(NETIF_MSG_TX_QUEUED,
3770 "queue[%d]: SKB: summed %x protocol %x protocol(%x,%x) gso type %x xmit_type %x len %d\n",
3771 txq_index, skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
3772 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type,
3775 eth = (struct ethhdr *)skb->data;
	/* set flag according to packet type (UNICAST_ADDRESS is default) */
3778 if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
3779 if (is_broadcast_ether_addr(eth->h_dest))
3780 mac_type = BROADCAST_ADDRESS;
3782 mac_type = MULTICAST_ADDRESS;
3785 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - BDS_PER_TX_PKT)
	/* First, check if we need to linearize the skb (due to FW
	 * restrictions). No need to check fragmentation if page size > 8K
	 * (there will be no violation of FW restrictions).
	 */
3789 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
3790 /* Statistics of linearization */
3792 if (skb_linearize(skb) != 0) {
3793 DP(NETIF_MSG_TX_QUEUED,
3794 "SKB linearization failed - silently dropping this SKB\n");
3795 dev_kfree_skb_any(skb);
3796 return NETDEV_TX_OK;
3800 /* Map skb linear data for DMA */
3801 mapping = dma_map_single(&bp->pdev->dev, skb->data,
3802 skb_headlen(skb), DMA_TO_DEVICE);
3803 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
3804 DP(NETIF_MSG_TX_QUEUED,
3805 "SKB mapping failed - silently dropping this SKB\n");
3806 dev_kfree_skb_any(skb);
3807 return NETDEV_TX_OK;
/* Please read carefully. First we use one BD which we mark as start,
 * then we have a parsing info BD (used for TSO or xsum),
 * and only then we have the rest of the TSO BDs.
 * (don't forget to mark the last one as last,
 * and to unmap only AFTER you write to the BD ...)
 * And above all, all pbd sizes are in words - NOT DWORDS!
 */
3818 /* get current pkt produced now - advance it just before sending packet
3819 * since mapping of pages may fail and cause packet to be dropped
3821 pkt_prod = txdata->tx_pkt_prod;
3822 bd_prod = TX_BD(txdata->tx_bd_prod);
3824 /* get a tx_buf and first BD
3825 * tx_start_bd may be changed during SPLIT,
3826 * but first_bd will always stay first
3828 tx_buf = &txdata->tx_buf_ring[TX_BD(pkt_prod)];
3829 tx_start_bd = &txdata->tx_desc_ring[bd_prod].start_bd;
3830 first_bd = tx_start_bd;
3832 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
3834 /* header nbd: indirectly zero other flags! */
3835 tx_start_bd->general_data = 1 << ETH_TX_START_BD_HDR_NBDS_SHIFT;
3837 /* remember the first BD of the packet */
3838 tx_buf->first_bd = txdata->tx_bd_prod;
3842 DP(NETIF_MSG_TX_QUEUED,
3843 "sending pkt %u @%p next_idx %u bd %u @%p\n",
3844 pkt_prod, tx_buf, txdata->tx_pkt_prod, bd_prod, tx_start_bd);
3846 if (vlan_tx_tag_present(skb)) {
3847 tx_start_bd->vlan_or_ethertype =
3848 cpu_to_le16(vlan_tx_tag_get(skb));
3849 tx_start_bd->bd_flags.as_bitfield |=
3850 (X_ETH_OUTBAND_VLAN << ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT);
3852 /* when transmitting in a vf, start bd must hold the ethertype
3853 * for fw to enforce it
3856 tx_start_bd->vlan_or_ethertype =
3857 cpu_to_le16(ntohs(eth->h_proto));
3859 /* used by FW for packet accounting */
3860 tx_start_bd->vlan_or_ethertype = cpu_to_le16(pkt_prod);
3863 nbd = 2; /* start_bd + pbd + frags (updated when pages are mapped) */
3865 /* turn on parsing and get a BD */
3866 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3868 if (xmit_type & XMIT_CSUM)
3869 bnx2x_set_sbd_csum(bp, skb, tx_start_bd, xmit_type);
3871 if (!CHIP_IS_E1x(bp)) {
3872 pbd_e2 = &txdata->tx_desc_ring[bd_prod].parse_bd_e2;
3873 memset(pbd_e2, 0, sizeof(struct eth_tx_parse_bd_e2));
3875 if (xmit_type & XMIT_CSUM_ENC) {
3876 u16 global_data = 0;
3878 /* Set PBD in enc checksum offload case */
3879 hlen = bnx2x_set_pbd_csum_enc(bp, skb,
3880 &pbd_e2_parsing_data,
3883 /* turn on 2nd parsing and get a BD */
3884 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
3886 pbd2 = &txdata->tx_desc_ring[bd_prod].parse_2nd_bd;
3888 memset(pbd2, 0, sizeof(*pbd2));
3890 pbd_e2->data.tunnel_data.ip_hdr_start_inner_w =
3891 (skb_inner_network_header(skb) -
3894 if (xmit_type & XMIT_GSO_ENC)
3895 bnx2x_update_pbds_gso_enc(skb, pbd_e2, pbd2,
3899 pbd2->global_data = cpu_to_le16(global_data);
			/* add additional parse BD indication to start BD */
3902 SET_FLAG(tx_start_bd->general_data,
3903 ETH_TX_START_BD_PARSE_NBDS, 1);
3904 /* set encapsulation flag in start BD */
3905 SET_FLAG(tx_start_bd->general_data,
3906 ETH_TX_START_BD_TUNNEL_EXIST, 1);
3908 tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
3911 } else if (xmit_type & XMIT_CSUM) {
3912 /* Set PBD in checksum offload case w/o encapsulation */
3913 hlen = bnx2x_set_pbd_csum_e2(bp, skb,
3914 &pbd_e2_parsing_data,
3918 /* Add the macs to the parsing BD if this is a vf or if
3919 * Tx Switching is enabled.
3922 /* override GRE parameters in BD */
3923 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
3924 &pbd_e2->data.mac_addr.src_mid,
3925 &pbd_e2->data.mac_addr.src_lo,
3928 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3929 &pbd_e2->data.mac_addr.dst_mid,
3930 &pbd_e2->data.mac_addr.dst_lo,
3932 } else if (bp->flags & TX_SWITCHING) {
3933 bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
3934 &pbd_e2->data.mac_addr.dst_mid,
3935 &pbd_e2->data.mac_addr.dst_lo,
3939 SET_FLAG(pbd_e2_parsing_data,
3940 ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type);
3942 u16 global_data = 0;
3943 pbd_e1x = &txdata->tx_desc_ring[bd_prod].parse_bd_e1x;
3944 memset(pbd_e1x, 0, sizeof(struct eth_tx_parse_bd_e1x));
3945 /* Set PBD in checksum offload case */
3946 if (xmit_type & XMIT_CSUM)
3947 hlen = bnx2x_set_pbd_csum(bp, skb, pbd_e1x, xmit_type);
3949 SET_FLAG(global_data,
3950 ETH_TX_PARSE_BD_E1X_ETH_ADDR_TYPE, mac_type);
3951 pbd_e1x->global_data |= cpu_to_le16(global_data);
3954 /* Setup the data pointer of the first BD of the packet */
3955 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
3956 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
3957 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
3958 pkt_size = tx_start_bd->nbytes;
3960 DP(NETIF_MSG_TX_QUEUED,
3961 "first bd @%p addr (%x:%x) nbytes %d flags %x vlan %x\n",
3962 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
3963 le16_to_cpu(tx_start_bd->nbytes),
3964 tx_start_bd->bd_flags.as_bitfield,
3965 le16_to_cpu(tx_start_bd->vlan_or_ethertype));
3967 if (xmit_type & XMIT_GSO) {
3969 DP(NETIF_MSG_TX_QUEUED,
3970 "TSO packet len %d hlen %d total len %d tso size %d\n",
3971 skb->len, hlen, skb_headlen(skb),
3972 skb_shinfo(skb)->gso_size);
3974 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
3976 if (unlikely(skb_headlen(skb) > hlen)) {
3978 bd_prod = bnx2x_tx_split(bp, txdata, tx_buf,
3982 if (!CHIP_IS_E1x(bp))
3983 bnx2x_set_pbd_gso_e2(skb, &pbd_e2_parsing_data,
3986 bnx2x_set_pbd_gso(skb, pbd_e1x, first_bd, xmit_type);
3989 /* Set the PBD's parsing_data field if not zero
3990 * (for the chips newer than 57711).
3992 if (pbd_e2_parsing_data)
3993 pbd_e2->parsing_data = cpu_to_le32(pbd_e2_parsing_data);
3995 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
3997 /* Handle fragmented skb */
3998 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
3999 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4001 mapping = skb_frag_dma_map(&bp->pdev->dev, frag, 0,
4002 skb_frag_size(frag), DMA_TO_DEVICE);
4003 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
4004 unsigned int pkts_compl = 0, bytes_compl = 0;
4006 DP(NETIF_MSG_TX_QUEUED,
4007 "Unable to map page - dropping packet...\n");
			/* we need to unmap all buffers already mapped for this
			 * SKB; first_bd->nbd needs to be properly updated
			 * before the call to bnx2x_free_tx_pkt
			 */
4014 first_bd->nbd = cpu_to_le16(nbd);
4015 bnx2x_free_tx_pkt(bp, txdata,
4016 TX_BD(txdata->tx_pkt_prod),
4017 &pkts_compl, &bytes_compl);
4018 return NETDEV_TX_OK;
4021 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4022 tx_data_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4023 if (total_pkt_bd == NULL)
4024 total_pkt_bd = &txdata->tx_desc_ring[bd_prod].reg_bd;
4026 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
4027 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
4028 tx_data_bd->nbytes = cpu_to_le16(skb_frag_size(frag));
4029 le16_add_cpu(&pkt_size, skb_frag_size(frag));
4032 DP(NETIF_MSG_TX_QUEUED,
4033 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
4034 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
4035 le16_to_cpu(tx_data_bd->nbytes));
4038 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
4040 /* update with actual num BDs */
4041 first_bd->nbd = cpu_to_le16(nbd);
4043 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
4045 /* now send a tx doorbell, counting the next BD
4046 * if the packet contains or ends with it
4048 if (TX_BD_POFF(bd_prod) < nbd)
	/* total_pkt_bytes should be set on the first data BD if
	 * it's not an LSO packet and there is more than one
	 * data BD. In this case pkt_size is limited by an MTU value.
	 * However we prefer to set it for an LSO packet (while we don't
	 * have to) in order to save some CPU cycles in the non-LSO
	 * case, which we care much more about.
	 */
4058 if (total_pkt_bd != NULL)
4059 total_pkt_bd->total_pkt_bytes = pkt_size;
4062 DP(NETIF_MSG_TX_QUEUED,
4063 "PBD (E1X) @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u tcp_flags %x xsum %x seq %u hlen %u\n",
4064 pbd_e1x, pbd_e1x->global_data, pbd_e1x->ip_hlen_w,
4065 pbd_e1x->ip_id, pbd_e1x->lso_mss, pbd_e1x->tcp_flags,
4066 pbd_e1x->tcp_pseudo_csum, pbd_e1x->tcp_send_seq,
4067 le16_to_cpu(pbd_e1x->total_hlen_w));
4069 DP(NETIF_MSG_TX_QUEUED,
4070 "PBD (E2) @%p dst %x %x %x src %x %x %x parsing_data %x\n",
4072 pbd_e2->data.mac_addr.dst_hi,
4073 pbd_e2->data.mac_addr.dst_mid,
4074 pbd_e2->data.mac_addr.dst_lo,
4075 pbd_e2->data.mac_addr.src_hi,
4076 pbd_e2->data.mac_addr.src_mid,
4077 pbd_e2->data.mac_addr.src_lo,
4078 pbd_e2->parsing_data);
4079 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
4081 netdev_tx_sent_queue(txq, skb->len);
4083 skb_tx_timestamp(skb);
4085 txdata->tx_pkt_prod++;
4087 * Make sure that the BD data is updated before updating the producer
4088 * since FW might read the BD right after the producer is updated.
4089 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
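	/* Ordering sketch (illustrative): BD writes happen first, then the
	 * barrier, then the producer/doorbell writes below - so the FW can
	 * never fetch a BD that is not yet visible in memory.
	 */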
4095 txdata->tx_db.data.prod += nbd;
4098 DOORBELL(bp, txdata->cid, txdata->tx_db.raw);
4102 txdata->tx_bd_prod += nbd;
4104 if (unlikely(bnx2x_tx_avail(bp, txdata) < MAX_DESC_PER_TX_PKT)) {
4105 netif_tx_stop_queue(txq);
		/* The paired memory barrier is in bnx2x_tx_int(); we have to
		 * keep the ordering of the set_bit() in netif_tx_stop_queue()
		 * and the read of the tx consumer in bnx2x_tx_avail() below.
		 */
4112 bnx2x_fp_qstats(bp, txdata->parent_fp)->driver_xoff++;
4113 if (bnx2x_tx_avail(bp, txdata) >= MAX_DESC_PER_TX_PKT)
4114 netif_tx_wake_queue(txq);
4118 return NETDEV_TX_OK;
4122 * bnx2x_setup_tc - routine to configure net_device for multi tc
 * @dev: net device to configure
 * @num_tc: number of traffic classes to enable
4127 * callback connected to the ndo_setup_tc function pointer
4129 int bnx2x_setup_tc(struct net_device *dev, u8 num_tc)
4131 int cos, prio, count, offset;
4132 struct bnx2x *bp = netdev_priv(dev);
4134 /* setup tc must be called under rtnl lock */
4137 /* no traffic classes requested. Aborting */
4139 netdev_reset_tc(dev);
4143 /* requested to support too many traffic classes */
4144 if (num_tc > bp->max_cos) {
4145 BNX2X_ERR("support for too many traffic classes requested: %d. Max supported is %d\n",
4146 num_tc, bp->max_cos);
4150 /* declare amount of supported traffic classes */
4151 if (netdev_set_num_tc(dev, num_tc)) {
4152 BNX2X_ERR("failed to declare %d traffic classes\n", num_tc);
4156 /* configure priority to traffic class mapping */
4157 for (prio = 0; prio < BNX2X_MAX_PRIORITY; prio++) {
4158 netdev_set_prio_tc_map(dev, prio, bp->prio_to_cos[prio]);
4159 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4160 "mapping priority %d to tc %d\n",
4161 prio, bp->prio_to_cos[prio]);
	/* Use this configuration to differentiate tc0 from other COSes.
	   This can be used for ETS or PFC, and saves the effort of setting
	   up a multi-class queueing discipline or negotiating DCBX with a switch:
4167 netdev_set_prio_tc_map(dev, 0, 0);
4168 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", 0, 0);
4169 for (prio = 1; prio < 16; prio++) {
4170 netdev_set_prio_tc_map(dev, prio, 1);
4171 DP(BNX2X_MSG_SP, "mapping priority %d to tc %d\n", prio, 1);
4174 /* configure traffic class to transmission queue mapping */
4175 for (cos = 0; cos < bp->max_cos; cos++) {
4176 count = BNX2X_NUM_ETH_QUEUES(bp);
4177 offset = cos * BNX2X_NUM_NON_CNIC_QUEUES(bp);
4178 netdev_set_tc_queue(dev, cos, count, offset);
4179 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
4180 "mapping tc %d to offset %d count %d\n",
4181 cos, offset, count);
4187 /* called with rtnl_lock */
4188 int bnx2x_change_mac_addr(struct net_device *dev, void *p)
4190 struct sockaddr *addr = p;
4191 struct bnx2x *bp = netdev_priv(dev);
4194 if (!bnx2x_is_valid_ether_addr(bp, addr->sa_data)) {
4195 BNX2X_ERR("Requested MAC address is not valid\n");
4199 if ((IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp)) &&
4200 !is_zero_ether_addr(addr->sa_data)) {
4201 BNX2X_ERR("Can't configure non-zero address on iSCSI or FCoE functions in MF-SD mode\n");
4205 if (netif_running(dev)) {
4206 rc = bnx2x_set_eth_mac(bp, false);
4211 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4213 if (netif_running(dev))
4214 rc = bnx2x_set_eth_mac(bp, true);
4219 static void bnx2x_free_fp_mem_at(struct bnx2x *bp, int fp_index)
4221 union host_hc_status_block *sb = &bnx2x_fp(bp, fp_index, status_blk);
4222 struct bnx2x_fastpath *fp = &bp->fp[fp_index];
4227 if (IS_FCOE_IDX(fp_index)) {
4228 memset(sb, 0, sizeof(union host_hc_status_block));
4229 fp->status_blk_mapping = 0;
4232 if (!CHIP_IS_E1x(bp))
4233 BNX2X_PCI_FREE(sb->e2_sb,
4234 bnx2x_fp(bp, fp_index,
4235 status_blk_mapping),
4236 sizeof(struct host_hc_status_block_e2));
4238 BNX2X_PCI_FREE(sb->e1x_sb,
4239 bnx2x_fp(bp, fp_index,
4240 status_blk_mapping),
4241 sizeof(struct host_hc_status_block_e1x));
4245 if (!skip_rx_queue(bp, fp_index)) {
4246 bnx2x_free_rx_bds(fp);
4248 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4249 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_buf_ring));
4250 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_desc_ring),
4251 bnx2x_fp(bp, fp_index, rx_desc_mapping),
4252 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4254 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_comp_ring),
4255 bnx2x_fp(bp, fp_index, rx_comp_mapping),
4256 sizeof(struct eth_fast_path_rx_cqe) *
4260 BNX2X_FREE(bnx2x_fp(bp, fp_index, rx_page_ring));
4261 BNX2X_PCI_FREE(bnx2x_fp(bp, fp_index, rx_sge_ring),
4262 bnx2x_fp(bp, fp_index, rx_sge_mapping),
4263 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4267 if (!skip_tx_queue(bp, fp_index)) {
4268 /* fastpath tx rings: tx_buf tx_desc */
4269 for_each_cos_in_tx_queue(fp, cos) {
4270 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
4272 DP(NETIF_MSG_IFDOWN,
4273 "freeing tx memory of fp %d cos %d cid %d\n",
4274 fp_index, cos, txdata->cid);
4276 BNX2X_FREE(txdata->tx_buf_ring);
4277 BNX2X_PCI_FREE(txdata->tx_desc_ring,
4278 txdata->tx_desc_mapping,
4279 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4282 /* end of fastpath */
4285 static void bnx2x_free_fp_mem_cnic(struct bnx2x *bp)
4288 for_each_cnic_queue(bp, i)
4289 bnx2x_free_fp_mem_at(bp, i);
4292 void bnx2x_free_fp_mem(struct bnx2x *bp)
4295 for_each_eth_queue(bp, i)
4296 bnx2x_free_fp_mem_at(bp, i);
4299 static void set_sb_shortcuts(struct bnx2x *bp, int index)
4301 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
4302 if (!CHIP_IS_E1x(bp)) {
4303 bnx2x_fp(bp, index, sb_index_values) =
4304 (__le16 *)status_blk.e2_sb->sb.index_values;
4305 bnx2x_fp(bp, index, sb_running_index) =
4306 (__le16 *)status_blk.e2_sb->sb.running_index;
4308 bnx2x_fp(bp, index, sb_index_values) =
4309 (__le16 *)status_blk.e1x_sb->sb.index_values;
4310 bnx2x_fp(bp, index, sb_running_index) =
4311 (__le16 *)status_blk.e1x_sb->sb.running_index;
4315 /* Returns the number of actually allocated BDs */
4316 static int bnx2x_alloc_rx_bds(struct bnx2x_fastpath *fp,
4319 struct bnx2x *bp = fp->bp;
4320 u16 ring_prod, cqe_ring_prod;
4321 int i, failure_cnt = 0;
4323 fp->rx_comp_cons = 0;
4324 cqe_ring_prod = ring_prod = 0;
	/* This routine is called only during fp init, so
	 * fp->eth_q_stats.rx_skb_alloc_failed = 0
	 */
4329 for (i = 0; i < rx_ring_size; i++) {
4330 if (bnx2x_alloc_rx_data(bp, fp, ring_prod, GFP_KERNEL) < 0) {
4334 ring_prod = NEXT_RX_IDX(ring_prod);
4335 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4336 WARN_ON(ring_prod <= (i - failure_cnt));
4340 BNX2X_ERR("was only able to allocate %d rx skbs on queue[%d]\n",
4341 i - failure_cnt, fp->index);
4343 fp->rx_bd_prod = ring_prod;
4344 /* Limit the CQE producer by the CQE ring size */
4345 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
4347 fp->rx_pkt = fp->rx_calls = 0;
4349 bnx2x_fp_stats(bp, fp)->eth_q_stats.rx_skb_alloc_failed += failure_cnt;
4351 return i - failure_cnt;
4354 static void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
4358 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4359 struct eth_rx_cqe_next_page *nextpg;
4361 nextpg = (struct eth_rx_cqe_next_page *)
4362 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4364 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4365 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4367 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4368 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4372 static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
4374 union host_hc_status_block *sb;
4375 struct bnx2x_fastpath *fp = &bp->fp[index];
4378 int rx_ring_size = 0;
4380 if (!bp->rx_ring_size &&
4381 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
4382 rx_ring_size = MIN_RX_SIZE_NONTPA;
4383 bp->rx_ring_size = rx_ring_size;
4384 } else if (!bp->rx_ring_size) {
4385 rx_ring_size = MAX_RX_AVAIL/BNX2X_NUM_RX_QUEUES(bp);
4387 if (CHIP_IS_E3(bp)) {
4388 u32 cfg = SHMEM_RD(bp,
4389 dev_info.port_hw_config[BP_PORT(bp)].
4392 /* Decrease ring size for 1G functions */
4393 if ((cfg & PORT_HW_CFG_NET_SERDES_IF_MASK) ==
4394 PORT_HW_CFG_NET_SERDES_IF_SGMII)
		/* allocate at least the number of buffers required by FW */
4399 rx_ring_size = max_t(int, bp->disable_tpa ? MIN_RX_SIZE_NONTPA :
4400 MIN_RX_SIZE_TPA, rx_ring_size);
4402 bp->rx_ring_size = rx_ring_size;
4403 } else /* if rx_ring_size specified - use it */
4404 rx_ring_size = bp->rx_ring_size;
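	/* Illustrative numbers (assumed, not read from the chip): with
	 * MAX_RX_AVAIL = 4096 and 8 RSS queues the starting per-queue size is
	 * 512; it may then be reduced for 1G SerDes functions and is finally
	 * raised to the FW minimum for the TPA or non-TPA case.
	 */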
4406 DP(BNX2X_MSG_SP, "calculated rx_ring_size %d\n", rx_ring_size);
4409 sb = &bnx2x_fp(bp, index, status_blk);
4411 if (!IS_FCOE_IDX(index)) {
4413 if (!CHIP_IS_E1x(bp)) {
4414 sb->e2_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4415 sizeof(struct host_hc_status_block_e2));
4419 sb->e1x_sb = BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, status_blk_mapping),
4420 sizeof(struct host_hc_status_block_e1x));
4426 /* FCoE Queue uses Default SB and doesn't ACK the SB, thus no need to
4427 * set shortcuts for it.
4429 if (!IS_FCOE_IDX(index))
4430 set_sb_shortcuts(bp, index);
	/* Tx */
	if (!skip_tx_queue(bp, index)) {
		/* fastpath tx rings: tx_buf tx_desc */
		for_each_cos_in_tx_queue(fp, cos) {
			struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];

			DP(NETIF_MSG_IFUP,
			   "allocating tx memory of fp %d cos %d\n",
			   index, cos);

			txdata->tx_buf_ring = kcalloc(NUM_TX_BD,
						      sizeof(struct sw_tx_bd),
						      GFP_KERNEL);
			if (!txdata->tx_buf_ring)
				goto alloc_mem_err;
			txdata->tx_desc_ring = BNX2X_PCI_ALLOC(&txdata->tx_desc_mapping,
							       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
			if (!txdata->tx_desc_ring)
				goto alloc_mem_err;
		}
	}
	/* Rx */
	if (!skip_rx_queue(bp, index)) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		bnx2x_fp(bp, index, rx_buf_ring) =
			kcalloc(NUM_RX_BD, sizeof(struct sw_rx_bd), GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_buf_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_desc_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_desc_mapping),
					sizeof(struct eth_rx_bd) * NUM_RX_BD);
		if (!bnx2x_fp(bp, index, rx_desc_ring))
			goto alloc_mem_err;

		/* Seed all CQEs by 1s */
		bnx2x_fp(bp, index, rx_comp_ring) =
			BNX2X_PCI_FALLOC(&bnx2x_fp(bp, index, rx_comp_mapping),
					 sizeof(struct eth_fast_path_rx_cqe) * NUM_RCQ_BD);
		if (!bnx2x_fp(bp, index, rx_comp_ring))
			goto alloc_mem_err;

		/* SGE ring */
		bnx2x_fp(bp, index, rx_page_ring) =
			kcalloc(NUM_RX_SGE, sizeof(struct sw_rx_page),
				GFP_KERNEL);
		if (!bnx2x_fp(bp, index, rx_page_ring))
			goto alloc_mem_err;
		bnx2x_fp(bp, index, rx_sge_ring) =
			BNX2X_PCI_ALLOC(&bnx2x_fp(bp, index, rx_sge_mapping),
					BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
		if (!bnx2x_fp(bp, index, rx_sge_ring))
			goto alloc_mem_err;

		/* RX BD ring */
		bnx2x_set_next_page_rx_bd(fp);

		/* CQ ring */
		bnx2x_set_next_page_rx_cq(fp);

		/* BDs */
		ring_size = bnx2x_alloc_rx_bds(fp, rx_ring_size);
		if (ring_size < rx_ring_size)
			goto alloc_mem_err;
	}

	return 0;
/* handles low memory cases */
alloc_mem_err:
	BNX2X_ERR("Unable to allocate full memory for queue %d (size %d)\n",
		  index, ring_size);
	/* FW will drop all packets if the queue is not big enough.
	 * In these cases we disable the queue.
	 * Min size is different for OOO, TPA and non-TPA queues.
	 */
	if (ring_size < (fp->disable_tpa ?
				MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
		/* release memory allocated for this queue */
		bnx2x_free_fp_mem_at(bp, index);
		return -ENOMEM;
	}
	return 0;
}
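/* Note (illustrative): the error path is deliberately tolerant. A
 * partially filled ring is kept as long as it still meets the firmware
 * minimum for its mode (TPA vs. non-TPA); only below that threshold is
 * the queue's memory released and -ENOMEM propagated to the caller.
 */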
static int bnx2x_alloc_fp_mem_cnic(struct bnx2x *bp)
{
	/* FCoE */
	if (!NO_FCOE(bp))
		if (bnx2x_alloc_fp_mem_at(bp, FCOE_IDX(bp)))
			/* we will fail the load process instead of marking
			 * NO_FCOE_FLAG
			 */
			return -ENOMEM;
	return 0;
}
static int bnx2x_alloc_fp_mem(struct bnx2x *bp)
{
	int i;

	/* 1. Allocate FP for leading - fatal if error
	 * 2. Allocate RSS - fix number of queues if error
	 */

	/* leading */
	if (bnx2x_alloc_fp_mem_at(bp, 0))
		return -ENOMEM;

	/* RSS */
	for_each_nondefault_eth_queue(bp, i)
		if (bnx2x_alloc_fp_mem_at(bp, i))
			break;

	/* handle memory failures */
	if (i != BNX2X_NUM_ETH_QUEUES(bp)) {
		int delta = BNX2X_NUM_ETH_QUEUES(bp) - i;

		WARN_ON(delta < 0);
		bnx2x_shrink_eth_fp(bp, delta);
		if (CNIC_SUPPORT(bp))
			/* move non-eth FPs next to the last eth FP;
			 * must be done in that order:
			 * FCOE_IDX < FWD_IDX < OOO_IDX
			 */

			/* move the FCoE fp even if NO_FCOE_FLAG is on */
			bnx2x_move_fp(bp, FCOE_IDX(bp), FCOE_IDX(bp) - delta);
		bp->num_ethernet_queues -= delta;
		bp->num_queues = bp->num_ethernet_queues +
				 bp->num_cnic_queues;
		BNX2X_ERR("Adjusted num of queues from %d to %d\n",
			  bp->num_queues + delta, bp->num_queues);
	}

	return 0;
}
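/* A concrete walk-through of the shrink path above (hypothetical
 * numbers): with 8 eth queues requested plus one FCoE queue, and
 * allocation failing at i == 5, delta == 3. The FCoE fastpath moves
 * from index 8 to index 5 so it stays adjacent to the last surviving
 * eth queue, and num_queues drops from 9 to 6.
 */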
void bnx2x_free_mem_bp(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < bp->fp_array_size; i++)
		kfree(bp->fp[i].tpa_info);
	kfree(bp->fp);
	kfree(bp->sp_objs);
	kfree(bp->fp_stats);
	kfree(bp->bnx2x_txq);
	kfree(bp->msix_table);
	kfree(bp->ilt);
}
int bnx2x_alloc_mem_bp(struct bnx2x *bp)
{
	struct bnx2x_fastpath *fp;
	struct msix_entry *tbl;
	struct bnx2x_ilt *ilt;
	int msix_table_size = 0;
	int fp_array_size, txq_array_size;
	int i;

	/*
	 * The biggest MSI-X table we might need is as a maximum number of fast
	 * path IGU SBs plus default SB (for PF only).
	 */
	msix_table_size = bp->igu_sb_cnt;
	if (IS_PF(bp))
		msix_table_size++;
	BNX2X_DEV_INFO("msix_table_size %d\n", msix_table_size);

	/* fp array: RSS plus CNIC related L2 queues */
	fp_array_size = BNX2X_MAX_RSS_COUNT(bp) + CNIC_SUPPORT(bp);
	bp->fp_array_size = fp_array_size;
	BNX2X_DEV_INFO("fp_array_size %d\n", bp->fp_array_size);

	fp = kcalloc(bp->fp_array_size, sizeof(*fp), GFP_KERNEL);
	if (!fp)
		goto alloc_err;
	for (i = 0; i < bp->fp_array_size; i++) {
		fp[i].tpa_info =
			kcalloc(ETH_MAX_AGGREGATION_QUEUES_E1H_E2,
				sizeof(struct bnx2x_agg_info), GFP_KERNEL);
		if (!(fp[i].tpa_info))
			goto alloc_err;
	}
	bp->fp = fp;

	/* allocate sp objs */
	bp->sp_objs = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_sp_objs),
			      GFP_KERNEL);
	if (!bp->sp_objs)
		goto alloc_err;

	/* allocate fp_stats */
	bp->fp_stats = kcalloc(bp->fp_array_size, sizeof(struct bnx2x_fp_stats),
			       GFP_KERNEL);
	if (!bp->fp_stats)
		goto alloc_err;

	/* Allocate memory for the transmission queues array */
	txq_array_size =
		BNX2X_MAX_RSS_COUNT(bp) * BNX2X_MULTI_TX_COS + CNIC_SUPPORT(bp);
	BNX2X_DEV_INFO("txq_array_size %d", txq_array_size);

	bp->bnx2x_txq = kcalloc(txq_array_size, sizeof(struct bnx2x_fp_txdata),
				GFP_KERNEL);
	if (!bp->bnx2x_txq)
		goto alloc_err;

	/* msix table */
	tbl = kcalloc(msix_table_size, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		goto alloc_err;
	bp->msix_table = tbl;

	/* ilt */
	ilt = kzalloc(sizeof(*ilt), GFP_KERNEL);
	if (!ilt)
		goto alloc_err;
	bp->ilt = ilt;

	return 0;
alloc_err:
	bnx2x_free_mem_bp(bp);
	return -ENOMEM;
}
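/* Note (illustrative): everything above is allocated with GFP_KERNEL
 * and sized from fp_array_size, so a single failure anywhere unwinds
 * through bnx2x_free_mem_bp(), which kfree()s every pointer; kfree(NULL)
 * is a no-op, so the partial-allocation case is safe.
 */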
int bnx2x_reload_if_running(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (unlikely(!netif_running(dev)))
		return 0;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
	return bnx2x_nic_load(bp, LOAD_NORMAL);
}
int bnx2x_get_cur_phy_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_params.num_phys <= 1)
		return INT_PHY;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}

	return sel_phy_idx;
}
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = bnx2x_get_cur_phy_idx(bp);

	/*
	 * The selected activated PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to reverse
	 * the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
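/* Illustration: with PHY swapping enabled, an active EXT_PHY1 reported
 * by bnx2x_get_cur_phy_idx() actually corresponds to the EXT_PHY2 slot
 * in the shmem configuration (and vice versa), so the index is mirrored
 * before LINK_CONFIG_IDX() maps it to a configuration entry.
 */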
#ifdef NETDEV_FCOE_WWNN
int bnx2x_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		*wwn = HILO_U64(cp->fcoe_wwn_node_name_hi,
				cp->fcoe_wwn_node_name_lo);
		break;
	case NETDEV_FCOE_WWPN:
		*wwn = HILO_U64(cp->fcoe_wwn_port_name_hi,
				cp->fcoe_wwn_port_name_lo);
		break;
	default:
		BNX2X_ERR("Wrong WWN type requested - %d\n", type);
		return -EINVAL;
	}

	return 0;
}
#endif
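/* Note (illustrative): HILO_U64 recombines the two 32-bit halves the
 * cnic interface stores, roughly ((u64)hi << 32) | lo, yielding the
 * 64-bit Fibre Channel world-wide name the FCoE stack expects.
 */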
/* called with rtnl_lock */
int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Can't perform change MTU during parity recovery\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE)) {
		BNX2X_ERR("Can't support requested MTU size\n");
		return -EINVAL;
	}

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	return bnx2x_reload_if_running(dev);
}
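/* Illustration of the bounds check: with ETH_HLEN == 14 and
 * ETH_MIN_PACKET_SIZE == 60, the smallest accepted MTU is 46; the upper
 * bound is ETH_MAX_JUMBO_PACKET_SIZE. Anything outside that window
 * fails with -EINVAL before any state is touched.
 */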
netdev_features_t bnx2x_fix_features(struct net_device *dev,
				     netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* TPA requires Rx CSUM offloading */
	if (!(features & NETIF_F_RXCSUM) || bp->disable_tpa) {
		features &= ~NETIF_F_LRO;
		features &= ~NETIF_F_GRO;
	}

	return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 flags = bp->flags;
	u32 changes;
	bool bnx2x_reload = false;

	if (features & NETIF_F_LRO)
		flags |= TPA_ENABLE_FLAG;
	else
		flags &= ~TPA_ENABLE_FLAG;

	if (features & NETIF_F_GRO)
		flags |= GRO_ENABLE_FLAG;
	else
		flags &= ~GRO_ENABLE_FLAG;

	if (features & NETIF_F_LOOPBACK) {
		if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
			bp->link_params.loopback_mode = LOOPBACK_BMAC;
			bnx2x_reload = true;
		}
	} else {
		if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
			bp->link_params.loopback_mode = LOOPBACK_NONE;
			bnx2x_reload = true;
		}
	}

	changes = flags ^ bp->flags;

	/* if GRO is changed while LRO is enabled, don't force a reload */
	if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
		changes &= ~GRO_ENABLE_FLAG;

	if (changes)
		bnx2x_reload = true;

	bp->flags = flags;

	if (bnx2x_reload) {
		if (bp->recovery_state == BNX2X_RECOVERY_DONE)
			return bnx2x_reload_if_running(dev);
		/* else: bnx2x_nic_load() will be called at end of recovery */
	}

	return 0;
}
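/* Note (illustrative): the XOR computes exactly which flag bits changed.
 * Toggling GRO while LRO (TPA_ENABLE_FLAG) stays set is masked out
 * because TPA already aggregates in hardware, so no reload is needed for
 * that transition; any other flag change forces an unload/load cycle via
 * bnx2x_reload_if_running().
 */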
void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	bnx2x_schedule_sp_rtnl(bp, BNX2X_SP_RTNL_TX_TIMEOUT, 0);
}
int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}
	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
	rtnl_unlock();

	return 0;
}
int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		BNX2X_ERR("Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();
	pci_restore_state(pdev);
	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}
	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	rc = bnx2x_nic_load(bp, LOAD_OPEN);
	rtnl_unlock();

	return rc;
}
void bnx2x_set_ctx_validation(struct bnx2x *bp, struct eth_context *cxt,
			      u32 cid)
{
	if (!cxt) {
		BNX2X_ERR("bad context pointer %p\n", cxt);
		return;
	}

	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_UCM_AG, ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, cid),
				       CDU_REGION_NUMBER_XCM_AG, ETH_CONNECTION_TYPE);
}
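/* Note (illustrative): CDU_RSRVD_VALUE_TYPE_A packs the hardware CID,
 * a CDU region number and the connection type into the validation byte
 * the Context Distribution Unit checks on each context access; ustorm
 * and xstorm each get their own region (UCM_AG vs. XCM_AG), which is
 * why the value is written twice.
 */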
static void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
				    u8 fw_sb_id, u8 sb_index,
				    u8 ticks)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_TIMEOUT_OFFSET(fw_sb_id, sb_index);

	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d ticks %d\n",
	   port, fw_sb_id, sb_index, ticks);
}
static void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
				    u16 fw_sb_id, u8 sb_index,
				    u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_STATUS_BLOCK_DATA_FLAGS_OFFSET(fw_sb_id, sb_index);
	u8 flags = REG_RD8(bp, addr);

	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR8(bp, addr, flags);
	DP(NETIF_MSG_IFUP,
	   "port %x fw_sb_id %d sb_index %d disable %d\n",
	   port, fw_sb_id, sb_index, disable);
}
void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u8 fw_sb_id,
				    u8 sb_index, u8 disable, u16 usec)
{
	int port = BP_PORT(bp);
	u8 ticks = usec / BNX2X_BTR;

	storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);

	disable = disable ? 1 : (usec ? 0 : 1);
	storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
}
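/* Illustration: the coalescing timeout is programmed in BTR ticks, so
 * assuming BNX2X_BTR is 4, a request of usec == 100 becomes 25 ticks.
 * A usec value of 0 implicitly disables coalescing for that index even
 * when the caller passed disable == 0, which is what the
 * (usec ? 0 : 1) fallback expresses.
 */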
void bnx2x_schedule_sp_rtnl(struct bnx2x *bp, enum sp_rtnl_flag flag,
			    u32 verbose)
{
	smp_mb__before_atomic();
	set_bit(flag, &bp->sp_rtnl_state);
	smp_mb__after_atomic();
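	/* Note (illustrative): the barrier pair makes the flag bit globally
	 * visible before the delayed work is queued, so the sp_rtnl task
	 * cannot run and miss the request it was scheduled to handle.
	 */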
	DP((BNX2X_MSG_SP | verbose), "Scheduling sp_rtnl task [Flag: %d]\n",
	   flag);
	schedule_delayed_work(&bp->sp_rtnl_task, 0);
}
EXPORT_SYMBOL(bnx2x_schedule_sp_rtnl);