/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

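/* The two helpers below access GRC registers indirectly, by tunneling
 * through the PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config
 * space instead of the memory-mapped BAR; the window is parked back at
 * PCICFG_VENDOR_ID_OFFSET afterwards so it is never left pointing at a
 * live GRC address.
 */
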
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

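/* The DMAE engine exposes 16 command cells (see dmae_reg_go_c[] above):
 * bnx2x_post_dmae() copies the prepared dmae_command into the cell's
 * command memory one dword at a time and then writes 1 to the matching
 * GO register to start the transfer.  Completion is detected by polling
 * the comp_addr/comp_val word, as bnx2x_write_dmae() below does.
 */
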
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}

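/* Wide-bus (64-bit) registers are accessed as a pair of dwords in a
 * single DMAE transaction, high dword first - note the val_hi/val_lo
 * ordering above and HILO_U64() on the read side.
 */
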
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

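/* Each storm processor (XSTORM/TSTORM/CSTORM/USTORM) keeps an assert
 * list in its internal memory; the loops above walk up to
 * STROM_ASSERT_ARRAY_SIZE entries per storm, print every row whose
 * opcode is still valid and return the total number of asserts found.
 */
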
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

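/* The MCP firmware logs into a cyclic buffer in its scratchpad; 'mark'
 * (read from offset 0xf104 and rounded up to a dword) appears to be the
 * wrap point, so the dump walks from mark to the end of the buffer
 * (0xF900) and then from the start (0xF108) back up to mark, eight
 * words (32 characters) per printk.
 */
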
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

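/* Teardown order matters here: intr_sem gates the ISRs first, then the
 * HW is optionally told to stop generating interrupts, then we wait for
 * any ISR still running on another CPU, and only then make sure the
 * slowpath task is neither pending nor executing.
 */
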
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

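/* The IGU ack is a single 32-bit write: status block id, storm id, new
 * index and the update/interrupt-mode bits are all packed into one
 * igu_ack_register and posted to the HC command register.
 */
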
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

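/* Each TX ring page ends in a "next-page" BD that never carries data,
 * so the NUM_TX_RINGS such entries are counted as permanently used,
 * which keeps the availability estimate conservative.
 */
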
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

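/* Ramrod completions are dispatched by OR-ing the command id with the
 * current SW state, so each case above matches exactly one
 * (command, expected state) pair and anything else is flagged as an
 * unexpected MC reply.
 */
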
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

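/* The SGE mask tracks ring occupancy one bit per entry in 64-bit
 * elements: bits are cleared as the FW consumes pages (see
 * bnx2x_update_sge_prod() above) and a mask element is refilled to all
 * ones when the producer sweeps past it; the two bits corresponding to
 * each page's "next" element are kept permanently cleared.
 */
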
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

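/* The HW lock is grabbed by writing the resource bit to
 * hw_lock_control_reg + 4 and confirmed by reading it back from
 * hw_lock_control_reg; bnx2x_release_hw_lock() below clears it by
 * writing the same bit to the base register.
 */
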
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

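/* phy_mutex alone only serializes callers within this driver instance;
 * when need_hw_lock is set the MDIO bus is shared beyond it (dual port
 * PHYs, per the comment above), so the HW MDIO lock is taken as well.
 */
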
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

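/* Units sanity check: line_speed is in Mbps, i.e. bits/usec, so
 * r_param = line_speed/8 is bytes/usec (1250 at 10G).  Per the comment
 * above, t_fair = T_FAIR_COEF/line_speed then works out to 1000 usec
 * at 10G, and rs_threshold is 1.25 * r_param * RS_PERIODIC_TIMEOUT_USEC,
 * i.e. 25% above one period's worth of bytes.
 */
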
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
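
/* A minimal sketch of the store pattern used above: a small host-side
 * struct is copied into device internal RAM one 32-bit word at a time
 * through the register-write helper. The example_vars struct and base
 * offset here are hypothetical stand-ins, not driver symbols. */
static inline void bnx2x_example_store_struct(struct bnx2x *bp, u32 base)
{
	struct { u32 a; u32 b; } example_vars = { 1, 2 };
	int i;

	for (i = 0; i < sizeof(example_vars)/4; i++)
		REG_WR(bp, base + i * 4, ((u32 *)&example_vars)[i]);
}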
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
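
/* Sketch (not driver code) of the E1H function numbering used above:
 * the function id packs the virtual network index and the port as
 * func = (vn << 1) | port, so vn 2 on port 1 maps to func 5; the
 * 2*vn + port form passed to bnx2x_init_vn_minmax() is equivalent
 * for port values 0 and 1. */
static inline int bnx2x_example_e1h_func_id(int vn, int port)
{
	return (vn << 1) | port;	/* same as 2*vn + port */
}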
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	mmiowb();

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
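
/* A minimal sketch of the producer-ring advance used in bnx2x_sp_post():
 * the producer either wraps back to the base of the ring or steps
 * forward, and the index is only published to hardware after the
 * element has been written. The example_ring type is hypothetical. */
struct bnx2x_example_ring {
	void *base, *last, *prod;
	u16 prod_idx;
};

static inline void bnx2x_example_ring_advance(struct bnx2x_example_ring *r,
					      size_t elem_size)
{
	if (r->prod == r->last) {
		r->prod = r->base;	/* wrap around to the first element */
		r->prod_idx = 0;
	} else {
		r->prod = (char *)r->prod + elem_size;
		r->prod_idx++;
	}
	/* a write barrier (wmb()) would go here, before publishing
	 * prod_idx to the hardware, just as the real code does */
}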
/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
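
/* Sketch of the acquire protocol above (illustrative only): write the
 * lock bit, then read back; the lock is owned only if the bit reads
 * back as set. Assumes the same GRC register semantics used by
 * bnx2x_acquire_alr(). */
static inline int bnx2x_example_try_lock_bit(struct bnx2x *bp, u32 reg)
{
	u32 val;

	REG_WR(bp, reg, 1UL << 31);	/* request ownership */
	val = REG_RD(bp, reg);		/* did we get it? */
	return (val & (1UL << 31)) ? 0 : -EBUSY;
}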
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}

	return rc;
}
/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	/* mark the failure */
	bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 bp->link_params.ext_phy_config);

	/* log the failure */
	printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
	       " the driver to shut down the card to prevent permanent"
	       " damage. Please contact Dell Support for assistance\n",
	       bp->dev->name);
}
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val, swap_val, swap_override;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			/* The PHY reset is controlled by GPIO 1 */
			/* fake the port number to cancel the swap done in
			   set_gpio() */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			port = (swap_val && swap_override) ^ 1;
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			break;

		default:
			break;
		}
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
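
/* Worked example of the port-swap correction above (not driver code):
 * (swap_val && swap_override) collapses the two straps to 0 or 1, and
 * the XOR with 1 inverts that, yielding 0 only when both straps are
 * set. This pre-compensates for the swap bnx2x_set_gpio() applies
 * internally, so the GPIO lands on the intended physical port. */
static inline int bnx2x_example_swapped_port(u32 swap_val, u32 swap_override)
{
	return (swap_val && swap_override) ^ 1;
}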
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
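
/* Worked example of the edge detection above (illustrative only):
 * with attn_bits = 0b0110, attn_ack = 0b0010, attn_state = 0b0010,
 *	asserted   =  0110 & ~0010 & ~0010 = 0100  (newly raised bit)
 *	deasserted = ~0110 &  0010 &  0010 = 0000  (nothing dropped)
 * Once handled, a bit appears in both attn_bits and attn_ack (or in
 * neither); the "BAD attention state" check above flags the
 * inconsistent combinations. */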
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}
static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */
/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/
/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
		old_tclient->s = tclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
		old_xclient->s = xclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
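
/* Worked example of the split-64 helpers above (not driver code):
 * adding 1 to a low word of 0xFFFFFFFF must carry into the high word,
 * which is exactly what ADD_64 detects via the (s_lo < a_lo) test
 * after the wrapping addition. */
static inline void bnx2x_example_add_64(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;
	u32 a_hi = 0, a_lo = 1;

	ADD_64(s_hi, a_hi, s_lo, a_lo);
	/* now s_hi == 1 and s_lo == 0, i.e. 0x1_00000000 */
}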
/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
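
/* Sketch of the hi/lo pairing assumed by bnx2x_hilo(): each 64-bit
 * counter is stored as two consecutive u32s with the high word first,
 * so hiref[0] is the high half and hiref[1] the low half. HILO_U64 is
 * assumed here to compose them as ((u64)hi << 32) | lo. */
static inline u64 bnx2x_example_hilo(const u32 *hiref)
{
	return ((u64)hiref[0] << 32) | hiref[1];
}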
/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
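
/* Sketch of the chunking above (illustrative only): a read larger
 * than the per-command DMAE limit is split into one maximal chunk
 * plus the remainder, with source and destination both advanced by
 * the words already transferred. Assumes total_words > max_words. */
static inline void bnx2x_example_split_len(u32 total_words, u32 max_words,
					   u32 *first, u32 *second)
{
	*first = max_words;
	*second = total_words - max_words;
}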
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
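
/* Worked example of the validity check above (not driver code): the
 * storms post statistics tagged with the counter of the query they
 * completed, so a snapshot is current only when its counter + 1
 * equals the driver's next counter; the cast to u16 handles sequence
 * wrap-around. */
static inline int bnx2x_example_snapshot_valid(u16 fw_counter,
					       u16 drv_counter)
{
	return (u16)(fw_counter + 1) == drv_counter;
}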
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
4168 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4172 static const struct {
4173 void (*action)(struct bnx2x *bp);
4174 enum bnx2x_stats_state next_state;
4175 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4178 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4179 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4180 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4181 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4184 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4185 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4186 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4187 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
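/* Dispatch an event through the state machine above: run the action
 * registered for the current (state, event) pair, then advance to the
 * next_state recorded in the same table entry.
 */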
4191 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4193 enum bnx2x_stats_state state = bp->stats_state;
4195 bnx2x_stats_stm[state][event].action(bp);
4196 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4198 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4199 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4200 state, event, bp->stats_state);
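/* Periodic driver timer: in poll mode it services the Rx ring directly,
 * then (when an MCP is present) advances the driver pulse sequence in
 * shared memory and sanity-checks it against the MCP's own pulse, and
 * finally triggers a statistics update while the device is open.
 * Re-arms itself at bp->current_interval.
 */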
4203 static void bnx2x_timer(unsigned long data)
4205 struct bnx2x *bp = (struct bnx2x *) data;
4207 if (!netif_running(bp->dev))
4210 if (atomic_read(&bp->intr_sem) != 0)
4214 struct bnx2x_fastpath *fp = &bp->fp[0];
4218 rc = bnx2x_rx_int(fp, 1000);
4221 if (!BP_NOMCP(bp)) {
4222 int func = BP_FUNC(bp);
4226 ++bp->fw_drv_pulse_wr_seq;
4227 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4228 /* TBD - add SYSTEM_TIME */
4229 drv_pulse = bp->fw_drv_pulse_wr_seq;
4230 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4232 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4233 MCP_PULSE_SEQ_MASK);
4234 /* The delta between driver pulse and mcp response
4235 * should be 1 (before mcp response) or 0 (after mcp response)
4237 if ((drv_pulse != mcp_pulse) &&
4238 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4239 /* someone lost a heartbeat... */
4240 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4241 drv_pulse, mcp_pulse);
4245 if ((bp->state == BNX2X_STATE_OPEN) ||
4246 (bp->state == BNX2X_STATE_DISABLED))
4247 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4250 mod_timer(&bp->timer, jiffies + bp->current_interval);
4253 /* end of Statistics */
4258 * nic init service functions
4261 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4263 int port = BP_PORT(bp);
4265 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4266 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4267 sizeof(struct ustorm_status_block)/4);
4268 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4269 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4270 sizeof(struct cstorm_status_block)/4);
4273 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4274 dma_addr_t mapping, int sb_id)
4276 int port = BP_PORT(bp);
4277 int func = BP_FUNC(bp);
4282 section = ((u64)mapping) + offsetof(struct host_status_block,
4284 sb->u_status_block.status_block_id = sb_id;
4286 REG_WR(bp, BAR_USTRORM_INTMEM +
4287 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4288 REG_WR(bp, BAR_USTRORM_INTMEM +
4289 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4291 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4292 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4294 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4295 REG_WR16(bp, BAR_USTRORM_INTMEM +
4296 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4299 section = ((u64)mapping) + offsetof(struct host_status_block,
4301 sb->c_status_block.status_block_id = sb_id;
4303 REG_WR(bp, BAR_CSTRORM_INTMEM +
4304 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4305 REG_WR(bp, BAR_CSTRORM_INTMEM +
4306 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4308 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4309 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4311 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4312 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4313 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4315 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4318 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4320 int func = BP_FUNC(bp);
4322 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4323 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4324 sizeof(struct tstorm_def_status_block)/4);
4325 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4326 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4327 sizeof(struct ustorm_def_status_block)/4);
4328 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4329 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4330 sizeof(struct cstorm_def_status_block)/4);
4331 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4332 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4333 sizeof(struct xstorm_def_status_block)/4);
4336 static void bnx2x_init_def_sb(struct bnx2x *bp,
4337 struct host_def_status_block *def_sb,
4338 dma_addr_t mapping, int sb_id)
4340 int port = BP_PORT(bp);
4341 int func = BP_FUNC(bp);
4342 int index, val, reg_offset;
4346 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4347 atten_status_block);
4348 def_sb->atten_status_block.status_block_id = sb_id;
4352 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4353 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4355 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4356 bp->attn_group[index].sig[0] = REG_RD(bp,
4357 reg_offset + 0x10*index);
4358 bp->attn_group[index].sig[1] = REG_RD(bp,
4359 reg_offset + 0x4 + 0x10*index);
4360 bp->attn_group[index].sig[2] = REG_RD(bp,
4361 reg_offset + 0x8 + 0x10*index);
4362 bp->attn_group[index].sig[3] = REG_RD(bp,
4363 reg_offset + 0xc + 0x10*index);
4366 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4367 HC_REG_ATTN_MSG0_ADDR_L);
4369 REG_WR(bp, reg_offset, U64_LO(section));
4370 REG_WR(bp, reg_offset + 4, U64_HI(section));
4372 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4374 val = REG_RD(bp, reg_offset);
4376 REG_WR(bp, reg_offset, val);
4379 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4380 u_def_status_block);
4381 def_sb->u_def_status_block.status_block_id = sb_id;
4383 REG_WR(bp, BAR_USTRORM_INTMEM +
4384 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4385 REG_WR(bp, BAR_USTRORM_INTMEM +
4386 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4388 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4389 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4391 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4392 REG_WR16(bp, BAR_USTRORM_INTMEM +
4393 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4396 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4397 c_def_status_block);
4398 def_sb->c_def_status_block.status_block_id = sb_id;
4400 REG_WR(bp, BAR_CSTRORM_INTMEM +
4401 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4402 REG_WR(bp, BAR_CSTRORM_INTMEM +
4403 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4405 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4406 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4408 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4409 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4410 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4413 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4414 t_def_status_block);
4415 def_sb->t_def_status_block.status_block_id = sb_id;
4417 REG_WR(bp, BAR_TSTRORM_INTMEM +
4418 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4419 REG_WR(bp, BAR_TSTRORM_INTMEM +
4420 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4422 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4423 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4425 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4426 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4427 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4430 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4431 x_def_status_block);
4432 def_sb->x_def_status_block.status_block_id = sb_id;
4434 REG_WR(bp, BAR_XSTRORM_INTMEM +
4435 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4436 REG_WR(bp, BAR_XSTRORM_INTMEM +
4437 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4439 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4440 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4442 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4443 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4444 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4446 bp->stats_pending = 0;
4447 bp->set_mac_pending = 0;
4449 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
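/* Program interrupt coalescing for each queue: write the HC timeout for
 * the Rx and Tx CQ indices, and disable host coalescing for an index
 * whose timeout would round to zero. The /12 scaling presumably converts
 * the microsecond tick values into the HC timer's 12us units.
 */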
4452 static void bnx2x_update_coalesce(struct bnx2x *bp)
4454 int port = BP_PORT(bp);
4457 for_each_queue(bp, i) {
4458 int sb_id = bp->fp[i].sb_id;
4460 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4461 REG_WR8(bp, BAR_USTRORM_INTMEM +
4462 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4463 U_SB_ETH_RX_CQ_INDEX),
4465 REG_WR16(bp, BAR_USTRORM_INTMEM +
4466 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4467 U_SB_ETH_RX_CQ_INDEX),
4468 (bp->rx_ticks/12) ? 0 : 1);
4470 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4471 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4472 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4473 C_SB_ETH_TX_CQ_INDEX),
4475 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4476 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4477 C_SB_ETH_TX_CQ_INDEX),
4478 (bp->tx_ticks/12) ? 0 : 1);
4482 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4483 struct bnx2x_fastpath *fp, int last)
4487 for (i = 0; i < last; i++) {
4488 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4489 struct sk_buff *skb = rx_buf->skb;
4492 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4496 if (fp->tpa_state[i] == BNX2X_TPA_START)
4497 pci_unmap_single(bp->pdev,
4498 pci_unmap_addr(rx_buf, mapping),
4499 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
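/* Rx ring bring-up: size the Rx buffers from the MTU, pre-allocate the
 * per-queue TPA skb pool (falling back to TPA-disabled on allocation
 * failure), chain the BD/SGE/CQE ring pages together via their
 * "next page" elements, and post the initial buffers to the hardware.
 */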
4506 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4508 int func = BP_FUNC(bp);
4509 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4510 ETH_MAX_AGGREGATION_QUEUES_E1H;
4511 u16 ring_prod, cqe_ring_prod;
4514 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4516 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4518 if (bp->flags & TPA_ENABLE_FLAG) {
4520 for_each_rx_queue(bp, j) {
4521 struct bnx2x_fastpath *fp = &bp->fp[j];
4523 for (i = 0; i < max_agg_queues; i++) {
4524 fp->tpa_pool[i].skb =
4525 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4526 if (!fp->tpa_pool[i].skb) {
4527 BNX2X_ERR("Failed to allocate TPA "
4528 "skb pool for queue[%d] - "
4529 "disabling TPA on this "
4531 bnx2x_free_tpa_pool(bp, fp, i);
4532 fp->disable_tpa = 1;
4535 pci_unmap_addr_set((struct sw_rx_bd *)
4536 &bp->fp->tpa_pool[i],
4538 fp->tpa_state[i] = BNX2X_TPA_STOP;
4543 for_each_rx_queue(bp, j) {
4544 struct bnx2x_fastpath *fp = &bp->fp[j];
4547 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4548 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4550 /* "next page" elements initialization */
4552 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4553 struct eth_rx_sge *sge;
4555 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4557 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4558 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4560 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4561 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4564 bnx2x_init_sge_ring_bit_mask(fp);
4567 for (i = 1; i <= NUM_RX_RINGS; i++) {
4568 struct eth_rx_bd *rx_bd;
4570 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4572 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4573 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4575 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4576 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4580 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4581 struct eth_rx_cqe_next_page *nextpg;
4583 nextpg = (struct eth_rx_cqe_next_page *)
4584 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4586 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4587 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4589 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4590 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4593 /* Allocate SGEs and initialize the ring elements */
4594 for (i = 0, ring_prod = 0;
4595 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4597 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4598 BNX2X_ERR("was only able to allocate "
4600 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4601 /* Cleanup already allocated elements */
4602 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4603 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4604 fp->disable_tpa = 1;
4608 ring_prod = NEXT_SGE_IDX(ring_prod);
4610 fp->rx_sge_prod = ring_prod;
4612 /* Allocate BDs and initialize BD ring */
4613 fp->rx_comp_cons = 0;
4614 cqe_ring_prod = ring_prod = 0;
4615 for (i = 0; i < bp->rx_ring_size; i++) {
4616 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4617 BNX2X_ERR("was only able to allocate "
4618 "%d rx skbs on queue[%d]\n", i, j);
4619 fp->eth_q_stats.rx_skb_alloc_failed++;
4622 ring_prod = NEXT_RX_IDX(ring_prod);
4623 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4624 WARN_ON(ring_prod <= i);
4627 fp->rx_bd_prod = ring_prod;
4628 /* must not have more available CQEs than BDs */
4629 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4631 fp->rx_pkt = fp->rx_calls = 0;
4634 * this will generate an interrupt (to the TSTORM);
4635 * it must only be done after the chip is initialized
4637 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4642 REG_WR(bp, BAR_USTRORM_INTMEM +
4643 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4644 U64_LO(fp->rx_comp_mapping));
4645 REG_WR(bp, BAR_USTRORM_INTMEM +
4646 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4647 U64_HI(fp->rx_comp_mapping));
4651 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4655 for_each_tx_queue(bp, j) {
4656 struct bnx2x_fastpath *fp = &bp->fp[j];
4658 for (i = 1; i <= NUM_TX_RINGS; i++) {
4659 struct eth_tx_bd *tx_bd =
4660 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4663 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4664 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4666 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4667 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4670 fp->tx_pkt_prod = 0;
4671 fp->tx_pkt_cons = 0;
4674 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
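/* Slowpath queue (SPQ) init: reset the producer index and BD pointers,
 * then publish the SPQ page base address to the XSTORM fast memory so
 * the firmware can fetch slowpath commands.
 */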
4679 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4681 int func = BP_FUNC(bp);
4683 spin_lock_init(&bp->spq_lock);
4685 bp->spq_left = MAX_SPQ_PENDING;
4686 bp->spq_prod_idx = 0;
4687 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4688 bp->spq_prod_bd = bp->spq;
4689 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4691 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4692 U64_LO(bp->spq_mapping));
4694 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4695 U64_HI(bp->spq_mapping));
4697 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4701 static void bnx2x_init_context(struct bnx2x *bp)
4705 for_each_queue(bp, i) {
4706 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4707 struct bnx2x_fastpath *fp = &bp->fp[i];
4708 u8 cl_id = fp->cl_id;
4709 u8 sb_id = fp->sb_id;
4711 context->ustorm_st_context.common.sb_index_numbers =
4712 BNX2X_RX_SB_INDEX_NUM;
4713 context->ustorm_st_context.common.clientId = cl_id;
4714 context->ustorm_st_context.common.status_block_id = sb_id;
4715 context->ustorm_st_context.common.flags =
4716 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4717 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4718 context->ustorm_st_context.common.statistics_counter_id =
4720 context->ustorm_st_context.common.mc_alignment_log_size =
4721 BNX2X_RX_ALIGN_SHIFT;
4722 context->ustorm_st_context.common.bd_buff_size =
4724 context->ustorm_st_context.common.bd_page_base_hi =
4725 U64_HI(fp->rx_desc_mapping);
4726 context->ustorm_st_context.common.bd_page_base_lo =
4727 U64_LO(fp->rx_desc_mapping);
4728 if (!fp->disable_tpa) {
4729 context->ustorm_st_context.common.flags |=
4730 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4731 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4732 context->ustorm_st_context.common.sge_buff_size =
4733 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4735 context->ustorm_st_context.common.sge_page_base_hi =
4736 U64_HI(fp->rx_sge_mapping);
4737 context->ustorm_st_context.common.sge_page_base_lo =
4738 U64_LO(fp->rx_sge_mapping);
4741 context->ustorm_ag_context.cdu_usage =
4742 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4743 CDU_REGION_NUMBER_UCM_AG,
4744 ETH_CONNECTION_TYPE);
4746 context->xstorm_st_context.tx_bd_page_base_hi =
4747 U64_HI(fp->tx_desc_mapping);
4748 context->xstorm_st_context.tx_bd_page_base_lo =
4749 U64_LO(fp->tx_desc_mapping);
4750 context->xstorm_st_context.db_data_addr_hi =
4751 U64_HI(fp->tx_prods_mapping);
4752 context->xstorm_st_context.db_data_addr_lo =
4753 U64_LO(fp->tx_prods_mapping);
4754 context->xstorm_st_context.statistics_data = (cl_id |
4755 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4756 context->cstorm_st_context.sb_index_number =
4757 C_SB_ETH_TX_CQ_INDEX;
4758 context->cstorm_st_context.status_block_id = sb_id;
4760 context->xstorm_ag_context.cdu_reserved =
4761 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4762 CDU_REGION_NUMBER_XCM_AG,
4763 ETH_CONNECTION_TYPE);
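/* RSS indirection table: when multi-queue is enabled, spread the table
 * entries round-robin across the Rx queues' client ids.
 */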
4767 static void bnx2x_init_ind_table(struct bnx2x *bp)
4769 int func = BP_FUNC(bp);
4772 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4776 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4777 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4778 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4779 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4780 bp->fp->cl_id + (i % bp->num_rx_queues));
4783 static void bnx2x_set_client_config(struct bnx2x *bp)
4785 struct tstorm_eth_client_config tstorm_client = {0};
4786 int port = BP_PORT(bp);
4789 tstorm_client.mtu = bp->dev->mtu;
4790 tstorm_client.config_flags =
4791 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4792 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4794 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4795 tstorm_client.config_flags |=
4796 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4797 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4801 if (bp->flags & TPA_ENABLE_FLAG) {
4802 tstorm_client.max_sges_for_packet =
4803 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4804 tstorm_client.max_sges_for_packet =
4805 ((tstorm_client.max_sges_for_packet +
4806 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4807 PAGES_PER_SGE_SHIFT;
4809 tstorm_client.config_flags |=
4810 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4813 for_each_queue(bp, i) {
4814 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4816 REG_WR(bp, BAR_TSTRORM_INTMEM +
4817 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4818 ((u32 *)&tstorm_client)[0]);
4819 REG_WR(bp, BAR_TSTRORM_INTMEM +
4820 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4821 ((u32 *)&tstorm_client)[1]);
4824 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4825 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
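/* Translate the driver rx_mode into TSTORM MAC filtering masks
 * (drop-all/accept-all per unicast, multicast and broadcast class) plus
 * the matching NIG LLH mask, then push the per-client config if Rx is
 * enabled.
 */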
4828 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4830 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4831 int mode = bp->rx_mode;
4832 int mask = (1 << BP_L_ID(bp));
4833 int func = BP_FUNC(bp);
4834 int port = BP_PORT(bp);
4836 /* All but management unicast packets should pass to the host as well */
4838 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4839 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4840 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4841 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4843 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4846 case BNX2X_RX_MODE_NONE: /* no Rx */
4847 tstorm_mac_filter.ucast_drop_all = mask;
4848 tstorm_mac_filter.mcast_drop_all = mask;
4849 tstorm_mac_filter.bcast_drop_all = mask;
4852 case BNX2X_RX_MODE_NORMAL:
4853 tstorm_mac_filter.bcast_accept_all = mask;
4856 case BNX2X_RX_MODE_ALLMULTI:
4857 tstorm_mac_filter.mcast_accept_all = mask;
4858 tstorm_mac_filter.bcast_accept_all = mask;
4861 case BNX2X_RX_MODE_PROMISC:
4862 tstorm_mac_filter.ucast_accept_all = mask;
4863 tstorm_mac_filter.mcast_accept_all = mask;
4864 tstorm_mac_filter.bcast_accept_all = mask;
4865 /* pass management unicast packets as well */
4866 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4870 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4875 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4878 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4879 REG_WR(bp, BAR_TSTRORM_INTMEM +
4880 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4881 ((u32 *)&tstorm_mac_filter)[i]);
4883 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4884 ((u32 *)&tstorm_mac_filter)[i]); */
4887 if (mode != BNX2X_RX_MODE_NONE)
4888 bnx2x_set_client_config(bp);
4891 static void bnx2x_init_internal_common(struct bnx2x *bp)
4895 if (bp->flags & TPA_ENABLE_FLAG) {
4896 struct tstorm_eth_tpa_exist tpa = {0};
4900 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4902 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4906 /* Zero this manually as its initialization is
4907 currently missing in the initTool */
4908 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4909 REG_WR(bp, BAR_USTRORM_INTMEM +
4910 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4913 static void bnx2x_init_internal_port(struct bnx2x *bp)
4915 int port = BP_PORT(bp);
4917 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4918 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4919 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4920 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4923 /* Calculates the sum of vn_min_rates.
4924 It's needed for further normalizing of the min_rates.
4926 sum of vn_min_rates.
4928 0 - if all the min_rates are 0.
4929 In the latter case the fairness algorithm should be deactivated.
4930 If not all min_rates are zero then those that are zeroes will be set to 1.
4932 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4935 int port = BP_PORT(bp);
4938 bp->vn_weight_sum = 0;
4939 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4940 int func = 2*vn + port;
4942 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4943 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4944 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4946 /* Skip hidden vns */
4947 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4950 /* If min rate is zero - set it to 1 */
4952 vn_min_rate = DEF_MIN_RATE;
4956 bp->vn_weight_sum += vn_min_rate;
4959 /* ... only if all min rates are zeros - disable fairness */
4961 bp->vn_weight_sum = 0;
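/* Per-function internal RAM init: RSS/TPA configuration, zeroing of the
 * per-client statistics areas in each storm, publishing the fw_stats DMA
 * address, and (on E1H) setting up the CMNG rate-shaping/fairness
 * context from the computed vn weights.
 */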
4964 static void bnx2x_init_internal_func(struct bnx2x *bp)
4966 struct tstorm_eth_function_common_config tstorm_config = {0};
4967 struct stats_indication_flags stats_flags = {0};
4968 int port = BP_PORT(bp);
4969 int func = BP_FUNC(bp);
4975 tstorm_config.config_flags = MULTI_FLAGS(bp);
4976 tstorm_config.rss_result_mask = MULTI_MASK;
4979 tstorm_config.config_flags |=
4980 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4982 tstorm_config.leading_client_id = BP_L_ID(bp);
4984 REG_WR(bp, BAR_TSTRORM_INTMEM +
4985 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4986 (*(u32 *)&tstorm_config));
4988 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4989 bnx2x_set_storm_rx_mode(bp);
4991 for_each_queue(bp, i) {
4992 u8 cl_id = bp->fp[i].cl_id;
4994 /* reset xstorm per client statistics */
4995 offset = BAR_XSTRORM_INTMEM +
4996 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4998 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4999 REG_WR(bp, offset + j*4, 0);
5001 /* reset tstorm per client statistics */
5002 offset = BAR_TSTRORM_INTMEM +
5003 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5005 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5006 REG_WR(bp, offset + j*4, 0);
5008 /* reset ustorm per client statistics */
5009 offset = BAR_USTRORM_INTMEM +
5010 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5012 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5013 REG_WR(bp, offset + j*4, 0);
5016 /* Init statistics related context */
5017 stats_flags.collect_eth = 1;
5019 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5020 ((u32 *)&stats_flags)[0]);
5021 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5022 ((u32 *)&stats_flags)[1]);
5024 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5025 ((u32 *)&stats_flags)[0]);
5026 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5027 ((u32 *)&stats_flags)[1]);
5029 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5030 ((u32 *)&stats_flags)[0]);
5031 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5032 ((u32 *)&stats_flags)[1]);
5034 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5035 ((u32 *)&stats_flags)[0]);
5036 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5037 ((u32 *)&stats_flags)[1]);
5039 REG_WR(bp, BAR_XSTRORM_INTMEM +
5040 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5041 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5042 REG_WR(bp, BAR_XSTRORM_INTMEM +
5043 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5044 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5046 REG_WR(bp, BAR_TSTRORM_INTMEM +
5047 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5048 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5049 REG_WR(bp, BAR_TSTRORM_INTMEM +
5050 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5051 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5053 REG_WR(bp, BAR_USTRORM_INTMEM +
5054 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5055 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5056 REG_WR(bp, BAR_USTRORM_INTMEM +
5057 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5058 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5060 if (CHIP_IS_E1H(bp)) {
5061 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5063 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5065 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5067 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5070 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5074 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5076 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5077 SGE_PAGE_SIZE * PAGES_PER_SGE),
5079 for_each_rx_queue(bp, i) {
5080 struct bnx2x_fastpath *fp = &bp->fp[i];
5082 REG_WR(bp, BAR_USTRORM_INTMEM +
5083 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5084 U64_LO(fp->rx_comp_mapping));
5085 REG_WR(bp, BAR_USTRORM_INTMEM +
5086 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5087 U64_HI(fp->rx_comp_mapping));
5089 REG_WR16(bp, BAR_USTRORM_INTMEM +
5090 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5094 /* dropless flow control */
5095 if (CHIP_IS_E1H(bp)) {
5096 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5098 rx_pause.bd_thr_low = 250;
5099 rx_pause.cqe_thr_low = 250;
5101 rx_pause.sge_thr_low = 0;
5102 rx_pause.bd_thr_high = 350;
5103 rx_pause.cqe_thr_high = 350;
5104 rx_pause.sge_thr_high = 0;
5106 for_each_rx_queue(bp, i) {
5107 struct bnx2x_fastpath *fp = &bp->fp[i];
5109 if (!fp->disable_tpa) {
5110 rx_pause.sge_thr_low = 150;
5111 rx_pause.sge_thr_high = 250;
5115 offset = BAR_USTRORM_INTMEM +
5116 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5119 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5121 REG_WR(bp, offset + j*4,
5122 ((u32 *)&rx_pause)[j]);
5126 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5128 /* Init rate shaping and fairness contexts */
5132 /* During init there is no active link
5133 Until link is up, set link rate to 10Gbps */
5134 bp->link_vars.line_speed = SPEED_10000;
5135 bnx2x_init_port_minmax(bp);
5137 bnx2x_calc_vn_weight_sum(bp);
5139 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5140 bnx2x_init_vn_minmax(bp, 2*vn + port);
5142 /* Enable rate shaping and fairness */
5143 bp->cmng.flags.cmng_enables =
5144 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5145 if (bp->vn_weight_sum)
5146 bp->cmng.flags.cmng_enables |=
5147 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5149 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5150 " fairness will be disabled\n");
5152 /* rate shaping and fairness are disabled */
5154 "single function mode minmax will be disabled\n");
5158 /* Store it to internal memory */
5160 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5161 REG_WR(bp, BAR_XSTRORM_INTMEM +
5162 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5163 ((u32 *)(&bp->cmng))[i]);
5166 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5168 switch (load_code) {
5169 case FW_MSG_CODE_DRV_LOAD_COMMON:
5170 bnx2x_init_internal_common(bp);
5173 case FW_MSG_CODE_DRV_LOAD_PORT:
5174 bnx2x_init_internal_port(bp);
5177 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5178 bnx2x_init_internal_func(bp);
5182 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5187 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5191 for_each_queue(bp, i) {
5192 struct bnx2x_fastpath *fp = &bp->fp[i];
5195 fp->state = BNX2X_FP_STATE_CLOSED;
5197 fp->cl_id = BP_L_ID(bp) + i;
5198 fp->sb_id = fp->cl_id;
5200 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5201 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5202 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5204 bnx2x_update_fpsb_idx(fp);
5207 /* ensure status block indices were read */
5211 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5213 bnx2x_update_dsb_idx(bp);
5214 bnx2x_update_coalesce(bp);
5215 bnx2x_init_rx_rings(bp);
5216 bnx2x_init_tx_ring(bp);
5217 bnx2x_init_sp_ring(bp);
5218 bnx2x_init_context(bp);
5219 bnx2x_init_internal(bp, load_code);
5220 bnx2x_init_ind_table(bp);
5221 bnx2x_stats_init(bp);
5223 /* At this point, we are ready for interrupts */
5224 atomic_set(&bp->intr_sem, 0);
5226 /* flush all before enabling interrupts */
5230 bnx2x_int_enable(bp);
5232 /* Check for SPIO5 */
5233 bnx2x_attn_int_deasserted0(bp,
5234 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5235 AEU_INPUTS_ATTN_BITS_SPIO5);
5238 /* end of nic init */
5241 * gzip service functions
5244 static int bnx2x_gunzip_init(struct bnx2x *bp)
5246 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5247 &bp->gunzip_mapping);
5248 if (bp->gunzip_buf == NULL)
5251 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5252 if (bp->strm == NULL)
5255 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5257 if (bp->strm->workspace == NULL)
5267 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5268 bp->gunzip_mapping);
5269 bp->gunzip_buf = NULL;
5272 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5273 " decompression\n", bp->dev->name);
5277 static void bnx2x_gunzip_end(struct bnx2x *bp)
5279 kfree(bp->strm->workspace);
5284 if (bp->gunzip_buf) {
5285 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5286 bp->gunzip_mapping);
5287 bp->gunzip_buf = NULL;
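/* Decompress a gzip-wrapped firmware blob into bp->gunzip_buf: validate
 * the gzip magic and deflate method, skip the optional FNAME field, then
 * run a raw zlib inflate (negative window bits, i.e. no zlib header).
 */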
5291 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5295 /* check gzip header */
5296 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5297 BNX2X_ERR("Bad gzip header\n");
5305 if (zbuf[3] & FNAME)
5306 while ((zbuf[n++] != 0) && (n < len));
5308 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5309 bp->strm->avail_in = len - n;
5310 bp->strm->next_out = bp->gunzip_buf;
5311 bp->strm->avail_out = FW_BUF_SIZE;
5313 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5317 rc = zlib_inflate(bp->strm, Z_FINISH);
5318 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5319 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5320 bp->dev->name, bp->strm->msg);
5322 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5323 if (bp->gunzip_outlen & 0x3)
5324 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5325 " gunzip_outlen (%d) not aligned\n",
5326 bp->dev->name, bp->gunzip_outlen);
5327 bp->gunzip_outlen >>= 2;
5329 zlib_inflateEnd(bp->strm);
5331 if (rc == Z_STREAM_END)
5337 /* nic load/unload */
5340 * General service functions
5343 /* send a NIG loopback debug packet */
5344 static void bnx2x_lb_pckt(struct bnx2x *bp)
5348 /* Ethernet source and destination addresses */
5349 wb_write[0] = 0x55555555;
5350 wb_write[1] = 0x55555555;
5351 wb_write[2] = 0x20; /* SOP */
5352 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5354 /* NON-IP protocol */
5355 wb_write[0] = 0x09000000;
5356 wb_write[1] = 0x55555555;
5357 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5358 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5361 /* Some of the internal memories
5362 * are not directly readable from the driver;
5363 * to test them we send debug packets
5365 static int bnx2x_int_mem_test(struct bnx2x *bp)
5371 if (CHIP_REV_IS_FPGA(bp))
5373 else if (CHIP_REV_IS_EMUL(bp))
5378 DP(NETIF_MSG_HW, "start part1\n");
5380 /* Disable inputs of parser neighbor blocks */
5381 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5382 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5383 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5384 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5386 /* Write 0 to parser credits for CFC search request */
5387 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5389 /* send Ethernet packet */
5392 /* TODO: do I reset the NIG statistics? */
5393 /* Wait until NIG register shows 1 packet of size 0x10 */
5394 count = 1000 * factor;
5397 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5398 val = *bnx2x_sp(bp, wb_data[0]);
5406 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5410 /* Wait until PRS register shows 1 packet */
5411 count = 1000 * factor;
5413 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5421 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5425 /* Reset and init BRB, PRS */
5426 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5428 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5430 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5431 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5433 DP(NETIF_MSG_HW, "part2\n");
5435 /* Disable inputs of parser neighbor blocks */
5436 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5437 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5438 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5439 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5441 /* Write 0 to parser credits for CFC search request */
5442 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5444 /* send 10 Ethernet packets */
5445 for (i = 0; i < 10; i++)
5448 /* Wait until NIG register shows 10 + 1
5449 packets of size 11*0x10 = 0xb0 */
5450 count = 1000 * factor;
5453 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5454 val = *bnx2x_sp(bp, wb_data[0]);
5462 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5466 /* Wait until PRS register shows 2 packets */
5467 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5469 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5471 /* Write 1 to parser credits for CFC search request */
5472 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5474 /* Wait until PRS register shows 3 packets */
5475 msleep(10 * factor);
5476 /* Wait until NIG register shows 1 packet of size 0x10 */
5477 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5479 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5481 /* clear NIG EOP FIFO */
5482 for (i = 0; i < 11; i++)
5483 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5484 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5486 BNX2X_ERR("clear of NIG failed\n");
5490 /* Reset and init BRB, PRS, NIG */
5491 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5493 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5495 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5496 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5499 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5502 /* Enable inputs of parser neighbor blocks */
5503 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5504 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5505 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5506 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5508 DP(NETIF_MSG_HW, "done\n");
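/* Unmask the per-block interrupt sources so hardware attentions get
 * reported; a few SEM/MISC masks are intentionally left commented out,
 * and PBF keeps bits 3,4 masked.
 */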
5513 static void enable_blocks_attention(struct bnx2x *bp)
5515 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5516 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5517 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5518 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5519 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5520 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5521 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5522 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5523 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5524 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5525 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5526 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5527 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5528 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5529 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5530 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5531 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5532 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5533 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5534 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5535 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5536 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5537 if (CHIP_REV_IS_FPGA(bp))
5538 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5540 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5541 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5542 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5543 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5544 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5545 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5546 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5547 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5548 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5549 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5553 static void bnx2x_reset_common(struct bnx2x *bp)
5556 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5558 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5562 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5568 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5569 SHARED_HW_CFG_FAN_FAILURE_MASK;
5571 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5575 * The fan failure mechanism is usually related to the PHY type since
5576 * the power consumption of the board is affected by the PHY. Currently,
5577 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5579 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5580 for (port = PORT_0; port < PORT_MAX; port++) {
5582 SHMEM_RD(bp, dev_info.port_hw_config[port].
5583 external_phy_config) &
5584 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5587 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5589 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5591 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5594 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5596 if (is_required == 0)
5599 /* Fan failure is indicated by SPIO 5 */
5600 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5601 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5603 /* set to active low mode */
5604 val = REG_RD(bp, MISC_REG_SPIO_INT);
5605 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5606 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5607 REG_WR(bp, MISC_REG_SPIO_INT, val);
5609 /* enable interrupt to signal the IGU */
5610 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5611 val |= (1 << MISC_REGISTERS_SPIO_5);
5612 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5615 static int bnx2x_init_common(struct bnx2x *bp)
5619 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5621 bnx2x_reset_common(bp);
5622 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5625 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5626 if (CHIP_IS_E1H(bp))
5627 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5629 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5631 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5633 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5634 if (CHIP_IS_E1(bp)) {
5635 /* enable HW interrupt from PXP on USDM overflow
5636 bit 16 on INT_MASK_0 */
5637 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5640 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5644 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5645 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5646 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5647 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5648 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5649 /* make sure this value is 0 */
5650 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5652 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5653 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5654 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5655 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5656 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5659 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5661 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5662 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5663 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5666 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5667 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5669 /* let the HW do its magic ... */
5671 /* finish PXP init */
5672 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5674 BNX2X_ERR("PXP2 CFG failed\n");
5677 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5679 BNX2X_ERR("PXP2 RD_INIT failed\n");
5683 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5684 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5686 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5688 /* clean the DMAE memory */
5690 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5692 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5693 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5694 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5695 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5697 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5698 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5699 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5700 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5702 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5703 /* soft reset pulse */
5704 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5705 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5708 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5711 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5712 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5713 if (!CHIP_REV_IS_SLOW(bp)) {
5714 /* enable hw interrupt from doorbell Q */
5715 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5718 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5719 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5720 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5722 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5723 if (CHIP_IS_E1H(bp))
5724 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5726 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5727 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5728 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5729 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5731 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5732 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5733 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5734 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5736 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5737 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5738 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5739 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5742 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5744 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5747 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5748 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5749 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5751 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5752 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5753 REG_WR(bp, i, 0xc0cac01a);
5754 /* TODO: replace with something meaningful */
5756 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5757 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5759 if (sizeof(union cdu_context) != 1024)
5760 /* we currently assume that a context is 1024 bytes */
5761 printk(KERN_ALERT PFX "please adjust the size of"
5762 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5764 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5765 val = (4 << 24) + (0 << 12) + 1024;
5766 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5767 if (CHIP_IS_E1(bp)) {
5768 /* !!! fix pxp client credit until excel update */
5769 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5770 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5773 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5774 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5775 /* enable context validation interrupt from CFC */
5776 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5778 /* set the thresholds to prevent CFC/CDU race */
5779 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5781 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5782 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5784 /* PXPCS COMMON comes here */
5785 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5786 /* Reset PCIE errors for debug */
5787 REG_WR(bp, 0x2814, 0xffffffff);
5788 REG_WR(bp, 0x3820, 0xffffffff);
5790 /* EMAC0 COMMON comes here */
5791 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5792 /* EMAC1 COMMON comes here */
5793 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5794 /* DBU COMMON comes here */
5795 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5796 /* DBG COMMON comes here */
5797 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5799 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5800 if (CHIP_IS_E1H(bp)) {
5801 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5802 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5805 if (CHIP_REV_IS_SLOW(bp))
5808 /* finish CFC init */
5809 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5811 BNX2X_ERR("CFC LL_INIT failed\n");
5814 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5816 BNX2X_ERR("CFC AC_INIT failed\n");
5819 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5821 BNX2X_ERR("CFC CAM_INIT failed\n");
5824 REG_WR(bp, CFC_REG_DEBUG0, 0);
5826 /* read the NIG statistic
5827 to see if this is the first bring-up since power-up */
5828 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5829 val = *bnx2x_sp(bp, wb_data[0]);
5831 /* do internal memory self test */
5832 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5833 BNX2X_ERR("internal mem self test failed\n");
5837 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5838 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5839 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5840 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5841 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5842 bp->port.need_hw_lock = 1;
5849 bnx2x_setup_fan_failure_detection(bp);
5851 /* clear PXP2 attentions */
5852 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5854 enable_blocks_attention(bp);
5856 if (!BP_NOMCP(bp)) {
5857 bnx2x_acquire_phy_lock(bp);
5858 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5859 bnx2x_release_phy_lock(bp);
5861 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5866 static int bnx2x_init_port(struct bnx2x *bp)
5868 int port = BP_PORT(bp);
5869 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5873 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5875 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5877 /* Port PXP comes here */
5878 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5879 /* Port PXP2 comes here */
5880 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5885 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5886 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5887 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5888 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5893 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5894 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5895 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5896 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5901 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5902 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5903 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5904 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5906 /* Port CMs come here */
5907 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5909 /* Port QM comes here */
5911 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5912 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5914 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5916 /* Port DQ comes here */
5917 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5919 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5920 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5921 /* no pause for emulation and FPGA */
5926 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5927 else if (bp->dev->mtu > 4096) {
5928 if (bp->flags & ONE_PORT_FLAG)
5932 /* (24*1024 + val*4)/256 */
5933 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5936 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5937 high = low + 56; /* 14*1024/256 */
5939 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5940 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5943 /* Port PRS comes here */
5944 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5945 /* Port TSDM comes here */
5946 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5947 /* Port CSDM comes here */
5948 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5949 /* Port USDM comes here */
5950 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5951 /* Port XSDM comes here */
5952 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5954 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5955 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5956 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5957 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5959 /* Port UPB comes here */
5960 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5961 /* Port XPB comes here */
5962 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5964 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5966 /* configure PBF to work without PAUSE mtu 9000 */
5967 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5969 /* update threshold */
5970 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5971 /* update init credit */
5972 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5975 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5977 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5980 /* tell the searcher where the T2 table is */
5981 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5983 wb_write[0] = U64_LO(bp->t2_mapping);
5984 wb_write[1] = U64_HI(bp->t2_mapping);
5985 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5986 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5987 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5988 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5990 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5991 /* Port SRCH comes here */
5993 /* Port CDU comes here */
5994 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5995 /* Port CFC comes here */
5996 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5998 if (CHIP_IS_E1(bp)) {
5999 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6000 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6002 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6004 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6005 /* init aeu_mask_attn_func_0/1:
6006 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6007 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6008 * bits 4-7 are used for "per vn group attention" */
6009 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6010 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6012 /* Port PXPCS comes here */
6013 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6014 /* Port EMAC0 comes here */
6015 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6016 /* Port EMAC1 comes here */
6017 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6018 /* Port DBU comes here */
6019 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6020 /* Port DBG comes here */
6021 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6023 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6025 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6027 if (CHIP_IS_E1H(bp)) {
6028 /* 0x2 disable e1hov, 0x1 enable */
6029 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6030 (IS_E1HMF(bp) ? 0x1 : 0x2));
6032 /* support pause requests from USDM, TSDM and BRB */
6033 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6036 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6037 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6038 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6042 /* Port MCP comes here */
6043 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6044 /* Port DMAE comes here */
6045 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6047 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6050 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6052 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6053 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6055 /* The GPIO should be swapped if the swap register is
6056 set and active */
6057 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6058 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6060 /* Select function upon port-swap configuration */
6062 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6063 aeu_gpio_mask = (swap_val && swap_override) ?
6064 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6065 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6067 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6068 aeu_gpio_mask = (swap_val && swap_override) ?
6069 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6070 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6072 val = REG_RD(bp, offset);
6073 /* add GPIO3 to group */
6074 val |= aeu_gpio_mask;
6075 REG_WR(bp, offset, val);
6079 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6081 /* add SPIO 5 to group 0 */
6083 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6084 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6085 val = REG_RD(bp, reg_addr);
6086 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6087 REG_WR(bp, reg_addr, val);
6095 bnx2x__link_reset(bp);
6100 #define ILT_PER_FUNC (768/2)
6101 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6102 /* the phys address is shifted right 12 bits and has a
6103 1=valid bit added at the 53rd bit;
6104 then, since this is a wide register(TM),
6105 we split it into two 32-bit writes
6107 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6108 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6109 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6110 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6112 #define CNIC_ILT_LINES 0
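/* Write one ILT entry: the translation entry is a wide register, so the
 * page address is split with ONCHIP_ADDR1/2 and written as two 32-bit
 * halves (E1H uses the B0 register block).
 */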
6114 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6118 if (CHIP_IS_E1H(bp))
6119 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6121 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6123 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6126 static int bnx2x_init_func(struct bnx2x *bp)
6128 int port = BP_PORT(bp);
6129 int func = BP_FUNC(bp);
6133 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6135 /* set MSI reconfigure capability */
6136 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6137 val = REG_RD(bp, addr);
6138 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6139 REG_WR(bp, addr, val);
6141 i = FUNC_ILT_BASE(func);
6143 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6144 if (CHIP_IS_E1H(bp)) {
6145 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6146 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6148 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6149 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6152 if (CHIP_IS_E1H(bp)) {
6153 for (i = 0; i < 9; i++)
6154 bnx2x_init_block(bp,
6155 cm_blocks[i], FUNC0_STAGE + func);
6157 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6158 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6161 /* HC init per function */
6162 if (CHIP_IS_E1H(bp)) {
6163 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6165 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6166 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6168 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6170 /* Reset PCIE errors for debug */
6171 REG_WR(bp, 0x2114, 0xffffffff);
6172 REG_WR(bp, 0x2120, 0xffffffff);
6177 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6181 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6182 BP_FUNC(bp), load_code);
6185 mutex_init(&bp->dmae_mutex);
6186 bnx2x_gunzip_init(bp);
6188 switch (load_code) {
6189 case FW_MSG_CODE_DRV_LOAD_COMMON:
6190 rc = bnx2x_init_common(bp);
6195 case FW_MSG_CODE_DRV_LOAD_PORT:
6197 rc = bnx2x_init_port(bp);
6202 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6204 rc = bnx2x_init_func(bp);
6210 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6214 if (!BP_NOMCP(bp)) {
6215 int func = BP_FUNC(bp);
6217 bp->fw_drv_pulse_wr_seq =
6218 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6219 DRV_PULSE_SEQ_MASK);
6220 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6221 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6222 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6226 /* this needs to be done before gunzip end */
6227 bnx2x_zero_def_sb(bp);
6228 for_each_queue(bp, i)
6229 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6232 bnx2x_gunzip_end(bp);
6237 /* send the MCP a request, block until there is a reply */
6238 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6240 int func = BP_FUNC(bp);
6241 u32 seq = ++bp->fw_seq;
6244 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6246 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6247 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6250 /* let the FW do its magic ... */
6253 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6255 /* Give the FW up to 2 second (200*10ms) */
6256 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6258 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6259 cnt*delay, rc, seq);
6261 /* is this a reply to our command? */
6262 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6263 rc &= FW_MSG_CODE_MASK;
6267 BNX2X_ERR("FW failed to respond!\n");
6275 static void bnx2x_free_mem(struct bnx2x *bp)
#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)
6299 for_each_queue(bp, i) {
6302 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6303 bnx2x_fp(bp, i, status_blk_mapping),
6304 sizeof(struct host_status_block) +
6305 sizeof(struct eth_tx_db_data));
6308 for_each_rx_queue(bp, i) {
6310 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6311 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6312 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6313 bnx2x_fp(bp, i, rx_desc_mapping),
6314 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6316 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6317 bnx2x_fp(bp, i, rx_comp_mapping),
6318 sizeof(struct eth_fast_path_rx_cqe) *
6322 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6323 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6324 bnx2x_fp(bp, i, rx_sge_mapping),
6325 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6328 for_each_tx_queue(bp, i) {
6330 /* fastpath tx rings: tx_buf tx_desc */
6331 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6332 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6333 bnx2x_fp(bp, i, tx_desc_mapping),
6334 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6336 /* end of fastpath */
6338 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6339 sizeof(struct host_def_status_block));
6341 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6342 sizeof(struct bnx2x_slowpath));
6345 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6346 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6347 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6348 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6350 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6352 #undef BNX2X_PCI_FREE
6356 static int bnx2x_alloc_mem(struct bnx2x *bp)
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
6379 for_each_queue(bp, i) {
6380 bnx2x_fp(bp, i, bp) = bp;
6383 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6384 &bnx2x_fp(bp, i, status_blk_mapping),
6385 sizeof(struct host_status_block) +
6386 sizeof(struct eth_tx_db_data));
6389 for_each_rx_queue(bp, i) {
6391 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6392 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6393 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6394 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6395 &bnx2x_fp(bp, i, rx_desc_mapping),
6396 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6398 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6399 &bnx2x_fp(bp, i, rx_comp_mapping),
6400 sizeof(struct eth_fast_path_rx_cqe) *
6404 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6405 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6406 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6407 &bnx2x_fp(bp, i, rx_sge_mapping),
6408 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6411 for_each_tx_queue(bp, i) {
6413 bnx2x_fp(bp, i, hw_tx_prods) =
6414 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6416 bnx2x_fp(bp, i, tx_prods_mapping) =
6417 bnx2x_fp(bp, i, status_blk_mapping) +
6418 sizeof(struct host_status_block);
6420 /* fastpath tx rings: tx_buf tx_desc */
6421 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6422 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6423 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6424 &bnx2x_fp(bp, i, tx_desc_mapping),
6425 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6427 /* end of fastpath */
6429 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6430 sizeof(struct host_def_status_block));
6432 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6433 sizeof(struct bnx2x_slowpath));
6436 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6439 for (i = 0; i < 64*1024; i += 64) {
6440 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6441 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6444 /* allocate searcher T2 table
6445 we allocate 1/4 of alloc num for T2
6446 (which is not entered into the ILT) */
6447 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6450 for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6453 /* now fixup the last line in the block to point to the next block */
6454 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
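	/* Each 64-byte T2 line carries, at offset 56, the physical address
	 * of the next line, so the table forms a singly linked list; the
	 * write above closes the list back onto the first line. */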
6456 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6457 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6459 /* QM queues (128*MAX_CONN) */
6460 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6463 /* Slow path ring */
6464 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6472 #undef BNX2X_PCI_ALLOC
6476 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6480 for_each_tx_queue(bp, i) {
6481 struct bnx2x_fastpath *fp = &bp->fp[i];
6483 u16 bd_cons = fp->tx_bd_cons;
6484 u16 sw_prod = fp->tx_pkt_prod;
6485 u16 sw_cons = fp->tx_pkt_cons;
6487 while (sw_cons != sw_prod) {
6488 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6494 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6498 for_each_rx_queue(bp, j) {
6499 struct bnx2x_fastpath *fp = &bp->fp[j];
6501 for (i = 0; i < NUM_RX_BD; i++) {
6502 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6503 struct sk_buff *skb = rx_buf->skb;
6508 pci_unmap_single(bp->pdev,
6509 pci_unmap_addr(rx_buf, mapping),
6510 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6515 if (!fp->disable_tpa)
6516 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6517 ETH_MAX_AGGREGATION_QUEUES_E1 :
6518 ETH_MAX_AGGREGATION_QUEUES_E1H);
6522 static void bnx2x_free_skbs(struct bnx2x *bp)
6524 bnx2x_free_tx_skbs(bp);
6525 bnx2x_free_rx_skbs(bp);
6528 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6532 free_irq(bp->msix_table[0].vector, bp->dev);
6533 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6534 bp->msix_table[0].vector);
6536 for_each_queue(bp, i) {
6537 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6538 "state %x\n", i, bp->msix_table[i + offset].vector,
6539 bnx2x_fp(bp, i, state));
6541 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6545 static void bnx2x_free_irq(struct bnx2x *bp)
6547 if (bp->flags & USING_MSIX_FLAG) {
6548 bnx2x_free_msix_irqs(bp);
6549 pci_disable_msix(bp->pdev);
6550 bp->flags &= ~USING_MSIX_FLAG;
6552 } else if (bp->flags & USING_MSI_FLAG) {
6553 free_irq(bp->pdev->irq, bp->dev);
6554 pci_disable_msi(bp->pdev);
6555 bp->flags &= ~USING_MSI_FLAG;
6558 free_irq(bp->pdev->irq, bp->dev);
6561 static int bnx2x_enable_msix(struct bnx2x *bp)
6563 int i, rc, offset = 1;
6566 bp->msix_table[0].entry = igu_vec;
6567 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6569 for_each_queue(bp, i) {
6570 igu_vec = BP_L_ID(bp) + offset + i;
6571 bp->msix_table[i + offset].entry = igu_vec;
6572 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6573 "(fastpath #%u)\n", i + offset, igu_vec, i);
6576 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6577 BNX2X_NUM_QUEUES(bp) + offset);
6579 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6583 bp->flags |= USING_MSIX_FLAG;
6588 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6590 int i, rc, offset = 1;
6592 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6593 bp->dev->name, bp->dev);
6595 BNX2X_ERR("request sp irq failed\n");
6599 for_each_queue(bp, i) {
6600 struct bnx2x_fastpath *fp = &bp->fp[i];
6602 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6603 rc = request_irq(bp->msix_table[i + offset].vector,
6604 bnx2x_msix_fp_int, 0, fp->name, fp);
6606 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6607 bnx2x_free_msix_irqs(bp);
6611 fp->state = BNX2X_FP_STATE_IRQ;
6614 i = BNX2X_NUM_QUEUES(bp);
6616 printk(KERN_INFO PFX
6617 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6618 bp->dev->name, bp->msix_table[0].vector,
6619 bp->msix_table[offset].vector,
6620 bp->msix_table[offset + i - 1].vector);
6622 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6623 bp->dev->name, bp->msix_table[0].vector,
6624 bp->msix_table[offset + i - 1].vector);
6629 static int bnx2x_enable_msi(struct bnx2x *bp)
6633 rc = pci_enable_msi(bp->pdev);
6635 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6638 bp->flags |= USING_MSI_FLAG;
6643 static int bnx2x_req_irq(struct bnx2x *bp)
6645 unsigned long flags;
6648 if (bp->flags & USING_MSI_FLAG)
6651 flags = IRQF_SHARED;
6653 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6654 bp->dev->name, bp->dev);
6656 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6661 static void bnx2x_napi_enable(struct bnx2x *bp)
6665 for_each_rx_queue(bp, i)
6666 napi_enable(&bnx2x_fp(bp, i, napi));
6669 static void bnx2x_napi_disable(struct bnx2x *bp)
6673 for_each_rx_queue(bp, i)
6674 napi_disable(&bnx2x_fp(bp, i, napi));
6677 static void bnx2x_netif_start(struct bnx2x *bp)
6681 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6682 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6685 if (netif_running(bp->dev)) {
6686 bnx2x_napi_enable(bp);
6687 bnx2x_int_enable(bp);
6688 if (bp->state == BNX2X_STATE_OPEN)
6689 netif_tx_wake_all_queues(bp->dev);
6694 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6696 bnx2x_int_disable_sync(bp, disable_hw);
6697 bnx2x_napi_disable(bp);
6698 netif_tx_disable(bp->dev);
6699 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6703 * Init service functions
6706 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6708 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6709 int port = BP_PORT(bp);
	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
6715 config->hdr.length = 2;
6716 config->hdr.offset = port ? 32 : 0;
6717 config->hdr.client_id = bp->fp->cl_id;
6718 config->hdr.reserved1 = 0;
6721 config->config_table[0].cam_entry.msb_mac_addr =
6722 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6723 config->config_table[0].cam_entry.middle_mac_addr =
6724 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6725 config->config_table[0].cam_entry.lsb_mac_addr =
6726 swab16(*(u16 *)&bp->dev->dev_addr[4]);
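	/* Illustrative: on a little-endian host, MAC 00:11:22:33:44:55
	 * loads as msb 0x0011, middle 0x2233, lsb 0x4455 - i.e. the CAM
	 * receives the address as big-endian 16-bit words. */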
6727 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
6732 config->config_table[0].target_table_entry.client_id = 0;
6733 config->config_table[0].target_table_entry.vlan_id = 0;
6735 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6736 (set ? "setting" : "clearing"),
6737 config->config_table[0].cam_entry.msb_mac_addr,
6738 config->config_table[0].cam_entry.middle_mac_addr,
6739 config->config_table[0].cam_entry.lsb_mac_addr);
6742 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6743 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6744 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6745 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
6751 config->config_table[1].target_table_entry.client_id = 0;
6752 config->config_table[1].target_table_entry.vlan_id = 0;
6754 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6755 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6756 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6759 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6761 struct mac_configuration_cmd_e1h *config =
6762 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6764 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6765 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
6773 config->hdr.length = 1;
6774 config->hdr.offset = BP_FUNC(bp);
6775 config->hdr.client_id = bp->fp->cl_id;
6776 config->hdr.reserved1 = 0;
6779 config->config_table[0].msb_mac_addr =
6780 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6781 config->config_table[0].middle_mac_addr =
6782 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6783 config->config_table[0].lsb_mac_addr =
6784 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6785 config->config_table[0].client_id = BP_L_ID(bp);
6786 config->config_table[0].vlan_id = 0;
6787 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6794 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6795 (set ? "setting" : "clearing"),
6796 config->config_table[0].msb_mac_addr,
6797 config->config_table[0].middle_mac_addr,
6798 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6800 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6801 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6802 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6805 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6806 int *state_p, int poll)
6808 /* can take a while if any port is running */
6811 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6812 poll ? "polling" : "waiting", state, idx);
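	/* In polling mode the ramrod completion is not delivered by
	 * interrupt, so reap it by polling the default rx queue (and,
	 * for idx != 0, the requested queue as well). */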
6817 bnx2x_rx_int(bp->fp, 10);
		/* if index is different from 0
		 * the reply for some commands will
		 * be on the non-default queue
		 */
		if (idx)
			bnx2x_rx_int(&bp->fp[idx], 10);
6826 mb(); /* state is changed by bnx2x_sp_event() */
6827 if (*state_p == state) {
6828 #ifdef BNX2X_STOP_ON_ERROR
6829 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6838 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6839 poll ? "polling" : "waiting", state, idx);
6840 #ifdef BNX2X_STOP_ON_ERROR
6847 static int bnx2x_setup_leading(struct bnx2x *bp)
6851 /* reset IGU state */
6852 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6855 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6857 /* Wait for completion */
6858 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6863 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6865 struct bnx2x_fastpath *fp = &bp->fp[index];
6867 /* reset IGU state */
6868 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6871 fp->state = BNX2X_FP_STATE_OPENING;
6872 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6875 /* Wait for completion */
6876 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6880 static int bnx2x_poll(struct napi_struct *napi, int budget);
6882 static void bnx2x_set_int_mode(struct bnx2x *bp)
6890 bp->num_rx_queues = num_queues;
6891 bp->num_tx_queues = num_queues;
6893 "set number of queues to %d\n", num_queues);
6898 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6899 num_queues = min_t(u32, num_online_cpus(),
6900 BNX2X_MAX_QUEUES(bp));
6903 bp->num_rx_queues = num_queues;
6904 bp->num_tx_queues = num_queues;
6905 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6906 " number of tx queues to %d\n",
6907 bp->num_rx_queues, bp->num_tx_queues);
	/* if we can't use MSI-X we only need one fp,
	 * so try to enable MSI-X with the requested number of fp's
	 * and fall back to MSI or legacy INTx with one fp
	 */
6912 if (bnx2x_enable_msix(bp)) {
6913 /* failed to enable MSI-X */
6915 bp->num_rx_queues = num_queues;
6916 bp->num_tx_queues = num_queues;
6918 BNX2X_ERR("Multi requested but failed to "
6919 "enable MSI-X set number of "
6920 "queues to %d\n", num_queues);
6924 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6927 static void bnx2x_set_rx_mode(struct net_device *dev);
6929 /* must be called with rtnl_lock */
6930 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6934 #ifdef BNX2X_STOP_ON_ERROR
6935 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6936 if (unlikely(bp->panic))
6940 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6942 bnx2x_set_int_mode(bp);
6944 if (bnx2x_alloc_mem(bp))
6947 for_each_rx_queue(bp, i)
6948 bnx2x_fp(bp, i, disable_tpa) =
6949 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6951 for_each_rx_queue(bp, i)
6952 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6955 #ifdef BNX2X_STOP_ON_ERROR
6956 for_each_rx_queue(bp, i) {
6957 struct bnx2x_fastpath *fp = &bp->fp[i];
6959 fp->poll_no_work = 0;
6961 fp->poll_max_calls = 0;
6962 fp->poll_complete = 0;
6966 bnx2x_napi_enable(bp);
6968 if (bp->flags & USING_MSIX_FLAG) {
6969 rc = bnx2x_req_msix_irqs(bp);
6971 pci_disable_msix(bp->pdev);
6975 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6976 bnx2x_enable_msi(bp);
6978 rc = bnx2x_req_irq(bp);
6980 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6981 if (bp->flags & USING_MSI_FLAG)
6982 pci_disable_msi(bp->pdev);
6985 if (bp->flags & USING_MSI_FLAG) {
6986 bp->dev->irq = bp->pdev->irq;
6987 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6988 bp->dev->name, bp->pdev->irq);
	/* Send LOAD_REQUEST command to MCP.
	   Returns the type of LOAD command:
	   if it is the first port to be initialized,
	   common blocks should be initialized, otherwise - not
	 */
6997 if (!BP_NOMCP(bp)) {
6998 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7000 BNX2X_ERR("MCP response failure, aborting\n");
7004 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7005 rc = -EBUSY; /* other port in diagnostic mode */
7010 int port = BP_PORT(bp);
7012 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7013 load_count[0], load_count[1], load_count[2]);
7015 load_count[1 + port]++;
7016 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7017 load_count[0], load_count[1], load_count[2]);
7018 if (load_count[0] == 1)
7019 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7020 else if (load_count[1 + port] == 1)
7021 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7023 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
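		/* With no MCP to arbitrate, load_count[] decides who does
		 * what: the very first function to load initializes the
		 * common blocks, the first function on a port initializes
		 * that port, and everyone else does function init only. */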
7026 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7027 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7031 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7034 rc = bnx2x_init_hw(bp, load_code);
7036 BNX2X_ERR("HW init failed, aborting\n");
7040 /* Setup NIC internals and enable interrupts */
7041 bnx2x_nic_init(bp, load_code);
7043 /* Send LOAD_DONE command to MCP */
7044 if (!BP_NOMCP(bp)) {
7045 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7047 BNX2X_ERR("MCP response failure, aborting\n");
7053 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7055 rc = bnx2x_setup_leading(bp);
7057 BNX2X_ERR("Setup leading failed!\n");
7061 if (CHIP_IS_E1H(bp))
7062 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7063 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7064 bp->state = BNX2X_STATE_DISABLED;
7067 if (bp->state == BNX2X_STATE_OPEN)
7068 for_each_nondefault_queue(bp, i) {
7069 rc = bnx2x_setup_multi(bp, i);
7075 bnx2x_set_mac_addr_e1(bp, 1);
7077 bnx2x_set_mac_addr_e1h(bp, 1);
7080 bnx2x_initial_phy_init(bp, load_mode);
7082 /* Start fast path */
7083 switch (load_mode) {
		/* Tx queue should only be re-enabled */
7086 netif_tx_wake_all_queues(bp->dev);
7087 /* Initialize the receive filter. */
7088 bnx2x_set_rx_mode(bp->dev);
7092 netif_tx_start_all_queues(bp->dev);
7093 /* Initialize the receive filter. */
7094 bnx2x_set_rx_mode(bp->dev);
7098 /* Initialize the receive filter. */
7099 bnx2x_set_rx_mode(bp->dev);
7100 bp->state = BNX2X_STATE_DIAG;
7108 bnx2x__link_status_update(bp);
7110 /* start the timer */
7111 mod_timer(&bp->timer, jiffies + bp->current_interval);
7117 bnx2x_int_disable_sync(bp, 1);
7118 if (!BP_NOMCP(bp)) {
7119 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7120 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7123 /* Free SKBs, SGEs, TPA pool and driver internals */
7124 bnx2x_free_skbs(bp);
7125 for_each_rx_queue(bp, i)
7126 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7131 bnx2x_napi_disable(bp);
7132 for_each_rx_queue(bp, i)
7133 netif_napi_del(&bnx2x_fp(bp, i, napi));
7139 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7141 struct bnx2x_fastpath *fp = &bp->fp[index];
7144 /* halt the connection */
7145 fp->state = BNX2X_FP_STATE_HALTING;
7146 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7148 /* Wait for completion */
7149 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7151 if (rc) /* timeout */
7154 /* delete cfc entry */
7155 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7157 /* Wait for completion */
7158 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7163 static int bnx2x_stop_leading(struct bnx2x *bp)
7165 __le16 dsb_sp_prod_idx;
7166 /* if the other port is handling traffic,
7167 this can take a lot of time */
7173 /* Send HALT ramrod */
7174 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7175 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7177 /* Wait for completion */
7178 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7179 &(bp->fp[0].state), 1);
7180 if (rc) /* timeout */
7183 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7185 /* Send PORT_DELETE ramrod */
7186 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
	/* Wait for completion to arrive on default status block;
	   we are going to reset the chip anyway,
	   so there is not much to do if this times out
	 */
7192 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7194 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7195 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7196 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7197 #ifdef BNX2X_STOP_ON_ERROR
7205 rmb(); /* Refresh the dsb_sp_prod */
7207 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7208 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7213 static void bnx2x_reset_func(struct bnx2x *bp)
7215 int port = BP_PORT(bp);
7216 int func = BP_FUNC(bp);
7220 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7221 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7224 base = FUNC_ILT_BASE(func);
7225 for (i = base; i < base + ILT_PER_FUNC; i++)
7226 bnx2x_ilt_wr(bp, i, 0);
7229 static void bnx2x_reset_port(struct bnx2x *bp)
7231 int port = BP_PORT(bp);
7234 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7236 /* Do not rcv packets to BRB */
7237 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7238 /* Do not direct rcv packets that are not for MCP to the BRB */
7239 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7240 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7243 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7246 /* Check for BRB port occupancy */
7247 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7249 DP(NETIF_MSG_IFDOWN,
7250 "BRB1 is not empty %d blocks are occupied\n", val);
7252 /* TODO: Close Doorbell port? */
7255 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7257 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7258 BP_FUNC(bp), reset_code);
7260 switch (reset_code) {
7261 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7262 bnx2x_reset_port(bp);
7263 bnx2x_reset_func(bp);
7264 bnx2x_reset_common(bp);
7267 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7268 bnx2x_reset_port(bp);
7269 bnx2x_reset_func(bp);
7272 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7273 bnx2x_reset_func(bp);
7277 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7282 /* must be called with rtnl_lock */
7283 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7285 int port = BP_PORT(bp);
7289 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7291 bp->rx_mode = BNX2X_RX_MODE_NONE;
7292 bnx2x_set_storm_rx_mode(bp);
7294 bnx2x_netif_stop(bp, 1);
7296 del_timer_sync(&bp->timer);
7297 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7298 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7299 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7304 /* Wait until tx fastpath tasks complete */
7305 for_each_tx_queue(bp, i) {
7306 struct bnx2x_fastpath *fp = &bp->fp[i];
7309 while (bnx2x_has_tx_work_unload(fp)) {
7313 BNX2X_ERR("timeout waiting for queue[%d]\n",
7315 #ifdef BNX2X_STOP_ON_ERROR
7326 /* Give HW time to discard old tx messages */
7329 if (CHIP_IS_E1(bp)) {
7330 struct mac_configuration_cmd *config =
7331 bnx2x_sp(bp, mcast_config);
7333 bnx2x_set_mac_addr_e1(bp, 0);
7335 for (i = 0; i < config->hdr.length; i++)
7336 CAM_INVALIDATE(config->config_table[i]);
7338 config->hdr.length = i;
7339 if (CHIP_REV_IS_SLOW(bp))
7340 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7342 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7343 config->hdr.client_id = bp->fp->cl_id;
7344 config->hdr.reserved1 = 0;
7346 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7347 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7348 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7351 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7353 bnx2x_set_mac_addr_e1h(bp, 0);
7355 for (i = 0; i < MC_HASH_SIZE; i++)
7356 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7358 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7361 if (unload_mode == UNLOAD_NORMAL)
7362 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7364 else if (bp->flags & NO_WOL_FLAG)
7365 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7368 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7369 u8 *mac_addr = bp->dev->dev_addr;
		/* The MAC address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
7373 u8 entry = (BP_E1HVN(bp) + 1)*8;
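		/* Each MAC_MATCH CAM entry is two 32-bit registers (8
		 * bytes), so (vn + 1) * 8 is the byte offset of entries
		 * 1-4; the low dword is written at entry + 4 below. */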
7375 val = (mac_addr[0] << 8) | mac_addr[1];
7376 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7378 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7379 (mac_addr[4] << 8) | mac_addr[5];
7380 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7382 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7385 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7387 /* Close multi and leading connections
7388 Completions for ramrods are collected in a synchronous way */
7389 for_each_nondefault_queue(bp, i)
7390 if (bnx2x_stop_multi(bp, i))
7393 rc = bnx2x_stop_leading(bp);
7395 BNX2X_ERR("Stop leading failed!\n");
7396 #ifdef BNX2X_STOP_ON_ERROR
7405 reset_code = bnx2x_fw_command(bp, reset_code);
7407 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7408 load_count[0], load_count[1], load_count[2]);
7410 load_count[1 + port]--;
7411 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7412 load_count[0], load_count[1], load_count[2]);
7413 if (load_count[0] == 0)
7414 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7415 else if (load_count[1 + port] == 0)
7416 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7418 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
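	/* Mirror of the load path: the last function on the chip tears
	 * down the common blocks, the last function on a port tears down
	 * the port, and anyone else resets only its own function. */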
7421 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7422 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7423 bnx2x__link_reset(bp);
7425 /* Reset the chip */
7426 bnx2x_reset_chip(bp, reset_code);
7428 /* Report UNLOAD_DONE to MCP */
7430 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7434 /* Free SKBs, SGEs, TPA pool and driver internals */
7435 bnx2x_free_skbs(bp);
7436 for_each_rx_queue(bp, i)
7437 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7438 for_each_rx_queue(bp, i)
7439 netif_napi_del(&bnx2x_fp(bp, i, napi));
7442 bp->state = BNX2X_STATE_CLOSED;
7444 netif_carrier_off(bp->dev);
7449 static void bnx2x_reset_task(struct work_struct *work)
7451 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7453 #ifdef BNX2X_STOP_ON_ERROR
7454 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7455 " so reset not done to allow debug dump,\n"
7456 KERN_ERR " you will need to reboot when done\n");
7462 if (!netif_running(bp->dev))
7463 goto reset_task_exit;
7465 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7466 bnx2x_nic_load(bp, LOAD_NORMAL);
7472 /* end of nic load/unload */
7477 * Init service functions
7480 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7484 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7485 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7486 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7487 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7488 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7489 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7490 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7492 BNX2X_ERR("Unsupported function index: %d\n", func);
7497 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7499 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7501 /* Flush all outstanding writes */
7504 /* Pretend to be function 0 */
7506 /* Flush the GRC transaction (in the chip) */
7507 new_val = REG_RD(bp, reg);
7509 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7514 /* From now we are in the "like-E1" mode */
7515 bnx2x_int_disable(bp);
7517 /* Flush all outstanding writes */
	/* Restore the original function settings */
7521 REG_WR(bp, reg, orig_func);
7522 new_val = REG_RD(bp, reg);
7523 if (new_val != orig_func) {
7524 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7525 orig_func, new_val);
7530 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7532 if (CHIP_IS_E1H(bp))
7533 bnx2x_undi_int_disable_e1h(bp, func);
7535 bnx2x_int_disable(bp);
7538 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7542 /* Check if there is any driver already loaded */
7543 val = REG_RD(bp, MISC_REG_UNPREPARED);
	/* Check if it is the UNDI driver
	 * UNDI driver initializes CID offset for the normal doorbell to 0x7
	 */
7548 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7549 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7551 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7553 int func = BP_FUNC(bp);
7557 /* clear the UNDI indication */
7558 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7560 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7562 /* try unload UNDI on port 0 */
7565 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7566 DRV_MSG_SEQ_NUMBER_MASK);
7567 reset_code = bnx2x_fw_command(bp, reset_code);
7569 /* if UNDI is loaded on the other port */
7570 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7572 /* send "DONE" for previous unload */
7573 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7575 /* unload UNDI on port 1 */
7578 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7579 DRV_MSG_SEQ_NUMBER_MASK);
7580 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7582 bnx2x_fw_command(bp, reset_code);
7585 /* now it's safe to release the lock */
7586 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7588 bnx2x_undi_int_disable(bp, func);
7590 /* close input traffic and wait for it */
7591 /* Do not rcv packets to BRB */
7593 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7594 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
		/* Do not direct rcv packets that are not for MCP to the BRB */
7598 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7599 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7602 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7603 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7606 /* save NIG port swap info */
7607 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7608 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7611 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7614 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7616 /* take the NIG out of reset and restore swap values */
7618 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7619 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7620 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7621 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7623 /* send unload done to the MCP */
7624 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7626 /* restore our func and fw_seq */
7629 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7630 DRV_MSG_SEQ_NUMBER_MASK);
7633 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7637 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7639 u32 val, val2, val3, val4, id;
7642 /* Get the chip revision id and number. */
7643 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7644 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7645 id = ((val & 0xffff) << 16);
7646 val = REG_RD(bp, MISC_REG_CHIP_REV);
7647 id |= ((val & 0xf) << 12);
7648 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7649 id |= ((val & 0xff) << 4);
7650 val = REG_RD(bp, MISC_REG_BOND_ID);
7652 bp->common.chip_id = id;
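	/* Illustrative: NUM 0x164e, REV 0x0, METAL 0x00 and BOND_ID 0x1
	 * would pack into a chip_id of 0x164e0001. */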
7653 bp->link_params.chip_id = bp->common.chip_id;
7654 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7656 val = (REG_RD(bp, 0x2874) & 0x55);
7657 if ((bp->common.chip_id & 0x1) ||
7658 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7659 bp->flags |= ONE_PORT_FLAG;
7660 BNX2X_DEV_INFO("single port device\n");
7663 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7664 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7665 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7666 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7667 bp->common.flash_size, bp->common.flash_size);
7669 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7670 bp->link_params.shmem_base = bp->common.shmem_base;
7671 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7673 if (!bp->common.shmem_base ||
7674 (bp->common.shmem_base < 0xA0000) ||
7675 (bp->common.shmem_base >= 0xC0000)) {
7676 BNX2X_DEV_INFO("MCP not active\n");
7677 bp->flags |= NO_MCP_FLAG;
7681 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7682 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7683 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7684 BNX2X_ERR("BAD MCP validity signature\n");
7686 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7687 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7689 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7690 SHARED_HW_CFG_LED_MODE_MASK) >>
7691 SHARED_HW_CFG_LED_MODE_SHIFT);
7693 bp->link_params.feature_config_flags = 0;
7694 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7695 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7696 bp->link_params.feature_config_flags |=
7697 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7699 bp->link_params.feature_config_flags &=
7700 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7702 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7703 bp->common.bc_ver = val;
7704 BNX2X_DEV_INFO("bc_ver %X\n", val);
7705 if (val < BNX2X_BC_VER) {
		/* for now only warn;
		 * later we might need to enforce this */
7708 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7709 " please upgrade BC\n", BNX2X_BC_VER, val);
7711 bp->link_params.feature_config_flags |=
7712 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7713 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7715 if (BP_E1HVN(bp) == 0) {
7716 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7717 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7719 /* no WOL capability for E1HVN != 0 */
7720 bp->flags |= NO_WOL_FLAG;
7722 BNX2X_DEV_INFO("%sWoL capable\n",
7723 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7725 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7726 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7727 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7728 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7730 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7731 val, val2, val3, val4);
7734 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7737 int port = BP_PORT(bp);
7740 switch (switch_cfg) {
7742 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7745 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7746 switch (ext_phy_type) {
7747 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7748 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7751 bp->port.supported |= (SUPPORTED_10baseT_Half |
7752 SUPPORTED_10baseT_Full |
7753 SUPPORTED_100baseT_Half |
7754 SUPPORTED_100baseT_Full |
7755 SUPPORTED_1000baseT_Full |
7756 SUPPORTED_2500baseX_Full |
7761 SUPPORTED_Asym_Pause);
7764 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7765 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7768 bp->port.supported |= (SUPPORTED_10baseT_Half |
7769 SUPPORTED_10baseT_Full |
7770 SUPPORTED_100baseT_Half |
7771 SUPPORTED_100baseT_Full |
7772 SUPPORTED_1000baseT_Full |
7777 SUPPORTED_Asym_Pause);
7781 BNX2X_ERR("NVRAM config error. "
7782 "BAD SerDes ext_phy_config 0x%x\n",
7783 bp->link_params.ext_phy_config);
7787 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7789 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7792 case SWITCH_CFG_10G:
7793 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7796 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7797 switch (ext_phy_type) {
7798 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7799 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7802 bp->port.supported |= (SUPPORTED_10baseT_Half |
7803 SUPPORTED_10baseT_Full |
7804 SUPPORTED_100baseT_Half |
7805 SUPPORTED_100baseT_Full |
7806 SUPPORTED_1000baseT_Full |
7807 SUPPORTED_2500baseX_Full |
7808 SUPPORTED_10000baseT_Full |
7813 SUPPORTED_Asym_Pause);
7816 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7817 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7820 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7821 SUPPORTED_1000baseT_Full |
7825 SUPPORTED_Asym_Pause);
7828 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7829 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7832 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7833 SUPPORTED_2500baseX_Full |
7834 SUPPORTED_1000baseT_Full |
7838 SUPPORTED_Asym_Pause);
7841 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7842 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7845 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7848 SUPPORTED_Asym_Pause);
7851 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7852 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7855 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7856 SUPPORTED_1000baseT_Full |
7859 SUPPORTED_Asym_Pause);
7862 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7863 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7866 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7867 SUPPORTED_1000baseT_Full |
7871 SUPPORTED_Asym_Pause);
7874 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7875 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7878 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7879 SUPPORTED_1000baseT_Full |
7883 SUPPORTED_Asym_Pause);
7886 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7887 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7890 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7894 SUPPORTED_Asym_Pause);
7897 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7898 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7901 bp->port.supported |= (SUPPORTED_10baseT_Half |
7902 SUPPORTED_10baseT_Full |
7903 SUPPORTED_100baseT_Half |
7904 SUPPORTED_100baseT_Full |
7905 SUPPORTED_1000baseT_Full |
7906 SUPPORTED_10000baseT_Full |
7910 SUPPORTED_Asym_Pause);
7913 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7914 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7915 bp->link_params.ext_phy_config);
7919 BNX2X_ERR("NVRAM config error. "
7920 "BAD XGXS ext_phy_config 0x%x\n",
7921 bp->link_params.ext_phy_config);
7925 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7927 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7932 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7933 bp->port.link_config);
7936 bp->link_params.phy_addr = bp->port.phy_addr;
7938 /* mask what we support according to speed_cap_mask */
7939 if (!(bp->link_params.speed_cap_mask &
7940 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7941 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7943 if (!(bp->link_params.speed_cap_mask &
7944 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7945 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7947 if (!(bp->link_params.speed_cap_mask &
7948 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7949 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7951 if (!(bp->link_params.speed_cap_mask &
7952 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7953 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7955 if (!(bp->link_params.speed_cap_mask &
7956 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7957 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7958 SUPPORTED_1000baseT_Full);
7960 if (!(bp->link_params.speed_cap_mask &
7961 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7962 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7964 if (!(bp->link_params.speed_cap_mask &
7965 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7966 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7968 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7971 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7973 bp->link_params.req_duplex = DUPLEX_FULL;
7975 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7976 case PORT_FEATURE_LINK_SPEED_AUTO:
7977 if (bp->port.supported & SUPPORTED_Autoneg) {
7978 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7979 bp->port.advertising = bp->port.supported;
7982 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7984 if ((ext_phy_type ==
7985 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7987 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7988 /* force 10G, no AN */
7989 bp->link_params.req_line_speed = SPEED_10000;
7990 bp->port.advertising =
7991 (ADVERTISED_10000baseT_Full |
7995 BNX2X_ERR("NVRAM config error. "
7996 "Invalid link_config 0x%x"
7997 " Autoneg not supported\n",
7998 bp->port.link_config);
8003 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8004 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8005 bp->link_params.req_line_speed = SPEED_10;
8006 bp->port.advertising = (ADVERTISED_10baseT_Full |
8009 BNX2X_ERR("NVRAM config error. "
8010 "Invalid link_config 0x%x"
8011 " speed_cap_mask 0x%x\n",
8012 bp->port.link_config,
8013 bp->link_params.speed_cap_mask);
8018 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8019 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8020 bp->link_params.req_line_speed = SPEED_10;
8021 bp->link_params.req_duplex = DUPLEX_HALF;
8022 bp->port.advertising = (ADVERTISED_10baseT_Half |
8025 BNX2X_ERR("NVRAM config error. "
8026 "Invalid link_config 0x%x"
8027 " speed_cap_mask 0x%x\n",
8028 bp->port.link_config,
8029 bp->link_params.speed_cap_mask);
8034 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8035 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8036 bp->link_params.req_line_speed = SPEED_100;
8037 bp->port.advertising = (ADVERTISED_100baseT_Full |
8040 BNX2X_ERR("NVRAM config error. "
8041 "Invalid link_config 0x%x"
8042 " speed_cap_mask 0x%x\n",
8043 bp->port.link_config,
8044 bp->link_params.speed_cap_mask);
8049 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8050 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8051 bp->link_params.req_line_speed = SPEED_100;
8052 bp->link_params.req_duplex = DUPLEX_HALF;
8053 bp->port.advertising = (ADVERTISED_100baseT_Half |
8056 BNX2X_ERR("NVRAM config error. "
8057 "Invalid link_config 0x%x"
8058 " speed_cap_mask 0x%x\n",
8059 bp->port.link_config,
8060 bp->link_params.speed_cap_mask);
8065 case PORT_FEATURE_LINK_SPEED_1G:
8066 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8067 bp->link_params.req_line_speed = SPEED_1000;
8068 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8071 BNX2X_ERR("NVRAM config error. "
8072 "Invalid link_config 0x%x"
8073 " speed_cap_mask 0x%x\n",
8074 bp->port.link_config,
8075 bp->link_params.speed_cap_mask);
8080 case PORT_FEATURE_LINK_SPEED_2_5G:
8081 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8082 bp->link_params.req_line_speed = SPEED_2500;
8083 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8086 BNX2X_ERR("NVRAM config error. "
8087 "Invalid link_config 0x%x"
8088 " speed_cap_mask 0x%x\n",
8089 bp->port.link_config,
8090 bp->link_params.speed_cap_mask);
8095 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8096 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8097 case PORT_FEATURE_LINK_SPEED_10G_KR:
8098 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8099 bp->link_params.req_line_speed = SPEED_10000;
8100 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8103 BNX2X_ERR("NVRAM config error. "
8104 "Invalid link_config 0x%x"
8105 " speed_cap_mask 0x%x\n",
8106 bp->port.link_config,
8107 bp->link_params.speed_cap_mask);
8113 BNX2X_ERR("NVRAM config error. "
8114 "BAD link speed link_config 0x%x\n",
8115 bp->port.link_config);
8116 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8117 bp->port.advertising = bp->port.supported;
8121 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8122 PORT_FEATURE_FLOW_CONTROL_MASK);
8123 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8124 !(bp->port.supported & SUPPORTED_Autoneg))
8125 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8127 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8128 " advertising 0x%x\n",
8129 bp->link_params.req_line_speed,
8130 bp->link_params.req_duplex,
8131 bp->link_params.req_flow_ctrl, bp->port.advertising);
8134 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8136 int port = BP_PORT(bp);
8141 bp->link_params.bp = bp;
8142 bp->link_params.port = port;
8144 bp->link_params.lane_config =
8145 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8146 bp->link_params.ext_phy_config =
8148 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over-current */
8150 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8151 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8152 bp->link_params.ext_phy_config &=
8153 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8154 bp->link_params.ext_phy_config |=
8155 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8156 bp->link_params.feature_config_flags |=
8157 FEATURE_CONFIG_BCM8727_NOC;
8160 bp->link_params.speed_cap_mask =
8162 dev_info.port_hw_config[port].speed_capability_mask);
8164 bp->port.link_config =
8165 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8167 /* Get the 4 lanes xgxs config rx and tx */
8168 for (i = 0; i < 2; i++) {
8170 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8171 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8172 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8175 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8176 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8177 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	/* If the device is capable of WoL, set the default state according
	   to the HW configuration */
8183 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8184 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8185 (config & PORT_FEATURE_WOL_ENABLED));
8187 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8188 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8189 bp->link_params.lane_config,
8190 bp->link_params.ext_phy_config,
8191 bp->link_params.speed_cap_mask, bp->port.link_config);
8193 bp->link_params.switch_cfg |= (bp->port.link_config &
8194 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8195 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8197 bnx2x_link_settings_requested(bp);
8199 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8200 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8201 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8202 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8203 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8204 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8205 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8206 bp->dev->dev_addr[5] = (u8)(val & 0xff);
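	/* Illustrative: mac_upper 0x0011 with mac_lower 0x22334455 yields
	 * the station address 00:11:22:33:44:55. */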
8207 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8208 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8211 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8213 int func = BP_FUNC(bp);
8217 bnx2x_get_common_hwinfo(bp);
8221 if (CHIP_IS_E1H(bp)) {
8223 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8225 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8226 FUNC_MF_CFG_E1HOV_TAG_MASK);
8227 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8231 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8233 func, bp->e1hov, bp->e1hov);
8235 BNX2X_DEV_INFO("single function mode\n");
8237 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8238 " aborting\n", func);
8244 if (!BP_NOMCP(bp)) {
8245 bnx2x_get_port_hwinfo(bp);
8247 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8248 DRV_MSG_SEQ_NUMBER_MASK);
8249 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8253 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8254 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8255 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8256 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8257 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8258 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8259 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8260 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8261 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8262 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8263 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8265 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8273 /* only supposed to happen on emulation/FPGA */
8274 BNX2X_ERR("warning random MAC workaround active\n");
8275 random_ether_addr(bp->dev->dev_addr);
8276 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8282 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8284 int func = BP_FUNC(bp);
8288 /* Disable interrupt handling until HW is initialized */
8289 atomic_set(&bp->intr_sem, 1);
8290 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8292 mutex_init(&bp->port.phy_mutex);
8294 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8295 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8297 rc = bnx2x_get_hwinfo(bp);
8299 /* need to reset chip if undi was active */
8301 bnx2x_undi_unload(bp);
8303 if (CHIP_REV_IS_FPGA(bp))
8304 printk(KERN_ERR PFX "FPGA detected\n");
8306 if (BP_NOMCP(bp) && (func == 0))
8308 "MCP disabled, must load devices in order!\n");
8310 /* Set multi queue mode */
8311 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8312 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8314 "Multi disabled since int_mode requested is not MSI-X\n");
8315 multi_mode = ETH_RSS_MODE_DISABLED;
8317 bp->multi_mode = multi_mode;
8322 bp->flags &= ~TPA_ENABLE_FLAG;
8323 bp->dev->features &= ~NETIF_F_LRO;
8325 bp->flags |= TPA_ENABLE_FLAG;
8326 bp->dev->features |= NETIF_F_LRO;
8331 bp->tx_ring_size = MAX_TX_AVAIL;
8332 bp->rx_ring_size = MAX_RX_AVAIL;
8339 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8340 bp->current_interval = (poll ? poll : timer_interval);
8342 init_timer(&bp->timer);
8343 bp->timer.expires = jiffies + bp->current_interval;
8344 bp->timer.data = (unsigned long) bp;
8345 bp->timer.function = bnx2x_timer;
8351 * ethtool service functions
8354 /* All ethtool functions called with rtnl_lock */
8356 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8358 struct bnx2x *bp = netdev_priv(dev);
8360 cmd->supported = bp->port.supported;
8361 cmd->advertising = bp->port.advertising;
8363 if (netif_carrier_ok(dev)) {
8364 cmd->speed = bp->link_vars.line_speed;
8365 cmd->duplex = bp->link_vars.duplex;
8367 cmd->speed = bp->link_params.req_line_speed;
8368 cmd->duplex = bp->link_params.req_duplex;
8373 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8374 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
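		/* the MF max-bandwidth field counts in 100Mbps units,
		 * hence the multiplication; never report a speed above
		 * what this function is allowed to use */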
8375 if (vn_max_rate < cmd->speed)
8376 cmd->speed = vn_max_rate;
8379 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8381 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8383 switch (ext_phy_type) {
8384 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8385 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8386 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8387 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8388 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8389 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8390 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8391 cmd->port = PORT_FIBRE;
8394 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8395 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8396 cmd->port = PORT_TP;
8399 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8400 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8401 bp->link_params.ext_phy_config);
8405 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8406 bp->link_params.ext_phy_config);
8410 cmd->port = PORT_TP;
8412 cmd->phy_address = bp->port.phy_addr;
8413 cmd->transceiver = XCVR_INTERNAL;
8415 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8416 cmd->autoneg = AUTONEG_ENABLE;
8418 cmd->autoneg = AUTONEG_DISABLE;
8423 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8424 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8425 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8426 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8427 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8428 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8429 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8434 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8436 struct bnx2x *bp = netdev_priv(dev);
8442 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8443 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8444 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8445 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8446 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8447 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8448 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8450 if (cmd->autoneg == AUTONEG_ENABLE) {
8451 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8452 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8456 /* advertise the requested speed and duplex if supported */
8457 cmd->advertising &= bp->port.supported;
8459 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8460 bp->link_params.req_duplex = DUPLEX_FULL;
8461 bp->port.advertising |= (ADVERTISED_Autoneg |
8464 } else { /* forced speed */
8465 /* advertise the requested speed and duplex if supported */
8466 switch (cmd->speed) {
8468 if (cmd->duplex == DUPLEX_FULL) {
8469 if (!(bp->port.supported &
8470 SUPPORTED_10baseT_Full)) {
8472 "10M full not supported\n");
8476 advertising = (ADVERTISED_10baseT_Full |
8479 if (!(bp->port.supported &
8480 SUPPORTED_10baseT_Half)) {
8482 "10M half not supported\n");
8486 advertising = (ADVERTISED_10baseT_Half |
8492 if (cmd->duplex == DUPLEX_FULL) {
8493 if (!(bp->port.supported &
8494 SUPPORTED_100baseT_Full)) {
8496 "100M full not supported\n");
8500 advertising = (ADVERTISED_100baseT_Full |
8503 if (!(bp->port.supported &
8504 SUPPORTED_100baseT_Half)) {
8506 "100M half not supported\n");
8510 advertising = (ADVERTISED_100baseT_Half |
8516 if (cmd->duplex != DUPLEX_FULL) {
8517 DP(NETIF_MSG_LINK, "1G half not supported\n");
8521 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8522 DP(NETIF_MSG_LINK, "1G full not supported\n");
8526 advertising = (ADVERTISED_1000baseT_Full |
8531 if (cmd->duplex != DUPLEX_FULL) {
8533 "2.5G half not supported\n");
8537 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8539 "2.5G full not supported\n");
8543 advertising = (ADVERTISED_2500baseX_Full |
8548 if (cmd->duplex != DUPLEX_FULL) {
8549 DP(NETIF_MSG_LINK, "10G half not supported\n");
8553 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8554 DP(NETIF_MSG_LINK, "10G full not supported\n");
8558 advertising = (ADVERTISED_10000baseT_Full |
8563 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8567 bp->link_params.req_line_speed = cmd->speed;
8568 bp->link_params.req_duplex = cmd->duplex;
8569 bp->port.advertising = advertising;
8572 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8573 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8574 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8575 bp->port.advertising);
8577 if (netif_running(dev)) {
8578 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8585 #define PHY_FW_VER_LEN 10
8587 static void bnx2x_get_drvinfo(struct net_device *dev,
8588 struct ethtool_drvinfo *info)
8590 struct bnx2x *bp = netdev_priv(dev);
8591 u8 phy_fw_ver[PHY_FW_VER_LEN];
8593 strcpy(info->driver, DRV_MODULE_NAME);
8594 strcpy(info->version, DRV_MODULE_VERSION);
8596 phy_fw_ver[0] = '\0';
8598 bnx2x_acquire_phy_lock(bp);
8599 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8600 (bp->state != BNX2X_STATE_CLOSED),
8601 phy_fw_ver, PHY_FW_VER_LEN);
8602 bnx2x_release_phy_lock(bp);
8605 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8606 (bp->common.bc_ver & 0xff0000) >> 16,
8607 (bp->common.bc_ver & 0xff00) >> 8,
8608 (bp->common.bc_ver & 0xff),
8609 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8610 strcpy(info->bus_info, pci_name(bp->pdev));
8611 info->n_stats = BNX2X_NUM_STATS;
8612 info->testinfo_len = BNX2X_NUM_TESTS;
8613 info->eedump_len = bp->common.flash_size;
8614 info->regdump_len = 0;
8617 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8618 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8620 static int bnx2x_get_regs_len(struct net_device *dev)
8622 static u32 regdump_len;
8623 struct bnx2x *bp = netdev_priv(dev);
8629 if (CHIP_IS_E1(bp)) {
8630 for (i = 0; i < REGS_COUNT; i++)
8631 if (IS_E1_ONLINE(reg_addrs[i].info))
8632 regdump_len += reg_addrs[i].size;
8634 for (i = 0; i < WREGS_COUNT_E1; i++)
8635 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8636 regdump_len += wreg_addrs_e1[i].size *
8637 (1 + wreg_addrs_e1[i].read_regs_count);
8640 for (i = 0; i < REGS_COUNT; i++)
8641 if (IS_E1H_ONLINE(reg_addrs[i].info))
8642 regdump_len += reg_addrs[i].size;
8644 for (i = 0; i < WREGS_COUNT_E1H; i++)
8645 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8646 regdump_len += wreg_addrs_e1h[i].size *
8647 (1 + wreg_addrs_e1h[i].read_regs_count);
8650 regdump_len += sizeof(struct dump_hdr);
8655 static void bnx2x_get_regs(struct net_device *dev,
8656 struct ethtool_regs *regs, void *_p)
8659 struct bnx2x *bp = netdev_priv(dev);
8660 struct dump_hdr dump_hdr = {0};
8663 memset(p, 0, regs->len);
8665 if (!netif_running(bp->dev))
8668 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8669 dump_hdr.dump_sign = dump_sign_all;
8670 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8671 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8672 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8673 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8674 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8676 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8677 p += dump_hdr.hdr_size + 1;
8679 if (CHIP_IS_E1(bp)) {
8680 for (i = 0; i < REGS_COUNT; i++)
8681 if (IS_E1_ONLINE(reg_addrs[i].info))
8682 for (j = 0; j < reg_addrs[i].size; j++)
8684 reg_addrs[i].addr + j*4);
8687 for (i = 0; i < REGS_COUNT; i++)
8688 if (IS_E1H_ONLINE(reg_addrs[i].info))
8689 for (j = 0; j < reg_addrs[i].size; j++)
8691 reg_addrs[i].addr + j*4);
8695 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8697 struct bnx2x *bp = netdev_priv(dev);
8699 if (bp->flags & NO_WOL_FLAG) {
8703 wol->supported = WAKE_MAGIC;
8705 wol->wolopts = WAKE_MAGIC;
8709 memset(&wol->sopass, 0, sizeof(wol->sopass));
8712 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8714 struct bnx2x *bp = netdev_priv(dev);
8716 if (wol->wolopts & ~WAKE_MAGIC)
8719 if (wol->wolopts & WAKE_MAGIC) {
8720 if (bp->flags & NO_WOL_FLAG)
8730 static u32 bnx2x_get_msglevel(struct net_device *dev)
8732 struct bnx2x *bp = netdev_priv(dev);
8734 return bp->msglevel;
8737 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8739 struct bnx2x *bp = netdev_priv(dev);
8741 if (capable(CAP_NET_ADMIN))
8742 bp->msglevel = level;
8745 static int bnx2x_nway_reset(struct net_device *dev)
8747 struct bnx2x *bp = netdev_priv(dev);
8752 if (netif_running(dev)) {
8753 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8761 bnx2x_get_link(struct net_device *dev)
8763 struct bnx2x *bp = netdev_priv(dev);
8765 return bp->link_vars.link_up;
8768 static int bnx2x_get_eeprom_len(struct net_device *dev)
8770 struct bnx2x *bp = netdev_priv(dev);
8772 return bp->common.flash_size;
8775 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8777 int port = BP_PORT(bp);
8781 /* adjust timeout for emulation/FPGA */
8782 count = NVRAM_TIMEOUT_COUNT;
8783 if (CHIP_REV_IS_SLOW(bp))
8786 /* request access to nvram interface */
8787 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8788 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8790 for (i = 0; i < count*10; i++) {
8791 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8792 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8798 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8799 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8806 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8808 int port = BP_PORT(bp);
8812 /* adjust timeout for emulation/FPGA */
8813 count = NVRAM_TIMEOUT_COUNT;
8814 if (CHIP_REV_IS_SLOW(bp))
8817 /* relinquish nvram interface */
8818 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8819 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8821 for (i = 0; i < count*10; i++) {
8822 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8823 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8829 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8830 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8837 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8841 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8843 /* enable both bits, even on read */
8844 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8845 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8846 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8849 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8853 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8855 /* disable both bits, even after read */
8856 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8857 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8858 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
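/* NVRAM dword access follows a simple command protocol, as used by
 * the helpers below: clear the DONE bit, program the address (and
 * data for writes), set DOIT in the command register, then poll
 * DONE until the operation completes or the (CHIP_REV-adjusted)
 * timeout expires.
 */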
8861 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8867 /* build the command word */
8868 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8870 /* need to clear DONE bit separately */
8871 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8873 /* address of the NVRAM to read from */
8874 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8875 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8877 /* issue a read command */
8878 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8880 /* adjust timeout for emulation/FPGA */
8881 count = NVRAM_TIMEOUT_COUNT;
8882 if (CHIP_REV_IS_SLOW(bp))
8885 /* wait for completion */
8888 for (i = 0; i < count; i++) {
8890 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8892 if (val & MCPR_NVM_COMMAND_DONE) {
8893 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8894 /* we read nvram data in cpu order,
8895 * but ethtool sees it as an array of bytes;
8896 * converting to big-endian does the work */
8897 *ret_val = cpu_to_be32(val);
8906 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8913 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8915 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8920 if (offset + buf_size > bp->common.flash_size) {
8921 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8922 " buf_size (0x%x) > flash_size (0x%x)\n",
8923 offset, buf_size, bp->common.flash_size);
8927 /* request access to nvram interface */
8928 rc = bnx2x_acquire_nvram_lock(bp);
8932 /* enable access to nvram interface */
8933 bnx2x_enable_nvram_access(bp);
8935 /* read the first word(s) */
8936 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8937 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8938 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8939 memcpy(ret_buf, &val, 4);
8941 /* advance to the next dword */
8942 offset += sizeof(u32);
8943 ret_buf += sizeof(u32);
8944 buf_size -= sizeof(u32);
8949 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8950 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8951 memcpy(ret_buf, &val, 4);
8954 /* disable access to nvram interface */
8955 bnx2x_disable_nvram_access(bp);
8956 bnx2x_release_nvram_lock(bp);
8961 static int bnx2x_get_eeprom(struct net_device *dev,
8962 struct ethtool_eeprom *eeprom, u8 *eebuf)
8964 struct bnx2x *bp = netdev_priv(dev);
8967 if (!netif_running(dev))
8970 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8971 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8972 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8973 eeprom->len, eeprom->len);
8975 /* parameters already validated in ethtool_get_eeprom */
8977 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8982 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8987 /* build the command word */
8988 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8990 /* need to clear DONE bit separately */
8991 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8993 /* write the data */
8994 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8996 /* address of the NVRAM to write to */
8997 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8998 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9000 /* issue the write command */
9001 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9003 /* adjust timeout for emulation/FPGA */
9004 count = NVRAM_TIMEOUT_COUNT;
9005 if (CHIP_REV_IS_SLOW(bp))
9008 /* wait for completion */
9010 for (i = 0; i < count; i++) {
9012 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9013 if (val & MCPR_NVM_COMMAND_DONE) {
9022 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
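/* BYTE_OFFSET converts a byte address into a bit shift within the
 * containing dword, e.g. offset 0x102 -> byte lane 2 -> shift of 16.
 */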
9024 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9032 if (offset + buf_size > bp->common.flash_size) {
9033 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9034 " buf_size (0x%x) > flash_size (0x%x)\n",
9035 offset, buf_size, bp->common.flash_size);
9039 /* request access to nvram interface */
9040 rc = bnx2x_acquire_nvram_lock(bp);
9044 /* enable access to nvram interface */
9045 bnx2x_enable_nvram_access(bp);
9047 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9048 align_offset = (offset & ~0x03);
9049 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9052 val &= ~(0xff << BYTE_OFFSET(offset));
9053 val |= (*data_buf << BYTE_OFFSET(offset));
9055 /* nvram data is returned as an array of bytes;
9056 * convert it back to cpu order */
9057 val = be32_to_cpu(val);
9059 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9063 /* disable access to nvram interface */
9064 bnx2x_disable_nvram_access(bp);
9065 bnx2x_release_nvram_lock(bp);
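/* Multi-dword writes must respect the NVRAM page layout: in the loop
 * below, FIRST is set on the first dword of a page and LAST on the
 * final dword of the buffer or of a page (offsets are checked against
 * NVRAM_PAGE_SIZE), so each page is programmed as one burst.
 */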
9070 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9078 if (buf_size == 1) /* ethtool */
9079 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9081 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9083 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9088 if (offset + buf_size > bp->common.flash_size) {
9089 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9090 " buf_size (0x%x) > flash_size (0x%x)\n",
9091 offset, buf_size, bp->common.flash_size);
9095 /* request access to nvram interface */
9096 rc = bnx2x_acquire_nvram_lock(bp);
9100 /* enable access to nvram interface */
9101 bnx2x_enable_nvram_access(bp);
9104 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9105 while ((written_so_far < buf_size) && (rc == 0)) {
9106 if (written_so_far == (buf_size - sizeof(u32)))
9107 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9108 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9109 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9110 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9111 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9113 memcpy(&val, data_buf, 4);
9115 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9117 /* advance to the next dword */
9118 offset += sizeof(u32);
9119 data_buf += sizeof(u32);
9120 written_so_far += sizeof(u32);
9124 /* disable access to nvram interface */
9125 bnx2x_disable_nvram_access(bp);
9126 bnx2x_release_nvram_lock(bp);
9131 static int bnx2x_set_eeprom(struct net_device *dev,
9132 struct ethtool_eeprom *eeprom, u8 *eebuf)
9134 struct bnx2x *bp = netdev_priv(dev);
9137 if (!netif_running(dev))
9140 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9141 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9142 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9143 eeprom->len, eeprom->len);
9145 /* parameters already validated in ethtool_set_eeprom */
9147 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9148 if (eeprom->magic == 0x00504859)
9151 bnx2x_acquire_phy_lock(bp);
9152 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9153 bp->link_params.ext_phy_config,
9154 (bp->state != BNX2X_STATE_CLOSED),
9155 eebuf, eeprom->len);
9156 if ((bp->state == BNX2X_STATE_OPEN) ||
9157 (bp->state == BNX2X_STATE_DISABLED)) {
9158 rc |= bnx2x_link_reset(&bp->link_params,
9160 rc |= bnx2x_phy_init(&bp->link_params,
9163 bnx2x_release_phy_lock(bp);
9165 } else /* Only the PMF can access the PHY */
9168 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9173 static int bnx2x_get_coalesce(struct net_device *dev,
9174 struct ethtool_coalesce *coal)
9176 struct bnx2x *bp = netdev_priv(dev);
9178 memset(coal, 0, sizeof(struct ethtool_coalesce));
9180 coal->rx_coalesce_usecs = bp->rx_ticks;
9181 coal->tx_coalesce_usecs = bp->tx_ticks;
9186 static int bnx2x_set_coalesce(struct net_device *dev,
9187 struct ethtool_coalesce *coal)
9189 struct bnx2x *bp = netdev_priv(dev);
9191 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9192 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9193 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9195 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9196 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9197 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9199 if (netif_running(dev))
9200 bnx2x_update_coalesce(bp);
9205 static void bnx2x_get_ringparam(struct net_device *dev,
9206 struct ethtool_ringparam *ering)
9208 struct bnx2x *bp = netdev_priv(dev);
9210 ering->rx_max_pending = MAX_RX_AVAIL;
9211 ering->rx_mini_max_pending = 0;
9212 ering->rx_jumbo_max_pending = 0;
9214 ering->rx_pending = bp->rx_ring_size;
9215 ering->rx_mini_pending = 0;
9216 ering->rx_jumbo_pending = 0;
9218 ering->tx_max_pending = MAX_TX_AVAIL;
9219 ering->tx_pending = bp->tx_ring_size;
9222 static int bnx2x_set_ringparam(struct net_device *dev,
9223 struct ethtool_ringparam *ering)
9225 struct bnx2x *bp = netdev_priv(dev);
9228 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9229 (ering->tx_pending > MAX_TX_AVAIL) ||
9230 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9233 bp->rx_ring_size = ering->rx_pending;
9234 bp->tx_ring_size = ering->tx_pending;
9236 if (netif_running(dev)) {
9237 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9238 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9244 static void bnx2x_get_pauseparam(struct net_device *dev,
9245 struct ethtool_pauseparam *epause)
9247 struct bnx2x *bp = netdev_priv(dev);
9249 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9250 BNX2X_FLOW_CTRL_AUTO) &&
9251 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9253 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9254 BNX2X_FLOW_CTRL_RX);
9255 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9256 BNX2X_FLOW_CTRL_TX);
9258 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9259 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9260 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9263 static int bnx2x_set_pauseparam(struct net_device *dev,
9264 struct ethtool_pauseparam *epause)
9266 struct bnx2x *bp = netdev_priv(dev);
9271 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9272 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9273 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9275 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9277 if (epause->rx_pause)
9278 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9280 if (epause->tx_pause)
9281 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9283 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9284 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9286 if (epause->autoneg) {
9287 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9288 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9292 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9293 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9297 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9299 if (netif_running(dev)) {
9300 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9307 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9309 struct bnx2x *bp = netdev_priv(dev);
9313 /* TPA requires Rx CSUM offloading */
9314 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9315 if (!(dev->features & NETIF_F_LRO)) {
9316 dev->features |= NETIF_F_LRO;
9317 bp->flags |= TPA_ENABLE_FLAG;
9321 } else if (dev->features & NETIF_F_LRO) {
9322 dev->features &= ~NETIF_F_LRO;
9323 bp->flags &= ~TPA_ENABLE_FLAG;
9327 if (changed && netif_running(dev)) {
9328 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9329 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9335 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9337 struct bnx2x *bp = netdev_priv(dev);
9342 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9344 struct bnx2x *bp = netdev_priv(dev);
9349 /* Disable TPA when Rx CSUM is disabled; otherwise all
9350 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9352 u32 flags = ethtool_op_get_flags(dev);
9354 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9360 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9363 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9364 dev->features |= NETIF_F_TSO6;
9366 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9367 dev->features &= ~NETIF_F_TSO6;
9373 static const struct {
9374 char string[ETH_GSTRING_LEN];
9375 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9376 { "register_test (offline)" },
9377 { "memory_test (offline)" },
9378 { "loopback_test (offline)" },
9379 { "nvram_test (online)" },
9380 { "interrupt_test (online)" },
9381 { "link_test (online)" },
9382 { "idle check (online)" }
9385 static int bnx2x_self_test_count(struct net_device *dev)
9387 return BNX2X_NUM_TESTS;
9390 static int bnx2x_test_registers(struct bnx2x *bp)
9392 int idx, i, rc = -ENODEV;
9394 int port = BP_PORT(bp);
9395 static const struct {
9400 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9401 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9402 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9403 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9404 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9405 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9406 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9407 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9408 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9409 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9410 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9411 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9412 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9413 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9414 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9415 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9416 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9417 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9418 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9419 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9420 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9421 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9422 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9423 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9424 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9425 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9426 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9427 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9428 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9429 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9430 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9431 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9432 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9433 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9434 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9435 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9436 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9438 { 0xffffffff, 0, 0x00000000 }
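/* The walk below saves each register, writes the test pattern, reads
 * it back, restores the original value, and compares the result under
 * the per-register mask, so read-only or reserved bits do not cause
 * false failures.
 */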
9441 if (!netif_running(bp->dev))
9444 /* Run the test twice:
9445 first by writing 0x00000000, then by writing 0xffffffff */
9446 for (idx = 0; idx < 2; idx++) {
9453 wr_val = 0xffffffff;
9457 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9458 u32 offset, mask, save_val, val;
9460 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9461 mask = reg_tbl[i].mask;
9463 save_val = REG_RD(bp, offset);
9465 REG_WR(bp, offset, wr_val);
9466 val = REG_RD(bp, offset);
9468 /* Restore the original register's value */
9469 REG_WR(bp, offset, save_val);
9471 /* verify that the value is as expected */
9472 if ((val & mask) != (wr_val & mask))
9483 static int bnx2x_test_memory(struct bnx2x *bp)
9485 int i, j, rc = -ENODEV;
9487 static const struct {
9491 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9492 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9493 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9494 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9495 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9496 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9497 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9501 static const struct {
9507 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9508 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9509 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9510 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9511 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9512 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9514 { NULL, 0xffffffff, 0, 0 }
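/* The memory test reads every word of the listed memories (apparently
 * enough to trigger parity checking) and then verifies that no
 * unexpected bits are set in the parity status registers, using the
 * per-chip (E1 vs E1H) masks above.
 */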
9517 if (!netif_running(bp->dev))
9520 /* Go through all the memories */
9521 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9522 for (j = 0; j < mem_tbl[i].size; j++)
9523 REG_RD(bp, mem_tbl[i].offset + j*4);
9525 /* Check the parity status */
9526 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9527 val = REG_RD(bp, prty_tbl[i].offset);
9528 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9529 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9531 "%s is 0x%x\n", prty_tbl[i].name, val);
9542 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9547 while (bnx2x_link_test(bp) && cnt--)
9551 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9553 unsigned int pkt_size, num_pkts, i;
9554 struct sk_buff *skb;
9555 unsigned char *packet;
9556 struct bnx2x_fastpath *fp = &bp->fp[0];
9557 u16 tx_start_idx, tx_idx;
9558 u16 rx_start_idx, rx_idx;
9560 struct sw_tx_bd *tx_buf;
9561 struct eth_tx_bd *tx_bd;
9563 union eth_rx_cqe *cqe;
9565 struct sw_rx_bd *rx_buf;
9569 /* check the loopback mode */
9570 switch (loopback_mode) {
9571 case BNX2X_PHY_LOOPBACK:
9572 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9575 case BNX2X_MAC_LOOPBACK:
9576 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9577 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9583 /* prepare the loopback packet */
9584 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9585 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9586 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9589 goto test_loopback_exit;
9591 packet = skb_put(skb, pkt_size);
9592 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9593 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9594 for (i = ETH_HLEN; i < pkt_size; i++)
9595 packet[i] = (unsigned char) (i & 0xff);
9597 /* send the loopback packet */
9599 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9600 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9602 pkt_prod = fp->tx_pkt_prod++;
9603 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9604 tx_buf->first_bd = fp->tx_bd_prod;
9607 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9608 mapping = pci_map_single(bp->pdev, skb->data,
9609 skb_headlen(skb), PCI_DMA_TODEVICE);
9610 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9611 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9612 tx_bd->nbd = cpu_to_le16(1);
9613 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9614 tx_bd->vlan = cpu_to_le16(pkt_prod);
9615 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9616 ETH_TX_BD_FLAGS_END_BD);
9617 tx_bd->general_data = ((UNICAST_ADDRESS <<
9618 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9622 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9623 mb(); /* FW restriction: must not reorder writing nbd and packets */
9624 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9625 DOORBELL(bp, fp->index, 0);
9631 bp->dev->trans_start = jiffies;
9635 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9636 if (tx_idx != tx_start_idx + num_pkts)
9637 goto test_loopback_exit;
9639 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9640 if (rx_idx != rx_start_idx + num_pkts)
9641 goto test_loopback_exit;
9643 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9644 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9645 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9646 goto test_loopback_rx_exit;
9648 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9649 if (len != pkt_size)
9650 goto test_loopback_rx_exit;
9652 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9654 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9655 for (i = ETH_HLEN; i < pkt_size; i++)
9656 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9657 goto test_loopback_rx_exit;
9661 test_loopback_rx_exit:
9663 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9664 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9665 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9666 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9668 /* Update producers */
9669 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9673 bp->link_params.loopback_mode = LOOPBACK_NONE;
9678 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9682 if (!netif_running(bp->dev))
9683 return BNX2X_LOOPBACK_FAILED;
9685 bnx2x_netif_stop(bp, 1);
9686 bnx2x_acquire_phy_lock(bp);
9688 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9690 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9691 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9694 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9696 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9697 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9700 bnx2x_release_phy_lock(bp);
9701 bnx2x_netif_start(bp);
9706 #define CRC32_RESIDUAL 0xdebb20e3
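/* CRC32 residual check: each nvram_tbl region presumably carries its
 * own CRC32, so running ether_crc_le() over the whole region, CRC
 * included, must yield the constant 0xdebb20e3 (the standard CRC-32
 * residue) for an uncorrupted image.
 */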
9708 static int bnx2x_test_nvram(struct bnx2x *bp)
9710 static const struct {
9714 { 0, 0x14 }, /* bootstrap */
9715 { 0x14, 0xec }, /* dir */
9716 { 0x100, 0x350 }, /* manuf_info */
9717 { 0x450, 0xf0 }, /* feature_info */
9718 { 0x640, 0x64 }, /* upgrade_key_info */
9720 { 0x708, 0x70 }, /* manuf_key_info */
9724 __be32 buf[0x350 / 4];
9725 u8 *data = (u8 *)buf;
9729 rc = bnx2x_nvram_read(bp, 0, data, 4);
9731 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9732 goto test_nvram_exit;
9735 magic = be32_to_cpu(buf[0]);
9736 if (magic != 0x669955aa) {
9737 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9739 goto test_nvram_exit;
9742 for (i = 0; nvram_tbl[i].size; i++) {
9744 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9748 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9749 goto test_nvram_exit;
9752 csum = ether_crc_le(nvram_tbl[i].size, data);
9753 if (csum != CRC32_RESIDUAL) {
9755 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9757 goto test_nvram_exit;
9765 static int bnx2x_test_intr(struct bnx2x *bp)
9767 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9770 if (!netif_running(bp->dev))
9773 config->hdr.length = 0;
9775 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9777 config->hdr.offset = BP_FUNC(bp);
9778 config->hdr.client_id = bp->fp->cl_id;
9779 config->hdr.reserved1 = 0;
9781 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9782 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9783 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9785 bp->set_mac_pending++;
9786 for (i = 0; i < 10; i++) {
9787 if (!bp->set_mac_pending)
9789 msleep_interruptible(10);
9798 static void bnx2x_self_test(struct net_device *dev,
9799 struct ethtool_test *etest, u64 *buf)
9801 struct bnx2x *bp = netdev_priv(dev);
9803 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9805 if (!netif_running(dev))
9808 /* offline tests are not supported in MF mode */
9810 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9812 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9813 int port = BP_PORT(bp);
9817 /* save current value of input enable for TX port IF */
9818 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9819 /* disable input for TX port IF */
9820 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9822 link_up = bp->link_vars.link_up;
9823 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9824 bnx2x_nic_load(bp, LOAD_DIAG);
9825 /* wait until link state is restored */
9826 bnx2x_wait_for_link(bp, link_up);
9828 if (bnx2x_test_registers(bp) != 0) {
9830 etest->flags |= ETH_TEST_FL_FAILED;
9832 if (bnx2x_test_memory(bp) != 0) {
9834 etest->flags |= ETH_TEST_FL_FAILED;
9836 buf[2] = bnx2x_test_loopback(bp, link_up);
9838 etest->flags |= ETH_TEST_FL_FAILED;
9840 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9842 /* restore input for TX port IF */
9843 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9845 bnx2x_nic_load(bp, LOAD_NORMAL);
9846 /* wait until link state is restored */
9847 bnx2x_wait_for_link(bp, link_up);
9849 if (bnx2x_test_nvram(bp) != 0) {
9851 etest->flags |= ETH_TEST_FL_FAILED;
9853 if (bnx2x_test_intr(bp) != 0) {
9855 etest->flags |= ETH_TEST_FL_FAILED;
9858 if (bnx2x_link_test(bp) != 0) {
9860 etest->flags |= ETH_TEST_FL_FAILED;
9863 #ifdef BNX2X_EXTRA_DEBUG
9864 bnx2x_panic_dump(bp);
9868 static const struct {
9871 u8 string[ETH_GSTRING_LEN];
9872 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9873 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9874 { Q_STATS_OFFSET32(error_bytes_received_hi),
9875 8, "[%d]: rx_error_bytes" },
9876 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9877 8, "[%d]: rx_ucast_packets" },
9878 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9879 8, "[%d]: rx_mcast_packets" },
9880 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9881 8, "[%d]: rx_bcast_packets" },
9882 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9883 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9884 4, "[%d]: rx_phy_ip_err_discards"},
9885 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9886 4, "[%d]: rx_skb_alloc_discard" },
9887 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9889 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9890 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9891 8, "[%d]: tx_packets" }
9894 static const struct {
9898 #define STATS_FLAGS_PORT 1
9899 #define STATS_FLAGS_FUNC 2
9900 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9901 u8 string[ETH_GSTRING_LEN];
9902 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9903 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9904 8, STATS_FLAGS_BOTH, "rx_bytes" },
9905 { STATS_OFFSET32(error_bytes_received_hi),
9906 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9907 { STATS_OFFSET32(total_unicast_packets_received_hi),
9908 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9909 { STATS_OFFSET32(total_multicast_packets_received_hi),
9910 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9911 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9912 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9913 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9914 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9915 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9916 8, STATS_FLAGS_PORT, "rx_align_errors" },
9917 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9918 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9919 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9920 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9921 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9922 8, STATS_FLAGS_PORT, "rx_fragments" },
9923 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9924 8, STATS_FLAGS_PORT, "rx_jabbers" },
9925 { STATS_OFFSET32(no_buff_discard_hi),
9926 8, STATS_FLAGS_BOTH, "rx_discards" },
9927 { STATS_OFFSET32(mac_filter_discard),
9928 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9929 { STATS_OFFSET32(xxoverflow_discard),
9930 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9931 { STATS_OFFSET32(brb_drop_hi),
9932 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9933 { STATS_OFFSET32(brb_truncate_hi),
9934 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9935 { STATS_OFFSET32(pause_frames_received_hi),
9936 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9937 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9938 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9939 { STATS_OFFSET32(nig_timer_max),
9940 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9941 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9942 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9943 { STATS_OFFSET32(rx_skb_alloc_failed),
9944 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9945 { STATS_OFFSET32(hw_csum_err),
9946 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9948 { STATS_OFFSET32(total_bytes_transmitted_hi),
9949 8, STATS_FLAGS_BOTH, "tx_bytes" },
9950 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9951 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9952 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9953 8, STATS_FLAGS_BOTH, "tx_packets" },
9954 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9955 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9956 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9957 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9958 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9959 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9960 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9961 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9962 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9963 8, STATS_FLAGS_PORT, "tx_deferred" },
9964 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9965 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9966 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9967 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9968 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9969 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9970 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9971 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9972 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9973 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9974 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9975 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9976 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9977 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9978 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9979 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9980 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9981 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9982 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9983 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9984 { STATS_OFFSET32(pause_frames_sent_hi),
9985 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9988 #define IS_PORT_STAT(i) \
9989 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9990 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9991 #define IS_E1HMF_MODE_STAT(bp) \
9992 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
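/* In E1HMF (multi-function) mode only function statistics are
 * meaningful, so port statistics are filtered out of the ethtool
 * output unless BNX2X_MSG_STATS is set in the debug msglevel, which
 * forces the full set to be exposed.
 */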
9994 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9996 struct bnx2x *bp = netdev_priv(dev);
9999 switch (stringset) {
10001 if (is_multi(bp)) {
10003 for_each_queue(bp, i) {
10004 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10005 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10006 bnx2x_q_stats_arr[j].string, i);
10007 k += BNX2X_NUM_Q_STATS;
10009 if (IS_E1HMF_MODE_STAT(bp))
10011 for (j = 0; j < BNX2X_NUM_STATS; j++)
10012 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10013 bnx2x_stats_arr[j].string);
10015 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10016 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10018 strcpy(buf + j*ETH_GSTRING_LEN,
10019 bnx2x_stats_arr[i].string);
10026 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10031 static int bnx2x_get_stats_count(struct net_device *dev)
10033 struct bnx2x *bp = netdev_priv(dev);
10036 if (is_multi(bp)) {
10037 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
10038 if (!IS_E1HMF_MODE_STAT(bp))
10039 num_stats += BNX2X_NUM_STATS;
10041 if (IS_E1HMF_MODE_STAT(bp)) {
10043 for (i = 0; i < BNX2X_NUM_STATS; i++)
10044 if (IS_FUNC_STAT(i))
10047 num_stats = BNX2X_NUM_STATS;
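/* Counters are kept as pairs of 32-bit words with the high word
 * first; HILO_U64(*offset, *(offset + 1)) below reassembles them into
 * one 64-bit value, while 4-byte counters are simply widened.
 */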
10053 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10054 struct ethtool_stats *stats, u64 *buf)
10056 struct bnx2x *bp = netdev_priv(dev);
10057 u32 *hw_stats, *offset;
10060 if (is_multi(bp)) {
10062 for_each_queue(bp, i) {
10063 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10064 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10065 if (bnx2x_q_stats_arr[j].size == 0) {
10066 /* skip this counter */
10070 offset = (hw_stats +
10071 bnx2x_q_stats_arr[j].offset);
10072 if (bnx2x_q_stats_arr[j].size == 4) {
10073 /* 4-byte counter */
10074 buf[k + j] = (u64) *offset;
10077 /* 8-byte counter */
10078 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10080 k += BNX2X_NUM_Q_STATS;
10082 if (IS_E1HMF_MODE_STAT(bp))
10084 hw_stats = (u32 *)&bp->eth_stats;
10085 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10086 if (bnx2x_stats_arr[j].size == 0) {
10087 /* skip this counter */
10091 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10092 if (bnx2x_stats_arr[j].size == 4) {
10093 /* 4-byte counter */
10094 buf[k + j] = (u64) *offset;
10097 /* 8-byte counter */
10098 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10101 hw_stats = (u32 *)&bp->eth_stats;
10102 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10103 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10105 if (bnx2x_stats_arr[i].size == 0) {
10106 /* skip this counter */
10111 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10112 if (bnx2x_stats_arr[i].size == 4) {
10113 /* 4-byte counter */
10114 buf[j] = (u64) *offset;
10118 /* 8-byte counter */
10119 buf[j] = HILO_U64(*offset, *(offset + 1));
10125 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10127 struct bnx2x *bp = netdev_priv(dev);
10128 int port = BP_PORT(bp);
10131 if (!netif_running(dev))
10140 for (i = 0; i < (data * 2); i++) {
10142 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10143 bp->link_params.hw_led_mode,
10144 bp->link_params.chip_id);
10146 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10147 bp->link_params.hw_led_mode,
10148 bp->link_params.chip_id);
10150 msleep_interruptible(500);
10151 if (signal_pending(current))
10155 if (bp->link_vars.link_up)
10156 bnx2x_set_led(bp, port, LED_MODE_OPER,
10157 bp->link_vars.line_speed,
10158 bp->link_params.hw_led_mode,
10159 bp->link_params.chip_id);
10164 static struct ethtool_ops bnx2x_ethtool_ops = {
10165 .get_settings = bnx2x_get_settings,
10166 .set_settings = bnx2x_set_settings,
10167 .get_drvinfo = bnx2x_get_drvinfo,
10168 .get_regs_len = bnx2x_get_regs_len,
10169 .get_regs = bnx2x_get_regs,
10170 .get_wol = bnx2x_get_wol,
10171 .set_wol = bnx2x_set_wol,
10172 .get_msglevel = bnx2x_get_msglevel,
10173 .set_msglevel = bnx2x_set_msglevel,
10174 .nway_reset = bnx2x_nway_reset,
10175 .get_link = bnx2x_get_link,
10176 .get_eeprom_len = bnx2x_get_eeprom_len,
10177 .get_eeprom = bnx2x_get_eeprom,
10178 .set_eeprom = bnx2x_set_eeprom,
10179 .get_coalesce = bnx2x_get_coalesce,
10180 .set_coalesce = bnx2x_set_coalesce,
10181 .get_ringparam = bnx2x_get_ringparam,
10182 .set_ringparam = bnx2x_set_ringparam,
10183 .get_pauseparam = bnx2x_get_pauseparam,
10184 .set_pauseparam = bnx2x_set_pauseparam,
10185 .get_rx_csum = bnx2x_get_rx_csum,
10186 .set_rx_csum = bnx2x_set_rx_csum,
10187 .get_tx_csum = ethtool_op_get_tx_csum,
10188 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10189 .set_flags = bnx2x_set_flags,
10190 .get_flags = ethtool_op_get_flags,
10191 .get_sg = ethtool_op_get_sg,
10192 .set_sg = ethtool_op_set_sg,
10193 .get_tso = ethtool_op_get_tso,
10194 .set_tso = bnx2x_set_tso,
10195 .self_test_count = bnx2x_self_test_count,
10196 .self_test = bnx2x_self_test,
10197 .get_strings = bnx2x_get_strings,
10198 .phys_id = bnx2x_phys_id,
10199 .get_stats_count = bnx2x_get_stats_count,
10200 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10203 /* end of ethtool_ops */
10205 /****************************************************************************
10206 * General service functions
10207 ****************************************************************************/
10209 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10213 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10217 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10218 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10219 PCI_PM_CTRL_PME_STATUS));
10221 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10222 /* delay required during transition out of D3hot */
10227 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10231 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10233 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10236 /* No more memory access after this point until
10237 * device is brought back to D0.
10247 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10251 /* Tell compiler that status block fields can change */
10253 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10254 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10256 return (fp->rx_comp_cons != rx_cons_sb);
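/* Note: the consumer index from the status block is adjusted when it
 * lands exactly on the last slot of a completion queue page
 * (MAX_RCQ_DESC_CNT), since that slot serves as the "next page"
 * element and never holds a real completion.
 */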
10260 * net_device service functions
10263 static int bnx2x_poll(struct napi_struct *napi, int budget)
10265 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10267 struct bnx2x *bp = fp->bp;
10270 #ifdef BNX2X_STOP_ON_ERROR
10271 if (unlikely(bp->panic))
10275 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10276 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10277 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10279 bnx2x_update_fpsb_idx(fp);
10281 if (bnx2x_has_tx_work(fp))
10284 if (bnx2x_has_rx_work(fp)) {
10285 work_done = bnx2x_rx_int(fp, budget);
10287 /* must not complete if we consumed full budget */
10288 if (work_done >= budget)
10292 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10293 * ensure that the status block indices have actually been read
10294 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10295 * so that we won't write the "newer" value of the status block to IGU
10296 * (if there was a DMA right after BNX2X_HAS_WORK and
10297 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10298 * may be postponed to right before bnx2x_ack_sb). In this case
10299 * there will never be another interrupt until there is another update
10300 * of the status block, while there is still unhandled work.
10304 if (!BNX2X_HAS_WORK(fp)) {
10305 #ifdef BNX2X_STOP_ON_ERROR
10308 napi_complete(napi);
10310 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10311 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10312 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10313 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10321 /* we split the first BD into a headers BD and a data BD
10322 * to ease the pain of our fellow microcode engineers;
10323 * we use one mapping for both BDs.
10324 * So far this has only been observed to happen
10325 * in Other Operating Systems(TM)
10327 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10328 struct bnx2x_fastpath *fp,
10329 struct eth_tx_bd **tx_bd, u16 hlen,
10330 u16 bd_prod, int nbd)
10332 struct eth_tx_bd *h_tx_bd = *tx_bd;
10333 struct eth_tx_bd *d_tx_bd;
10334 dma_addr_t mapping;
10335 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10337 /* first fix first BD */
10338 h_tx_bd->nbd = cpu_to_le16(nbd);
10339 h_tx_bd->nbytes = cpu_to_le16(hlen);
10341 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10342 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10343 h_tx_bd->addr_lo, h_tx_bd->nbd);
10345 /* now get a new data BD
10346 * (after the pbd) and fill it */
10347 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10348 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10350 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10351 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10353 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10354 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10355 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10357 /* this marks the BD as one that has no individual mapping
10358 * the FW ignores this flag in a BD not marked start
10360 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10361 DP(NETIF_MSG_TX_QUEUED,
10362 "TSO split data size is %d (%x:%x)\n",
10363 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10365 /* update tx_bd for marking the last BD flag */
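/* bnx2x_csum_fix adjusts a partial checksum whose coverage started
 * 'fix' bytes away from the transport header: it subtracts (or adds)
 * the checksum of the skipped span, folds the result, and byte-swaps
 * it into the order expected by the parsing BD.
 */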
10371 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10374 csum = (u16) ~csum_fold(csum_sub(csum,
10375 csum_partial(t_header - fix, fix, 0)));
10378 csum = (u16) ~csum_fold(csum_add(csum,
10379 csum_partial(t_header, -fix, 0)));
10381 return swab16(csum);
10384 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10388 if (skb->ip_summed != CHECKSUM_PARTIAL)
10392 if (skb->protocol == htons(ETH_P_IPV6)) {
10394 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10395 rc |= XMIT_CSUM_TCP;
10399 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10400 rc |= XMIT_CSUM_TCP;
10404 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10407 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10413 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10414 /* check if the packet requires linearization (it is too fragmented);
10415 no need to check fragmentation if page size > 8K (there will be no
10416 violation of FW restrictions) */
10417 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10422 int first_bd_sz = 0;
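	/* Rough sketch of the FW limit checked here: no more than
	 * MAX_FETCH_BD BDs may be fetched per packet, and for LSO every
	 * wnd_size consecutive frags must carry at least lso_mss bytes.
	 * E.g. if wnd_size were 5 and lso_mss 1460, any 5 adjacent BDs
	 * summing below 1460 bytes would force skb_linearize().
	 */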
10424 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10425 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10427 if (xmit_type & XMIT_GSO) {
10428 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10429 /* Check if LSO packet needs to be copied:
10430 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10431 int wnd_size = MAX_FETCH_BD - 3;
10432 /* Number of windows to check */
10433 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10438 /* Headers length */
10439 hlen = (int)(skb_transport_header(skb) - skb->data) +
10442 /* Amount of data (w/o headers) on linear part of SKB*/
10443 first_bd_sz = skb_headlen(skb) - hlen;
10445 wnd_sum = first_bd_sz;
10447 /* Calculate the first sum - it's special */
10448 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10450 skb_shinfo(skb)->frags[frag_idx].size;
10452 /* If there is data in the linear part of the skb, check it */
10453 if (first_bd_sz > 0) {
10454 if (unlikely(wnd_sum < lso_mss)) {
10459 wnd_sum -= first_bd_sz;
10462 /* Others are easier: run through the frag list and
10463 check all windows */
10464 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10466 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10468 if (unlikely(wnd_sum < lso_mss)) {
10473 skb_shinfo(skb)->frags[wnd_idx].size;
10476 /* a non-LSO packet that is too fragmented should always be linearized */
10483 if (unlikely(to_copy))
10484 DP(NETIF_MSG_TX_QUEUED,
10485 "Linearization IS REQUIRED for %s packet. "
10486 "num_frags %d hlen %d first_bd_sz %d\n",
10487 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10488 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10494 /* called with netif_tx_lock
10495 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10496 * netif_wake_queue()
10498 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10500 struct bnx2x *bp = netdev_priv(dev);
10501 struct bnx2x_fastpath *fp;
10502 struct netdev_queue *txq;
10503 struct sw_tx_bd *tx_buf;
10504 struct eth_tx_bd *tx_bd;
10505 struct eth_tx_parse_bd *pbd = NULL;
10506 u16 pkt_prod, bd_prod;
10508 dma_addr_t mapping;
10509 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10510 int vlan_off = (bp->e1hov ? 4 : 0);
10514 #ifdef BNX2X_STOP_ON_ERROR
10515 if (unlikely(bp->panic))
10516 return NETDEV_TX_BUSY;
10519 fp_index = skb_get_queue_mapping(skb);
10520 txq = netdev_get_tx_queue(dev, fp_index);
10522 fp = &bp->fp[fp_index];
10524 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10525 fp->eth_q_stats.driver_xoff++;
10526 netif_tx_stop_queue(txq);
10527 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10528 return NETDEV_TX_BUSY;
10531 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10532 " gso type %x xmit_type %x\n",
10533 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10534 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10536 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10537 /* First, check if we need to linearize the skb (due to FW
10538 restrictions). No need to check fragmentation if page size > 8K
10539 (there will be no violation of FW restrictions) */
10540 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10541 /* Statistics of linearization */
10543 if (skb_linearize(skb) != 0) {
10544 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10545 "silently dropping this SKB\n");
10546 dev_kfree_skb_any(skb);
10547 return NETDEV_TX_OK;
10553 Please read carefully. First we use one BD which we mark as start,
10554 then for TSO or xsum we have a parsing info BD,
10555 and only then we have the rest of the TSO BDs.
10556 (don't forget to mark the last one as last,
10557 and to unmap only AFTER you write to the BD ...)
10558 And above all, all PBD sizes are in words - NOT DWORDS!
10561 pkt_prod = fp->tx_pkt_prod++;
10562 bd_prod = TX_BD(fp->tx_bd_prod);
10564 /* get a tx_buf and first BD */
10565 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10566 tx_bd = &fp->tx_desc_ring[bd_prod];
10568 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10569 tx_bd->general_data = (UNICAST_ADDRESS <<
10570 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10572 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10574 /* remember the first BD of the packet */
10575 tx_buf->first_bd = fp->tx_bd_prod;
10578 DP(NETIF_MSG_TX_QUEUED,
10579 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10580 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10583 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10584 (bp->flags & HW_VLAN_TX_FLAG)) {
10585 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10586 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10590 tx_bd->vlan = cpu_to_le16(pkt_prod);
10593 /* turn on parsing and get a BD */
10594 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10595 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10597 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10600 if (xmit_type & XMIT_CSUM) {
10601 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10603 /* for now NS flag is not used in Linux */
10605 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10606 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10608 pbd->ip_hlen = (skb_transport_header(skb) -
10609 skb_network_header(skb)) / 2;
10611 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10613 pbd->total_hlen = cpu_to_le16(hlen);
10614 hlen = hlen*2 - vlan_off;
10616 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10618 if (xmit_type & XMIT_CSUM_V4)
10619 tx_bd->bd_flags.as_bitfield |=
10620 ETH_TX_BD_FLAGS_IP_CSUM;
10622 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10624 if (xmit_type & XMIT_CSUM_TCP) {
10625 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10628 s8 fix = SKB_CS_OFF(skb); /* signed! */
10630 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10631 pbd->cs_offset = fix / 2;
10633 DP(NETIF_MSG_TX_QUEUED,
10634 "hlen %d offset %d fix %d csum before fix %x\n",
10635 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10638 /* HW bug: fixup the CSUM */
10639 pbd->tcp_pseudo_csum =
10640 bnx2x_csum_fix(skb_transport_header(skb),
10643 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10644 pbd->tcp_pseudo_csum);
10648 mapping = pci_map_single(bp->pdev, skb->data,
10649 skb_headlen(skb), PCI_DMA_TODEVICE);
10651 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10652 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10653 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10654 tx_bd->nbd = cpu_to_le16(nbd);
10655 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10657 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10658 " nbytes %d flags %x vlan %x\n",
10659 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10660 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10661 le16_to_cpu(tx_bd->vlan));
10663 if (xmit_type & XMIT_GSO) {
10665 DP(NETIF_MSG_TX_QUEUED,
10666 "TSO packet len %d hlen %d total len %d tso size %d\n",
10667 skb->len, hlen, skb_headlen(skb),
10668 skb_shinfo(skb)->gso_size);
10670 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10672 if (unlikely(skb_headlen(skb) > hlen))
10673 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10676 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10677 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10678 pbd->tcp_flags = pbd_tcp_flags(skb);
10680 if (xmit_type & XMIT_GSO_V4) {
10681 pbd->ip_id = swab16(ip_hdr(skb)->id);
10682 pbd->tcp_pseudo_csum =
10683 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10684 ip_hdr(skb)->daddr,
10685 0, IPPROTO_TCP, 0));
10688 pbd->tcp_pseudo_csum =
10689 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10690 &ipv6_hdr(skb)->daddr,
10691 0, IPPROTO_TCP, 0));
10693 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
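		/* For LSO the pseudo-header checksum is seeded with a zero
		 * length (csum_tcpudp_magic/csum_ipv6_magic above) and
		 * flagged via ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN,
		 * presumably so the FW can fill in the real length for
		 * each segment it emits.
		 */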
10696 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10697 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10699 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10700 tx_bd = &fp->tx_desc_ring[bd_prod];
10702 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10703 frag->size, PCI_DMA_TODEVICE);
10705 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10706 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10707 tx_bd->nbytes = cpu_to_le16(frag->size);
10708 tx_bd->vlan = cpu_to_le16(pkt_prod);
10709 tx_bd->bd_flags.as_bitfield = 0;
10711 DP(NETIF_MSG_TX_QUEUED,
10712 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10713 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10714 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10717 /* finally, mark this BD as the last BD */
10718 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10720 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10721 tx_bd, tx_bd->bd_flags.as_bitfield);
10723 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10725 /* now send a tx doorbell, counting the next BD
10726 * if the packet contains or ends with it
10728 if (TX_BD_POFF(bd_prod) < nbd)
10732 DP(NETIF_MSG_TX_QUEUED,
10733 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10734 " tcp_flags %x xsum %x seq %u hlen %u\n",
10735 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10736 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10737 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10739 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10742 * Make sure that the BD data is updated before updating the producer
10743 * since FW might read the BD right after the producer is updated.
10744 * This is only applicable for weak-ordered memory model archs such
10745 * as IA-64. The following barrier is also mandatory since the FW
10746 * assumes packets must have BDs.
10750 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10751 mb(); /* FW restriction: must not reorder writing nbd and packets */
10752 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10753 DOORBELL(bp, fp->index, 0);
10757 fp->tx_bd_prod += nbd;
10759 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10760 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10761 if we put Tx into XOFF state. */
10763 netif_tx_stop_queue(txq);
10764 fp->eth_q_stats.driver_xoff++;
10765 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10766 netif_tx_wake_queue(txq);
10770 return NETDEV_TX_OK;
10773 /* called with rtnl_lock */
10774 static int bnx2x_open(struct net_device *dev)
10776 struct bnx2x *bp = netdev_priv(dev);
10778 netif_carrier_off(dev);
10780 bnx2x_set_power_state(bp, PCI_D0);
10782 return bnx2x_nic_load(bp, LOAD_OPEN);
10785 /* called with rtnl_lock */
10786 static int bnx2x_close(struct net_device *dev)
10788 struct bnx2x *bp = netdev_priv(dev);
10790 /* Unload the driver, release IRQs */
10791 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10792 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10793 if (!CHIP_REV_IS_SLOW(bp))
10794 bnx2x_set_power_state(bp, PCI_D3hot);
10799 /* called with netif_tx_lock from dev_mcast.c */
10800 static void bnx2x_set_rx_mode(struct net_device *dev)
10802 struct bnx2x *bp = netdev_priv(dev);
10803 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10804 int port = BP_PORT(bp);
10806 if (bp->state != BNX2X_STATE_OPEN) {
10807 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10811 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10813 if (dev->flags & IFF_PROMISC)
10814 rx_mode = BNX2X_RX_MODE_PROMISC;
10816 else if ((dev->flags & IFF_ALLMULTI) ||
10817 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10818 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10820 else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
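			/*
			 * The top CRC32c byte picks one of 256 filter bits:
			 * e.g. crc 0xa7...... -> bit 0xa7 -> word 5, bit 7,
			 * i.e. mc_filter[5] |= (1 << 7).
			 */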
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}
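
/*
 * SIOCGMIIREG/SIOCSMIIREG are serviced with clause-45 accesses to the
 * external PHY, serialized against the link code by the port's
 * phy_mutex.
 */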
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;
		/* fallthrough */
	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		break;
	}

	return -EOPNOTSUPP;
}
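
/*
 * An MTU change on a running interface takes a full unload/load cycle:
 * RX buffer sizes are derived from the MTU only at load time.
 */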
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
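
/*
 * Netpoll path: with the IRQ line masked, drive the interrupt handler
 * synchronously so netconsole and friends can make progress.
 */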
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
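
/*
 * Probe-time PCI setup: enable the device, claim BAR0 (registers) and
 * BAR2 (doorbells), choose a DMA mask and advertise netdev features.
 */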
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}
	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}
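
	/* BAR0 is the register window, BAR2 the doorbell aperture; map
	 * only as much of BAR2 as the driver actually rings */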
	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}
	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}
/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
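
/*
 * Validate the firmware image before any of it is used: every section
 * must lie entirely inside the file, every init_ops offset must index
 * a valid op, and the embedded version must match the driver.
 */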
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
		       BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
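
/*
 * The firmware file is stored big-endian; the helpers below convert
 * whole sections to host byte order once, right after loading.
 */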
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
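
/*
 * Allocate bp->arr and fill it from the matching firmware section via
 * the given conversion helper; jump to the cleanup label on failure.
 */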
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
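
/*
 * Probe entry point: allocate the multi-queue netdev, set up the PCI
 * side and driver state, load firmware, then register with the stack.
 */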
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
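
/*
 * Best-effort teardown for the PCI error handlers: the device may be
 * unreachable, so skip the usual shutdown handshake and just quiesce
 * and free host-side resources.
 */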
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
			bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
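
/*
 * After a slot reset, re-establish contact with the MCP: re-read the
 * shared-memory base, check its validity signature and resync the
 * firmware mailbox sequence number.
 */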
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us
 * that it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
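
/*
 * The slowpath workqueue is created before PCI registration so it
 * exists before any device can queue slowpath or reset work; it is
 * torn down again if registration fails.
 */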
static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);