2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2007 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/kernel.h>
22 #include <linux/types.h>
23 #include <linux/compiler.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
27 #include <linux/init.h>
28 #include <linux/ioport.h>
29 #include <linux/pci.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/ethtool.h>
34 #include <linux/mii.h>
35 #include <linux/phy.h>
36 #include <linux/brcmphy.h>
37 #include <linux/if_vlan.h>
39 #include <linux/tcp.h>
40 #include <linux/workqueue.h>
41 #include <linux/prefetch.h>
42 #include <linux/dma-mapping.h>
44 #include <net/checksum.h>
47 #include <asm/system.h>
49 #include <asm/byteorder.h>
50 #include <asm/uaccess.h>
53 #include <asm/idprom.h>
/*
 * Build-time configuration: VLAN tag support, TSO support, and driver
 * identification strings used in log messages and module metadata.
 * NOTE(review): this extract is mangled -- each line carries a stray
 * leading line number, the #else/#endif of the VLAN conditional are
 * missing, and the body of TG3_DEF_MSG_ENABLE (after the backslash)
 * is gone.  Restore from pristine tg3.c before building.
 */
60 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
61 #define TG3_VLAN_TAG_USED 1
63 #define TG3_VLAN_TAG_USED 0
66 #define TG3_TSO_SUPPORT 1
70 #define DRV_MODULE_NAME "tg3"
71 #define PFX DRV_MODULE_NAME ": "
72 #define DRV_MODULE_VERSION "3.95"
73 #define DRV_MODULE_RELDATE "November 3, 2008"
75 #define TG3_DEF_MAC_MODE 0
76 #define TG3_DEF_RX_MODE 0
77 #define TG3_DEF_TX_MODE 0
78 #define TG3_DEF_MSG_ENABLE \
/*
 * Timing, MTU, and ring-geometry constants.
 * NOTE(review): several multi-line comments below lost their closing
 * delimiters (re-added here) and the backslash-continued defines
 * TG3_RX_RING_BYTES and TG3_TX_RING_BYTES lost their continuation
 * lines.  Restore from pristine tg3.c.
 */
88 /* length of time before we decide the hardware is borked,
89 * and dev->tx_timeout() should be called to fix the problem
 */
91 #define TG3_TX_TIMEOUT (5 * HZ)
93 /* hardware minimum and maximum for a single frame's data payload */
94 #define TG3_MIN_MTU 60
95 #define TG3_MAX_MTU(tp) \
96 ((tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) ? 9000 : 1500)
98 /* These numbers seem to be hard coded in the NIC firmware somehow.
99 * You can't change the ring sizes, but you can change where you place
100 * them in the NIC onboard memory.
 */
102 #define TG3_RX_RING_SIZE 512
103 #define TG3_DEF_RX_RING_PENDING 200
104 #define TG3_RX_JUMBO_RING_SIZE 256
105 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
107 /* Do not place this n-ring entries value into the tp struct itself,
108 * we really want to expose these constants to GCC so that modulo et
109 * al. operations are done with shifts and masks instead of with
110 * hw multiply/modulo instructions. Another solution would be to
111 * replace things like '% foo' with '& (foo - 1)'.
 */
113 #define TG3_RX_RCB_RING_SIZE(tp) \
114 ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ? 512 : 1024)
116 #define TG3_TX_RING_SIZE 512
117 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
119 #define TG3_RX_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
121 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
122 TG3_RX_JUMBO_RING_SIZE)
123 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
124 TG3_RX_RCB_RING_SIZE(tp))
125 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
127 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
129 #define RX_PKT_BUF_SZ (1536 + tp->rx_offset + 64)
130 #define RX_JUMBO_PKT_BUF_SZ (9046 + tp->rx_offset + 64)
132 /* minimum number of free TX descriptors required to wake up TX process */
133 #define TG3_TX_WAKEUP_THRESH(tp) ((tp)->tx_pending / 4)
135 /* number of ETHTOOL_GSTATS u64's */
136 #define TG3_NUM_STATS (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
138 #define TG3_NUM_TEST 6
/* Module identification banner and parameters.  tg3_debug is a bitmap of
 * netif_msg categories; -1 selects the TG3_DEF_MSG_ENABLE default.
 */
140 static char version[] __devinitdata =
141 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
143 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
144 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
145 MODULE_LICENSE("GPL");
146 MODULE_VERSION(DRV_MODULE_VERSION);
148 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
149 module_param(tg3_debug, int, 0);
150 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
/* PCI device ID table: every Tigon3 variant this driver binds to.
 * NOTE(review): the terminating all-zero entry and the closing "};"
 * are missing from this extract -- restore before building.
 */
152 static struct pci_device_id tg3_pci_tbl[] = {
153 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
154 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
155 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
156 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
157 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
158 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
159 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
160 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
161 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
162 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
163 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
164 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
165 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
166 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
167 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
168 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
169 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
170 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
171 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
172 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
173 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
174 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
175 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720)},
176 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
177 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
178 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
179 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
180 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M)},
181 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
182 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
183 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
184 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
185 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
186 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
187 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
188 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
189 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
190 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
191 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
192 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
193 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
194 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
195 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
196 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
197 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
198 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
199 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
200 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
201 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
202 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
203 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
204 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
205 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
206 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
207 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
208 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
209 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
210 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
211 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
212 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
213 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5785)},
214 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
215 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
216 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
217 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
218 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
219 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
220 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
/* Export the table so hotplug/modprobe can match devices to this module. */
224 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
/* Names reported to ethtool -S; order must match struct tg3_ethtool_stats.
 * NOTE(review): several entries (e.g. "rx_octets", "rx_fragments",
 * "tx_octets") and the closing "};" are missing from this extract.
 */
226 static const struct {
227 const char string[ETH_GSTRING_LEN];
228 } ethtool_stats_keys[TG3_NUM_STATS] = {
231 { "rx_ucast_packets" },
232 { "rx_mcast_packets" },
233 { "rx_bcast_packets" },
235 { "rx_align_errors" },
236 { "rx_xon_pause_rcvd" },
237 { "rx_xoff_pause_rcvd" },
238 { "rx_mac_ctrl_rcvd" },
239 { "rx_xoff_entered" },
240 { "rx_frame_too_long_errors" },
242 { "rx_undersize_packets" },
243 { "rx_in_length_errors" },
244 { "rx_out_length_errors" },
245 { "rx_64_or_less_octet_packets" },
246 { "rx_65_to_127_octet_packets" },
247 { "rx_128_to_255_octet_packets" },
248 { "rx_256_to_511_octet_packets" },
249 { "rx_512_to_1023_octet_packets" },
250 { "rx_1024_to_1522_octet_packets" },
251 { "rx_1523_to_2047_octet_packets" },
252 { "rx_2048_to_4095_octet_packets" },
253 { "rx_4096_to_8191_octet_packets" },
254 { "rx_8192_to_9022_octet_packets" },
261 { "tx_flow_control" },
263 { "tx_single_collisions" },
264 { "tx_mult_collisions" },
266 { "tx_excessive_collisions" },
267 { "tx_late_collisions" },
268 { "tx_collide_2times" },
269 { "tx_collide_3times" },
270 { "tx_collide_4times" },
271 { "tx_collide_5times" },
272 { "tx_collide_6times" },
273 { "tx_collide_7times" },
274 { "tx_collide_8times" },
275 { "tx_collide_9times" },
276 { "tx_collide_10times" },
277 { "tx_collide_11times" },
278 { "tx_collide_12times" },
279 { "tx_collide_13times" },
280 { "tx_collide_14times" },
281 { "tx_collide_15times" },
282 { "tx_ucast_packets" },
283 { "tx_mcast_packets" },
284 { "tx_bcast_packets" },
285 { "tx_carrier_sense_errors" },
289 { "dma_writeq_full" },
290 { "dma_write_prioq_full" },
294 { "rx_threshold_hit" },
296 { "dma_readq_full" },
297 { "dma_read_prioq_full" },
298 { "tx_comp_queue_full" },
300 { "ring_set_send_prod_index" },
301 { "ring_status_update" },
303 { "nic_avoided_irqs" },
304 { "nic_tx_threshold_hit" }
/* Names reported for ethtool self-tests; order must match the test
 * dispatch in tg3_self_test.  NOTE(review): closing "};" is missing.
 */
307 static const struct {
308 const char string[ETH_GSTRING_LEN];
309 } ethtool_test_keys[TG3_NUM_TEST] = {
310 { "nvram test (online) " },
311 { "link test (online) " },
312 { "register test (offline)" },
313 { "memory test (offline)" },
314 { "loopback test (offline)" },
315 { "interrupt test (offline)" },
318 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
320 writel(val, tp->regs + off);
323 static u32 tg3_read32(struct tg3 *tp, u32 off)
325 return (readl(tp->regs + off));
328 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
330 writel(val, tp->aperegs + off);
333 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
335 return (readl(tp->aperegs + off));
338 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
342 spin_lock_irqsave(&tp->indirect_lock, flags);
343 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
344 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
345 spin_unlock_irqrestore(&tp->indirect_lock, flags);
348 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
350 writel(val, tp->regs + off);
351 readl(tp->regs + off);
354 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
359 spin_lock_irqsave(&tp->indirect_lock, flags);
360 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
361 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
362 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Write a mailbox register indirectly via PCI config space.  The RX return
 * and RX producer mailboxes have dedicated config-space aliases; everything
 * else goes through the BASE_ADDR/DATA window at offset +0x5600.
 * NOTE(review): this extract is missing the "unsigned long flags;"
 * declaration, the early "return;" statements after the two special-case
 * writes, their closing braces, and the second operand of the final
 * interrupt-mailbox condition -- restore from pristine tg3.c.
 */
366 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
370 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
371 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
372 TG3_64BIT_REG_LOW, val);
375 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
376 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
377 TG3_64BIT_REG_LOW, val);
381 spin_lock_irqsave(&tp->indirect_lock, flags);
382 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
383 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
384 spin_unlock_irqrestore(&tp->indirect_lock, flags);
386 /* In indirect mode when disabling interrupts, we also need
387 * to clear the interrupt bit in the GRC local ctrl register.
 */
389 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
391 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
392 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
396 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
401 spin_lock_irqsave(&tp->indirect_lock, flags);
402 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
403 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
404 spin_unlock_irqrestore(&tp->indirect_lock, flags);
408 /* usec_wait specifies the wait time in usec when writing to certain registers
409 * where it is unsafe to read back the register without some delay.
410 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
411 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
/* NOTE(review): this extract is missing the "else" branch structure, the
 * udelay(usec_wait) calls, the posted-write flush read, and the closing
 * brace -- restore from pristine tg3.c before building.
 */
413 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
415 if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) ||
416 (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
417 /* Non-posted methods */
418 tp->write32(tp, off, val);
421 tg3_write32(tp, off, val);
426 /* Wait again after the read for the posted method to guarantee that
427 * the wait time is met.
 */
433 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
435 tp->write32_mbox(tp, off, val);
436 if (!(tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) &&
437 !(tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND))
438 tp->read32_mbox(tp, off);
441 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
443 void __iomem *mbox = tp->regs + off;
445 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
447 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
451 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
453 return (readl(tp->regs + off + GRCMBOX_BASE));
456 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
458 writel(val, tp->regs + off + GRCMBOX_BASE);
/* Shorthand accessors.  All dispatch through the per-chip method pointers
 * installed in struct tg3, so the same call sites work for direct,
 * indirect, and 5906-style register access.  "_f" variants flush the
 * posted write; "_wait_f" additionally delays by the given usecs.
 */
461 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
462 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
463 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
464 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
465 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
467 #define tw32(reg,val) tp->write32(tp, reg, val)
468 #define tw32_f(reg,val) _tw32_flush(tp,(reg),(val), 0)
469 #define tw32_wait_f(reg,val,us) _tw32_flush(tp,(reg),(val), (us))
470 #define tr32(reg) tp->read32(tp, reg)
/* Write a word into NIC on-board SRAM at offset @off, either through PCI
 * config space (SRAM_USE_CONFIG) or the memory window registers.  The 5906
 * stats-block range is skipped entirely.
 * NOTE(review): missing from this extract: "unsigned long flags;", the
 * early "return;" for the 5906 case, the "} else {" between the two access
 * methods, and the closing braces -- restore from pristine tg3.c.
 */
472 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
476 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
477 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
480 spin_lock_irqsave(&tp->indirect_lock, flags);
481 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
482 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
483 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
485 /* Always leave this as zero. */
486 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
488 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
489 tw32_f(TG3PCI_MEM_WIN_DATA, val);
491 /* Always leave this as zero. */
492 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
494 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* Read a word from NIC on-board SRAM at offset @off into *val, mirroring
 * tg3_write_mem's two access methods.  For the 5906 stats-block range the
 * pristine code stores 0 and returns early.
 * NOTE(review): missing from this extract: "unsigned long flags;", the
 * "*val = 0; return;" body of the 5906 case, the "} else {" between the
 * two access methods, and the closing braces -- restore from pristine tg3.c.
 */
497 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
501 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) &&
502 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
507 spin_lock_irqsave(&tp->indirect_lock, flags);
508 if (tp->tg3_flags & TG3_FLAG_SRAM_USE_CONFIG) {
509 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
510 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
512 /* Always leave this as zero. */
513 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
515 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
516 *val = tr32(TG3PCI_MEM_WIN_DATA);
518 /* Always leave this as zero. */
519 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
521 spin_unlock_irqrestore(&tp->indirect_lock, flags);
524 static void tg3_ape_lock_init(struct tg3 *tp)
528 /* Make sure the driver hasn't any stale locks. */
529 for (i = 0; i < 8; i++)
530 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + 4 * i,
531 APE_LOCK_GRANT_DRIVER);
/* Try to acquire APE lock @locknum: request it, then poll the grant
 * register for up to ~1 ms; on failure revoke the request.
 * NOTE(review): missing from this extract: the local declarations
 * (ret/off/i/status), the switch default (reject unknown locknum), the
 * "off = 4 * locknum" computation, the udelay in the poll loop, the
 * break, the -EBUSY assignment, the final return, and closing braces --
 * restore from pristine tg3.c.
 */
534 static int tg3_ape_lock(struct tg3 *tp, int locknum)
540 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
544 case TG3_APE_LOCK_GRC:
545 case TG3_APE_LOCK_MEM:
553 tg3_ape_write32(tp, TG3_APE_LOCK_REQ + off, APE_LOCK_REQ_DRIVER);
555 /* Wait for up to 1 millisecond to acquire lock. */
556 for (i = 0; i < 100; i++) {
557 status = tg3_ape_read32(tp, TG3_APE_LOCK_GRANT + off);
558 if (status == APE_LOCK_GRANT_DRIVER)
563 if (status != APE_LOCK_GRANT_DRIVER) {
564 /* Revoke the lock request. */
565 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off,
566 APE_LOCK_GRANT_DRIVER);
/* Release APE lock @locknum by writing the driver grant code.
 * NOTE(review): missing from this extract: the "int off;" declaration,
 * the switch default (return on unknown locknum), the "off = 4 * locknum"
 * computation, and closing braces -- restore from pristine tg3.c.
 */
574 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
578 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
582 case TG3_APE_LOCK_GRC:
583 case TG3_APE_LOCK_MEM:
590 tg3_ape_write32(tp, TG3_APE_LOCK_GRANT + off, APE_LOCK_GRANT_DRIVER);
593 static void tg3_disable_ints(struct tg3 *tp)
595 tw32(TG3PCI_MISC_HOST_CTRL,
596 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
597 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
600 static inline void tg3_cond_int(struct tg3 *tp)
602 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
603 (tp->hw_status->status & SD_STATUS_UPDATED))
604 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
606 tw32(HOSTCC_MODE, tp->coalesce_mode |
607 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* Re-enable chip interrupts: unmask PCI INTA and write last_tag to the
 * interrupt mailbox (twice for 1-shot MSI chips).
 * NOTE(review): missing from this extract: the tp->irq_sync = 0 reset,
 * the trailing tg3_cond_int(tp) call, and braces -- restore from
 * pristine tg3.c.
 */
610 static void tg3_enable_ints(struct tg3 *tp)
615 tw32(TG3PCI_MISC_HOST_CTRL,
616 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
617 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
618 (tp->last_tag << 24));
619 if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
620 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
621 (tp->last_tag << 24));
/* Return nonzero when the status block shows pending work: a link-change
 * event (when link changes are interrupt-driven) or new RX/TX indices.
 * NOTE(review): missing from this extract: the "work_exists = 1;"
 * assignments in both branches, the final "return work_exists;", and
 * closing braces -- restore from pristine tg3.c.
 */
625 static inline unsigned int tg3_has_work(struct tg3 *tp)
627 struct tg3_hw_status *sblk = tp->hw_status;
628 unsigned int work_exists = 0;
630 /* check for phy events */
631 if (!(tp->tg3_flags &
632 (TG3_FLAG_USE_LINKCHG_REG |
633 TG3_FLAG_POLL_SERDES))) {
634 if (sblk->status & SD_STATUS_LINK_CHG)
637 /* check for RX/TX work to do */
638 if (sblk->idx[0].tx_consumer != tp->tx_cons ||
639 sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
/*
646 * similar to tg3_enable_ints, but it accurately determines whether there
647 * is new work pending and can return without flushing the PIO write
648 * which reenables interrupts
 */
/* NOTE(review): missing from this extract: the "tp->last_tag << 24"
 * argument continuation of the mailbox write, the tg3_has_work(tp) term
 * of the final condition, and braces -- restore from pristine tg3.c.
 */
650 static void tg3_restart_ints(struct tg3 *tp)
652 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
656 /* When doing tagged status, this work check is unnecessary.
657 * The last_tag we write above tells the chip which piece of
658 * work we've completed.
 */
660 if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
662 tw32(HOSTCC_MODE, tp->coalesce_mode |
663 (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
666 static inline void tg3_netif_stop(struct tg3 *tp)
668 tp->dev->trans_start = jiffies; /* prevent tx timeout */
669 napi_disable(&tp->napi);
670 netif_tx_disable(tp->dev);
/* Restart the data path after tg3_netif_stop: wake the TX queue, re-enable
 * NAPI, and mark the status block updated so pending work is noticed.
 * NOTE(review): the pristine function ends with a tg3_cond_int(tp) call
 * and a closing brace, both missing from this extract.
 */
673 static inline void tg3_netif_start(struct tg3 *tp)
675 netif_wake_queue(tp->dev);
676 /* NOTE: unconditional netif_wake_queue is only appropriate
677 * so long as all callers are assured to have free tx slots
678 * (such as after tg3_init_hw)
 */
680 napi_enable(&tp->napi);
681 tp->hw_status->status |= SD_STATUS_UPDATED;
/* Switch the core clock to the ALTCLK source in the sequence the hardware
 * requires, preserving CLKRUN bits; no-op on CPMU and 5780-class chips.
 * NOTE(review): missing from this extract: the "u32 orig_clock_ctrl;"
 * declaration, the early "return;", the CLOCK_CTRL_* mask continuation,
 * the intermediate 625_CORE/ALTCLK step inside the 5705_PLUS branch, the
 * "} else if"/else structure, and closing braces -- restore from
 * pristine tg3.c.
 */
685 static void tg3_switch_clocks(struct tg3 *tp)
687 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
690 if ((tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
691 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
694 orig_clock_ctrl = clock_ctrl;
695 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
696 CLOCK_CTRL_CLKRUN_OENABLE |
698 tp->pci_clock_ctrl = clock_ctrl;
700 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
701 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
702 tw32_wait_f(TG3PCI_CLOCK_CTRL,
703 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
705 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
706 tw32_wait_f(TG3PCI_CLOCK_CTRL,
708 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
710 tw32_wait_f(TG3PCI_CLOCK_CTRL,
711 clock_ctrl | (CLOCK_CTRL_ALTCLK),
714 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
/* Upper bound on MI_COM busy-poll iterations for PHY register access. */
717 #define PHY_BUSY_LOOPS 5000
/* Read PHY register @reg into *val over the MDIO (MI) interface, pausing
 * hardware auto-polling for the duration and restoring it afterwards.
 * NOTE(review): missing from this extract: local declarations
 * (frame_val/loops/ret), the udelay calls, the busy-poll loop structure
 * (loops decrement, timeout handling), ret assignments, the final
 * "return ret;", and closing braces -- restore from pristine tg3.c.
 */
719 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
725 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
727 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
733 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
734 MI_COM_PHY_ADDR_MASK);
735 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
736 MI_COM_REG_ADDR_MASK);
737 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
739 tw32_f(MAC_MI_COM, frame_val);
741 loops = PHY_BUSY_LOOPS;
744 frame_val = tr32(MAC_MI_COM);
746 if ((frame_val & MI_COM_BUSY) == 0) {
748 frame_val = tr32(MAC_MI_COM);
756 *val = frame_val & MI_COM_DATA_MASK;
760 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
761 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Write @val to PHY register @reg over the MDIO (MI) interface, skipping
 * MII_TG3_CTRL/AUX_CTRL on 5906, and pausing hardware auto-polling for
 * the duration.
 * NOTE(review): missing from this extract: local declarations
 * (frame_val/loops/ret), the early "return 0;" for the 5906 case, the
 * udelay calls, the busy-poll loop structure, ret assignments, the final
 * "return ret;", and closing braces -- restore from pristine tg3.c.
 */
768 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
774 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
775 (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
778 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
780 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
784 frame_val = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
785 MI_COM_PHY_ADDR_MASK);
786 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
787 MI_COM_REG_ADDR_MASK);
788 frame_val |= (val & MI_COM_DATA_MASK);
789 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
791 tw32_f(MAC_MI_COM, frame_val);
793 loops = PHY_BUSY_LOOPS;
796 frame_val = tr32(MAC_MI_COM);
797 if ((frame_val & MI_COM_BUSY) == 0) {
799 frame_val = tr32(MAC_MI_COM);
809 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
810 tw32_f(MAC_MI_MODE, tp->mi_mode);
/* Software-reset the PHY via BMCR_RESET and poll until the bit clears.
 * NOTE(review): missing from this extract: local declarations, error
 * returns after the read/write calls, the bounded poll loop with udelay,
 * the timeout handling, the success/failure returns, and closing braces
 * -- restore from pristine tg3.c.
 */
817 static int tg3_bmcr_reset(struct tg3 *tp)
822 /* OK, reset it, and poll the BMCR_RESET bit until it
823 * clears or we time out.
 */
825 phy_control = BMCR_RESET;
826 err = tg3_writephy(tp, MII_BMCR, phy_control);
832 err = tg3_readphy(tp, MII_BMCR, &phy_control);
836 if ((phy_control & BMCR_RESET) == 0) {
/* phylib mii_bus .read hook: delegate to tg3_readphy unless the bus is
 * paused (MDIOBUS_PAUSED) during a chip reset.
 * NOTE(review): missing from this extract: the "u32 val;" declaration,
 * the -EAGAIN and error returns, the final "return val;", and closing
 * braces -- restore from pristine tg3.c.
 */
848 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
850 struct tg3 *tp = (struct tg3 *)bp->priv;
853 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
856 if (tg3_readphy(tp, reg, &val))
/* phylib mii_bus .write hook: delegate to tg3_writephy unless the bus is
 * paused (MDIOBUS_PAUSED) during a chip reset.
 * NOTE(review): missing from this extract: the -EAGAIN and error
 * returns, the final "return 0;", and closing braces -- restore from
 * pristine tg3.c.
 */
862 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
864 struct tg3 *tp = (struct tg3 *)bp->priv;
866 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_PAUSED)
869 if (tg3_writephy(tp, reg, val))
875 static int tg3_mdio_reset(struct mii_bus *bp)
/* Program the 5785 MAC's PHY-interface registers (MAC_PHYCFG1/2,
 * MAC_EXT_RGMII_MODE) to match the attached PHY type and the RGMII
 * in-band/out-of-band signalling flags.
 * NOTE(review): missing from this extract: the "break;" statements
 * between switch cases, the default case's "return;", the early return
 * at the end of the non-RGMII branch, the "val = 0;" before the
 * STD_IBND block, several "} else {" lines, and closing braces --
 * restore from pristine tg3.c.  As written, the switch cases would
 * fall through.
 */
880 static void tg3_mdio_config_5785(struct tg3 *tp)
883 struct phy_device *phydev;
885 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
886 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
887 case TG3_PHY_ID_BCM50610:
888 val = MAC_PHYCFG2_50610_LED_MODES;
890 case TG3_PHY_ID_BCMAC131:
891 val = MAC_PHYCFG2_AC131_LED_MODES;
893 case TG3_PHY_ID_RTL8211C:
894 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
896 case TG3_PHY_ID_RTL8201E:
897 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
903 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
904 tw32(MAC_PHYCFG2, val);
906 val = tr32(MAC_PHYCFG1);
907 val &= ~MAC_PHYCFG1_RGMII_INT;
908 tw32(MAC_PHYCFG1, val);
913 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE))
914 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
915 MAC_PHYCFG2_FMODE_MASK_MASK |
916 MAC_PHYCFG2_GMODE_MASK_MASK |
917 MAC_PHYCFG2_ACT_MASK_MASK |
918 MAC_PHYCFG2_QUAL_MASK_MASK |
919 MAC_PHYCFG2_INBAND_ENABLE;
921 tw32(MAC_PHYCFG2, val);
923 val = tr32(MAC_PHYCFG1) & ~(MAC_PHYCFG1_RGMII_EXT_RX_DEC |
924 MAC_PHYCFG1_RGMII_SND_STAT_EN);
925 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) {
926 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
927 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
928 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
929 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
931 tw32(MAC_PHYCFG1, val | MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV);
933 val = tr32(MAC_EXT_RGMII_MODE);
934 val &= ~(MAC_RGMII_MODE_RX_INT_B |
935 MAC_RGMII_MODE_RX_QUALITY |
936 MAC_RGMII_MODE_RX_ACTIVITY |
937 MAC_RGMII_MODE_RX_ENG_DET |
938 MAC_RGMII_MODE_TX_ENABLE |
939 MAC_RGMII_MODE_TX_LOWPWR |
940 MAC_RGMII_MODE_TX_RESET);
941 if (!(tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)) {
942 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
943 val |= MAC_RGMII_MODE_RX_INT_B |
944 MAC_RGMII_MODE_RX_QUALITY |
945 MAC_RGMII_MODE_RX_ACTIVITY |
946 MAC_RGMII_MODE_RX_ENG_DET;
947 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
948 val |= MAC_RGMII_MODE_TX_ENABLE |
949 MAC_RGMII_MODE_TX_LOWPWR |
950 MAC_RGMII_MODE_TX_RESET;
952 tw32(MAC_EXT_RGMII_MODE, val);
/* Resume MDIO: clear the PAUSED flag under the bus mutex, stop MAC
 * auto-polling, and re-apply the 5785 PHY-interface config if needed.
 * NOTE(review): closing braces and a udelay after the MI_MODE write are
 * missing from this extract -- restore from pristine tg3.c.
 */
955 static void tg3_mdio_start(struct tg3 *tp)
957 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
958 mutex_lock(&tp->mdio_bus->mdio_lock);
959 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
960 mutex_unlock(&tp->mdio_bus->mdio_lock);
963 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
964 tw32_f(MAC_MI_MODE, tp->mi_mode);
967 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
968 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
969 tg3_mdio_config_5785(tp);
/* Pause MDIO around a chip reset: set the PAUSED flag under the bus mutex
 * so tg3_mdio_read/write back off with -EAGAIN.
 * NOTE(review): closing braces are missing from this extract.
 */
972 static void tg3_mdio_stop(struct tg3 *tp)
974 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
975 mutex_lock(&tp->mdio_bus->mdio_lock);
976 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_PAUSED;
977 mutex_unlock(&tp->mdio_bus->mdio_lock);
/* Allocate and register the phylib MDIO bus, locate the PHY at PHY_ADDR,
 * and apply per-PHY-type dev_flags/interface settings.
 * NOTE(review): this extract is mangled: "&reg" was decoded to "®" in
 * the BMCR read, and the local "u32 reg;" declaration, tg3_mdio_start()
 * call, -ENOMEM return, tg3_bmcr_reset() fallback, error returns after
 * mdiobus_register/phy lookup failures, the "break;" statements between
 * switch cases, the final "return 0;", and closing braces are missing --
 * restore from pristine tg3.c.
 */
981 static int tg3_mdio_init(struct tg3 *tp)
985 struct phy_device *phydev;
989 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) ||
990 (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED))
993 tp->mdio_bus = mdiobus_alloc();
994 if (tp->mdio_bus == NULL)
997 tp->mdio_bus->name = "tg3 mdio bus";
998 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
999 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1000 tp->mdio_bus->priv = tp;
1001 tp->mdio_bus->parent = &tp->pdev->dev;
1002 tp->mdio_bus->read = &tg3_mdio_read;
1003 tp->mdio_bus->write = &tg3_mdio_write;
1004 tp->mdio_bus->reset = &tg3_mdio_reset;
1005 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR);
1006 tp->mdio_bus->irq = &tp->mdio_irq[0];
1008 for (i = 0; i < PHY_MAX_ADDR; i++)
1009 tp->mdio_bus->irq[i] = PHY_POLL;
1011 /* The bus registration will look for all the PHYs on the mdio bus.
1012 * Unfortunately, it does not ensure the PHY is powered up before
1013 * accessing the PHY ID registers. A chip reset is the
1014 * quickest way to bring the device back to an operational state..
 */
1016 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1019 i = mdiobus_register(tp->mdio_bus);
1021 printk(KERN_WARNING "%s: mdiobus_reg failed (0x%x)\n",
1023 mdiobus_free(tp->mdio_bus);
1027 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1029 if (!phydev || !phydev->drv) {
1030 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
1031 mdiobus_unregister(tp->mdio_bus);
1032 mdiobus_free(tp->mdio_bus);
1036 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1037 case TG3_PHY_ID_BCM50610:
1038 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1039 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1040 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
1041 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1042 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_TX_EN)
1043 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1045 case TG3_PHY_ID_RTL8211C:
1046 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1048 case TG3_PHY_ID_RTL8201E:
1049 case TG3_PHY_ID_BCMAC131:
1050 phydev->interface = PHY_INTERFACE_MODE_MII;
1054 tp->tg3_flags3 |= TG3_FLG3_MDIOBUS_INITED;
1056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1057 tg3_mdio_config_5785(tp);
/* Tear down the phylib MDIO bus if it was initialized: clear the INITED
 * flag, unregister and free the bus, and drop any PAUSED state.
 * NOTE(review): closing braces are missing from this extract; confirm
 * against pristine tg3.c whether the PAUSED clear sits inside the if.
 */
1062 static void tg3_mdio_fini(struct tg3 *tp)
1064 if (tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) {
1065 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_INITED;
1066 mdiobus_unregister(tp->mdio_bus);
1067 mdiobus_free(tp->mdio_bus);
1068 tp->tg3_flags3 &= ~TG3_FLG3_MDIOBUS_PAUSED;
1072 /* tp->lock is held. */
1073 static inline void tg3_generate_fw_event(struct tg3 *tp)
1077 val = tr32(GRC_RX_CPU_EVENT);
1078 val |= GRC_RX_CPU_DRIVER_EVENT;
1079 tw32_f(GRC_RX_CPU_EVENT, val);
1081 tp->last_event_jiffies = jiffies;
/* Maximum time (usec) we allow the firmware to acknowledge an event. */
1084 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1086 /* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC since the last event) for
 * the firmware to clear GRC_RX_CPU_DRIVER_EVENT, shortening the poll
 * budget by however much time has already elapsed.
 * NOTE(review): missing from this extract: the "long time_remain;" and
 * "int i;" declarations, the "(long)get_jiffies_64()" subtrahend of the
 * time_remain computation, the early "return;", the udelay(8) in the
 * poll loop, the "break;", and closing braces -- restore from pristine
 * tg3.c.
 */
1087 static void tg3_wait_for_event_ack(struct tg3 *tp)
1090 unsigned int delay_cnt;
1093 /* If enough time has passed, no wait is necessary. */
1094 time_remain = (long)(tp->last_event_jiffies + 1 +
1095 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1097 if (time_remain < 0)
1100 /* Check if we can shorten the wait time. */
1101 delay_cnt = jiffies_to_usecs(time_remain);
1102 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1103 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1104 delay_cnt = (delay_cnt >> 3) + 1;
1106 for (i = 0; i < delay_cnt; i++) {
1107 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1113 /* tp->lock is held. */
/* Report the current link state to the management firmware (UMP): wait
 * for the previous event to be acked, write BMCR/BMSR, ADVERTISE/LPA,
 * and 1000T control/status words into the firmware command mailbox, then
 * raise a firmware event.
 * NOTE(review): this extract is mangled: every "&reg" argument was
 * decoded to "®", and the "u32 reg;"/"u32 val;" declarations, the early
 * return, the "val = 0;"/"val = reg << 16;" accumulation lines, the
 * serdes else-branch, and closing braces are missing -- restore from
 * pristine tg3.c.
 */
1114 static void tg3_ump_link_report(struct tg3 *tp)
1119 if (!(tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
1120 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1123 tg3_wait_for_event_ack(tp);
1125 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1127 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1130 if (!tg3_readphy(tp, MII_BMCR, ®))
1132 if (!tg3_readphy(tp, MII_BMSR, ®))
1133 val |= (reg & 0xffff);
1134 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);
1137 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1139 if (!tg3_readphy(tp, MII_LPA, ®))
1140 val |= (reg & 0xffff);
1141 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);
1144 if (!(tp->tg3_flags2 & TG3_FLG2_MII_SERDES)) {
1145 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1147 if (!tg3_readphy(tp, MII_STAT1000, ®))
1148 val |= (reg & 0xffff);
1150 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);
1152 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1156 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);
1158 tg3_generate_fw_event(tp);
/* Log link up/down (speed, duplex, flow control) when netif_msg_link is
 * enabled, and forward the state to the management firmware.
 * NOTE(review): missing from this extract: the printk argument lines
 * (tp->dev->name, the "1000"/"100"/"10" and "full"/"half" and "on"/"off"
 * string literals), and closing braces -- restore from pristine tg3.c.
 */
1161 static void tg3_link_report(struct tg3 *tp)
1163 if (!netif_carrier_ok(tp->dev)) {
1164 if (netif_msg_link(tp))
1165 printk(KERN_INFO PFX "%s: Link is down.\n",
1167 tg3_ump_link_report(tp);
1168 } else if (netif_msg_link(tp)) {
1169 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1171 (tp->link_config.active_speed == SPEED_1000 ?
1173 (tp->link_config.active_speed == SPEED_100 ?
1175 (tp->link_config.active_duplex == DUPLEX_FULL ?
1178 printk(KERN_INFO PFX
1179 "%s: Flow control is %s for TX and %s for RX.\n",
1181 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX) ?
1183 (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX) ?
1185 tg3_ump_link_report(tp);
1189 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1193 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1194 miireg = ADVERTISE_PAUSE_CAP;
1195 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1196 miireg = ADVERTISE_PAUSE_ASYM;
1197 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1198 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1205 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1209 if ((flow_ctrl & TG3_FLOW_CTRL_TX) && (flow_ctrl & TG3_FLOW_CTRL_RX))
1210 miireg = ADVERTISE_1000XPAUSE;
1211 else if (flow_ctrl & TG3_FLOW_CTRL_TX)
1212 miireg = ADVERTISE_1000XPSE_ASYM;
1213 else if (flow_ctrl & TG3_FLOW_CTRL_RX)
1214 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1221 static u8 tg3_resolve_flowctrl_1000T(u16 lcladv, u16 rmtadv)
1225 if (lcladv & ADVERTISE_PAUSE_CAP) {
1226 if (lcladv & ADVERTISE_PAUSE_ASYM) {
1227 if (rmtadv & LPA_PAUSE_CAP)
1228 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1229 else if (rmtadv & LPA_PAUSE_ASYM)
1230 cap = TG3_FLOW_CTRL_RX;
1232 if (rmtadv & LPA_PAUSE_CAP)
1233 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1235 } else if (lcladv & ADVERTISE_PAUSE_ASYM) {
1236 if ((rmtadv & LPA_PAUSE_CAP) && (rmtadv & LPA_PAUSE_ASYM))
1237 cap = TG3_FLOW_CTRL_TX;
1243 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1247 if (lcladv & ADVERTISE_1000XPAUSE) {
1248 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1249 if (rmtadv & LPA_1000XPAUSE)
1250 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1251 else if (rmtadv & LPA_1000XPAUSE_ASYM)
1252 cap = TG3_FLOW_CTRL_RX;
1254 if (rmtadv & LPA_1000XPAUSE)
1255 cap = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
1257 } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1258 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1259 cap = TG3_FLOW_CTRL_TX;
/* Program the MAC RX/TX flow-control enables from either the
 * autoneg-resolved pause state (via tg3_resolve_flowctrl_1000X/T) or
 * the statically configured tp->link_config.flowctrl.  Only touches
 * the MAC_RX_MODE / MAC_TX_MODE registers when the computed mode
 * actually changed.
 *
 * NOTE(review): extraction dropped lines here (the "u8 autoneg;",
 * "u8 flowctrl = 0;" declarations and the bare "else" lines before
 * 1275/1282/1284/1291/1299) -- restore from the upstream driver.
 */
1265 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1269 u32 old_rx_mode = tp->rx_mode;
1270 u32 old_tx_mode = tp->tx_mode;
1272 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1273 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg;
1275 autoneg = tp->link_config.autoneg;
1277 if (autoneg == AUTONEG_ENABLE &&
1278 (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)) {
1279 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
1280 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1282 flowctrl = tg3_resolve_flowctrl_1000T(lcladv, rmtadv);
1284 flowctrl = tp->link_config.flowctrl;
1286 tp->link_config.active_flowctrl = flowctrl;
1288 if (flowctrl & TG3_FLOW_CTRL_RX)
1289 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1291 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1293 if (old_rx_mode != tp->rx_mode)
1294 tw32_f(MAC_RX_MODE, tp->rx_mode);
1296 if (flowctrl & TG3_FLOW_CTRL_TX)
1297 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1299 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1301 if (old_tx_mode != tp->tx_mode)
1302 tw32_f(MAC_TX_MODE, tp->tx_mode);
/* phylib link-change callback (registered via phy_connect in
 * tg3_phy_init).  Re-derives the MAC port mode (MII vs GMII), duplex
 * bit, flow control, MI status polling mode and TX IPG/slot-time
 * settings from the current phydev state, writes them only when they
 * changed, records the new active speed/duplex, and reports the link
 * state outside tp->lock.
 *
 * NOTE(review): extraction dropped lines here (the "if (phydev->link)"
 * guards, else lines, the linkmesg assignment and closing braces) --
 * restore from the upstream driver before building.
 */
1305 static void tg3_adjust_link(struct net_device *dev)
1307 u8 oldflowctrl, linkmesg = 0;
1308 u32 mac_mode, lcl_adv, rmt_adv;
1309 struct tg3 *tp = netdev_priv(dev);
1310 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1312 spin_lock(&tp->lock);
1314 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1315 MAC_MODE_HALF_DUPLEX);
1317 oldflowctrl = tp->link_config.active_flowctrl;
1323 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1324 mac_mode |= MAC_MODE_PORT_MODE_MII;
1326 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1328 if (phydev->duplex == DUPLEX_HALF)
1329 mac_mode |= MAC_MODE_HALF_DUPLEX;
1331 lcl_adv = tg3_advert_flowctrl_1000T(
1332 tp->link_config.flowctrl);
1335 rmt_adv = LPA_PAUSE_CAP;
1336 if (phydev->asym_pause)
1337 rmt_adv |= LPA_PAUSE_ASYM;
1340 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1342 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1344 if (mac_mode != tp->mac_mode) {
1345 tp->mac_mode = mac_mode;
1346 tw32_f(MAC_MODE, tp->mac_mode);
1350 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1351 if (phydev->speed == SPEED_10)
1353 MAC_MI_STAT_10MBPS_MODE |
1354 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1356 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1359 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1360 tw32(MAC_TX_LENGTHS,
1361 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1362 (6 << TX_LENGTHS_IPG_SHIFT) |
1363 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1365 tw32(MAC_TX_LENGTHS,
1366 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1367 (6 << TX_LENGTHS_IPG_SHIFT) |
1368 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1370 if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
1371 (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
1372 phydev->speed != tp->link_config.active_speed ||
1373 phydev->duplex != tp->link_config.active_duplex ||
1374 oldflowctrl != tp->link_config.active_flowctrl)
1377 tp->link_config.active_speed = phydev->speed;
1378 tp->link_config.active_duplex = phydev->duplex;
1380 spin_unlock(&tp->lock);
1383 tg3_link_report(tp);
/* Connect the MAC to its PHY through phylib: resets the PHY to a
 * known state, attaches tg3_adjust_link as the link-change callback,
 * masks phydev->supported down to what the MAC can do for the given
 * interface mode (GMII/RGMII vs MII), and advertises everything the
 * masked feature set allows.  Sets TG3_FLG3_PHY_CONNECTED on success.
 *
 * Returns 0 on success or a negative errno from phy_connect().
 *
 * NOTE(review): extraction dropped lines here (the early "return 0;"
 * when already connected, the tg3_bmcr_reset() call after the "known
 * state" comment, break statements, the default case returning
 * -EINVAL after phy_disconnect, and closing braces) -- restore from
 * the upstream driver.
 */
1386 static int tg3_phy_init(struct tg3 *tp)
1388 struct phy_device *phydev;
1390 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
1393 /* Bring the PHY back to a known state. */
1396 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1398 /* Attach the MAC to the PHY. */
1399 phydev = phy_connect(tp->dev, phydev->dev.bus_id, tg3_adjust_link,
1400 phydev->dev_flags, phydev->interface);
1401 if (IS_ERR(phydev)) {
1402 printk(KERN_ERR "%s: Could not attach to PHY\n", tp->dev->name);
1403 return PTR_ERR(phydev);
1406 /* Mask with MAC supported features. */
1407 switch (phydev->interface) {
1408 case PHY_INTERFACE_MODE_GMII:
1409 case PHY_INTERFACE_MODE_RGMII:
1410 phydev->supported &= (PHY_GBIT_FEATURES |
1412 SUPPORTED_Asym_Pause);
1414 case PHY_INTERFACE_MODE_MII:
1415 phydev->supported &= (PHY_BASIC_FEATURES |
1417 SUPPORTED_Asym_Pause);
1420 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1424 tp->tg3_flags3 |= TG3_FLG3_PHY_CONNECTED;
1426 phydev->advertising = phydev->supported;
/* (Re)start the phylib-managed PHY.  If the PHY was put into low-power
 * mode, restore the speed/duplex/autoneg/advertising values that were
 * saved when power was dropped, then kick off autonegotiation.
 * No-op unless TG3_FLG3_PHY_CONNECTED is set.
 *
 * NOTE(review): extraction dropped lines here (the early "return;",
 * the phy_start() call and closing brace of the low-power branch) --
 * restore from the upstream driver.
 */
1431 static void tg3_phy_start(struct tg3 *tp)
1433 struct phy_device *phydev;
1435 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1438 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
1440 if (tp->link_config.phy_is_low_power) {
1441 tp->link_config.phy_is_low_power = 0;
1442 phydev->speed = tp->link_config.orig_speed;
1443 phydev->duplex = tp->link_config.orig_duplex;
1444 phydev->autoneg = tp->link_config.orig_autoneg;
1445 phydev->advertising = tp->link_config.orig_advertising;
1450 phy_start_aneg(phydev);
1453 static void tg3_phy_stop(struct tg3 *tp)
1455 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1458 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]);
1461 static void tg3_phy_fini(struct tg3 *tp)
1463 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1464 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]);
1465 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1469 static void tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1471 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1472 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
/* Enable or disable automatic MDI/MDI-X crossover in the PHY.
 * On 5906 this goes through the EPHY shadow test registers; on other
 * 5705+ copper parts through the AUX_CTRL misc shadow register
 * (read-modify-write with the WREN bit).  No-op on pre-5705 parts and
 * on SerDes.
 *
 * NOTE(review): extraction dropped lines here (the "u32 phy;"/"u32
 * ephy;" declarations, "if (enable)"/"else" lines and closing braces)
 * -- restore from the upstream driver.
 */
1475 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
1479 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
1480 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
1483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1486 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &ephy)) {
1487 tg3_writephy(tp, MII_TG3_EPHY_TEST,
1488 ephy | MII_TG3_EPHY_SHADOW_EN);
1489 if (!tg3_readphy(tp, MII_TG3_EPHYTST_MISCCTRL, &phy)) {
1491 phy |= MII_TG3_EPHYTST_MISCCTRL_MDIX;
1493 phy &= ~MII_TG3_EPHYTST_MISCCTRL_MDIX;
1494 tg3_writephy(tp, MII_TG3_EPHYTST_MISCCTRL, phy);
1496 tg3_writephy(tp, MII_TG3_EPHY_TEST, ephy);
1499 phy = MII_TG3_AUXCTL_MISC_RDSEL_MISC |
1500 MII_TG3_AUXCTL_SHDWSEL_MISC;
1501 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, phy) &&
1502 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy)) {
1504 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1506 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
1507 phy |= MII_TG3_AUXCTL_MISC_WREN;
1508 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1513 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1517 if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
1520 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
1521 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
1522 tg3_writephy(tp, MII_TG3_AUX_CTRL,
1523 (val | (1 << 15) | (1 << 4)));
/* Program per-chip analog tuning values from the one-time-programmable
 * (OTP) fuse word into the PHY DSP registers (TAP1 AGC target, HPF
 * filter/override, LPF disable, VDAC, 10BT amplitude, resistor
 * offsets).  The DSP is only writable while the SM_DSP clock is
 * enabled, hence the AUX_CTRL bracketing at entry and exit.
 *
 * NOTE(review): extraction dropped the prologue (the "u32 otp, phy;"
 * declarations and the "if (!tp->phy_otp) return; otp = tp->phy_otp;"
 * guard, presumably) and the closing brace -- restore from the
 * upstream driver.
 */
1526 static void tg3_phy_apply_otp(struct tg3 *tp)
1535 /* Enable SM_DSP clock and tx 6dB coding. */
1536 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1537 MII_TG3_AUXCTL_ACTL_SMDSP_ENA |
1538 MII_TG3_AUXCTL_ACTL_TX_6DB;
1539 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
1541 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
1542 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
1543 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
1545 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
1546 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
1547 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
1549 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
1550 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
1551 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
1553 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
1554 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
1556 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
1557 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
1559 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
1560 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
1561 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
1563 /* Turn off SM_DSP clock. */
1564 phy = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
1565 MII_TG3_AUXCTL_ACTL_TX_6DB;
1566 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy);
/* Poll PHY register 0x16 until the macro-busy bit (0x1000) clears.
 * Returns 0 on completion, -EBUSY (presumably) on timeout.
 *
 * NOTE(review): extraction dropped the polling loop structure (the
 * retry counter, loop, delay, and return statements) -- only the read
 * and busy-bit test survive.  Restore from the upstream driver.
 */
1569 static int tg3_wait_macro_done(struct tg3 *tp)
1576 if (!tg3_readphy(tp, 0x16, &tmp32)) {
1577 if ((tmp32 & 0x1000) == 0)
/* Write a fixed test pattern into each of the 4 PHY DSP channels and
 * read it back to verify the DSP is functioning.  On a readback
 * mismatch the function requests another PHY reset via *resetp and
 * writes a recovery sequence to DSP address 0x000b.
 *
 * Register 0x16 is the DSP control register: 0x0002 arms a write
 * burst, 0x0202/0x0082/0x0802 trigger the macro operations that
 * tg3_wait_macro_done() polls for.
 *
 * NOTE(review): extraction dropped lines here (the "int chan, i;"
 * declarations, the "*resetp = 1; return -EBUSY;" error arms, the
 * final "return 0;", and closing braces) -- restore from the upstream
 * driver.
 */
1587 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
1589 static const u32 test_pat[4][6] = {
1590 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
1591 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
1592 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
1593 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
1597 for (chan = 0; chan < 4; chan++) {
1600 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1601 (chan * 0x2000) | 0x0200);
1602 tg3_writephy(tp, 0x16, 0x0002);
1604 for (i = 0; i < 6; i++)
1605 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
1608 tg3_writephy(tp, 0x16, 0x0202);
1609 if (tg3_wait_macro_done(tp)) {
1614 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1615 (chan * 0x2000) | 0x0200);
1616 tg3_writephy(tp, 0x16, 0x0082);
1617 if (tg3_wait_macro_done(tp)) {
1622 tg3_writephy(tp, 0x16, 0x0802);
1623 if (tg3_wait_macro_done(tp)) {
1628 for (i = 0; i < 6; i += 2) {
1631 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
1632 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
1633 tg3_wait_macro_done(tp)) {
1639 if (low != test_pat[chan][i] ||
1640 high != test_pat[chan][i+1]) {
1641 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
1642 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
1643 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
/* Zero out the test pattern in all 4 PHY DSP channels (6 words each)
 * and trigger the macro write, waiting for completion per channel.
 * Returns 0 on success, non-zero (presumably -EBUSY) if the macro
 * never completes.
 *
 * NOTE(review): extraction dropped the "int chan, i;" declarations,
 * the error return inside the loop, the final "return 0;" and closing
 * braces -- restore from the upstream driver.
 */
1653 static int tg3_phy_reset_chanpat(struct tg3 *tp)
1657 for (chan = 0; chan < 4; chan++) {
1660 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
1661 (chan * 0x2000) | 0x0200);
1662 tg3_writephy(tp, 0x16, 0x0002);
1663 for (i = 0; i < 6; i++)
1664 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
1665 tg3_writephy(tp, 0x16, 0x0202);
1666 if (tg3_wait_macro_done(tp))
/* Workaround reset sequence for 5703/5704/5705 PHYs: force 1000/full
 * master mode, run the DSP test-pattern write/verify loop (retrying
 * with a BMCR reset while it fails), clear the channel pattern, then
 * restore MII_TG3_CTRL and re-enable the transmitter/interrupt in
 * MII_TG3_EXT_CTRL.
 *
 * Returns 0 on success or a negative error from the sub-steps.
 *
 * NOTE(review): extraction is damaged here: "®32" on the
 * MII_TG3_EXT_CTRL reads is mojibake for "&reg32", and lines are
 * missing (retry-loop setup, error returns, reg32 bit twiddling,
 * else-branch braces, final "return err;").  Restore from the
 * upstream driver before building.
 */
1673 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
1675 u32 reg32, phy9_orig;
1676 int retries, do_phy_reset, err;
1682 err = tg3_bmcr_reset(tp);
1688 /* Disable transmitter and interrupt. */
1689 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
1693 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1695 /* Set full-duplex, 1000 mbps. */
1696 tg3_writephy(tp, MII_BMCR,
1697 BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
1699 /* Set to master mode. */
1700 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
1703 tg3_writephy(tp, MII_TG3_CTRL,
1704 (MII_TG3_CTRL_AS_MASTER |
1705 MII_TG3_CTRL_ENABLE_AS_MASTER));
1707 /* Enable SM_DSP_CLOCK and 6dB. */
1708 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1710 /* Block the PHY control access. */
1711 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1712 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
1714 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
1717 } while (--retries);
1719 err = tg3_phy_reset_chanpat(tp);
1723 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
1724 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
1726 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
1727 tg3_writephy(tp, 0x16, 0x0000);
1729 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1730 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
1731 /* Set Extended packet length bit for jumbo frames */
1732 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
1735 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1738 tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
1740 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
1742 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
1749 /* This will reset the tigon3 PHY if there is no valid
1750 * link unless the FORCE argument is non-zero.
/* Full PHY reset with per-chip errata workarounds: wakes a 5906 EPHY
 * out of IDDQ, drops carrier, dispatches 5703/4/5 to the dedicated
 * reset routine, temporarily clears the 5784 CPMU 10MB-RX-only mode
 * around the BMCR reset, fixes the 5784/5761-AX MAC clock and GPHY
 * autopowerdown, applies OTP tuning, then replays the ADC/BER/jitter
 * PHY bug workarounds, jumbo-frame bits, and re-enables auto-MDIX and
 * wirespeed.  Returns 0 on success or a negative error.
 *
 * NOTE(review): extraction is damaged here: lines are missing
 * (declarations of err/phy_status/cpmuctrl/val, udelays, "goto out"
 * arms, the "out:" label, "return err"/"return 0", and closing
 * braces).  Restore from the upstream driver before building.
 */
1752 static int tg3_phy_reset(struct tg3 *tp)
1758 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1761 val = tr32(GRC_MISC_CFG);
1762 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
1765 err = tg3_readphy(tp, MII_BMSR, &phy_status);
1766 err |= tg3_readphy(tp, MII_BMSR, &phy_status);
1770 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
1771 netif_carrier_off(tp->dev);
1772 tg3_link_report(tp);
1775 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1776 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1777 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
1778 err = tg3_phy_reset_5703_4_5(tp);
1785 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
1786 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
1787 cpmuctrl = tr32(TG3_CPMU_CTRL);
1788 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
1790 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
1793 err = tg3_bmcr_reset(tp);
1797 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
1800 phy = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
1801 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, phy);
1803 tw32(TG3_CPMU_CTRL, cpmuctrl);
1806 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
1807 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
1810 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
1811 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
1812 CPMU_LSPD_1000MB_MACCLK_12_5) {
1813 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
1815 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
1818 /* Disable GPHY autopowerdown. */
1819 tg3_writephy(tp, MII_TG3_MISC_SHDW,
1820 MII_TG3_MISC_SHDW_WREN |
1821 MII_TG3_MISC_SHDW_APD_SEL |
1822 MII_TG3_MISC_SHDW_APD_WKTM_84MS);
1825 tg3_phy_apply_otp(tp);
1828 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
1829 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1830 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1831 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
1832 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1833 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
1834 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1836 if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
1837 tg3_writephy(tp, 0x1c, 0x8d68);
1838 tg3_writephy(tp, 0x1c, 0x8d68);
1840 if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
1841 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1842 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1843 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
1844 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1845 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
1846 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
1847 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
1848 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1850 else if (tp->tg3_flags2 & TG3_FLG2_PHY_JITTER_BUG) {
1851 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
1852 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
1853 if (tp->tg3_flags2 & TG3_FLG2_PHY_ADJUST_TRIM) {
1854 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
1855 tg3_writephy(tp, MII_TG3_TEST1,
1856 MII_TG3_TEST1_TRIM_EN | 0x4);
1858 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
1859 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
1861 /* Set Extended packet length bit (bit 14) on all chips that */
1862 /* support jumbo frames */
1863 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1864 /* Cannot do read-modify-write on 5401 */
1865 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1866 } else if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1869 /* Set bit 14 with read-modify-write to preserve other bits */
1870 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
1871 !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
1872 tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
1875 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
1876 * jumbo frames transmission.
1878 if (tp->tg3_flags2 & TG3_FLG2_JUMBO_CAPABLE) {
1881 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
1882 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1883 phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
1886 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1887 /* adjust output voltage */
1888 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x12);
1891 tg3_phy_toggle_automdix(tp, 1);
1892 tg3_phy_set_wirespeed(tp);
/* Manage the auxiliary (Vaux) power GPIOs for WOL/ASF.  On dual-port
 * parts (5704/5714) the GPIOs are shared, so the peer device's
 * WOL/ASF state is consulted via pci_get_drvdata(tp->pdev_peer).  If
 * either port needs aux power, drive the chip-specific GPIO sequence
 * (5700/5701 one-shot, 5761 with swapped GPIO0/2, others with the
 * overdraw workaround and the 5753 no-GPIO2 quirk); otherwise toggle
 * GPIO1 to release Vaux.
 *
 * NOTE(review): extraction dropped lines here (the early "return",
 * the "if (!dev_peer)" check after the remove_one() comment, udelay
 * spacing lines, "return" after the INIT_COMPLETE peer checks, the
 * GRC_LCLCTRL_GPIO_OUTPUT2 term at 1936, and many closing braces) --
 * restore from the upstream driver before building.
 */
1896 static void tg3_frob_aux_power(struct tg3 *tp)
1898 struct tg3 *tp_peer = tp;
1900 if ((tp->tg3_flags2 & TG3_FLG2_IS_NIC) == 0)
1903 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
1904 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
1905 struct net_device *dev_peer;
1907 dev_peer = pci_get_drvdata(tp->pdev_peer);
1908 /* remove_one() may have been run on the peer. */
1912 tp_peer = netdev_priv(dev_peer);
1915 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1916 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
1917 (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
1918 (tp_peer->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
1919 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1920 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1921 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1922 (GRC_LCLCTRL_GPIO_OE0 |
1923 GRC_LCLCTRL_GPIO_OE1 |
1924 GRC_LCLCTRL_GPIO_OE2 |
1925 GRC_LCLCTRL_GPIO_OUTPUT0 |
1926 GRC_LCLCTRL_GPIO_OUTPUT1),
1928 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
1929 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
1930 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
1931 GRC_LCLCTRL_GPIO_OE1 |
1932 GRC_LCLCTRL_GPIO_OE2 |
1933 GRC_LCLCTRL_GPIO_OUTPUT0 |
1934 GRC_LCLCTRL_GPIO_OUTPUT1 |
1936 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1938 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
1939 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1941 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
1942 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
1945 u32 grc_local_ctrl = 0;
1947 if (tp_peer != tp &&
1948 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1951 /* Workaround to prevent overdrawing Amps. */
1952 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
1954 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
1955 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1956 grc_local_ctrl, 100);
1959 /* On 5753 and variants, GPIO2 cannot be used. */
1960 no_gpio2 = tp->nic_sram_data_cfg &
1961 NIC_SRAM_DATA_CFG_NO_GPIO2;
1963 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
1964 GRC_LCLCTRL_GPIO_OE1 |
1965 GRC_LCLCTRL_GPIO_OE2 |
1966 GRC_LCLCTRL_GPIO_OUTPUT1 |
1967 GRC_LCLCTRL_GPIO_OUTPUT2;
1969 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
1970 GRC_LCLCTRL_GPIO_OUTPUT2);
1972 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1973 grc_local_ctrl, 100);
1975 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
1977 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1978 grc_local_ctrl, 100);
1981 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
1982 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1983 grc_local_ctrl, 100);
1987 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
1988 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
1989 if (tp_peer != tp &&
1990 (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
1993 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1994 (GRC_LCLCTRL_GPIO_OE1 |
1995 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
1997 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
1998 GRC_LCLCTRL_GPIO_OE1, 100);
2000 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2001 (GRC_LCLCTRL_GPIO_OE1 |
2002 GRC_LCLCTRL_GPIO_OUTPUT1), 100);
2007 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2009 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2011 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411) {
2012 if (speed != SPEED_10)
2014 } else if (speed == SPEED_10)
2020 static int tg3_setup_phy(struct tg3 *, int);
2022 #define RESET_KIND_SHUTDOWN 0
2023 #define RESET_KIND_INIT 1
2024 #define RESET_KIND_SUSPEND 2
2026 static void tg3_write_sig_post_reset(struct tg3 *, int);
2027 static int tg3_halt_cpu(struct tg3 *, u32);
2028 static int tg3_nvram_lock(struct tg3 *);
2029 static void tg3_nvram_unlock(struct tg3 *);
/* Power down the PHY for suspend.  SerDes parts get a SG_DIG/serdes
 * reconfiguration (5704) instead of a PHY write; 5906 drops its EPHY
 * into IDDQ; otherwise the PHY is optionally put in low-power
 * isolation mode (do_low_power) and finally BMCR_PDOWN is written --
 * except on chips whose errata forbid powering the PHY down, and
 * after the 5784/5761-AX MAC clock is forced to 12.5MHz.
 *
 * NOTE(review): extraction dropped lines here (the "u32 val;"
 * declaration, the early "return" for SerDes, udelay, the "return"
 * after the errata check at 2071, and closing braces) -- restore from
 * the upstream driver.
 */
2031 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2035 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2036 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2037 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2038 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2041 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2042 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2043 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2048 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2050 val = tr32(GRC_MISC_CFG);
2051 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2054 } else if (do_low_power) {
2055 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2056 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2058 tg3_writephy(tp, MII_TG3_AUX_CTRL,
2059 MII_TG3_AUXCTL_SHDWSEL_PWRCTL |
2060 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2061 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2062 MII_TG3_AUXCTL_PCTL_VREG_11V);
2065 /* The PHY should not be powered down on some chips because
2068 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2069 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2070 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2071 (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)))
2074 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2075 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2076 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2077 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2078 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2079 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2082 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2085 /* tp->lock is held. */
2086 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2088 u32 addr_high, addr_low;
2091 addr_high = ((tp->dev->dev_addr[0] << 8) |
2092 tp->dev->dev_addr[1]);
2093 addr_low = ((tp->dev->dev_addr[2] << 24) |
2094 (tp->dev->dev_addr[3] << 16) |
2095 (tp->dev->dev_addr[4] << 8) |
2096 (tp->dev->dev_addr[5] << 0));
2097 for (i = 0; i < 4; i++) {
2098 if (i == 1 && skip_mac_1)
2100 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
2101 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
2104 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2105 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2106 for (i = 0; i < 12; i++) {
2107 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
2108 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
2112 addr_high = (tp->dev->dev_addr[0] +
2113 tp->dev->dev_addr[1] +
2114 tp->dev->dev_addr[2] +
2115 tp->dev->dev_addr[3] +
2116 tp->dev->dev_addr[4] +
2117 tp->dev->dev_addr[5]) &
2118 TX_BACKOFF_SEED_MASK;
2119 tw32(MAC_TX_BACKOFF_SEED, addr_high);
/* Transition the device to the requested PCI power state.  For D0:
 * wake the device, restore register access and Vaux, done.  For
 * D1/D2/D3hot: mask PCI interrupts, save the link config and force
 * 10/half (or delegate to phylib) for WOL, disable WOL on 5906 VCPU /
 * wait for ASF firmware otherwise, build the WOL MAC mode, gate the
 * various core clocks per chip family, power down the PHY when
 * nothing needs it, switch the aux-power GPIOs, halt the RX CPU on
 * 5750-A/BX for the unstable-PLL workaround, write the shutdown
 * signature, arm PME and finally enter the new PCI state.
 *
 * Returns 0 on success, -EINVAL for an unsupported target state.
 *
 * NOTE(review): extraction dropped many lines (local declarations,
 * "return 0"/"return -EINVAL", switch/case labels for the power
 * states, udelays, else-branches, and closing braces); restore from
 * the upstream driver before building.
 */
2122 static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2125 bool device_should_wake, do_low_power;
2127 /* Make sure register accesses (indirect or otherwise)
2128 * will function correctly.
2130 pci_write_config_dword(tp->pdev,
2131 TG3PCI_MISC_HOST_CTRL,
2132 tp->misc_host_ctrl);
2136 pci_enable_wake(tp->pdev, state, false);
2137 pci_set_power_state(tp->pdev, PCI_D0);
2139 /* Switch out of Vaux if it is a NIC */
2140 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2141 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2151 printk(KERN_ERR PFX "%s: Invalid power state (D%d) requested\n",
2152 tp->dev->name, state);
2155 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
2156 tw32(TG3PCI_MISC_HOST_CTRL,
2157 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2159 device_should_wake = pci_pme_capable(tp->pdev, state) &&
2160 device_may_wakeup(&tp->pdev->dev) &&
2161 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2163 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
2164 do_low_power = false;
2165 if ((tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) &&
2166 !tp->link_config.phy_is_low_power) {
2167 struct phy_device *phydev;
2168 u32 phyid, advertising;
2170 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
2172 tp->link_config.phy_is_low_power = 1;
2174 tp->link_config.orig_speed = phydev->speed;
2175 tp->link_config.orig_duplex = phydev->duplex;
2176 tp->link_config.orig_autoneg = phydev->autoneg;
2177 tp->link_config.orig_advertising = phydev->advertising;
2179 advertising = ADVERTISED_TP |
2181 ADVERTISED_Autoneg |
2182 ADVERTISED_10baseT_Half;
2184 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2185 device_should_wake) {
2186 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2188 ADVERTISED_100baseT_Half |
2189 ADVERTISED_100baseT_Full |
2190 ADVERTISED_10baseT_Full;
2192 advertising |= ADVERTISED_10baseT_Full;
2195 phydev->advertising = advertising;
2197 phy_start_aneg(phydev);
2199 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
2200 if (phyid != TG3_PHY_ID_BCMAC131) {
2201 phyid &= TG3_PHY_OUI_MASK;
/* NOTE(review): "phyid ==" three times joined by && can never be
 * true for distinct OUI values -- upstream uses "!=" here; likely
 * further extraction damage.  Verify against the original driver. */
2202 if (phyid == TG3_PHY_OUI_1 &&
2203 phyid == TG3_PHY_OUI_2 &&
2204 phyid == TG3_PHY_OUI_3)
2205 do_low_power = true;
2209 do_low_power = false;
2211 if (tp->link_config.phy_is_low_power == 0) {
2212 tp->link_config.phy_is_low_power = 1;
2213 tp->link_config.orig_speed = tp->link_config.speed;
2214 tp->link_config.orig_duplex = tp->link_config.duplex;
2215 tp->link_config.orig_autoneg = tp->link_config.autoneg;
2218 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
2219 tp->link_config.speed = SPEED_10;
2220 tp->link_config.duplex = DUPLEX_HALF;
2221 tp->link_config.autoneg = AUTONEG_ENABLE;
2222 tg3_setup_phy(tp, 0);
2226 __tg3_set_mac_addr(tp, 0);
2228 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2231 val = tr32(GRC_VCPU_EXT_CTRL);
2232 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
2233 } else if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2237 for (i = 0; i < 200; i++) {
2238 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
2239 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
2244 if (tp->tg3_flags & TG3_FLAG_WOL_CAP)
2245 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
2246 WOL_DRV_STATE_SHUTDOWN |
2250 if (device_should_wake) {
2253 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
2255 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
2259 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
2260 mac_mode = MAC_MODE_PORT_MODE_GMII;
2262 mac_mode = MAC_MODE_PORT_MODE_MII;
2264 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
2265 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
2267 u32 speed = (tp->tg3_flags &
2268 TG3_FLAG_WOL_SPEED_100MB) ?
2269 SPEED_100 : SPEED_10;
2270 if (tg3_5700_link_polarity(tp, speed))
2271 mac_mode |= MAC_MODE_LINK_POLARITY;
2273 mac_mode &= ~MAC_MODE_LINK_POLARITY;
2276 mac_mode = MAC_MODE_PORT_MODE_TBI;
2279 if (!(tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
2280 tw32(MAC_LED_CTRL, tp->led_ctrl);
2282 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
2283 if (((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
2284 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) &&
2285 ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
2286 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2287 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2289 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
2290 mac_mode |= tp->mac_mode &
2291 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
2292 if (mac_mode & MAC_MODE_APE_TX_EN)
2293 mac_mode |= MAC_MODE_TDE_ENABLE;
2296 tw32_f(MAC_MODE, mac_mode);
2299 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
2303 if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
2304 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2305 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
2308 base_val = tp->pci_clock_ctrl;
2309 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
2310 CLOCK_CTRL_TXCLK_DISABLE);
2312 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
2313 CLOCK_CTRL_PWRDOWN_PLL133, 40);
2314 } else if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
2315 (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT) ||
2316 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)) {
2318 } else if (!((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2319 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
2320 u32 newbits1, newbits2;
2322 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2323 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2324 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
2325 CLOCK_CTRL_TXCLK_DISABLE |
2327 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2328 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
2329 newbits1 = CLOCK_CTRL_625_CORE;
2330 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
2332 newbits1 = CLOCK_CTRL_ALTCLK;
2333 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
2336 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
2339 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
2342 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2345 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2346 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2347 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
2348 CLOCK_CTRL_TXCLK_DISABLE |
2349 CLOCK_CTRL_44MHZ_CORE);
2351 newbits3 = CLOCK_CTRL_44MHZ_CORE;
2354 tw32_wait_f(TG3PCI_CLOCK_CTRL,
2355 tp->pci_clock_ctrl | newbits3, 40);
2359 if (!(device_should_wake) &&
2360 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
2361 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
2362 tg3_power_down_phy(tp, do_low_power);
2364 tg3_frob_aux_power(tp);
2366 /* Workaround for unstable PLL clock */
2367 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
2368 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX) {
2369 u32 val = tr32(0x7d00);
2371 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
2373 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
2376 err = tg3_nvram_lock(tp);
2377 tg3_halt_cpu(tp, RX_CPU_BASE);
2379 tg3_nvram_unlock(tp);
2383 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2385 if (device_should_wake)
2386 pci_enable_wake(tp->pdev, state, true);
2388 /* Finally, set the new power state. */
2389 pci_set_power_state(tp->pdev, state);
/* Decode the MII_TG3_AUX_STAT speed/duplex field into SPEED_* and
 * DUPLEX_* values.  On 5906 (10/100 only) an unknown code is decoded
 * from the separate 100/FULL status bits; on other chips it maps to
 * SPEED_INVALID / DUPLEX_INVALID.
 *
 * NOTE(review): extraction dropped lines here (the "*speed = SPEED_10
 * / SPEED_100;" assignments in the 10/100 cases, the "break;"
 * statements, the "default:" label and closing braces) -- restore
 * from the upstream driver.
 */
2394 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2396 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2397 case MII_TG3_AUX_STAT_10HALF:
2399 *duplex = DUPLEX_HALF;
2402 case MII_TG3_AUX_STAT_10FULL:
2404 *duplex = DUPLEX_FULL;
2407 case MII_TG3_AUX_STAT_100HALF:
2409 *duplex = DUPLEX_HALF;
2412 case MII_TG3_AUX_STAT_100FULL:
2414 *duplex = DUPLEX_FULL;
2417 case MII_TG3_AUX_STAT_1000HALF:
2418 *speed = SPEED_1000;
2419 *duplex = DUPLEX_HALF;
2422 case MII_TG3_AUX_STAT_1000FULL:
2423 *speed = SPEED_1000;
2424 *duplex = DUPLEX_FULL;
2428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2429 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2431 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2435 *speed = SPEED_INVALID;
2436 *duplex = DUPLEX_INVALID;
/* tg3_phy_copper_begin - program the copper PHY's advertisement registers
 * and (re)start the link bring-up for the mode requested in tp->link_config.
 *
 * Visible cases:
 *  - phy_is_low_power: advertise only 10Mb (plus 100Mb when WOL-at-100 is
 *    enabled) and clear the gigabit control register.
 *  - autoneg with no forced speed (speed == SPEED_INVALID): build
 *    MII_ADVERTISE and MII_TG3_CTRL from link_config.advertising.
 *  - otherwise: advertise/force a specific speed+duplex, then force BMCR.
 *
 * NOTE(review): this extract has lines elided (missing braces, else arms,
 * switch cases); comments describe only the code that is visible here.
 */
2441 static void tg3_phy_copper_begin(struct tg3 *tp)
2446 if (tp->link_config.phy_is_low_power) {
2447 /* Entering low power mode. Disable gigabit and
2448 * 100baseT advertisements.
2450 tg3_writephy(tp, MII_TG3_CTRL, 0);
2452 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
2453 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
/* WOL at 100Mb requires 100baseT to stay advertised */
2454 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
2455 new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
2457 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2458 } else if (tp->link_config.speed == SPEED_INVALID) {
/* 10/100-only parts must never advertise gigabit modes */
2459 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
2460 tp->link_config.advertising &=
2461 ~(ADVERTISED_1000baseT_Half |
2462 ADVERTISED_1000baseT_Full);
/* Translate ethtool ADVERTISED_* bits into MII ADVERTISE_* bits */
2464 new_adv = ADVERTISE_CSMA;
2465 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
2466 new_adv |= ADVERTISE_10HALF;
2467 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
2468 new_adv |= ADVERTISE_10FULL;
2469 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
2470 new_adv |= ADVERTISE_100HALF;
2471 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
2472 new_adv |= ADVERTISE_100FULL;
2474 new_adv |= tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2476 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2478 if (tp->link_config.advertising &
2479 (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
2481 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
2482 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
2483 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
2484 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
/* 5701 A0/B0 parts are forced to master mode (chip erratum
 * workaround, per the chip rev check below) */
2485 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
2486 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2487 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
2488 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2489 MII_TG3_CTRL_ENABLE_AS_MASTER);
2490 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
2492 tg3_writephy(tp, MII_TG3_CTRL, 0);
2495 new_adv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2496 new_adv |= ADVERTISE_CSMA;
2498 /* Asking for a specific link mode. */
2499 if (tp->link_config.speed == SPEED_1000) {
2500 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2502 if (tp->link_config.duplex == DUPLEX_FULL)
2503 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
2505 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
2506 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2507 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
2508 new_adv |= (MII_TG3_CTRL_AS_MASTER |
2509 MII_TG3_CTRL_ENABLE_AS_MASTER);
2511 if (tp->link_config.speed == SPEED_100) {
2512 if (tp->link_config.duplex == DUPLEX_FULL)
2513 new_adv |= ADVERTISE_100FULL;
2515 new_adv |= ADVERTISE_100HALF;
2517 if (tp->link_config.duplex == DUPLEX_FULL)
2518 new_adv |= ADVERTISE_10FULL;
2520 new_adv |= ADVERTISE_10HALF;
2522 tg3_writephy(tp, MII_ADVERTISE, new_adv);
2527 tg3_writephy(tp, MII_TG3_CTRL, new_adv);
/* Forced mode: write BMCR directly instead of autonegotiating */
2530 if (tp->link_config.autoneg == AUTONEG_DISABLE &&
2531 tp->link_config.speed != SPEED_INVALID) {
2532 u32 bmcr, orig_bmcr;
2534 tp->link_config.active_speed = tp->link_config.speed;
2535 tp->link_config.active_duplex = tp->link_config.duplex;
2538 switch (tp->link_config.speed) {
2544 bmcr |= BMCR_SPEED100;
2548 bmcr |= TG3_BMCR_SPEED1000;
2552 if (tp->link_config.duplex == DUPLEX_FULL)
2553 bmcr |= BMCR_FULLDPLX;
/* Only rewrite BMCR if it actually changes; first drop the link
 * via loopback and poll (up to 1500 iterations) for link-down
 * before applying the new forced mode. */
2555 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
2556 (bmcr != orig_bmcr)) {
2557 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
2558 for (i = 0; i < 1500; i++) {
/* BMSR is latched; read twice to get current state */
2562 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
2563 tg3_readphy(tp, MII_BMSR, &tmp))
2565 if (!(tmp & BMSR_LSTATUS)) {
2570 tg3_writephy(tp, MII_BMCR, bmcr);
/* Autoneg path: kick off (re)negotiation */
2574 tg3_writephy(tp, MII_BMCR,
2575 BMCR_ANENABLE | BMCR_ANRESTART);
/* tg3_init_5401phy_dsp - program BCM5401 PHY DSP coefficients.
 *
 * Disables tap power management and sets the extended packet length bit,
 * then writes a fixed sequence of DSP address/value pairs through the
 * MII_TG3_DSP_ADDRESS / MII_TG3_DSP_RW_PORT indirection registers.
 * The magic values are chip-specific tuning constants from the vendor.
 *
 * Returns 0 on success; OR-accumulates the nonzero result of any failed
 * tg3_writephy() so a single failure makes the return nonzero.
 */
2579 static int tg3_init_5401phy_dsp(struct tg3 *tp)
2583 /* Turn off tap power management. */
2584 /* Set Extended packet length bit */
2585 err = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
2587 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
2588 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
2590 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
2591 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
2593 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2594 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
2596 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
2597 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
2599 err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
2600 err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
/* tg3_copper_is_advertising_all - check that the PHY's advertisement
 * registers contain every mode requested in @mask (ethtool ADVERTISED_*
 * bits).
 *
 * Builds the expected MII ADVERTISE_* bit set from @mask, reads
 * MII_ADVERTISE (and MII_TG3_CTRL for the gigabit bits unless the part
 * is 10/100-only) and compares.  The elided lines presumably return 0
 * on mismatch/read failure and 1 at the end — TODO confirm against the
 * full source.
 */
2607 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
2609 u32 adv_reg, all_mask = 0;
2611 if (mask & ADVERTISED_10baseT_Half)
2612 all_mask |= ADVERTISE_10HALF;
2613 if (mask & ADVERTISED_10baseT_Full)
2614 all_mask |= ADVERTISE_10FULL;
2615 if (mask & ADVERTISED_100baseT_Half)
2616 all_mask |= ADVERTISE_100HALF;
2617 if (mask & ADVERTISED_100baseT_Full)
2618 all_mask |= ADVERTISE_100FULL;
2620 if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
2623 if ((adv_reg & all_mask) != all_mask)
/* Gigabit advertisement lives in MII_TG3_CTRL; only checked when the
 * part is not restricted to 10/100 */
2625 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
2629 if (mask & ADVERTISED_1000baseT_Half)
2630 all_mask |= ADVERTISE_1000HALF;
2631 if (mask & ADVERTISED_1000baseT_Full)
2632 all_mask |= ADVERTISE_1000FULL;
2634 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
2637 if ((tg3_ctrl & all_mask) != all_mask)
/* tg3_adv_1000T_flowctrl_ok - verify the PHY's advertised pause bits
 * match the configured flow control, fetching local/remote advertisement
 * into @lcladv/@rmtadv for the caller.
 *
 * On full duplex, a mismatch between the current and requested pause
 * advertisement fails the check (return path elided here).  On half
 * duplex the advertisement register is reprogrammed in place so the
 * next renegotiation starts from the correct value.
 */
2643 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
2647 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
2650 curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
2651 reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
2653 if (tp->link_config.active_duplex == DUPLEX_FULL) {
2654 if (curadv != reqadv)
/* Remote pause bits only matter when pause autoneg is enabled */
2657 if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG)
2658 tg3_readphy(tp, MII_LPA, rmtadv);
2660 /* Reprogram the advertisement register, even if it
2661 * does not affect the current link. If the link
2662 * gets renegotiated in the future, we can save an
2663 * additional renegotiation cycle by advertising
2664 * it correctly in the first place.
2666 if (curadv != reqadv) {
2667 *lcladv &= ~(ADVERTISE_PAUSE_CAP |
2668 ADVERTISE_PAUSE_ASYM);
2669 tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
/* tg3_setup_copper_phy - full link bring-up for copper-PHY boards.
 *
 * Sequence (as visible in this extract):
 *  1. Clear stale MAC status bits and disable MI auto-polling.
 *  2. Chip/PHY-specific workarounds (third-party PHY reset on link loss,
 *     BCM5401 DSP init, 5701 A0/B0 CRC workaround).
 *  3. Clear pending PHY interrupts, set interrupt mask and LED mode.
 *  4. Poll BMSR for link, decode speed/duplex from MII_TG3_AUX_STAT,
 *     and decide current_link_up for both autoneg and forced modes.
 *  5. Restart negotiation via tg3_phy_copper_begin() when the link is
 *     down or in low-power mode; then program MAC_MODE, MAC_EVENT and
 *     firmware mailbox, and report carrier changes.
 *
 * NOTE(review): many interior lines (returns, waits, else arms) are
 * elided in this extract; comments describe only what is visible.
 */
2676 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
2678 int current_link_up;
2680 u32 lcl_adv, rmt_adv;
/* Ack any stale link-state/config-change events in MAC_STATUS */
2688 (MAC_STATUS_SYNC_CHANGED |
2689 MAC_STATUS_CFG_CHANGED |
2690 MAC_STATUS_MI_COMPLETION |
2691 MAC_STATUS_LNKSTATE_CHANGED));
/* Turn off MI auto-polling while we drive the MDIO bus manually */
2694 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
2696 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL);
2700 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
2702 /* Some third-party PHYs need to be reset on link going
2705 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2706 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2707 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
2708 netif_carrier_ok(tp->dev)) {
/* double BMSR read: the register latches link-down events */
2709 tg3_readphy(tp, MII_BMSR, &bmsr);
2710 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2711 !(bmsr & BMSR_LSTATUS))
2717 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
2718 tg3_readphy(tp, MII_BMSR, &bmsr);
2719 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
2720 !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
2723 if (!(bmsr & BMSR_LSTATUS)) {
2724 err = tg3_init_5401phy_dsp(tp);
/* poll up to 1000 times for link to come back */
2728 tg3_readphy(tp, MII_BMSR, &bmsr);
2729 for (i = 0; i < 1000; i++) {
2731 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2732 (bmsr & BMSR_LSTATUS)) {
/* 5401 B0 at gigabit may need a full PHY reset + DSP reload */
2738 if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
2739 !(bmsr & BMSR_LSTATUS) &&
2740 tp->link_config.active_speed == SPEED_1000) {
2741 err = tg3_phy_reset(tp);
2743 err = tg3_init_5401phy_dsp(tp);
2748 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
2749 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
2750 /* 5701 {A0,B0} CRC bug workaround */
2751 tg3_writephy(tp, 0x15, 0x0a75);
2752 tg3_writephy(tp, 0x1c, 0x8c68);
2753 tg3_writephy(tp, 0x1c, 0x8d68);
2754 tg3_writephy(tp, 0x1c, 0x8c68);
2757 /* Clear pending interrupts... */
2758 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2759 tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
2761 if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
2762 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
2763 else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)
2764 tg3_writephy(tp, MII_TG3_IMASK, ~0);
2766 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2767 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2768 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
2769 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2770 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
2772 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
2775 current_link_up = 0;
2776 current_speed = SPEED_INVALID;
2777 current_duplex = DUPLEX_INVALID;
/* Capacitive-coupling parts: make sure aux control bit 10 is set */
2779 if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
2782 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
2783 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
2784 if (!(val & (1 << 10))) {
2786 tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
/* Poll BMSR for link-up (latched register, hence double read) */
2792 for (i = 0; i < 100; i++) {
2793 tg3_readphy(tp, MII_BMSR, &bmsr);
2794 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
2795 (bmsr & BMSR_LSTATUS))
2800 if (bmsr & BMSR_LSTATUS) {
/* Wait until AUX_STAT reports a resolved speed/duplex */
2803 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
2804 for (i = 0; i < 2000; i++) {
2806 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
2811 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
/* Wait for BMCR to settle to a sane value (not 0 / not 0x7fff) */
2816 for (i = 0; i < 200; i++) {
2817 tg3_readphy(tp, MII_BMCR, &bmcr);
2818 if (tg3_readphy(tp, MII_BMCR, &bmcr))
2820 if (bmcr && bmcr != 0x7fff)
2828 tp->link_config.active_speed = current_speed;
2829 tp->link_config.active_duplex = current_duplex;
2831 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Autoneg link is good only if all requested modes are advertised
 * and the pause advertisement matches */
2832 if ((bmcr & BMCR_ANENABLE) &&
2833 tg3_copper_is_advertising_all(tp,
2834 tp->link_config.advertising)) {
2835 if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
2837 current_link_up = 1;
/* Forced mode: link is good if speed/duplex/flowctrl all match */
2840 if (!(bmcr & BMCR_ANENABLE) &&
2841 tp->link_config.speed == current_speed &&
2842 tp->link_config.duplex == current_duplex &&
2843 tp->link_config.flowctrl ==
2844 tp->link_config.active_flowctrl) {
2845 current_link_up = 1;
2849 if (current_link_up == 1 &&
2850 tp->link_config.active_duplex == DUPLEX_FULL)
2851 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
/* Link down (or low-power): restart negotiation and re-check */
2855 if (current_link_up == 0 || tp->link_config.phy_is_low_power) {
2858 tg3_phy_copper_begin(tp);
2860 tg3_readphy(tp, MII_BMSR, &tmp);
2861 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
2862 (tmp & BMSR_LSTATUS))
2863 current_link_up = 1;
/* Program MAC port mode to match the negotiated speed/duplex */
2866 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
2867 if (current_link_up == 1) {
2868 if (tp->link_config.active_speed == SPEED_100 ||
2869 tp->link_config.active_speed == SPEED_10)
2870 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
2872 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2874 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
2876 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
2877 if (tp->link_config.active_duplex == DUPLEX_HALF)
2878 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
2880 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
2881 if (current_link_up == 1 &&
2882 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
2883 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
2885 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2888 /* ??? Without this setting Netgear GA302T PHY does not
2889 * ??? send/receive packets...
2891 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
2892 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
2893 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
2894 tw32_f(MAC_MI_MODE, tp->mi_mode);
2898 tw32_f(MAC_MODE, tp->mac_mode);
2901 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
2902 /* Polled via timer. */
2903 tw32_f(MAC_EVENT, 0);
2905 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
/* 5700 at gigabit on PCI-X/high-speed PCI: notify firmware via the
 * NIC SRAM mailbox (magic value per vendor requirements) */
2909 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
2910 current_link_up == 1 &&
2911 tp->link_config.active_speed == SPEED_1000 &&
2912 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
2913 (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
2916 (MAC_STATUS_SYNC_CHANGED |
2917 MAC_STATUS_CFG_CHANGED));
2920 NIC_SRAM_FIRMWARE_MBOX,
2921 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
/* Propagate link state to the net stack and log the change */
2924 if (current_link_up != netif_carrier_ok(tp->dev)) {
2925 if (current_link_up)
2926 netif_carrier_on(tp->dev);
2928 netif_carrier_off(tp->dev);
2929 tg3_link_report(tp);
/* tg3_fiber_aneginfo - software state for the IEEE 802.3 Clause 37
 * style autonegotiation state machine used on fiber (1000BASE-X) boards
 * where the hardware does not autonegotiate by itself.
 *
 * ANEG_STATE_* enumerate the state machine's states; MR_* are the
 * "management register" style flags exchanged with the link partner;
 * ANEG_CFG_* decode the received/transmitted config word.
 */
2935 struct tg3_fiber_aneginfo {
2937 #define ANEG_STATE_UNKNOWN 0
2938 #define ANEG_STATE_AN_ENABLE 1
2939 #define ANEG_STATE_RESTART_INIT 2
2940 #define ANEG_STATE_RESTART 3
2941 #define ANEG_STATE_DISABLE_LINK_OK 4
2942 #define ANEG_STATE_ABILITY_DETECT_INIT 5
2943 #define ANEG_STATE_ABILITY_DETECT 6
2944 #define ANEG_STATE_ACK_DETECT_INIT 7
2945 #define ANEG_STATE_ACK_DETECT 8
2946 #define ANEG_STATE_COMPLETE_ACK_INIT 9
2947 #define ANEG_STATE_COMPLETE_ACK 10
2948 #define ANEG_STATE_IDLE_DETECT_INIT 11
2949 #define ANEG_STATE_IDLE_DETECT 12
2950 #define ANEG_STATE_LINK_OK 13
2951 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
2952 #define ANEG_STATE_NEXT_PAGE_WAIT 15
/* flags: local status plus link-partner (LP) advertised abilities */
2955 #define MR_AN_ENABLE 0x00000001
2956 #define MR_RESTART_AN 0x00000002
2957 #define MR_AN_COMPLETE 0x00000004
2958 #define MR_PAGE_RX 0x00000008
2959 #define MR_NP_LOADED 0x00000010
2960 #define MR_TOGGLE_TX 0x00000020
2961 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
2962 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
2963 #define MR_LP_ADV_SYM_PAUSE 0x00000100
2964 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
2965 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
2966 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
2967 #define MR_LP_ADV_NEXT_PAGE 0x00001000
2968 #define MR_TOGGLE_RX 0x00002000
2969 #define MR_NP_RX 0x00004000
2971 #define MR_LINK_OK 0x80000000
/* timestamps in state-machine ticks, used for settle timeouts */
2973 unsigned long link_time, cur_time;
/* last received config word and how many times it repeated; a config
 * word must be seen twice in a row to count as an ability match */
2975 u32 ability_match_cfg;
2976 int ability_match_count;
2978 char ability_match, idle_match, ack_match;
/* tx/rx config words (ANEG_CFG_* bit layout) */
2980 u32 txconfig, rxconfig;
2981 #define ANEG_CFG_NP 0x00000080
2982 #define ANEG_CFG_ACK 0x00000040
2983 #define ANEG_CFG_RF2 0x00000020
2984 #define ANEG_CFG_RF1 0x00000010
2985 #define ANEG_CFG_PS2 0x00000001
2986 #define ANEG_CFG_PS1 0x00008000
2987 #define ANEG_CFG_HD 0x00004000
2988 #define ANEG_CFG_FD 0x00002000
2989 #define ANEG_CFG_INVAL 0x00001f06
/* state machine return codes and settle timeout (in ticks) */
2994 #define ANEG_TIMER_ENAB 2
2995 #define ANEG_FAILED -1
2997 #define ANEG_STATE_SETTLE_TIME 10000
/* tg3_fiber_aneg_smachine - one step of the software 1000BASE-X
 * autonegotiation state machine (modeled on IEEE 802.3 Clause 37).
 *
 * Called repeatedly by fiber_autoneg(); each call samples the received
 * config word from MAC_RX_AUTO_NEG, updates the ability/ack/idle match
 * tracking, then advances ap->state.  Returns ANEG_TIMER_ENAB while a
 * settle timer is running, ANEG_FAILED on error, and (per the caller's
 * check) ANEG_DONE on completion — the DONE paths are elided here.
 *
 * NOTE(review): interior lines (break statements, some assignments)
 * are elided in this extract.
 */
2999 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
3000 struct tg3_fiber_aneginfo *ap)
3003 unsigned long delta;
/* First entry: reset the ability-match tracking */
3007 if (ap->state == ANEG_STATE_UNKNOWN) {
3011 ap->ability_match_cfg = 0;
3012 ap->ability_match_count = 0;
3013 ap->ability_match = 0;
/* Sample the received config word; the same word must be seen more
 * than once in a row before it counts as an ability match */
3019 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
3020 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
3022 if (rx_cfg_reg != ap->ability_match_cfg) {
3023 ap->ability_match_cfg = rx_cfg_reg;
3024 ap->ability_match = 0;
3025 ap->ability_match_count = 0;
3027 if (++ap->ability_match_count > 1) {
3028 ap->ability_match = 1;
3029 ap->ability_match_cfg = rx_cfg_reg;
3032 if (rx_cfg_reg & ANEG_CFG_ACK)
3040 ap->ability_match_cfg = 0;
3041 ap->ability_match_count = 0;
3042 ap->ability_match = 0;
3048 ap->rxconfig = rx_cfg_reg;
3052 case ANEG_STATE_UNKNOWN:
3053 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
3054 ap->state = ANEG_STATE_AN_ENABLE;
3057 case ANEG_STATE_AN_ENABLE:
3058 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
3059 if (ap->flags & MR_AN_ENABLE) {
3062 ap->ability_match_cfg = 0;
3063 ap->ability_match_count = 0;
3064 ap->ability_match = 0;
3068 ap->state = ANEG_STATE_RESTART_INIT;
3070 ap->state = ANEG_STATE_DISABLE_LINK_OK;
3074 case ANEG_STATE_RESTART_INIT:
3075 ap->link_time = ap->cur_time;
3076 ap->flags &= ~(MR_NP_LOADED);
/* Send an all-zero config word (break-link) to restart negotiation */
3078 tw32(MAC_TX_AUTO_NEG, 0);
3079 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3080 tw32_f(MAC_MODE, tp->mac_mode);
3083 ret = ANEG_TIMER_ENAB;
3084 ap->state = ANEG_STATE_RESTART;
3087 case ANEG_STATE_RESTART:
3088 delta = ap->cur_time - ap->link_time;
3089 if (delta > ANEG_STATE_SETTLE_TIME) {
3090 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
3092 ret = ANEG_TIMER_ENAB;
3096 case ANEG_STATE_DISABLE_LINK_OK:
3100 case ANEG_STATE_ABILITY_DETECT_INIT:
3101 ap->flags &= ~(MR_TOGGLE_TX);
/* Advertise full duplex plus the configured pause capabilities */
3102 ap->txconfig = ANEG_CFG_FD;
3103 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3104 if (flowctrl & ADVERTISE_1000XPAUSE)
3105 ap->txconfig |= ANEG_CFG_PS1;
3106 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3107 ap->txconfig |= ANEG_CFG_PS2;
3108 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3109 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3110 tw32_f(MAC_MODE, tp->mac_mode);
3113 ap->state = ANEG_STATE_ABILITY_DETECT;
3116 case ANEG_STATE_ABILITY_DETECT:
3117 if (ap->ability_match != 0 && ap->rxconfig != 0) {
3118 ap->state = ANEG_STATE_ACK_DETECT_INIT;
3122 case ANEG_STATE_ACK_DETECT_INIT:
/* Partner's abilities seen: acknowledge them in our config word */
3123 ap->txconfig |= ANEG_CFG_ACK;
3124 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
3125 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
3126 tw32_f(MAC_MODE, tp->mac_mode);
3129 ap->state = ANEG_STATE_ACK_DETECT;
3132 case ANEG_STATE_ACK_DETECT:
3133 if (ap->ack_match != 0) {
3134 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
3135 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
3136 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
3138 ap->state = ANEG_STATE_AN_ENABLE;
/* Partner restarted (went back to zero config): start over */
3140 } else if (ap->ability_match != 0 &&
3141 ap->rxconfig == 0) {
3142 ap->state = ANEG_STATE_AN_ENABLE;
3146 case ANEG_STATE_COMPLETE_ACK_INIT:
/* Reserved bits set in the received config word: protocol error */
3147 if (ap->rxconfig & ANEG_CFG_INVAL) {
/* Decode the link partner's advertised abilities into MR_* flags */
3151 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
3152 MR_LP_ADV_HALF_DUPLEX |
3153 MR_LP_ADV_SYM_PAUSE |
3154 MR_LP_ADV_ASYM_PAUSE |
3155 MR_LP_ADV_REMOTE_FAULT1 |
3156 MR_LP_ADV_REMOTE_FAULT2 |
3157 MR_LP_ADV_NEXT_PAGE |
3160 if (ap->rxconfig & ANEG_CFG_FD)
3161 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
3162 if (ap->rxconfig & ANEG_CFG_HD)
3163 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
3164 if (ap->rxconfig & ANEG_CFG_PS1)
3165 ap->flags |= MR_LP_ADV_SYM_PAUSE;
3166 if (ap->rxconfig & ANEG_CFG_PS2)
3167 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
3168 if (ap->rxconfig & ANEG_CFG_RF1)
3169 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
3170 if (ap->rxconfig & ANEG_CFG_RF2)
3171 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
3172 if (ap->rxconfig & ANEG_CFG_NP)
3173 ap->flags |= MR_LP_ADV_NEXT_PAGE;
3175 ap->link_time = ap->cur_time;
3177 ap->flags ^= (MR_TOGGLE_TX);
3178 if (ap->rxconfig & 0x0008)
3179 ap->flags |= MR_TOGGLE_RX;
3180 if (ap->rxconfig & ANEG_CFG_NP)
3181 ap->flags |= MR_NP_RX;
3182 ap->flags |= MR_PAGE_RX;
3184 ap->state = ANEG_STATE_COMPLETE_ACK;
3185 ret = ANEG_TIMER_ENAB;
3188 case ANEG_STATE_COMPLETE_ACK:
3189 if (ap->ability_match != 0 &&
3190 ap->rxconfig == 0) {
3191 ap->state = ANEG_STATE_AN_ENABLE;
3194 delta = ap->cur_time - ap->link_time;
3195 if (delta > ANEG_STATE_SETTLE_TIME) {
3196 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
3197 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3199 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
3200 !(ap->flags & MR_NP_RX)) {
3201 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
3209 case ANEG_STATE_IDLE_DETECT_INIT:
3210 ap->link_time = ap->cur_time;
/* Stop sending config words; wait for the line to go idle */
3211 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3212 tw32_f(MAC_MODE, tp->mac_mode);
3215 ap->state = ANEG_STATE_IDLE_DETECT;
3216 ret = ANEG_TIMER_ENAB;
3219 case ANEG_STATE_IDLE_DETECT:
3220 if (ap->ability_match != 0 &&
3221 ap->rxconfig == 0) {
3222 ap->state = ANEG_STATE_AN_ENABLE;
3225 delta = ap->cur_time - ap->link_time;
3226 if (delta > ANEG_STATE_SETTLE_TIME) {
3227 /* XXX another gem from the Broadcom driver :( */
3228 ap->state = ANEG_STATE_LINK_OK;
3232 case ANEG_STATE_LINK_OK:
3233 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
3237 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
3238 /* ??? unimplemented */
3241 case ANEG_STATE_NEXT_PAGE_WAIT:
3242 /* ??? unimplemented */
/* fiber_autoneg - run the software autonegotiation state machine to
 * completion for a fiber link.
 *
 * Primes the MAC for GMII-mode config-word exchange, then ticks
 * tg3_fiber_aneg_smachine() up to 195000 times or until it reports
 * ANEG_DONE/ANEG_FAILED.  On exit, config sending is turned off and
 * the negotiated tx config word and MR_* flags are returned through
 * @txflags/@rxflags.
 *
 * Returns nonzero when negotiation finished and the partner advertised
 * a usable link (AN complete, link OK, full duplex).
 */
3253 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3256 struct tg3_fiber_aneginfo aninfo;
3257 int status = ANEG_FAILED;
3261 tw32_f(MAC_TX_AUTO_NEG, 0);
3263 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3264 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3267 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3270 memset(&aninfo, 0, sizeof(aninfo));
3271 aninfo.flags |= MR_AN_ENABLE;
3272 aninfo.state = ANEG_STATE_UNKNOWN;
3273 aninfo.cur_time = 0;
/* tick bound of 195000 caps worst-case negotiation time */
3275 while (++tick < 195000) {
3276 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3277 if (status == ANEG_DONE || status == ANEG_FAILED)
3283 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3284 tw32_f(MAC_MODE, tp->mac_mode);
3287 *txflags = aninfo.txconfig;
3288 *rxflags = aninfo.flags;
3290 if (status == ANEG_DONE &&
3291 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3292 MR_LP_ADV_FULL_DUPLEX)))
/* tg3_init_bcm8002 - bring up the BCM8002 SerDes PHY.
 *
 * Skips the reset when init is already complete and PCS sync is held
 * (i.e. we have a live link), otherwise resets the PHY and replays the
 * vendor-specified register sequence: PLL lock range, channel/config
 * selection, auto-lock/comdet enable, and a POR pulse.  The raw
 * register numbers/values are undocumented vendor magic.
 *
 * NOTE(review): delay loops between steps are elided in this extract.
 */
3298 static void tg3_init_bcm8002(struct tg3 *tp)
3300 u32 mac_status = tr32(MAC_STATUS);
3303 /* Reset when initting first time or we have a link. */
3304 if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
3305 !(mac_status & MAC_STATUS_PCS_SYNCED))
3308 /* Set PLL lock range. */
3309 tg3_writephy(tp, 0x16, 0x8007);
3312 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
3314 /* Wait for reset to complete. */
3315 /* XXX schedule_timeout() ... */
3316 for (i = 0; i < 500; i++)
3319 /* Config mode; select PMA/Ch 1 regs. */
3320 tg3_writephy(tp, 0x10, 0x8411);
3322 /* Enable auto-lock and comdet, select txclk for tx. */
3323 tg3_writephy(tp, 0x11, 0x0a10);
3325 tg3_writephy(tp, 0x18, 0x00a0);
3326 tg3_writephy(tp, 0x16, 0x41ff);
3328 /* Assert and deassert POR. */
3329 tg3_writephy(tp, 0x13, 0x0400);
3331 tg3_writephy(tp, 0x13, 0x0000);
3333 tg3_writephy(tp, 0x11, 0x0a50);
3335 tg3_writephy(tp, 0x11, 0x0a10);
3337 /* Wait for signal to stabilize */
3338 /* XXX schedule_timeout() ... */
3339 for (i = 0; i < 15000; i++)
3342 /* Deselect the channel register so we can read the PHYID
3345 tg3_writephy(tp, 0x10, 0x8011);
/* tg3_setup_fiber_hw_autoneg - drive link setup via the SG_DIG
 * hardware autonegotiation block (used on fiber parts that have it).
 *
 * Handles three regimes:
 *  - autoneg disabled: tear down HW autoneg (restoring MAC_SERDES_CFG
 *    on affected chips) and declare link up on raw PCS sync;
 *  - autoneg wanted but SG_DIG_CTRL not yet programmed: program it
 *    (with a soft reset on the workaround chips) and arm the 5704S
 *    serdes timeout;
 *  - autoneg in progress/complete: on completion derive pause
 *    advertisement from SG_DIG bits and call tg3_setup_flow_control();
 *    on timeout fall back to parallel detection (PCS sync without
 *    received config words).
 *
 * Returns 1 when the link is considered up, 0 otherwise.
 * NOTE(review): several else arms / delay lines are elided here.
 */
3348 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3351 u32 sg_dig_ctrl, sg_dig_status;
3352 u32 serdes_cfg, expected_sg_dig_ctrl;
3353 int workaround, port_a;
3354 int current_link_up;
3357 expected_sg_dig_ctrl = 0;
3360 current_link_up = 0;
/* 5704 A0/A1 are exempt from the serdes_cfg workaround path */
3362 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3363 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3365 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3368 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3369 /* preserve bits 20-23 for voltage regulator */
3370 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3373 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3375 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
/* Autoneg off: disable the HW block if it was running */
3376 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3378 u32 val = serdes_cfg;
3384 tw32_f(MAC_SERDES_CFG, val);
3387 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3389 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3390 tg3_setup_flow_control(tp, 0, 0);
3391 current_link_up = 1;
3396 /* Want auto-negotiation. */
3397 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3399 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3400 if (flowctrl & ADVERTISE_1000XPAUSE)
3401 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3402 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3403 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3405 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
/* Parallel-detected link still counting down: keep it up rather
 * than restarting autoneg underneath it */
3406 if ((tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT) &&
3407 tp->serdes_counter &&
3408 ((mac_status & (MAC_STATUS_PCS_SYNCED |
3409 MAC_STATUS_RCVD_CFG)) ==
3410 MAC_STATUS_PCS_SYNCED)) {
3411 tp->serdes_counter--;
3412 current_link_up = 1;
3417 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3418 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3420 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3422 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3423 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3424 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3425 MAC_STATUS_SIGNAL_DET)) {
3426 sg_dig_status = tr32(SG_DIG_STATUS);
3427 mac_status = tr32(MAC_STATUS);
3429 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3430 (mac_status & MAC_STATUS_PCS_SYNCED)) {
/* Autoneg done: translate SG_DIG pause bits into the 1000BASE-X
 * local/remote advertisement for flow-control resolution */
3431 u32 local_adv = 0, remote_adv = 0;
3433 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3434 local_adv |= ADVERTISE_1000XPAUSE;
3435 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3436 local_adv |= ADVERTISE_1000XPSE_ASYM;
3438 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3439 remote_adv |= LPA_1000XPAUSE;
3440 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3441 remote_adv |= LPA_1000XPAUSE_ASYM;
3443 tg3_setup_flow_control(tp, local_adv, remote_adv);
3444 current_link_up = 1;
3445 tp->serdes_counter = 0;
3446 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3447 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3448 if (tp->serdes_counter)
3449 tp->serdes_counter--;
/* Autoneg timed out: reset the SerDes config ... */
3452 u32 val = serdes_cfg;
3459 tw32_f(MAC_SERDES_CFG, val);
3462 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3465 /* Link parallel detection - link is up */
3466 /* only if we have PCS_SYNC and not */
3467 /* receiving config code words */
3468 mac_status = tr32(MAC_STATUS);
3469 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
3470 !(mac_status & MAC_STATUS_RCVD_CFG)) {
3471 tg3_setup_flow_control(tp, 0, 0);
3472 current_link_up = 1;
3474 TG3_FLG2_PARALLEL_DETECT;
3475 tp->serdes_counter =
3476 SERDES_PARALLEL_DET_TIMEOUT;
3478 goto restart_autoneg;
3482 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3483 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3487 return current_link_up;
/* tg3_setup_fiber_by_hand - fiber link setup without the SG_DIG HW
 * autoneg block, using the software state machine (fiber_autoneg).
 *
 * Requires PCS sync to proceed.  With autoneg enabled, runs
 * fiber_autoneg() and, on success, resolves flow control from the
 * exchanged config words; also accepts a parallel-detected link
 * (PCS sync with no received config words).  With autoneg disabled,
 * forces 1000FD up.  Returns 1 when the link is up, else 0.
 */
3490 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
3492 int current_link_up = 0;
3494 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
3497 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
3498 u32 txflags, rxflags;
3501 if (fiber_autoneg(tp, &txflags, &rxflags)) {
3502 u32 local_adv = 0, remote_adv = 0;
/* Map our tx config word / partner MR_* flags to 1000BASE-X
 * pause advertisement bits for flow-control resolution */
3504 if (txflags & ANEG_CFG_PS1)
3505 local_adv |= ADVERTISE_1000XPAUSE;
3506 if (txflags & ANEG_CFG_PS2)
3507 local_adv |= ADVERTISE_1000XPSE_ASYM;
3509 if (rxflags & MR_LP_ADV_SYM_PAUSE)
3510 remote_adv |= LPA_1000XPAUSE;
3511 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
3512 remote_adv |= LPA_1000XPAUSE_ASYM;
3514 tg3_setup_flow_control(tp, local_adv, remote_adv);
3516 current_link_up = 1;
/* Ack sync/config change events until they stop (max 30 tries) */
3518 for (i = 0; i < 30; i++) {
3521 (MAC_STATUS_SYNC_CHANGED |
3522 MAC_STATUS_CFG_CHANGED));
3524 if ((tr32(MAC_STATUS) &
3525 (MAC_STATUS_SYNC_CHANGED |
3526 MAC_STATUS_CFG_CHANGED)) == 0)
/* Parallel detection: synced but no config words received */
3530 mac_status = tr32(MAC_STATUS);
3531 if (current_link_up == 0 &&
3532 (mac_status & MAC_STATUS_PCS_SYNCED) &&
3533 !(mac_status & MAC_STATUS_RCVD_CFG))
3534 current_link_up = 1;
3536 tg3_setup_flow_control(tp, 0, 0);
3538 /* Forcing 1000FD link up. */
3539 current_link_up = 1;
3541 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
3544 tw32_f(MAC_MODE, tp->mac_mode);
3549 return current_link_up;
/* tg3_setup_fiber_phy - top-level link setup for TBI/fiber boards.
 *
 * Snapshots the current flow-control/speed/duplex so a change can be
 * reported even when the carrier state itself does not flip.  Puts the
 * MAC in TBI port mode, initializes a BCM8002 PHY if present, then
 * dispatches to tg3_setup_fiber_hw_autoneg() or
 * tg3_setup_fiber_by_hand() depending on TG3_FLG2_HW_AUTONEG.
 * Afterwards it drains sync/config-change events, re-checks PCS sync,
 * programs the link LED, and updates netif carrier state.
 *
 * NOTE(review): some lines (delays, early link-up fast path exit) are
 * elided in this extract.
 */
3552 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
3555 u16 orig_active_speed;
3556 u8 orig_active_duplex;
3558 int current_link_up;
3561 orig_pause_cfg = tp->link_config.active_flowctrl;
3562 orig_active_speed = tp->link_config.active_speed;
3563 orig_active_duplex = tp->link_config.active_duplex;
/* SW-autoneg boards with a live, quiet link: just ack status bits
 * (fast path; full renegotiation is skipped) */
3565 if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
3566 netif_carrier_ok(tp->dev) &&
3567 (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
3568 mac_status = tr32(MAC_STATUS);
3569 mac_status &= (MAC_STATUS_PCS_SYNCED |
3570 MAC_STATUS_SIGNAL_DET |
3571 MAC_STATUS_CFG_CHANGED |
3572 MAC_STATUS_RCVD_CFG);
3573 if (mac_status == (MAC_STATUS_PCS_SYNCED |
3574 MAC_STATUS_SIGNAL_DET)) {
3575 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3576 MAC_STATUS_CFG_CHANGED));
3581 tw32_f(MAC_TX_AUTO_NEG, 0);
3583 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
3584 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
3585 tw32_f(MAC_MODE, tp->mac_mode);
3588 if (tp->phy_id == PHY_ID_BCM8002)
3589 tg3_init_bcm8002(tp);
3591 /* Enable link change event even when serdes polling. */
3592 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3595 current_link_up = 0;
3596 mac_status = tr32(MAC_STATUS);
3598 if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
3599 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
3601 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
/* Clear the link-changed bit in the shared status block */
3603 tp->hw_status->status =
3604 (SD_STATUS_UPDATED |
3605 (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
/* Ack residual sync/config/link-state events (max 100 tries) */
3607 for (i = 0; i < 100; i++) {
3608 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
3609 MAC_STATUS_CFG_CHANGED));
3611 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
3612 MAC_STATUS_CFG_CHANGED |
3613 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
3617 mac_status = tr32(MAC_STATUS);
3618 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
3619 current_link_up = 0;
/* Lost sync with autoneg pending: pulse SEND_CONFIGS to provoke
 * the partner into renegotiating */
3620 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
3621 tp->serdes_counter == 0) {
3622 tw32_f(MAC_MODE, (tp->mac_mode |
3623 MAC_MODE_SEND_CONFIGS));
3625 tw32_f(MAC_MODE, tp->mac_mode);
3629 if (current_link_up == 1) {
/* Fiber links are always 1000FD when up */
3630 tp->link_config.active_speed = SPEED_1000;
3631 tp->link_config.active_duplex = DUPLEX_FULL;
3632 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3633 LED_CTRL_LNKLED_OVERRIDE |
3634 LED_CTRL_1000MBPS_ON));
3636 tp->link_config.active_speed = SPEED_INVALID;
3637 tp->link_config.active_duplex = DUPLEX_INVALID;
3638 tw32(MAC_LED_CTRL, (tp->led_ctrl |
3639 LED_CTRL_LNKLED_OVERRIDE |
3640 LED_CTRL_TRAFFIC_OVERRIDE));
3643 if (current_link_up != netif_carrier_ok(tp->dev)) {
3644 if (current_link_up)
3645 netif_carrier_on(tp->dev);
3647 netif_carrier_off(tp->dev);
3648 tg3_link_report(tp);
/* Carrier unchanged but pause/speed/duplex did change: still report */
3650 u32 now_pause_cfg = tp->link_config.active_flowctrl;
3651 if (orig_pause_cfg != now_pause_cfg ||
3652 orig_active_speed != tp->link_config.active_speed ||
3653 orig_active_duplex != tp->link_config.active_duplex)
3654 tg3_link_report(tp);
/* tg3_setup_fiber_mii_phy - link setup for SerDes parts controlled
 * through MII registers (TG3_FLG2_MII_SERDES boards, e.g. 5714S —
 * note the 5714 special case below uses MAC_TX_STATUS for link).
 *
 * In GMII port mode: reads BMSR/BMCR, then either (a) leaves a
 * parallel-detected link alone, (b) programs 1000BASE-X advertisement
 * and restarts autoneg, or (c) forces duplex via BMCR (dropping any
 * existing link first).  Finally resolves duplex from the local/remote
 * advertisement intersection, sets MAC half-duplex mode, and updates
 * carrier state.  Returns err (accumulated phy-access status).
 *
 * NOTE(review): several else arms and waits are elided in this
 * extract; comments describe only the visible code.
 */
3660 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
3662 int current_link_up, err = 0;
3666 u32 local_adv, remote_adv;
3668 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
3669 tw32_f(MAC_MODE, tp->mac_mode);
/* Ack stale MAC status events */
3675 (MAC_STATUS_SYNC_CHANGED |
3676 MAC_STATUS_CFG_CHANGED |
3677 MAC_STATUS_MI_COMPLETION |
3678 MAC_STATUS_LNKSTATE_CHANGED));
3684 current_link_up = 0;
3685 current_speed = SPEED_INVALID;
3686 current_duplex = DUPLEX_INVALID;
/* BMSR latches link-down: read twice for current state */
3688 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3689 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
/* 5714: BMSR link bit is unreliable; trust MAC_TX_STATUS instead */
3690 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
3691 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3692 bmsr |= BMSR_LSTATUS;
3694 bmsr &= ~BMSR_LSTATUS;
3697 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
3699 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
3700 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3701 /* do nothing, just check for link up at the end */
3702 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
/* Rebuild the 1000BASE-X advertisement from link_config */
3705 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3706 new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
3707 ADVERTISE_1000XPAUSE |
3708 ADVERTISE_1000XPSE_ASYM |
3711 new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3713 if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
3714 new_adv |= ADVERTISE_1000XHALF;
3715 if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
3716 new_adv |= ADVERTISE_1000XFULL;
/* Restart autoneg only when the advertisement changed or autoneg
 * was off; arm the 5714S serdes timeout */
3718 if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
3719 tg3_writephy(tp, MII_ADVERTISE, new_adv);
3720 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
3721 tg3_writephy(tp, MII_BMCR, bmcr);
3723 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3724 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
3725 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
/* Forced-mode path: build the desired BMCR by hand */
3732 bmcr &= ~BMCR_SPEED1000;
3733 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
3735 if (tp->link_config.duplex == DUPLEX_FULL)
3736 new_bmcr |= BMCR_FULLDPLX;
3738 if (new_bmcr != bmcr) {
3739 /* BMCR_SPEED1000 is a reserved bit that needs
3740 * to be set on write.
3742 new_bmcr |= BMCR_SPEED1000;
3744 /* Force a linkdown */
3745 if (netif_carrier_ok(tp->dev)) {
/* stop advertising so the partner drops the link */
3748 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
3749 adv &= ~(ADVERTISE_1000XFULL |
3750 ADVERTISE_1000XHALF |
3752 tg3_writephy(tp, MII_ADVERTISE, adv);
3753 tg3_writephy(tp, MII_BMCR, bmcr |
3757 netif_carrier_off(tp->dev);
3759 tg3_writephy(tp, MII_BMCR, new_bmcr);
3761 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3762 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
3763 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3765 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
3766 bmsr |= BMSR_LSTATUS;
3768 bmsr &= ~BMSR_LSTATUS;
3770 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3774 if (bmsr & BMSR_LSTATUS) {
3775 current_speed = SPEED_1000;
3776 current_link_up = 1;
3777 if (bmcr & BMCR_FULLDPLX)
3778 current_duplex = DUPLEX_FULL;
3780 current_duplex = DUPLEX_HALF;
/* With autoneg, duplex comes from the advertisement intersection */
3785 if (bmcr & BMCR_ANENABLE) {
3788 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
3789 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
3790 common = local_adv & remote_adv;
3791 if (common & (ADVERTISE_1000XHALF |
3792 ADVERTISE_1000XFULL)) {
3793 if (common & ADVERTISE_1000XFULL)
3794 current_duplex = DUPLEX_FULL;
3796 current_duplex = DUPLEX_HALF;
3799 current_link_up = 0;
3803 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
3804 tg3_setup_flow_control(tp, local_adv, remote_adv);
3806 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
3807 if (tp->link_config.active_duplex == DUPLEX_HALF)
3808 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
3810 tw32_f(MAC_MODE, tp->mac_mode);
3813 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
3815 tp->link_config.active_speed = current_speed;
3816 tp->link_config.active_duplex = current_duplex;
3818 if (current_link_up != netif_carrier_ok(tp->dev)) {
3819 if (current_link_up)
3820 netif_carrier_on(tp->dev);
3822 netif_carrier_off(tp->dev);
3823 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3825 tg3_link_report(tp);
/* tg3_serdes_parallel_detect - periodic fallback for MII-SerDes links
 * whose partner does not autonegotiate.
 *
 * While serdes_counter is nonzero, just count down to give autoneg
 * time.  Once expired and the link is still down with autoneg on,
 * check the PHY shadow/expansion registers: signal detect present but
 * no config code words means a non-negotiating partner, so force
 * 1000FD and mark TG3_FLG2_PARALLEL_DETECT.  Conversely, if a
 * parallel-detected link later starts receiving config words, switch
 * autoneg back on and clear the flag.
 */
3830 static void tg3_serdes_parallel_detect(struct tg3 *tp)
3832 if (tp->serdes_counter) {
3833 /* Give autoneg time to complete. */
3834 tp->serdes_counter--;
3837 if (!netif_carrier_ok(tp->dev) &&
3838 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
3841 tg3_readphy(tp, MII_BMCR, &bmcr);
3842 if (bmcr & BMCR_ANENABLE) {
3845 /* Select shadow register 0x1f */
3846 tg3_writephy(tp, 0x1c, 0x7c00);
3847 tg3_readphy(tp, 0x1c, &phy1);
3849 /* Select expansion interrupt status register */
3850 tg3_writephy(tp, 0x17, 0x0f01);
/* double read: first read clears latched status */
3851 tg3_readphy(tp, 0x15, &phy2);
3852 tg3_readphy(tp, 0x15, &phy2);
3854 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
3855 /* We have signal detect and not receiving
3856 * config code words, link is up by parallel
3860 bmcr &= ~BMCR_ANENABLE;
3861 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
3862 tg3_writephy(tp, MII_BMCR, bmcr);
3863 tp->tg3_flags2 |= TG3_FLG2_PARALLEL_DETECT;
3867 else if (netif_carrier_ok(tp->dev) &&
3868 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
3869 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT)) {
3872 /* Select expansion interrupt status register */
3873 tg3_writephy(tp, 0x17, 0x0f01);
3874 tg3_readphy(tp, 0x15, &phy2);
3878 /* Config code words received, turn on autoneg. */
3879 tg3_readphy(tp, MII_BMCR, &bmcr);
3880 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
3882 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
3888 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
3892 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3893 err = tg3_setup_fiber_phy(tp, force_reset);
3894 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
3895 err = tg3_setup_fiber_mii_phy(tp, force_reset);
3897 err = tg3_setup_copper_phy(tp, force_reset);
3900 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
3903 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
3904 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
3906 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
3911 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
3912 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
3913 tw32(GRC_MISC_CFG, val);
3916 if (tp->link_config.active_speed == SPEED_1000 &&
3917 tp->link_config.active_duplex == DUPLEX_HALF)
3918 tw32(MAC_TX_LENGTHS,
3919 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3920 (6 << TX_LENGTHS_IPG_SHIFT) |
3921 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
3923 tw32(MAC_TX_LENGTHS,
3924 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
3925 (6 << TX_LENGTHS_IPG_SHIFT) |
3926 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
3928 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
3929 if (netif_carrier_ok(tp->dev)) {
3930 tw32(HOSTCC_STAT_COAL_TICKS,
3931 tp->coal.stats_block_coalesce_usecs);
3933 tw32(HOSTCC_STAT_COAL_TICKS, 0);
3937 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND) {
3938 u32 val = tr32(PCIE_PWR_MGMT_THRESH);
3939 if (!netif_carrier_ok(tp->dev))
3940 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
3943 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
3944 tw32(PCIE_PWR_MGMT_THRESH, val);
3950 /* This is called whenever we suspect that the system chipset is re-
3951 * ordering the sequence of MMIO to the tx send mailbox. The symptom
3952 * is bogus tx completions. We try to recover by setting the
3953 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
3956 static void tg3_tx_recover(struct tg3 *tp)
3958 BUG_ON((tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER) ||
3959 tp->write32_tx_mbox == tg3_write_indirect_mbox);
3961 printk(KERN_WARNING PFX "%s: The system may be re-ordering memory-"
3962 "mapped I/O cycles to the network device, attempting to "
3963 "recover. Please report the problem to the driver maintainer "
3964 "and include system chipset information.\n", tp->dev->name);
3966 spin_lock(&tp->lock);
3967 tp->tg3_flags |= TG3_FLAG_TX_RECOVERY_PENDING;
3968 spin_unlock(&tp->lock);
3971 static inline u32 tg3_tx_avail(struct tg3 *tp)
3974 return (tp->tx_pending -
3975 ((tp->tx_prod - tp->tx_cons) & (TG3_TX_RING_SIZE - 1)));
3978 /* Tigon3 never reports partial packet sends. So we do not
3979 * need special logic to handle SKBs that have not had all
3980 * of their frags sent yet, like SunGEM does.
3982 static void tg3_tx(struct tg3 *tp)
3984 u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
3985 u32 sw_idx = tp->tx_cons;
3987 while (sw_idx != hw_idx) {
3988 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
3989 struct sk_buff *skb = ri->skb;
3992 if (unlikely(skb == NULL)) {
3997 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4001 sw_idx = NEXT_TX(sw_idx);
4003 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
4004 ri = &tp->tx_buffers[sw_idx];
4005 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
4007 sw_idx = NEXT_TX(sw_idx);
4012 if (unlikely(tx_bug)) {
4018 tp->tx_cons = sw_idx;
4020 /* Need to make the tx_cons update visible to tg3_start_xmit()
4021 * before checking for netif_queue_stopped(). Without the
4022 * memory barrier, there is a small possibility that tg3_start_xmit()
4023 * will miss it and cause the queue to be stopped forever.
4027 if (unlikely(netif_queue_stopped(tp->dev) &&
4028 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))) {
4029 netif_tx_lock(tp->dev);
4030 if (netif_queue_stopped(tp->dev) &&
4031 (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp)))
4032 netif_wake_queue(tp->dev);
4033 netif_tx_unlock(tp->dev);
4037 /* Returns size of skb allocated or < 0 on error.
4039 * We only need to fill in the address because the other members
4040 * of the RX descriptor are invariant, see tg3_init_rings.
4042 * Note the purposeful assymetry of cpu vs. chip accesses. For
4043 * posting buffers we only dirty the first cache line of the RX
4044 * descriptor (containing the address). Whereas for the RX status
4045 * buffers the cpu only reads the last cacheline of the RX descriptor
4046 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4048 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
4049 int src_idx, u32 dest_idx_unmasked)
4051 struct tg3_rx_buffer_desc *desc;
4052 struct ring_info *map, *src_map;
4053 struct sk_buff *skb;
4055 int skb_size, dest_idx;
4058 switch (opaque_key) {
4059 case RXD_OPAQUE_RING_STD:
4060 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4061 desc = &tp->rx_std[dest_idx];
4062 map = &tp->rx_std_buffers[dest_idx];
4064 src_map = &tp->rx_std_buffers[src_idx];
4065 skb_size = tp->rx_pkt_buf_sz;
4068 case RXD_OPAQUE_RING_JUMBO:
4069 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4070 desc = &tp->rx_jumbo[dest_idx];
4071 map = &tp->rx_jumbo_buffers[dest_idx];
4073 src_map = &tp->rx_jumbo_buffers[src_idx];
4074 skb_size = RX_JUMBO_PKT_BUF_SZ;
4081 /* Do not overwrite any of the map or rp information
4082 * until we are sure we can commit to a new buffer.
4084 * Callers depend upon this behavior and assume that
4085 * we leave everything unchanged if we fail.
4087 skb = netdev_alloc_skb(tp->dev, skb_size);
4091 skb_reserve(skb, tp->rx_offset);
4093 mapping = pci_map_single(tp->pdev, skb->data,
4094 skb_size - tp->rx_offset,
4095 PCI_DMA_FROMDEVICE);
4098 pci_unmap_addr_set(map, mapping, mapping);
4100 if (src_map != NULL)
4101 src_map->skb = NULL;
4103 desc->addr_hi = ((u64)mapping >> 32);
4104 desc->addr_lo = ((u64)mapping & 0xffffffff);
4109 /* We only need to move over in the address because the other
4110 * members of the RX descriptor are invariant. See notes above
4111 * tg3_alloc_rx_skb for full details.
4113 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
4114 int src_idx, u32 dest_idx_unmasked)
4116 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4117 struct ring_info *src_map, *dest_map;
4120 switch (opaque_key) {
4121 case RXD_OPAQUE_RING_STD:
4122 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4123 dest_desc = &tp->rx_std[dest_idx];
4124 dest_map = &tp->rx_std_buffers[dest_idx];
4125 src_desc = &tp->rx_std[src_idx];
4126 src_map = &tp->rx_std_buffers[src_idx];
4129 case RXD_OPAQUE_RING_JUMBO:
4130 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4131 dest_desc = &tp->rx_jumbo[dest_idx];
4132 dest_map = &tp->rx_jumbo_buffers[dest_idx];
4133 src_desc = &tp->rx_jumbo[src_idx];
4134 src_map = &tp->rx_jumbo_buffers[src_idx];
4141 dest_map->skb = src_map->skb;
4142 pci_unmap_addr_set(dest_map, mapping,
4143 pci_unmap_addr(src_map, mapping));
4144 dest_desc->addr_hi = src_desc->addr_hi;
4145 dest_desc->addr_lo = src_desc->addr_lo;
4147 src_map->skb = NULL;
4150 #if TG3_VLAN_TAG_USED
4151 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
4153 return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
4157 /* The RX ring scheme is composed of multiple rings which post fresh
4158 * buffers to the chip, and one special ring the chip uses to report
4159 * status back to the host.
4161 * The special ring reports the status of received packets to the
4162 * host. The chip does not write into the original descriptor the
4163 * RX buffer was obtained from. The chip simply takes the original
4164 * descriptor as provided by the host, updates the status and length
4165 * field, then writes this into the next status ring entry.
4167 * Each ring the host uses to post buffers to the chip is described
4168 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
4169 * it is first placed into the on-chip ram. When the packet's length
4170 * is known, it walks down the TG3_BDINFO entries to select the ring.
4171 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
4172 * which is within the range of the new packet's length is chosen.
4174 * The "separate ring for rx status" scheme may sound queer, but it makes
4175 * sense from a cache coherency perspective. If only the host writes
4176 * to the buffer post rings, and only the chip writes to the rx status
4177 * rings, then cache lines never move beyond shared-modified state.
4178 * If both the host and chip were to write into the same ring, cache line
4179 * eviction could occur since both entities want it in an exclusive state.
4181 static int tg3_rx(struct tg3 *tp, int budget)
4183 u32 work_mask, rx_std_posted = 0;
4184 u32 sw_idx = tp->rx_rcb_ptr;
4188 hw_idx = tp->hw_status->idx[0].rx_producer;
4190 * We need to order the read of hw_idx and the read of
4191 * the opaque cookie.
4196 while (sw_idx != hw_idx && budget > 0) {
4197 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
4199 struct sk_buff *skb;
4200 dma_addr_t dma_addr;
4201 u32 opaque_key, desc_idx, *post_ptr;
4203 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4204 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4205 if (opaque_key == RXD_OPAQUE_RING_STD) {
4206 dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
4208 skb = tp->rx_std_buffers[desc_idx].skb;
4209 post_ptr = &tp->rx_std_ptr;
4211 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4212 dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
4214 skb = tp->rx_jumbo_buffers[desc_idx].skb;
4215 post_ptr = &tp->rx_jumbo_ptr;
4218 goto next_pkt_nopost;
4221 work_mask |= opaque_key;
4223 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4224 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4226 tg3_recycle_rx(tp, opaque_key,
4227 desc_idx, *post_ptr);
4229 /* Other statistics kept track of by card. */
4230 tp->net_stats.rx_dropped++;
4234 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
4236 if (len > RX_COPY_THRESHOLD
4237 && tp->rx_offset == 2
4238 /* rx_offset != 2 iff this is a 5701 card running
4239 * in PCI-X mode [see tg3_get_invariants()] */
4243 skb_size = tg3_alloc_rx_skb(tp, opaque_key,
4244 desc_idx, *post_ptr);
4248 pci_unmap_single(tp->pdev, dma_addr,
4249 skb_size - tp->rx_offset,
4250 PCI_DMA_FROMDEVICE);
4254 struct sk_buff *copy_skb;
4256 tg3_recycle_rx(tp, opaque_key,
4257 desc_idx, *post_ptr);
4259 copy_skb = netdev_alloc_skb(tp->dev, len + 2);
4260 if (copy_skb == NULL)
4261 goto drop_it_no_recycle;
4263 skb_reserve(copy_skb, 2);
4264 skb_put(copy_skb, len);
4265 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4266 skb_copy_from_linear_data(skb, copy_skb->data, len);
4267 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
4269 /* We'll reuse the original ring buffer. */
4273 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
4274 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
4275 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
4276 >> RXD_TCPCSUM_SHIFT) == 0xffff))
4277 skb->ip_summed = CHECKSUM_UNNECESSARY;
4279 skb->ip_summed = CHECKSUM_NONE;
4281 skb->protocol = eth_type_trans(skb, tp->dev);
4282 #if TG3_VLAN_TAG_USED
4283 if (tp->vlgrp != NULL &&
4284 desc->type_flags & RXD_FLAG_VLAN) {
4285 tg3_vlan_rx(tp, skb,
4286 desc->err_vlan & RXD_VLAN_MASK);
4289 netif_receive_skb(skb);
4291 tp->dev->last_rx = jiffies;
4298 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4299 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4301 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4302 TG3_64BIT_REG_LOW, idx);
4303 work_mask &= ~RXD_OPAQUE_RING_STD;
4308 sw_idx &= (TG3_RX_RCB_RING_SIZE(tp) - 1);
4310 /* Refresh hw_idx to see if there is new work */
4311 if (sw_idx == hw_idx) {
4312 hw_idx = tp->hw_status->idx[0].rx_producer;
4317 /* ACK the status ring. */
4318 tp->rx_rcb_ptr = sw_idx;
4319 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, sw_idx);
4321 /* Refill RX ring(s). */
4322 if (work_mask & RXD_OPAQUE_RING_STD) {
4323 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
4324 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4327 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4328 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
4329 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4337 static int tg3_poll_work(struct tg3 *tp, int work_done, int budget)
4339 struct tg3_hw_status *sblk = tp->hw_status;
4341 /* handle link change and other phy events */
4342 if (!(tp->tg3_flags &
4343 (TG3_FLAG_USE_LINKCHG_REG |
4344 TG3_FLAG_POLL_SERDES))) {
4345 if (sblk->status & SD_STATUS_LINK_CHG) {
4346 sblk->status = SD_STATUS_UPDATED |
4347 (sblk->status & ~SD_STATUS_LINK_CHG);
4348 spin_lock(&tp->lock);
4349 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4351 (MAC_STATUS_SYNC_CHANGED |
4352 MAC_STATUS_CFG_CHANGED |
4353 MAC_STATUS_MI_COMPLETION |
4354 MAC_STATUS_LNKSTATE_CHANGED));
4357 tg3_setup_phy(tp, 0);
4358 spin_unlock(&tp->lock);
4362 /* run TX completion thread */
4363 if (sblk->idx[0].tx_consumer != tp->tx_cons) {
4365 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4369 /* run RX thread, within the bounds set by NAPI.
4370 * All RX "locking" is done by ensuring outside
4371 * code synchronizes with tg3->napi.poll()
4373 if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
4374 work_done += tg3_rx(tp, budget - work_done);
4379 static int tg3_poll(struct napi_struct *napi, int budget)
4381 struct tg3 *tp = container_of(napi, struct tg3, napi);
4383 struct tg3_hw_status *sblk = tp->hw_status;
4386 work_done = tg3_poll_work(tp, work_done, budget);
4388 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4391 if (unlikely(work_done >= budget))
4394 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
4395 /* tp->last_tag is used in tg3_restart_ints() below
4396 * to tell the hw how much work has been processed,
4397 * so we must read it before checking for more work.
4399 tp->last_tag = sblk->status_tag;
4402 sblk->status &= ~SD_STATUS_UPDATED;
4404 if (likely(!tg3_has_work(tp))) {
4405 netif_rx_complete(tp->dev, napi);
4406 tg3_restart_ints(tp);
4414 /* work_done is guaranteed to be less than budget. */
4415 netif_rx_complete(tp->dev, napi);
4416 schedule_work(&tp->reset_task);
4420 static void tg3_irq_quiesce(struct tg3 *tp)
4422 BUG_ON(tp->irq_sync);
4427 synchronize_irq(tp->pdev->irq);
4430 static inline int tg3_irq_sync(struct tg3 *tp)
4432 return tp->irq_sync;
4435 /* Fully shutdown all tg3 driver activity elsewhere in the system.
4436 * If irq_sync is non-zero, then the IRQ handler must be synchronized
4437 * with as well. Most of the time, this is not necessary except when
4438 * shutting down the device.
4440 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
4442 spin_lock_bh(&tp->lock);
4444 tg3_irq_quiesce(tp);
4447 static inline void tg3_full_unlock(struct tg3 *tp)
4449 spin_unlock_bh(&tp->lock);
4452 /* One-shot MSI handler - Chip automatically disables interrupt
4453 * after sending MSI so driver doesn't have to do it.
4455 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
4457 struct net_device *dev = dev_id;
4458 struct tg3 *tp = netdev_priv(dev);
4460 prefetch(tp->hw_status);
4461 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4463 if (likely(!tg3_irq_sync(tp)))
4464 netif_rx_schedule(dev, &tp->napi);
4469 /* MSI ISR - No need to check for interrupt sharing and no need to
4470 * flush status block and interrupt mailbox. PCI ordering rules
4471 * guarantee that MSI will arrive after the status block.
4473 static irqreturn_t tg3_msi(int irq, void *dev_id)
4475 struct net_device *dev = dev_id;
4476 struct tg3 *tp = netdev_priv(dev);
4478 prefetch(tp->hw_status);
4479 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4481 * Writing any value to intr-mbox-0 clears PCI INTA# and
4482 * chip-internal interrupt pending events.
4483 * Writing non-zero to intr-mbox-0 additional tells the
4484 * NIC to stop sending us irqs, engaging "in-intr-handler"
4487 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4488 if (likely(!tg3_irq_sync(tp)))
4489 netif_rx_schedule(dev, &tp->napi);
4491 return IRQ_RETVAL(1);
4494 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
4496 struct net_device *dev = dev_id;
4497 struct tg3 *tp = netdev_priv(dev);
4498 struct tg3_hw_status *sblk = tp->hw_status;
4499 unsigned int handled = 1;
4501 /* In INTx mode, it is possible for the interrupt to arrive at
4502 * the CPU before the status block posted prior to the interrupt.
4503 * Reading the PCI State register will confirm whether the
4504 * interrupt is ours and will flush the status block.
4506 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
4507 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4508 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4515 * Writing any value to intr-mbox-0 clears PCI INTA# and
4516 * chip-internal interrupt pending events.
4517 * Writing non-zero to intr-mbox-0 additional tells the
4518 * NIC to stop sending us irqs, engaging "in-intr-handler"
4521 * Flush the mailbox to de-assert the IRQ immediately to prevent
4522 * spurious interrupts. The flush impacts performance but
4523 * excessive spurious interrupts can be worse in some cases.
4525 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4526 if (tg3_irq_sync(tp))
4528 sblk->status &= ~SD_STATUS_UPDATED;
4529 if (likely(tg3_has_work(tp))) {
4530 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4531 netif_rx_schedule(dev, &tp->napi);
4533 /* No work, shared interrupt perhaps? re-enable
4534 * interrupts, and flush that PCI write
4536 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
4540 return IRQ_RETVAL(handled);
4543 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
4545 struct net_device *dev = dev_id;
4546 struct tg3 *tp = netdev_priv(dev);
4547 struct tg3_hw_status *sblk = tp->hw_status;
4548 unsigned int handled = 1;
4550 /* In INTx mode, it is possible for the interrupt to arrive at
4551 * the CPU before the status block posted prior to the interrupt.
4552 * Reading the PCI State register will confirm whether the
4553 * interrupt is ours and will flush the status block.
4555 if (unlikely(sblk->status_tag == tp->last_tag)) {
4556 if ((tp->tg3_flags & TG3_FLAG_CHIP_RESETTING) ||
4557 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4564 * writing any value to intr-mbox-0 clears PCI INTA# and
4565 * chip-internal interrupt pending events.
4566 * writing non-zero to intr-mbox-0 additional tells the
4567 * NIC to stop sending us irqs, engaging "in-intr-handler"
4570 * Flush the mailbox to de-assert the IRQ immediately to prevent
4571 * spurious interrupts. The flush impacts performance but
4572 * excessive spurious interrupts can be worse in some cases.
4574 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
4575 if (tg3_irq_sync(tp))
4577 if (netif_rx_schedule_prep(dev, &tp->napi)) {
4578 prefetch(&tp->rx_rcb[tp->rx_rcb_ptr]);
4579 /* Update last_tag to mark that this status has been
4580 * seen. Because interrupt may be shared, we may be
4581 * racing with tg3_poll(), so only update last_tag
4582 * if tg3_poll() is not scheduled.
4584 tp->last_tag = sblk->status_tag;
4585 __netif_rx_schedule(dev, &tp->napi);
4588 return IRQ_RETVAL(handled);
4591 /* ISR for interrupt test */
4592 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
4594 struct net_device *dev = dev_id;
4595 struct tg3 *tp = netdev_priv(dev);
4596 struct tg3_hw_status *sblk = tp->hw_status;
4598 if ((sblk->status & SD_STATUS_UPDATED) ||
4599 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
4600 tg3_disable_ints(tp);
4601 return IRQ_RETVAL(1);
4603 return IRQ_RETVAL(0);
4606 static int tg3_init_hw(struct tg3 *, int);
4607 static int tg3_halt(struct tg3 *, int, int);
4609 /* Restart hardware after configuration changes, self-test, etc.
4610 * Invoked with tp->lock held.
4612 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
4613 __releases(tp->lock)
4614 __acquires(tp->lock)
4618 err = tg3_init_hw(tp, reset_phy);
4620 printk(KERN_ERR PFX "%s: Failed to re-initialize device, "
4621 "aborting.\n", tp->dev->name);
4622 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
4623 tg3_full_unlock(tp);
4624 del_timer_sync(&tp->timer);
4626 napi_enable(&tp->napi);
4628 tg3_full_lock(tp, 0);
4633 #ifdef CONFIG_NET_POLL_CONTROLLER
4634 static void tg3_poll_controller(struct net_device *dev)
4636 struct tg3 *tp = netdev_priv(dev);
4638 tg3_interrupt(tp->pdev->irq, dev);
4642 static void tg3_reset_task(struct work_struct *work)
4644 struct tg3 *tp = container_of(work, struct tg3, reset_task);
4646 unsigned int restart_timer;
4648 tg3_full_lock(tp, 0);
4650 if (!netif_running(tp->dev)) {
4651 tg3_full_unlock(tp);
4655 tg3_full_unlock(tp);
4661 tg3_full_lock(tp, 1);
4663 restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
4664 tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
4666 if (tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING) {
4667 tp->write32_tx_mbox = tg3_write32_tx_mbox;
4668 tp->write32_rx_mbox = tg3_write_flush_reg32;
4669 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
4670 tp->tg3_flags &= ~TG3_FLAG_TX_RECOVERY_PENDING;
4673 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
4674 err = tg3_init_hw(tp, 1);
4678 tg3_netif_start(tp);
4681 mod_timer(&tp->timer, jiffies + 1);
4684 tg3_full_unlock(tp);
4690 static void tg3_dump_short_state(struct tg3 *tp)
4692 printk(KERN_ERR PFX "DEBUG: MAC_TX_STATUS[%08x] MAC_RX_STATUS[%08x]\n",
4693 tr32(MAC_TX_STATUS), tr32(MAC_RX_STATUS));
4694 printk(KERN_ERR PFX "DEBUG: RDMAC_STATUS[%08x] WDMAC_STATUS[%08x]\n",
4695 tr32(RDMAC_STATUS), tr32(WDMAC_STATUS));
4698 static void tg3_tx_timeout(struct net_device *dev)
4700 struct tg3 *tp = netdev_priv(dev);
4702 if (netif_msg_tx_err(tp)) {
4703 printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
4705 tg3_dump_short_state(tp);
4708 schedule_work(&tp->reset_task);
4711 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
4712 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
4714 u32 base = (u32) mapping & 0xffffffff;
4716 return ((base > 0xffffdcc0) &&
4717 (base + len + 8 < base));
4720 /* Test for DMA addresses > 40-bit */
4721 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
4724 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
4725 if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG)
4726 return (((u64) mapping + len) > DMA_40BIT_MASK);
4733 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
4735 /* Workaround 4GB and 40-bit hardware DMA bugs. */
4736 static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
4737 u32 last_plus_one, u32 *start,
4738 u32 base_flags, u32 mss)
4740 struct sk_buff *new_skb;
4741 dma_addr_t new_addr = 0;
4745 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
4746 new_skb = skb_copy(skb, GFP_ATOMIC);
4748 int more_headroom = 4 - ((unsigned long)skb->data & 3);
4750 new_skb = skb_copy_expand(skb,
4751 skb_headroom(skb) + more_headroom,
4752 skb_tailroom(skb), GFP_ATOMIC);
4758 /* New SKB is guaranteed to be linear. */
4760 ret = skb_dma_map(&tp->pdev->dev, new_skb, DMA_TO_DEVICE);
4761 new_addr = skb_shinfo(new_skb)->dma_maps[0];
4763 /* Make sure new skb does not cross any 4G boundaries.
4764 * Drop the packet if it does.
4766 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) {
4768 skb_dma_unmap(&tp->pdev->dev, new_skb,
4771 dev_kfree_skb(new_skb);
4774 tg3_set_txd(tp, entry, new_addr, new_skb->len,
4775 base_flags, 1 | (mss << 1));
4776 *start = NEXT_TX(entry);
4780 /* Now clean up the sw ring entries. */
4782 while (entry != last_plus_one) {
4784 tp->tx_buffers[entry].skb = new_skb;
4786 tp->tx_buffers[entry].skb = NULL;
4788 entry = NEXT_TX(entry);
4792 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
4798 static void tg3_set_txd(struct tg3 *tp, int entry,
4799 dma_addr_t mapping, int len, u32 flags,
4802 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
4803 int is_end = (mss_and_is_end & 0x1);
4804 u32 mss = (mss_and_is_end >> 1);
4808 flags |= TXD_FLAG_END;
4809 if (flags & TXD_FLAG_VLAN) {
4810 vlan_tag = flags >> 16;
4813 vlan_tag |= (mss << TXD_MSS_SHIFT);
4815 txd->addr_hi = ((u64) mapping >> 32);
4816 txd->addr_lo = ((u64) mapping & 0xffffffff);
4817 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
4818 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
4821 /* hard_start_xmit for devices that don't have any bugs and
4822 * support TG3_FLG2_HW_TSO_2 only.
4824 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
4826 struct tg3 *tp = netdev_priv(dev);
4827 u32 len, entry, base_flags, mss;
4828 struct skb_shared_info *sp;
4831 len = skb_headlen(skb);
4833 /* We are running in BH disabled context with netif_tx_lock
4834 * and TX reclaim runs via tp->napi.poll inside of a software
4835 * interrupt. Furthermore, IRQ processing runs lockless so we have
4836 * no IRQ context deadlocks to worry about either. Rejoice!
4838 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4839 if (!netif_queue_stopped(dev)) {
4840 netif_stop_queue(dev);
4842 /* This is a hard error, log it. */
4843 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
4844 "queue awake!\n", dev->name);
4846 return NETDEV_TX_BUSY;
4849 entry = tp->tx_prod;
4852 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
4853 int tcp_opt_len, ip_tcp_len;
4855 if (skb_header_cloned(skb) &&
4856 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4861 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
4862 mss |= (skb_headlen(skb) - ETH_HLEN) << 9;
4864 struct iphdr *iph = ip_hdr(skb);
4866 tcp_opt_len = tcp_optlen(skb);
4867 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4870 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4871 mss |= (ip_tcp_len + tcp_opt_len) << 9;
4874 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
4875 TXD_FLAG_CPU_POST_DMA);
4877 tcp_hdr(skb)->check = 0;
4880 else if (skb->ip_summed == CHECKSUM_PARTIAL)
4881 base_flags |= TXD_FLAG_TCPUDP_CSUM;
4882 #if TG3_VLAN_TAG_USED
4883 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
4884 base_flags |= (TXD_FLAG_VLAN |
4885 (vlan_tx_tag_get(skb) << 16));
4888 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
4893 sp = skb_shinfo(skb);
4895 mapping = sp->dma_maps[0];
4897 tp->tx_buffers[entry].skb = skb;
4899 tg3_set_txd(tp, entry, mapping, len, base_flags,
4900 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
4902 entry = NEXT_TX(entry);
4904 /* Now loop through additional data fragments, and queue them. */
4905 if (skb_shinfo(skb)->nr_frags > 0) {
4906 unsigned int i, last;
4908 last = skb_shinfo(skb)->nr_frags - 1;
4909 for (i = 0; i <= last; i++) {
4910 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4913 mapping = sp->dma_maps[i + 1];
4914 tp->tx_buffers[entry].skb = NULL;
4916 tg3_set_txd(tp, entry, mapping, len,
4917 base_flags, (i == last) | (mss << 1));
4919 entry = NEXT_TX(entry);
4923 /* Packets are ready, update Tx producer idx local and on card. */
4924 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
4926 tp->tx_prod = entry;
4927 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
4928 netif_stop_queue(dev);
4929 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
4930 netif_wake_queue(tp->dev);
4936 dev->trans_start = jiffies;
4938 return NETDEV_TX_OK;
4941 static int tg3_start_xmit_dma_bug(struct sk_buff *, struct net_device *);
4943 /* Use GSO to workaround a rare TSO bug that may be triggered when the
4944 * TSO header is greater than 80 bytes.
4946 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
4948 struct sk_buff *segs, *nskb;
4950 /* Estimate the number of fragments in the worst case */
4951 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))) {
4952 netif_stop_queue(tp->dev);
4953 if (tg3_tx_avail(tp) <= (skb_shinfo(skb)->gso_segs * 3))
4954 return NETDEV_TX_BUSY;
4956 netif_wake_queue(tp->dev);
4959 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
4961 goto tg3_tso_bug_end;
4967 tg3_start_xmit_dma_bug(nskb, tp->dev);
4973 return NETDEV_TX_OK;
4976 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
4977 * support TG3_FLG2_HW_TSO_1 or firmware TSO only.
4979 static int tg3_start_xmit_dma_bug(struct sk_buff *skb, struct net_device *dev)
4981 struct tg3 *tp = netdev_priv(dev);
4982 u32 len, entry, base_flags, mss;
4983 struct skb_shared_info *sp;
4984 int would_hit_hwbug;
4987 len = skb_headlen(skb);
4989 /* We are running in BH disabled context with netif_tx_lock
4990 * and TX reclaim runs via tp->napi.poll inside of a software
4991 * interrupt. Furthermore, IRQ processing runs lockless so we have
4992 * no IRQ context deadlocks to worry about either. Rejoice!
4994 if (unlikely(tg3_tx_avail(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
4995 if (!netif_queue_stopped(dev)) {
4996 netif_stop_queue(dev);
4998 /* This is a hard error, log it. */
4999 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
5000 "queue awake!\n", dev->name);
5002 return NETDEV_TX_BUSY;
5005 entry = tp->tx_prod;
5007 if (skb->ip_summed == CHECKSUM_PARTIAL)
5008 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5010 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5012 int tcp_opt_len, ip_tcp_len, hdr_len;
5014 if (skb_header_cloned(skb) &&
5015 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5020 tcp_opt_len = tcp_optlen(skb);
5021 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5023 hdr_len = ip_tcp_len + tcp_opt_len;
5024 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
5025 (tp->tg3_flags2 & TG3_FLG2_TSO_BUG))
5026 return (tg3_tso_bug(tp, skb));
5028 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
5029 TXD_FLAG_CPU_POST_DMA);
5033 iph->tot_len = htons(mss + hdr_len);
5034 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
5035 tcp_hdr(skb)->check = 0;
5036 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
5038 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5043 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
5044 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
5045 if (tcp_opt_len || iph->ihl > 5) {
5048 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5049 mss |= (tsflags << 11);
5052 if (tcp_opt_len || iph->ihl > 5) {
5055 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
5056 base_flags |= tsflags << 12;
5060 #if TG3_VLAN_TAG_USED
5061 if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
5062 base_flags |= (TXD_FLAG_VLAN |
5063 (vlan_tx_tag_get(skb) << 16));
5066 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5071 sp = skb_shinfo(skb);
5073 mapping = sp->dma_maps[0];
5075 tp->tx_buffers[entry].skb = skb;
5077 would_hit_hwbug = 0;
5079 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5080 would_hit_hwbug = 1;
5081 else if (tg3_4g_overflow_test(mapping, len))
5082 would_hit_hwbug = 1;
5084 tg3_set_txd(tp, entry, mapping, len, base_flags,
5085 (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
5087 entry = NEXT_TX(entry);
5089 /* Now loop through additional data fragments, and queue them. */
5090 if (skb_shinfo(skb)->nr_frags > 0) {
5091 unsigned int i, last;
5093 last = skb_shinfo(skb)->nr_frags - 1;
5094 for (i = 0; i <= last; i++) {
5095 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5098 mapping = sp->dma_maps[i + 1];
5100 tp->tx_buffers[entry].skb = NULL;
5102 if (tg3_4g_overflow_test(mapping, len))
5103 would_hit_hwbug = 1;
5105 if (tg3_40bit_overflow_test(tp, mapping, len))
5106 would_hit_hwbug = 1;
5108 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5109 tg3_set_txd(tp, entry, mapping, len,
5110 base_flags, (i == last)|(mss << 1));
5112 tg3_set_txd(tp, entry, mapping, len,
5113 base_flags, (i == last));
5115 entry = NEXT_TX(entry);
5119 if (would_hit_hwbug) {
5120 u32 last_plus_one = entry;
5123 start = entry - 1 - skb_shinfo(skb)->nr_frags;
5124 start &= (TG3_TX_RING_SIZE - 1);
5126 /* If the workaround fails due to memory/mapping
5127 * failure, silently drop this packet.
5129 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one,
5130 &start, base_flags, mss))
5136 /* Packets are ready, update Tx producer idx local and on card. */
5137 tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
5139 tp->tx_prod = entry;
5140 if (unlikely(tg3_tx_avail(tp) <= (MAX_SKB_FRAGS + 1))) {
5141 netif_stop_queue(dev);
5142 if (tg3_tx_avail(tp) > TG3_TX_WAKEUP_THRESH(tp))
5143 netif_wake_queue(tp->dev);
5149 dev->trans_start = jiffies;
5151 return NETDEV_TX_OK;
/* Record a new MTU in software state: jumbo MTUs enable the jumbo RX
 * ring, and on 5780-class chips additionally disable TSO (the hardware
 * cannot do TSO with jumbo frames).  Standard MTUs restore TSO
 * capability and disable the jumbo ring.
 * NOTE(review): this extract elides several original lines (braces /
 * dev->mtu assignment) -- verify against the full file.
 */
5154 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
5159 if (new_mtu > ETH_DATA_LEN) {
5160 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
/* 5780-class: TSO and jumbo frames are mutually exclusive. */
5161 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
5162 ethtool_op_set_tso(dev, 0);
5165 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
5167 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
5168 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
5169 tp->tg3_flags &= ~TG3_FLAG_JUMBO_RING_ENABLE;
/* ndo_change_mtu handler.  Validates the requested MTU against the
 * chip's limits; if the interface is down, just records the new MTU,
 * otherwise halts the chip, applies the MTU and restarts the hardware
 * under the full lock.
 * NOTE(review): error-return lines are elided in this extract.
 */
5173 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
5175 struct tg3 *tp = netdev_priv(dev);
/* Reject MTUs outside the supported range for this chip. */
5178 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
5181 if (!netif_running(dev)) {
5182 /* We'll just catch it later when the
5185 tg3_set_mtu(dev, tp, new_mtu);
/* Interface is up: full reset cycle is required to resize rings. */
5193 tg3_full_lock(tp, 1);
5195 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
5197 tg3_set_mtu(dev, tp, new_mtu);
5199 err = tg3_restart_hw(tp, 0);
5202 tg3_netif_start(tp);
5204 tg3_full_unlock(tp);
5212 /* Free up pending packets in all rx/tx rings.
5214 * The chip has been shut down and the driver detached from
5215 * the networking, so no interrupts or new tx packets will
5216 * end up in the driver. tp->{tx,}lock is not held and we are not
5217 * in an interrupt context and thus may sleep.
5219 static void tg3_free_rings(struct tg3 *tp)
5221 struct ring_info *rxp;
/* Unmap and free every posted skb in the standard RX ring. */
5224 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5225 rxp = &tp->rx_std_buffers[i];
5227 if (rxp->skb == NULL)
5229 pci_unmap_single(tp->pdev,
5230 pci_unmap_addr(rxp, mapping),
5231 tp->rx_pkt_buf_sz - tp->rx_offset,
5232 PCI_DMA_FROMDEVICE);
5233 dev_kfree_skb_any(rxp->skb);
/* Same cleanup for the jumbo RX ring (fixed jumbo buffer size). */
5237 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5238 rxp = &tp->rx_jumbo_buffers[i];
5240 if (rxp->skb == NULL)
5242 pci_unmap_single(tp->pdev,
5243 pci_unmap_addr(rxp, mapping),
5244 RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
5245 PCI_DMA_FROMDEVICE);
5246 dev_kfree_skb_any(rxp->skb);
/* TX ring: one skb may own nr_frags + 1 consecutive descriptors,
 * so the index advances by that amount per freed skb.
 * NOTE(review): skb fetch / NULL-check lines are elided here.
 */
5250 for (i = 0; i < TG3_TX_RING_SIZE; ) {
5251 struct tx_ring_info *txp;
5252 struct sk_buff *skb;
5254 txp = &tp->tx_buffers[i];
5262 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
5266 i += skb_shinfo(skb)->nr_frags + 1;
5268 dev_kfree_skb_any(skb);
5272 /* Initialize tx/rx rings for packet processing.
5274 * The chip has been shut down and the driver detached from
5275 * the networking, so no interrupts or new tx packets will
5276 * end up in the driver. tp->{tx,}lock are held and thus
5279 static int tg3_init_rings(struct tg3 *tp)
/* Free up all the SKBs. */
5283 /* Free up all the SKBs. */
/* Zero every descriptor ring before repopulating. */
5286 /* Zero out all descriptors. */
5287 memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
5288 memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
5289 memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
5290 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
/* 5780-class chips use the standard ring with jumbo-sized buffers
 * instead of a separate jumbo ring when the MTU is large.
 */
5292 tp->rx_pkt_buf_sz = RX_PKT_BUF_SZ;
5293 if ((tp->tg3_flags2 & TG3_FLG2_5780_CLASS) &&
5294 (tp->dev->mtu > ETH_DATA_LEN))
5295 tp->rx_pkt_buf_sz = RX_JUMBO_PKT_BUF_SZ;
5297 /* Initialize invariants of the rings, we only set this
5298 * stuff once. This works because the card does not
5299 * write into the rx buffer posting rings.
5301 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5302 struct tg3_rx_buffer_desc *rxd;
5304 rxd = &tp->rx_std[i];
5305 rxd->idx_len = (tp->rx_pkt_buf_sz - tp->rx_offset - 64)
5307 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
5308 rxd->opaque = (RXD_OPAQUE_RING_STD |
5309 (i << RXD_OPAQUE_INDEX_SHIFT));
/* Jumbo ring descriptors only matter when jumbo is enabled. */
5312 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5313 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
5314 struct tg3_rx_buffer_desc *rxd;
5316 rxd = &tp->rx_jumbo[i];
5317 rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
5319 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
5321 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
5322 (i << RXD_OPAQUE_INDEX_SHIFT));
/* Allocation failures shrink the ring rather than failing init. */
5326 /* Now allocate fresh SKBs for each rx ring. */
5327 for (i = 0; i < tp->rx_pending; i++) {
5328 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD, -1, i) < 0) {
5329 printk(KERN_WARNING PFX
5330 "%s: Using a smaller RX standard ring, "
5331 "only %d out of %d buffers were allocated "
5333 tp->dev->name, i, tp->rx_pending);
5341 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
5342 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5343 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
5345 printk(KERN_WARNING PFX
5346 "%s: Using a smaller RX jumbo ring, "
5347 "only %d out of %d buffers were "
5348 "allocated successfully.\n",
5349 tp->dev->name, i, tp->rx_jumbo_pending);
5354 tp->rx_jumbo_pending = i;
5363 * Must not be invoked with interrupt sources disabled and
5364 * the hardware shutdown down.
/* Release all host-side ring bookkeeping and every DMA-coherent ring /
 * status / stats area.  Pointers are NULLed after freeing so a repeat
 * call is safe.  NOTE(review): the NULL-guard lines around several
 * pci_free_consistent() calls are elided in this extract.
 */
5366 static void tg3_free_consistent(struct tg3 *tp)
/* One kzalloc backs rx_std_buffers, rx_jumbo_buffers and tx_buffers
 * (see tg3_alloc_consistent), so a single kfree releases all three.
 */
5368 kfree(tp->rx_std_buffers);
5369 tp->rx_std_buffers = NULL;
5371 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
5372 tp->rx_std, tp->rx_std_mapping);
5376 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5377 tp->rx_jumbo, tp->rx_jumbo_mapping);
5378 tp->rx_jumbo = NULL;
5381 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5382 tp->rx_rcb, tp->rx_rcb_mapping);
5386 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
5387 tp->tx_ring, tp->tx_desc_mapping);
5390 if (tp->hw_status) {
5391 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
5392 tp->hw_status, tp->status_mapping);
5393 tp->hw_status = NULL;
5396 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
5397 tp->hw_stats, tp->stats_mapping);
5398 tp->hw_stats = NULL;
5403 * Must not be invoked with interrupt sources disabled and
5404 * the hardware shutdown down. Can sleep.
/* Allocate all host bookkeeping arrays (one kzalloc carved into three
 * regions) plus the DMA-coherent descriptor rings, hardware status
 * block and statistics block.  On any failure, falls through to
 * tg3_free_consistent() so partial allocations are released.
 * NOTE(review): the per-allocation NULL checks are elided here.
 */
5406 static int tg3_alloc_consistent(struct tg3 *tp)
5408 tp->rx_std_buffers = kzalloc((sizeof(struct ring_info) *
5410 TG3_RX_JUMBO_RING_SIZE)) +
5411 (sizeof(struct tx_ring_info) *
5414 if (!tp->rx_std_buffers)
/* Carve the jumbo and tx bookkeeping out of the same allocation. */
5417 tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
5418 tp->tx_buffers = (struct tx_ring_info *)
5419 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
5421 tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
5422 &tp->rx_std_mapping);
5426 tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
5427 &tp->rx_jumbo_mapping);
5432 tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
5433 &tp->rx_rcb_mapping);
5437 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
5438 &tp->tx_desc_mapping);
5442 tp->hw_status = pci_alloc_consistent(tp->pdev,
5444 &tp->status_mapping);
5448 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5449 sizeof(struct tg3_hw_stats),
5450 &tp->stats_mapping);
/* Hardware reads these; start from a clean state. */
5454 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5455 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
/* Error path: unwind everything allocated so far. */
5460 tg3_free_consistent(tp);
5464 #define MAX_WAIT_CNT 1000
5466 /* To stop a block, clear the enable bit and poll till it
5467 * clears. tp->lock is held.
5469 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
/* 5705+ chips lack writable enable bits on some blocks; the elided
 * switch here returns success for those register offsets.
 */
5474 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5481 /* We can't enable/disable these bits of the
5482 * 5705/5750, just say success.
/* Poll until the enable bit reads back clear, or give up after
 * MAX_WAIT_CNT iterations.
 */
5495 for (i = 0; i < MAX_WAIT_CNT; i++) {
5498 if ((val & enable_bit) == 0)
/* Only log the timeout when the caller did not ask for silence. */
5502 if (i == MAX_WAIT_CNT && !silent) {
5503 printk(KERN_ERR PFX "tg3_stop_block timed out, "
5504 "ofs=%lx enable_bit=%x\n",
5512 /* tp->lock is held. */
/* Quiesce the NIC: disable interrupts, stop the RX path, then shut
 * down every DMA/send/receive block in dependency order, and finally
 * clear the host status and stats blocks.  Errors from individual
 * blocks are OR-ed together so a single failed stop is still reported.
 */
5513 static int tg3_abort_hw(struct tg3 *tp, int silent)
5517 tg3_disable_ints(tp);
/* Stop accepting new RX traffic before stopping the RX engines. */
5519 tp->rx_mode &= ~RX_MODE_ENABLE;
5520 tw32_f(MAC_RX_MODE, tp->rx_mode);
5523 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
5524 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
5525 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
5526 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
5527 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
5528 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
5530 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
5531 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
5532 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
5533 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
5534 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
5535 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
5536 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
/* Disable the MAC transmitter and wait for TX mode to drain. */
5538 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
5539 tw32_f(MAC_MODE, tp->mac_mode);
5542 tp->tx_mode &= ~TX_MODE_ENABLE;
5543 tw32_f(MAC_TX_MODE, tp->tx_mode);
5545 for (i = 0; i < MAX_WAIT_CNT; i++) {
5547 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
5550 if (i >= MAX_WAIT_CNT) {
5551 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
5552 "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
5553 tp->dev->name, tr32(MAC_TX_MODE));
5557 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
5558 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
5559 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
/* Pulse the flow-through queue reset. */
5561 tw32(FTQ_RESET, 0xffffffff);
5562 tw32(FTQ_RESET, 0x00000000);
5564 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
5565 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
/* Wipe host-visible status/stats so stale data is not consumed. */
5568 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5570 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
5575 /* tp->lock is held. */
/* Acquire the NVRAM software arbitration semaphore (recursive via
 * nvram_lock_cnt).  Requests SET1 and polls up to 8000 times for
 * GNT1; on timeout the request is withdrawn with CLR1.
 * NOTE(review): the timeout error-return line is elided here.
 */
5576 static int tg3_nvram_lock(struct tg3 *tp)
5578 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
/* Only touch the hardware on the first (outermost) acquisition. */
5581 if (tp->nvram_lock_cnt == 0) {
5582 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
5583 for (i = 0; i < 8000; i++) {
5584 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
/* Timed out: release our request so firmware can proceed. */
5589 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
5593 tp->nvram_lock_cnt++;
5598 /* tp->lock is held. */
/* Release one level of the NVRAM arbitration lock; the hardware
 * semaphore is only dropped when the recursion count reaches zero.
 */
5599 static void tg3_nvram_unlock(struct tg3 *tp)
5601 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
5602 if (tp->nvram_lock_cnt > 0)
5603 tp->nvram_lock_cnt--;
5604 if (tp->nvram_lock_cnt == 0)
5605 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
5609 /* tp->lock is held. */
/* Set the NVRAM_ACCESS enable bit on 5750+ chips whose NVRAM is not
 * protected; older / protected parts need no explicit enable.
 */
5610 static void tg3_enable_nvram_access(struct tg3 *tp)
5612 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5613 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5614 u32 nvaccess = tr32(NVRAM_ACCESS);
5616 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
5620 /* tp->lock is held. */
/* Inverse of tg3_enable_nvram_access(): clear the ACCESS_ENABLE bit
 * on 5750+ chips with unprotected NVRAM.
 */
5621 static void tg3_disable_nvram_access(struct tg3 *tp)
5623 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
5624 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) {
5625 u32 nvaccess = tr32(NVRAM_ACCESS);
5627 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
/* Post an event to the APE management processor.  Bails out silently
 * if the APE segment signature or firmware-ready status is missing.
 * Waits (bounded, ~1 ms) for any previous event to be consumed before
 * writing the new one, then rings the APE doorbell.
 */
5631 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
5636 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
5637 if (apedata != APE_SEG_SIG_MAGIC)
5640 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
5641 if (!(apedata & APE_FW_STATUS_READY))
5644 /* Wait for up to 1 millisecond for APE to service previous event. */
5645 for (i = 0; i < 10; i++) {
5646 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
5649 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
/* Write the event only once the previous one is no longer pending. */
5651 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5652 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
5653 event | APE_EVENT_STATUS_EVENT_PENDING);
5655 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
5657 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
/* Doorbell: tell the APE a new event is waiting. */
5663 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
5664 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
/* Inform the APE firmware of a driver state transition (init /
 * shutdown / suspend).  No-op unless the APE is enabled.  Builds the
 * event code for the transition and sends it via tg3_ape_send_event().
 */
5667 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
5672 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
5676 case RESET_KIND_INIT:
/* Advertise the host segment and bump the init counter so the APE
 * can see a live driver.
 */
5677 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
5678 APE_HOST_SEG_SIG_MAGIC);
5679 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
5680 APE_HOST_SEG_LEN_MAGIC);
5681 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
5682 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
5683 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
5684 APE_HOST_DRIVER_ID_MAGIC);
5685 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
5686 APE_HOST_BEHAV_NO_PHYLOCK);
5688 event = APE_EVENT_STATUS_STATE_START;
5690 case RESET_KIND_SHUTDOWN:
5691 /* With the interface we are currently using,
5692 * APE does not track driver state. Wiping
5693 * out the HOST SEGMENT SIGNATURE forces
5694 * the APE to assume OS absent status.
5696 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
5698 event = APE_EVENT_STATUS_STATE_UNLOAD;
5700 case RESET_KIND_SUSPEND:
5701 event = APE_EVENT_STATUS_STATE_SUSPEND;
5707 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
5709 tg3_ape_send_event(tp, event);
5712 /* tp->lock is held. */
/* Before a chip reset: write the firmware mailbox magic, and for the
 * new ASF handshake tell the boot firmware which driver state is being
 * entered.  INIT/SUSPEND are also forwarded to the APE here (SHUTDOWN
 * is forwarded post-reset instead; see tg3_write_sig_post_reset).
 */
5713 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
5715 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
5716 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
5718 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5720 case RESET_KIND_INIT:
5721 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5725 case RESET_KIND_SHUTDOWN:
5726 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5730 case RESET_KIND_SUSPEND:
5731 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5740 if (kind == RESET_KIND_INIT ||
5741 kind == RESET_KIND_SUSPEND)
5742 tg3_ape_driver_state_change(tp, kind);
5745 /* tp->lock is held. */
/* After a chip reset completes: report the *_DONE driver state to the
 * firmware (new ASF handshake only), and forward SHUTDOWN to the APE.
 */
5746 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
5748 if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
5750 case RESET_KIND_INIT:
5751 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5752 DRV_STATE_START_DONE);
5755 case RESET_KIND_SHUTDOWN:
5756 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5757 DRV_STATE_UNLOAD_DONE);
5765 if (kind == RESET_KIND_SHUTDOWN)
5766 tg3_ape_driver_state_change(tp, kind);
5769 /* tp->lock is held. */
/* Legacy (pre-new-handshake) ASF signalling: write the driver state
 * directly to the firmware state mailbox when ASF is enabled.
 */
5770 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
5772 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5774 case RESET_KIND_INIT:
5775 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5779 case RESET_KIND_SHUTDOWN:
5780 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
5784 case RESET_KIND_SUSPEND:
5785 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
/* Wait for on-chip bootcode to finish after reset.  5906 polls the
 * VCPU init-done status (~20 ms); other chips poll the firmware
 * mailbox for the inverted magic value.  A timeout is NOT an error
 * (some Sun boards ship without firmware) but is logged once.
 */
5795 static int tg3_poll_fw(struct tg3 *tp)
5800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5801 /* Wait up to 20ms for init done. */
5802 for (i = 0; i < 200; i++) {
5803 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
5810 /* Wait for firmware initialization to complete. */
5811 for (i = 0; i < 100000; i++) {
5812 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
/* Firmware acknowledges by writing back the bitwise-NOT of MAGIC1. */
5813 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
5818 /* Chip might not be fitted with firmware. Some Sun onboard
5819 * parts are configured like that. So don't signal the timeout
5820 * of the above loop as an error, but do report the lack of
5821 * running firmware once.
5824 !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
5825 tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
5827 printk(KERN_INFO PFX "%s: No firmware running.\n",
5834 /* Save PCI command register before chip reset */
/* Snapshot PCI_COMMAND so tg3_restore_pci_state() can put it back
 * after GRC_MISC_CFG core-clock reset clears it.
 */
5835 static void tg3_save_pci_state(struct tg3 *tp)
5837 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
5840 /* Restore PCI state after chip reset */
/* Rebuild PCI config space after a chip reset: indirect-access enable,
 * PCI state/retry bits (with APE access where enabled), the saved
 * PCI_COMMAND, PCIe read-request size or PCI cacheline/latency, PCI-X
 * relaxed ordering, and the MSI enable bit on 5780-class chips.
 */
5841 static void tg3_restore_pci_state(struct tg3 *tp)
5845 /* Re-enable indirect register accesses. */
5846 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
5847 tp->misc_host_ctrl);
5849 /* Set MAX PCI retry to zero. */
5850 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
/* 5704 A0 in PCI-X mode needs the same-DMA retry workaround. */
5851 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
5852 (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
5853 val |= PCISTATE_RETRY_SAME_DMA;
5854 /* Allow reads and writes to the APE register and memory space. */
5855 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
5856 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
5857 PCISTATE_ALLOW_APE_SHMEM_WR;
5858 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
5860 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
/* 5785 skips this branch entirely; PCIe parts set the read request
 * size, legacy PCI parts restore cacheline size and latency timer.
 */
5862 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
5863 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
5864 pcie_set_readrq(tp->pdev, 4096);
5866 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
5867 tp->pci_cacheline_sz);
5868 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
5873 /* Make sure PCI-X relaxed ordering bit is clear. */
5877 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5879 pcix_cmd &= ~PCI_X_CMD_ERO;
5880 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
5884 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) {
5886 /* Chip reset on 5780 will reset MSI enable bit,
5887 * so need to restore it.
5889 if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
5892 pci_read_config_word(tp->pdev,
5893 tp->msi_cap + PCI_MSI_FLAGS,
5895 pci_write_config_word(tp->pdev,
5896 tp->msi_cap + PCI_MSI_FLAGS,
5897 ctrl | PCI_MSI_FLAGS_ENABLE);
5898 val = tr32(MSGINT_MODE);
5899 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
5904 static void tg3_stop_fw(struct tg3 *);
5906 /* tp->lock is held. */
/* Perform a full GRC core-clock chip reset and bring the device back
 * to a usable post-reset state: save/restore PCI config, guard the
 * irq handler while PCI memory-enable may be clear, handle numerous
 * per-ASIC erratas, restore MAC mode, wait for bootcode, and re-probe
 * ASF state.  tp->lock must be held.
 * NOTE(review): many lines (waits, error paths, braces) are elided in
 * this extract; ordering here is load-bearing -- do not reorder.
 */
5907 static int tg3_chip_reset(struct tg3 *tp)
5910 void (*write_op)(struct tg3 *, u32, u32);
5917 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
5919 /* No matching tg3_nvram_unlock() after this because
5920 * chip reset below will undo the nvram lock.
5922 tp->nvram_lock_cnt = 0;
5924 /* GRC_MISC_CFG core clock reset will clear the memory
5925 * enable bit in PCI register 4 and the MSI enable bit
5926 * on some chips, so we save relevant registers here.
5928 tg3_save_pci_state(tp);
/* Clear the fastboot PC on chips that support it. */
5930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
5931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
5932 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
5933 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
5934 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
5935 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
5936 tw32(GRC_FASTBOOT_PC, 0);
5939 * We must avoid the readl() that normally takes place.
5940 * It locks machines, causes machine checks, and other
5941 * fun things. So, temporarily disable the 5701
5942 * hardware workaround, while we do the reset.
5944 write_op = tp->write32;
5945 if (write_op == tg3_write_flush_reg32)
5946 tp->write32 = tg3_write32;
5948 /* Prevent the irq handler from reading or writing PCI registers
5949 * during chip reset when the memory enable bit in the PCI command
5950 * register may be cleared. The chip does not generate interrupt
5951 * at this time, but the irq handler may still be called due to irq
5952 * sharing or irqpoll.
5954 tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
5955 if (tp->hw_status) {
5956 tp->hw_status->status = 0;
5957 tp->hw_status->status_tag = 0;
/* Ensure no irq handler is still running before we pull the rug. */
5961 synchronize_irq(tp->pdev->irq);
/* Issue the reset itself. */
5964 val = GRC_MISC_CFG_CORECLK_RESET;
5966 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
5967 if (tr32(0x7e2c) == 0x60) {
5970 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
5971 tw32(GRC_MISC_CFG, (1 << 29));
/* 5906: tell the VCPU a driver reset is in progress and un-halt it. */
5976 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
5977 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
5978 tw32(GRC_VCPU_EXT_CTRL,
5979 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
5982 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5983 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
5984 tw32(GRC_MISC_CFG, val)
5986 /* restore 5701 hardware bug workaround write method */
5987 tp->write32 = write_op;
5989 /* Unfortunately, we have to delay before the PCI read back.
5990 * Some 575X chips even will not respond to a PCI cfg access
5991 * when the reset command is given to the chip.
5993 * How do these hardware designers expect things to work
5994 * properly if the PCI write is posted for a long period
5995 * of time? It is always necessary to have some method by
5996 * which a register read back can occur to push the write
5997 * out which does the reset.
5999 * For most tg3 variants the trick below was working.
6004 /* Flush PCI posted writes. The normal MMIO registers
6005 * are inaccessible at this time so this is the only
6006 * way to make this reliably (actually, this is no longer
6007 * the case, see above). I tried to use indirect
6008 * register read/write but this upset some 5701 variants.
6010 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
/* PCIe link retraining workaround for 5750 A0. */
6014 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
6015 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
6019 /* Wait for link training to complete. */
6020 for (i = 0; i < 5000; i++)
6023 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
6024 pci_write_config_dword(tp->pdev, 0xc4,
6025 cfg_val | (1 << 15));
6027 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
6028 /* Set PCIE max payload size and clear error status. */
6029 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
6032 tg3_restore_pci_state(tp);
6034 tp->tg3_flags &= ~TG3_FLAG_CHIP_RESETTING;
6037 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
6038 val = tr32(MEMARB_MODE);
6039 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
6041 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
6043 tw32(0x5000, 0x400);
6046 tw32(GRC_MODE, tp->grc_mode);
/* 5705 A0 PCIe workaround: set bit 15 of register 0xc4. */
6048 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
6051 tw32(0xc4, val | (1 << 15));
6054 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
6055 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6056 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
6057 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
6058 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
6059 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
/* Restore a sensible MAC mode per PHY/serdes/APE configuration. */
6062 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6063 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
6064 tw32_f(MAC_MODE, tp->mac_mode);
6065 } else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
6066 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
6067 tw32_f(MAC_MODE, tp->mac_mode);
6068 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
6069 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
6070 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
6071 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
6072 tw32_f(MAC_MODE, tp->mac_mode);
6074 tw32_f(MAC_MODE, 0);
6079 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
/* Wait for bootcode before touching the chip further. */
6081 err = tg3_poll_fw(tp);
6085 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6086 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
6089 tw32(0x7c00, val | (1 << 25));
6092 /* Reprobe ASF enable state. */
6093 tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
6094 tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
6095 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6096 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6099 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6100 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
6101 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6102 tp->last_event_jiffies = jiffies;
6103 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
6104 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
6111 /* tp->lock is held. */
/* Pause the ASF firmware via the event mailbox (not used when the APE
 * manages the NIC).  Waits for the RX CPU to ACK both the previous
 * event and this PAUSE command.  tp->lock is held.
 */
6112 static void tg3_stop_fw(struct tg3 *tp)
6114 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
6115 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
6116 /* Wait for RX cpu to ACK the previous event. */
6117 tg3_wait_for_event_ack(tp);
6119 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
6121 tg3_generate_fw_event(tp);
6123 /* Wait for RX cpu to ACK this event. */
6124 tg3_wait_for_event_ack(tp);
6128 /* tp->lock is held. */
/* Full device halt: signal firmware (pre-reset), quiesce the hardware,
 * reset the chip, then emit the legacy and post-reset firmware
 * signatures for the given reset kind.  Returns the chip-reset result.
 */
6129 static int tg3_halt(struct tg3 *tp, int kind, int silent)
6135 tg3_write_sig_pre_reset(tp, kind);
6137 tg3_abort_hw(tp, silent);
6138 err = tg3_chip_reset(tp);
6140 tg3_write_sig_legacy(tp, kind);
6141 tg3_write_sig_post_reset(tp, kind);
6149 #define TG3_FW_RELEASE_MAJOR 0x0
6150 #define TG3_FW_RELASE_MINOR 0x0
6151 #define TG3_FW_RELEASE_FIX 0x0
6152 #define TG3_FW_START_ADDR 0x08000000
6153 #define TG3_FW_TEXT_ADDR 0x08000000
6154 #define TG3_FW_TEXT_LEN 0x9c0
6155 #define TG3_FW_RODATA_ADDR 0x080009c0
6156 #define TG3_FW_RODATA_LEN 0x60
6157 #define TG3_FW_DATA_ADDR 0x08000a40
6158 #define TG3_FW_DATA_LEN 0x20
6159 #define TG3_FW_SBSS_ADDR 0x08000a60
6160 #define TG3_FW_SBSS_LEN 0xc
6161 #define TG3_FW_BSS_ADDR 0x08000a70
6162 #define TG3_FW_BSS_LEN 0x10
/* MIPS firmware text segment for the on-chip RX CPU (see the
 * TG3_FW_* layout #defines above).  Opaque binary data -- do not edit
 * by hand; distributed under the copyright notice in the file header.
 */
6164 static const u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
6165 0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
6166 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
6167 0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
6168 0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
6169 0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
6170 0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
6171 0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
6172 0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
6173 0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
6174 0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
6175 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
6176 0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
6177 0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
6178 0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
6179 0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
6180 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6181 0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
6182 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
6183 0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
6184 0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6185 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
6186 0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
6187 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6188 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6189 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6191 0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
6192 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6193 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6194 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6195 0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
6196 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
6197 0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
6198 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
6199 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6200 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
6201 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
6202 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6203 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6204 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
6205 0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
6206 0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
6207 0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
6208 0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
6209 0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
6210 0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
6211 0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
6212 0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
6213 0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
6214 0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
6215 0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
6216 0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
6217 0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
6218 0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
6219 0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
6220 0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
6221 0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
6222 0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
6223 0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
6224 0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
6225 0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
6226 0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
6227 0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
6228 0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
6229 0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
6230 0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
6231 0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
6232 0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
6233 0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
6234 0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
6235 0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
6236 0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
6237 0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
6238 0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
6239 0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
6240 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
6241 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
6242 0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
6243 0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
6244 0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
6245 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
6246 0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
6247 0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
6248 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
6249 0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
6250 0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
6251 0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
6252 0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
6253 0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
6254 0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
6255 0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
/* Read-only data segment for the RX CPU firmware (event-name strings
 * packed as u32 words).  Opaque data -- do not edit by hand.
 */
6258 static const u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
6259 0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
6260 0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
6261 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6262 0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
6266 #if 0 /* All zeros, don't eat up space with it. */
/* Firmware data segment -- all zeros, so it is compiled out (#if 0)
 * and the loader zero-fills the region instead.
 */
6267 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
6268 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6269 0x00000000, 0x00000000, 0x00000000, 0x00000000
6273 #define RX_CPU_SCRATCH_BASE 0x30000
6274 #define RX_CPU_SCRATCH_SIZE 0x04000
6275 #define TX_CPU_SCRATCH_BASE 0x34000
6276 #define TX_CPU_SCRATCH_SIZE 0x04000
6278 /* tp->lock is held. */
/*
 * Halt the on-chip RX or TX MIPS CPU whose register block starts at
 * @offset (RX_CPU_BASE or TX_CPU_BASE).
 *
 * NOTE(review): the embedded line numbers in this extract skip (6284->6286,
 * 6296->6300, ...), so several original statements (braces, delays, the
 * return paths) are missing here; comments describe only the visible code.
 */
6279 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
/* 5705-and-later chips have no separate TX CPU; asking to halt it is a
 * driver bug, hence BUG_ON rather than an error return. */
6283 BUG_ON(offset == TX_CPU_BASE &&
6284 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS));
/* 5906 uses a "virtual CPU"; it is halted through GRC_VCPU_EXT_CTRL
 * instead of the CPU_MODE register. */
6286 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
6287 u32 val = tr32(GRC_VCPU_EXT_CTRL);
6289 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
/* RX CPU: repeatedly clear CPU_STATE and assert HALT until CPU_MODE
 * reads back with the HALT bit set (up to 10000 attempts). */
6292 if (offset == RX_CPU_BASE) {
6293 for (i = 0; i < 10000; i++) {
6294 tw32(offset + CPU_STATE, 0xffffffff);
6295 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6296 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Final halt request; tw32_f presumably flushes the posted write —
 * per the usual tg3 convention, confirm against the full source. */
6300 tw32(offset + CPU_STATE, 0xffffffff);
6301 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
/* TX CPU: same halt/poll loop as above. */
6304 for (i = 0; i < 10000; i++) {
6305 tw32(offset + CPU_STATE, 0xffffffff);
6306 tw32(offset + CPU_MODE, CPU_MODE_HALT);
6307 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
/* Timed out: the CPU never reported itself halted. */
6313 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
6316 (offset == RX_CPU_BASE ? "RX" : "TX"));
/* The halted bootcode may still hold the NVRAM arbitration semaphore;
 * release request slot 0 so later NVRAM accesses do not hang. */
6320 /* Clear firmware's nvram arbitration. */
6321 if (tp->tg3_flags & TG3_FLAG_NVRAM)
6322 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
/* Firmware image descriptor (body of struct fw_info; the enclosing
 * "struct fw_info {" / "};" lines fall outside this extract).
 * One (base, len, data) triple per section.  A NULL *_data pointer
 * makes tg3_load_firmware_cpu() zero-fill that section instead of
 * copying words from memory. */
/* MIPS load address of the .text section. */
6327 unsigned int text_base;
/* Length of .text in bytes. */
6328 unsigned int text_len;
/* Words to write, or NULL to zero-fill. */
6329 const u32 *text_data;
/* MIPS load address of the .rodata section. */
6330 unsigned int rodata_base;
/* Length of .rodata in bytes. */
6331 unsigned int rodata_len;
/* Words to write, or NULL to zero-fill. */
6332 const u32 *rodata_data;
/* MIPS load address of the .data section. */
6333 unsigned int data_base;
/* Length of .data in bytes. */
6334 unsigned int data_len;
/* Words to write, or NULL to zero-fill. */
6335 const u32 *data_data;
6338 /* tp->lock is held. */
/*
 * Copy a firmware image described by @info into the scratch memory of the
 * CPU at @cpu_base, leaving that CPU halted.  The caller starts the CPU
 * afterwards (see tg3_load_5701_a0_firmware_fix).
 *
 * NOTE(review): this extract drops lines (6340->6342, 6348->6353,
 * 6389->6397, ...); the error-exit/unlock epilogue and return statement
 * are not visible here.
 */
6339 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
6340 int cpu_scratch_size, struct fw_info *info)
6342 int err, lock_err, i;
6343 void (*write_op)(struct tg3 *, u32, u32);
/* 5705+ chips have no TX CPU; refuse to load TX firmware on them. */
6345 if (cpu_base == TX_CPU_BASE &&
6346 (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
6347 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
6348 "TX cpu firmware on %s which is 5705.\n",
/* Pick the scratch-memory write primitive appropriate for the chip
 * generation. */
6353 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
6354 write_op = tg3_write_mem;
6356 write_op = tg3_write_indirect_reg32;
6358 /* It is possible that bootcode is still loading at this point.
6359 * Get the nvram lock first before halting the cpu.
/* Take the NVRAM lock before halting so we don't stop the bootcode
 * mid-NVRAM-access; remember lock_err so we only unlock what we got. */
6361 lock_err = tg3_nvram_lock(tp);
6362 err = tg3_halt_cpu(tp, cpu_base);
6364 tg3_nvram_unlock(tp);
/* Zero the whole scratch area, then keep the CPU halted while the
 * image is written in. */
6368 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
6369 write_op(tp, cpu_scratch_base + i, 0);
6370 tw32(cpu_base + CPU_STATE, 0xffffffff);
6371 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
/* Copy each section word by word.  The low 16 bits of the section's
 * MIPS base address select its offset within scratch memory; a NULL
 * data pointer writes zeros instead. */
6372 for (i = 0; i < (info->text_len / sizeof(u32)); i++)
6373 write_op(tp, (cpu_scratch_base +
6374 (info->text_base & 0xffff) +
6377 info->text_data[i] : 0));
6378 for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
6379 write_op(tp, (cpu_scratch_base +
6380 (info->rodata_base & 0xffff) +
6382 (info->rodata_data ?
6383 info->rodata_data[i] : 0));
6384 for (i = 0; i < (info->data_len / sizeof(u32)); i++)
6385 write_op(tp, (cpu_scratch_base +
6386 (info->data_base & 0xffff) +
6389 info->data_data[i] : 0));
6397 /* tp->lock is held. */
/*
 * Work around a 5701 A0 erratum by loading replacement firmware into both
 * the RX and TX CPUs, then starting only the RX CPU at TG3_FW_TEXT_ADDR.
 *
 * NOTE(review): this extract drops lines (6411->6413, 6434->6438, ...);
 * the error checks after each load, the udelay between PC polls, and the
 * final return are not visible here.
 */
6398 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
6400 struct fw_info info;
/* Describe the tg3Fw* image.  data_data is NULL: the .data section is
 * all zeros (see the "#if 0" tg3FwData array above), so it is
 * zero-filled by tg3_load_firmware_cpu(). */
6403 info.text_base = TG3_FW_TEXT_ADDR;
6404 info.text_len = TG3_FW_TEXT_LEN;
6405 info.text_data = &tg3FwText[0];
6406 info.rodata_base = TG3_FW_RODATA_ADDR;
6407 info.rodata_len = TG3_FW_RODATA_LEN;
6408 info.rodata_data = &tg3FwRodata[0];
6409 info.data_base = TG3_FW_DATA_ADDR;
6410 info.data_len = TG3_FW_DATA_LEN;
6411 info.data_data = NULL;
/* Load the same image into both CPUs' scratch memory. */
6413 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
6414 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
6419 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
6420 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
6425 /* Now startup only the RX cpu. */
6426 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6427 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXTADDR_CHECK_REMOVED_IF_WRONG
/* Poll up to 5 times for the program counter to land on the firmware
 * entry point, re-halting and re-writing the PC on each retry. */
6429 for (i = 0; i < 5; i++) {
6430 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
6432 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6433 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
6434 tw32_f(RX_CPU_BASE + CPU_PC, TG3_FW_TEXT_ADDR);
/* PC never reached the entry point: report and (in the full source)
 * fail the load. */
6438 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
6439 "to set RX CPU PC, is %08x should be %08x\n",
6440 tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
/* Clear CPU_MODE (drop HALT) to set the RX CPU running. */
6444 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
6445 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
6451 #define TG3_TSO_FW_RELEASE_MAJOR 0x1
6452 #define TG3_TSO_FW_RELASE_MINOR 0x6
6453 #define TG3_TSO_FW_RELEASE_FIX 0x0
6454 #define TG3_TSO_FW_START_ADDR 0x08000000
6455 #define TG3_TSO_FW_TEXT_ADDR 0x08000000
6456 #define TG3_TSO_FW_TEXT_LEN 0x1aa0
6457 #define TG3_TSO_FW_RODATA_ADDR 0x08001aa0
6458 #define TG3_TSO_FW_RODATA_LEN 0x60
6459 #define TG3_TSO_FW_DATA_ADDR 0x08001b20
6460 #define TG3_TSO_FW_DATA_LEN 0x30
6461 #define TG3_TSO_FW_SBSS_ADDR 0x08001b50
6462 #define TG3_TSO_FW_SBSS_LEN 0x2c
6463 #define TG3_TSO_FW_BSS_ADDR 0x08001b80
6464 #define TG3_TSO_FW_BSS_LEN 0x894
6466 static const u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
6467 0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
6468 0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
6469 0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6470 0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
6471 0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
6472 0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
6473 0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
6474 0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
6475 0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
6476 0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
6477 0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
6478 0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
6479 0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
6480 0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
6481 0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
6482 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
6483 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
6484 0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
6485 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6486 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
6487 0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
6488 0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
6489 0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
6490 0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
6491 0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
6492 0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
6493 0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
6494 0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
6495 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
6496 0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6497 0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
6498 0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
6499 0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
6500 0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
6501 0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
6502 0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
6503 0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
6504 0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
6505 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
6506 0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
6507 0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
6508 0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
6509 0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
6510 0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
6511 0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
6512 0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
6513 0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
6514 0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6515 0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
6516 0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
6517 0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
6518 0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
6519 0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
6520 0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
6521 0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
6522 0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
6523 0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
6524 0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
6525 0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
6526 0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
6527 0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
6528 0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
6529 0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
6530 0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
6531 0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
6532 0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
6533 0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
6534 0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
6535 0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
6536 0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
6537 0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
6538 0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
6539 0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
6540 0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
6541 0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
6542 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
6543 0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
6544 0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
6545 0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
6546 0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
6547 0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
6548 0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
6549 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
6550 0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
6551 0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
6552 0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
6553 0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
6554 0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
6555 0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
6556 0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
6557 0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
6558 0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
6559 0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
6560 0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
6561 0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
6562 0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
6563 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
6564 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
6565 0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
6566 0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
6567 0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
6568 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
6569 0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
6570 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
6571 0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
6572 0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
6573 0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
6574 0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
6575 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
6576 0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
6577 0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
6578 0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
6579 0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
6580 0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
6581 0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
6582 0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
6583 0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
6584 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
6585 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
6586 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
6587 0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
6588 0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
6589 0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
6590 0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
6591 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
6592 0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
6593 0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
6594 0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
6595 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
6596 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
6597 0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
6598 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
6599 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
6600 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
6601 0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
6602 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
6603 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
6604 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
6605 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6606 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
6607 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
6608 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
6609 0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
6610 0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
6611 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
6612 0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
6613 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
6614 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
6615 0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
6616 0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
6617 0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
6618 0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
6619 0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
6620 0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
6621 0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
6622 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
6623 0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
6624 0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
6625 0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
6626 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
6627 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
6628 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
6629 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
6630 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
6631 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
6632 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
6633 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
6634 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
6635 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
6636 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
6637 0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
6638 0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
6639 0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
6640 0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
6641 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
6642 0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
6643 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
6644 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
6645 0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
6646 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
6647 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
6648 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
6649 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
6650 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
6651 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
6652 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
6653 0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
6654 0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
6655 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
6656 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
6657 0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
6658 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
6659 0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
6660 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
6661 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
6662 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
6663 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
6664 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
6665 0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
6666 0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
6667 0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
6668 0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
6669 0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
6670 0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
6671 0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
6672 0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
6673 0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
6674 0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
6675 0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
6676 0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
6677 0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
6678 0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
6679 0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
6680 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
6681 0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
6682 0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
6683 0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
6684 0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
6685 0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
6686 0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
6687 0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6688 0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
6689 0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
6690 0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
6691 0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
6692 0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
6693 0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
6694 0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
6695 0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
6696 0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
6697 0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
6698 0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
6699 0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
6700 0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
6701 0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
6702 0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
6703 0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
6704 0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
6705 0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
6706 0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
6707 0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
6708 0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
6709 0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
6710 0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
6711 0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
6712 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
6713 0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
6714 0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
6715 0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
6716 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
6717 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
6718 0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
6719 0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
6720 0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
6721 0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
6722 0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
6723 0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
6724 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
6725 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
6726 0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
6727 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
6728 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
6729 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
6730 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
6731 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
6732 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
6733 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
6734 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
6735 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
6736 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
6737 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
6738 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
6739 0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
6740 0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
6741 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
6742 0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
6743 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
6744 0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
6745 0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
6746 0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
6747 0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
6748 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
6749 0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
6750 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
6753 static const u32 tg3TsoFwRodata[] = {
6754 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6755 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
6756 0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
6757 0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
6761 static const u32 tg3TsoFwData[] = {
6762 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
6763 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
6767 /* 5705 needs a special version of the TSO firmware. */
6768 #define TG3_TSO5_FW_RELEASE_MAJOR 0x1
6769 #define TG3_TSO5_FW_RELASE_MINOR 0x2
6770 #define TG3_TSO5_FW_RELEASE_FIX 0x0
6771 #define TG3_TSO5_FW_START_ADDR 0x00010000
6772 #define TG3_TSO5_FW_TEXT_ADDR 0x00010000
6773 #define TG3_TSO5_FW_TEXT_LEN 0xe90
6774 #define TG3_TSO5_FW_RODATA_ADDR 0x00010e90
6775 #define TG3_TSO5_FW_RODATA_LEN 0x50
6776 #define TG3_TSO5_FW_DATA_ADDR 0x00010f00
6777 #define TG3_TSO5_FW_DATA_LEN 0x20
6778 #define TG3_TSO5_FW_SBSS_ADDR 0x00010f20
6779 #define TG3_TSO5_FW_SBSS_LEN 0x28
6780 #define TG3_TSO5_FW_BSS_ADDR 0x00010f50
6781 #define TG3_TSO5_FW_BSS_LEN 0x88
6783 static const u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
6784 0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
6785 0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
6786 0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
6787 0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
6788 0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
6789 0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
6790 0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6791 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
6792 0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
6793 0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
6794 0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
6795 0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
6796 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
6797 0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
6798 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
6799 0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
6800 0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
6801 0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
6802 0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
6803 0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
6804 0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
6805 0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
6806 0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
6807 0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
6808 0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
6809 0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
6810 0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
6811 0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
6812 0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
6813 0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
6814 0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6815 0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
6816 0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
6817 0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
6818 0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
6819 0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
6820 0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
6821 0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
6822 0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
6823 0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
6824 0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
6825 0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
6826 0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
6827 0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
6828 0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
6829 0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
6830 0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
6831 0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
6832 0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
6833 0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
6834 0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
6835 0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
6836 0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
6837 0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
6838 0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
6839 0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
6840 0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
6841 0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
6842 0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
6843 0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
6844 0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
6845 0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
6846 0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
6847 0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
6848 0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
6849 0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
6850 0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
6851 0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
6852 0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
6853 0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
6854 0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
6855 0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
6856 0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
6857 0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
6858 0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
6859 0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
6860 0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
6861 0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
6862 0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
6863 0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
6864 0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
6865 0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
6866 0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
6867 0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
6868 0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
6869 0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
6870 0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
6871 0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
6872 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
6873 0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
6874 0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
6875 0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
6876 0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
6877 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
6878 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
6879 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
6880 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
6881 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
6882 0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
6883 0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
6884 0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
6885 0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
6886 0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
6887 0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
6888 0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
6889 0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
6890 0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6891 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6892 0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
6893 0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
6894 0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
6895 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
6896 0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
6897 0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
6898 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
6899 0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
6900 0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
6901 0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
6902 0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
6903 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
6904 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
6905 0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
6906 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
6907 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
6908 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
6909 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
6910 0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
6911 0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
6912 0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
6913 0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
6914 0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
6915 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
6916 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
6917 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
6918 0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
6919 0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
6920 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
6921 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
6922 0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
6923 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
6924 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
6925 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
6926 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
6927 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
6928 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
6929 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
6930 0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
6931 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
6932 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
6933 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
6934 0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
6935 0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
6936 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
6937 0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
6938 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
6939 0x00000000, 0x00000000, 0x00000000,
/* Read-only data segment of the TSO firmware image for 5705-class chips.
 * The words are big-endian ASCII tags embedded in the firmware
 * ("MainCpuB", "MainCpuA", "stkoffld", "fatalErr").  This table is copied
 * into NIC SRAM at TG3_TSO5_FW_RODATA_ADDR by tg3_load_tso_firmware();
 * it is generated data — do not edit by hand.
 */
6942 static const u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
6943 0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
6944 0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
6945 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
6946 0x00000000, 0x00000000, 0x00000000,
/* Initialized-data segment of the 5705 TSO firmware.  The non-zero words
 * decode to the ASCII version string "stkoffld_v1.2.0".  Copied into NIC
 * SRAM at TG3_TSO5_FW_DATA_ADDR by tg3_load_tso_firmware(); generated
 * data — do not edit by hand.
 */
6949 static const u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
6950 0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
6951 0x00000000, 0x00000000, 0x00000000,
6954 /* tp->lock is held. */
/*
 * Load the TSO (TCP Segmentation Offload) firmware into one of the
 * on-chip MIPS CPUs and start it executing.
 *
 * Chips with hardware TSO (TG3_FLG2_HW_TSO) need no firmware, so we
 * return early for them.  5705-class chips run the firmware on the RX
 * CPU and carve scratch space out of the NIC SRAM MBUF pool; all other
 * TSO-capable chips run it on the TX CPU using its dedicated scratch
 * area.  Returns 0 on success, or an error code if the firmware load
 * fails or the CPU refuses to take the new program counter.
 */
6955 static int tg3_load_tso_firmware(struct tg3 *tp)
6957 struct fw_info info;
6958 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
/* Hardware-TSO chips segment in silicon; no firmware download needed. */
6961 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
/* 5705: use the RX CPU; scratch space is borrowed from the MBUF pool
 * in NIC SRAM, sized to hold every firmware segment (text/rodata/data
 * plus the sbss/bss that the firmware zeroes at runtime).
 */
6964 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6965 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
6966 info.text_len = TG3_TSO5_FW_TEXT_LEN;
6967 info.text_data = &tg3Tso5FwText[0];
6968 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
6969 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
6970 info.rodata_data = &tg3Tso5FwRodata[0];
6971 info.data_base = TG3_TSO5_FW_DATA_ADDR;
6972 info.data_len = TG3_TSO5_FW_DATA_LEN;
6973 info.data_data = &tg3Tso5FwData[0];
6974 cpu_base = RX_CPU_BASE;
6975 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
6976 cpu_scratch_size = (info.text_len +
6979 TG3_TSO5_FW_SBSS_LEN +
6980 TG3_TSO5_FW_BSS_LEN);
/* All other TSO-capable chips: use the TX CPU's dedicated scratch. */
6982 info.text_base = TG3_TSO_FW_TEXT_ADDR;
6983 info.text_len = TG3_TSO_FW_TEXT_LEN;
6984 info.text_data = &tg3TsoFwText[0];
6985 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
6986 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
6987 info.rodata_data = &tg3TsoFwRodata[0];
6988 info.data_base = TG3_TSO_FW_DATA_ADDR;
6989 info.data_len = TG3_TSO_FW_DATA_LEN;
6990 info.data_data = &tg3TsoFwData[0];
6991 cpu_base = TX_CPU_BASE;
6992 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
6993 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
/* Copy all firmware segments into the chosen CPU's scratch SRAM. */
6996 err = tg3_load_firmware_cpu(tp, cpu_base,
6997 cpu_scratch_base, cpu_scratch_size,
7002 /* Now startup the cpu. */
/* Clear pending CPU state, point the PC at the firmware entry, then
 * verify the PC actually latched; retry up to 5 times, re-halting the
 * CPU each attempt, since some chips need more than one write.
 */
7003 tw32(cpu_base + CPU_STATE, 0xffffffff);
7004 tw32_f(cpu_base + CPU_PC, info.text_base);
7006 for (i = 0; i < 5; i++) {
7007 if (tr32(cpu_base + CPU_PC) == info.text_base)
7009 tw32(cpu_base + CPU_STATE, 0xffffffff);
7010 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
7011 tw32_f(cpu_base + CPU_PC, info.text_base);
/* All retries exhausted: report the stuck PC and give up. */
7015 printk(KERN_ERR PFX "tg3_load_tso_firmware fails for %s "
7016 "to set CPU PC, is %08x should be %08x\n",
7017 tp->dev->name, tr32(cpu_base + CPU_PC),
/* PC verified: clear state and write 0 to CPU_MODE (drops the
 * CPU_MODE_HALT bit set above) so the CPU starts running the firmware.
 */
7021 tw32(cpu_base + CPU_STATE, 0xffffffff);
7022 tw32_f(cpu_base + CPU_MODE, 0x00000000);
/*
 * net_device hook for changing the interface MAC address.
 *
 * Validates the new address, copies it into dev->dev_addr, and — only if
 * the interface is running — reprograms the hardware MAC address
 * registers under tp->lock.  When ASF management firmware is active, MAC
 * address slot 1 may belong to the firmware; in that case slot 1 is left
 * untouched (skip_mac_1).
 */
7027 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7029 struct tg3 *tp = netdev_priv(dev);
7030 struct sockaddr *addr = p;
7031 int err = 0, skip_mac_1 = 0;
/* Reject multicast / all-zero addresses. */
7033 if (!is_valid_ether_addr(addr->sa_data))
7036 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
/* If the device is down, the hardware is programmed at open time. */
7038 if (!netif_running(dev))
7041 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7042 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7044 addr0_high = tr32(MAC_ADDR_0_HIGH);
7045 addr0_low = tr32(MAC_ADDR_0_LOW);
7046 addr1_high = tr32(MAC_ADDR_1_HIGH);
7047 addr1_low = tr32(MAC_ADDR_1_LOW);
7049 /* Skip MAC addr 1 if ASF is using it. */
/* Slot 1 is "in use by ASF" when it differs from slot 0 and is
 * not all-zero.
 */
7050 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7051 !(addr1_high == 0 && addr1_low == 0))
/* Program the hardware address registers under the driver lock. */
7054 spin_lock_bh(&tp->lock);
7055 __tg3_set_mac_addr(tp, skip_mac_1);
7056 spin_unlock_bh(&tp->lock);
7061 /* tp->lock is held. */
/*
 * Program one TG3_BDINFO block (a ring descriptor in NIC SRAM):
 * the 64-bit host DMA address of the ring (written as high then low
 * 32-bit halves), the maxlen/flags word, and — only on pre-5705 chips —
 * the ring's location inside NIC SRAM.
 */
7062 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7063 dma_addr_t mapping, u32 maxlen_flags,
7067 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7068 ((u64) mapping >> 32));
7070 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7071 ((u64) mapping & 0xffffffff));
7073 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
/* 5705+ chips have no NIC-resident ring copy; skip the NIC address. */
7076 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7078 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7082 static void __tg3_set_rx_mode(struct net_device *);
/*
 * Program the host coalescing engine from an ethtool_coalesce request.
 *
 * Caller holds tp->lock.  The base tick/frame thresholds exist on all
 * chips; the IRQ-time variants and the statistics-block tick register
 * exist only on pre-5705 hardware, hence the TG3_FLG2_5705_PLUS guards.
 */
7083 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
7085 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
7086 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
7087 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
7088 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
7089 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7090 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
7091 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
7093 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
7094 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
7095 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7096 u32 val = ec->stats_block_coalesce_usecs;
/* With no link there is no point ticking the statistics block;
 * the value written is adjusted when the carrier is down.
 */
7098 if (!netif_carrier_ok(tp->dev))
7101 tw32(HOSTCC_STAT_COAL_TICKS, val);
7105 /* tp->lock is held. */
7106 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7108 u32 val, rdmac_mode;
7111 tg3_disable_ints(tp);
7115 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7117 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
7118 tg3_abort_hw(tp, 1);
7122 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB))
7125 err = tg3_chip_reset(tp);
7129 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7131 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7132 val = tr32(TG3_CPMU_CTRL);
7133 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7134 tw32(TG3_CPMU_CTRL, val);
7136 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7137 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7138 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7139 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7141 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7142 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7143 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7144 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7146 val = tr32(TG3_CPMU_HST_ACC);
7147 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7148 val |= CPMU_HST_ACC_MACCLK_6_25;
7149 tw32(TG3_CPMU_HST_ACC, val);
7152 /* This works around an issue with Athlon chipsets on
7153 * B3 tigon3 silicon. This bit has no effect on any
7154 * other revision. But do not set this on PCI Express
7155 * chips and don't even touch the clocks if the CPMU is present.
7157 if (!(tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)) {
7158 if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
7159 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
7160 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7163 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7164 (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
7165 val = tr32(TG3PCI_PCISTATE);
7166 val |= PCISTATE_RETRY_SAME_DMA;
7167 tw32(TG3PCI_PCISTATE, val);
7170 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7171 /* Allow reads and writes to the
7172 * APE register and memory space.
7174 val = tr32(TG3PCI_PCISTATE);
7175 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7176 PCISTATE_ALLOW_APE_SHMEM_WR;
7177 tw32(TG3PCI_PCISTATE, val);
7180 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
7181 /* Enable some hw fixes. */
7182 val = tr32(TG3PCI_MSI_DATA);
7183 val |= (1 << 26) | (1 << 28) | (1 << 29);
7184 tw32(TG3PCI_MSI_DATA, val);
7187 /* Descriptor ring init may make accesses to the
7188 * NIC SRAM area to setup the TX descriptors, so we
7189 * can only do this after the hardware has been
7190 * successfully reset.
7192 err = tg3_init_rings(tp);
7196 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7197 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7198 /* This value is determined during the probe time DMA
7199 * engine test, tg3_test_dma.
7201 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7204 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
7205 GRC_MODE_4X_NIC_SEND_RINGS |
7206 GRC_MODE_NO_TX_PHDR_CSUM |
7207 GRC_MODE_NO_RX_PHDR_CSUM);
7208 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
7210 /* Pseudo-header checksum is done by hardware logic and not
7211 * the offload processers, so make the chip do the pseudo-
7212 * header checksums on receive. For transmit it is more
7213 * convenient to do the pseudo-header checksum in software
7214 * as Linux does that on transmit for us in all cases.
7216 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
7220 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
7222 /* Setup the timer prescalar register. Clock is always 66Mhz. */
7223 val = tr32(GRC_MISC_CFG);
7225 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
7226 tw32(GRC_MISC_CFG, val);
7228 /* Initialize MBUF/DESC pool. */
7229 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7231 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
7232 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
7233 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7234 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
7236 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
7237 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
7238 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
7240 else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7243 fw_len = (TG3_TSO5_FW_TEXT_LEN +
7244 TG3_TSO5_FW_RODATA_LEN +
7245 TG3_TSO5_FW_DATA_LEN +
7246 TG3_TSO5_FW_SBSS_LEN +
7247 TG3_TSO5_FW_BSS_LEN);
7248 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
7249 tw32(BUFMGR_MB_POOL_ADDR,
7250 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
7251 tw32(BUFMGR_MB_POOL_SIZE,
7252 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
7255 if (tp->dev->mtu <= ETH_DATA_LEN) {
7256 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7257 tp->bufmgr_config.mbuf_read_dma_low_water);
7258 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7259 tp->bufmgr_config.mbuf_mac_rx_low_water);
7260 tw32(BUFMGR_MB_HIGH_WATER,
7261 tp->bufmgr_config.mbuf_high_water);
7263 tw32(BUFMGR_MB_RDMA_LOW_WATER,
7264 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
7265 tw32(BUFMGR_MB_MACRX_LOW_WATER,
7266 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
7267 tw32(BUFMGR_MB_HIGH_WATER,
7268 tp->bufmgr_config.mbuf_high_water_jumbo);
7270 tw32(BUFMGR_DMA_LOW_WATER,
7271 tp->bufmgr_config.dma_low_water);
7272 tw32(BUFMGR_DMA_HIGH_WATER,
7273 tp->bufmgr_config.dma_high_water);
7275 tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
7276 for (i = 0; i < 2000; i++) {
7277 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
7282 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
7287 /* Setup replenish threshold. */
7288 val = tp->rx_pending / 8;
7291 else if (val > tp->rx_std_max_post)
7292 val = tp->rx_std_max_post;
7293 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7294 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
7295 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
7297 if (val > (TG3_RX_INTERNAL_RING_SZ_5906 / 2))
7298 val = TG3_RX_INTERNAL_RING_SZ_5906 / 2;
7301 tw32(RCVBDI_STD_THRESH, val);
7303 /* Initialize TG3_BDINFO's at:
7304 * RCVDBDI_STD_BD: standard eth size rx ring
7305 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
7306 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
7309 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
7310 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
7311 * ring attribute flags
7312 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
7314 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
7315 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
7317 * The size of each ring is fixed in the firmware, but the location is
7320 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7321 ((u64) tp->rx_std_mapping >> 32));
7322 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7323 ((u64) tp->rx_std_mapping & 0xffffffff));
7324 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7325 NIC_SRAM_RX_BUFFER_DESC);
7327 /* Don't even try to program the JUMBO/MINI buffer descriptor
7330 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
7331 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7332 RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
7334 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
7335 RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7337 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
7338 BDINFO_FLAGS_DISABLED);
7340 /* Setup replenish threshold. */
7341 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
7343 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) {
7344 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
7345 ((u64) tp->rx_jumbo_mapping >> 32));
7346 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7347 ((u64) tp->rx_jumbo_mapping & 0xffffffff));
7348 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7349 RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
7350 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7351 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7353 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7354 BDINFO_FLAGS_DISABLED);
7359 /* There is only one send ring on 5705/5750, no need to explicitly
7360 * disable the others.
7362 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7363 /* Clear out send RCB ring in SRAM. */
7364 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
7365 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7366 BDINFO_FLAGS_DISABLED);
7371 tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7372 tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
7374 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
7375 tp->tx_desc_mapping,
7376 (TG3_TX_RING_SIZE <<
7377 BDINFO_FLAGS_MAXLEN_SHIFT),
7378 NIC_SRAM_TX_BUFFER_DESC);
7380 /* There is only one receive return ring on 5705/5750, no need
7381 * to explicitly disable the others.
7383 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7384 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
7385 i += TG3_BDINFO_SIZE) {
7386 tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
7387 BDINFO_FLAGS_DISABLED);
7392 tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
7394 tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
7396 (TG3_RX_RCB_RING_SIZE(tp) <<
7397 BDINFO_FLAGS_MAXLEN_SHIFT),
7400 tp->rx_std_ptr = tp->rx_pending;
7401 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
7404 tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7405 tp->rx_jumbo_pending : 0;
7406 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
7409 /* Initialize MAC address and backoff seed. */
7410 __tg3_set_mac_addr(tp, 0);
7412 /* MTU + ethernet header + FCS + optional VLAN tag */
7413 tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
7415 /* The slot time is changed by tg3_setup_phy if we
7416 * run at gigabit with half duplex.
7418 tw32(MAC_TX_LENGTHS,
7419 (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
7420 (6 << TX_LENGTHS_IPG_SHIFT) |
7421 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
7423 /* Receive rules. */
7424 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
7425 tw32(RCVLPC_CONFIG, 0x0181);
7427 /* Calculate RDMAC_MODE setting early, we need it to determine
7428 * the RCVLPC_STATE_ENABLE mask.
7430 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
7431 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
7432 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
7433 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
7434 RDMAC_MODE_LNGREAD_ENAB);
7436 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7437 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7438 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
7439 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
7440 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
7442 /* If statement applies to 5705 and 5750 PCI devices only */
7443 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7444 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7445 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
7446 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
7447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7448 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
7449 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7450 !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7451 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7455 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
7456 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
7458 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7459 rdmac_mode |= (1 << 27);
7461 /* Receive/send statistics. */
7462 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
7463 val = tr32(RCVLPC_STATS_ENABLE);
7464 val &= ~RCVLPC_STATSENAB_DACK_FIX;
7465 tw32(RCVLPC_STATS_ENABLE, val);
7466 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
7467 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
7468 val = tr32(RCVLPC_STATS_ENABLE);
7469 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
7470 tw32(RCVLPC_STATS_ENABLE, val);
7472 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
7474 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
7475 tw32(SNDDATAI_STATSENAB, 0xffffff);
7476 tw32(SNDDATAI_STATSCTRL,
7477 (SNDDATAI_SCTRL_ENABLE |
7478 SNDDATAI_SCTRL_FASTUPD));
7480 /* Setup host coalescing engine. */
7481 tw32(HOSTCC_MODE, 0);
7482 for (i = 0; i < 2000; i++) {
7483 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
7488 __tg3_set_coalesce(tp, &tp->coal);
7490 /* set status block DMA address */
7491 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7492 ((u64) tp->status_mapping >> 32));
7493 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7494 ((u64) tp->status_mapping & 0xffffffff));
7496 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7497 /* Status/statistics block address. See tg3_timer,
7498 * the tg3_periodic_fetch_stats call there, and
7499 * tg3_get_stats to see how this works for 5705/5750 chips.
7501 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
7502 ((u64) tp->stats_mapping >> 32));
7503 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
7504 ((u64) tp->stats_mapping & 0xffffffff));
7505 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
7506 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
7509 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
7511 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
7512 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
7513 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7514 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
7516 /* Clear statistics/status block in chip, and status block in ram. */
7517 for (i = NIC_SRAM_STATS_BLK;
7518 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
7520 tg3_write_mem(tp, i, 0);
7523 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
7525 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
7526 tp->tg3_flags2 &= ~TG3_FLG2_PARALLEL_DETECT;
7527 /* reset to prevent losing 1st rx packet intermittently */
7528 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7532 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7533 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
7536 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
7537 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
7538 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7539 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7540 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
7541 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7542 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
7545 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
7546 * If TG3_FLG2_IS_NIC is zero, we should read the
7547 * register to preserve the GPIO settings for LOMs. The GPIOs,
7548 * whether used as inputs or outputs, are set by boot code after
7551 if (!(tp->tg3_flags2 & TG3_FLG2_IS_NIC)) {
7554 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
7555 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
7556 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
7558 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
7559 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
7560 GRC_LCLCTRL_GPIO_OUTPUT3;
7562 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
7563 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
7565 tp->grc_local_ctrl &= ~gpio_mask;
7566 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
7568 /* GPIO1 must be driven high for eeprom write protect */
7569 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT)
7570 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
7571 GRC_LCLCTRL_GPIO_OUTPUT1);
7573 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7576 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
7579 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
7580 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
7584 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
7585 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
7586 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
7587 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
7588 WDMAC_MODE_LNGREAD_ENAB);
7590 /* If statement applies to 5705 and 5750 PCI devices only */
7591 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7592 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
7593 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7594 if ((tp->tg3_flags & TG3_FLG2_TSO_CAPABLE) &&
7595 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
7596 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
7598 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
7599 !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
7600 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
7601 val |= WDMAC_MODE_RX_ACCEL;
7605 /* Enable host coalescing bug fix */
7606 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755) ||
7607 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787) ||
7608 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784) ||
7609 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) ||
7610 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785))
7611 val |= WDMAC_MODE_STATUS_TAG_FIX;
7613 tw32_f(WDMAC_MODE, val);
7616 if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
7619 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
7622 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
7623 pcix_cmd |= PCI_X_CMD_READ_2K;
7624 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7625 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
7626 pcix_cmd |= PCI_X_CMD_READ_2K;
7628 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7632 tw32_f(RDMAC_MODE, rdmac_mode);
7635 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
7636 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7637 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
7639 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
7641 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
7643 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
7645 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
7646 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
7647 tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
7648 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
7649 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7650 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
7651 tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
7652 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
7654 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
7655 err = tg3_load_5701_a0_firmware_fix(tp);
7660 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
7661 err = tg3_load_tso_firmware(tp);
7666 tp->tx_mode = TX_MODE_ENABLE;
7667 tw32_f(MAC_TX_MODE, tp->tx_mode);
7670 tp->rx_mode = RX_MODE_ENABLE;
7671 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
7672 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
7673 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
7674 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7675 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
7677 tw32_f(MAC_RX_MODE, tp->rx_mode);
7680 tw32(MAC_LED_CTRL, tp->led_ctrl);
7682 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
7683 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7684 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7687 tw32_f(MAC_RX_MODE, tp->rx_mode);
7690 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
7691 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
7692 !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
7693 /* Set drive transmission level to 1.2V */
7694 /* only if the signal pre-emphasis bit is not set */
7695 val = tr32(MAC_SERDES_CFG);
7698 tw32(MAC_SERDES_CFG, val);
7700 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
7701 tw32(MAC_SERDES_CFG, 0x616000);
7704 /* Prevent chip from dropping frames when flow control
7707 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
7709 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
7710 (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
7711 /* Use hardware link auto-negotiation */
7712 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
7715 if ((tp->tg3_flags2 & TG3_FLG2_MII_SERDES) &&
7716 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)) {
7719 tmp = tr32(SERDES_RX_CTRL);
7720 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
7721 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
7722 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
7723 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
7726 if (!(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
7727 if (tp->link_config.phy_is_low_power) {
7728 tp->link_config.phy_is_low_power = 0;
7729 tp->link_config.speed = tp->link_config.orig_speed;
7730 tp->link_config.duplex = tp->link_config.orig_duplex;
7731 tp->link_config.autoneg = tp->link_config.orig_autoneg;
7734 err = tg3_setup_phy(tp, 0);
7738 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7739 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906) {
7742 /* Clear CRC stats. */
7743 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
7744 tg3_writephy(tp, MII_TG3_TEST1,
7745 tmp | MII_TG3_TEST1_CRC_EN);
7746 tg3_readphy(tp, 0x14, &tmp);
7751 __tg3_set_rx_mode(tp->dev);
7753 /* Initialize receive rules. */
7754 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
7755 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
7756 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
7757 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
7759 if ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) &&
7760 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
7764 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
7768 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
7770 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
7772 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
7774 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
7776 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
7778 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
7780 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
7782 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
7784 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
7786 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
7788 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
7790 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
7792 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
7794 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
7802 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7803 /* Write our heartbeat update interval to APE. */
7804 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
7805 APE_HOST_HEARTBEAT_INT_DISABLE);
7807 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
7812 /* Called at device open time to get the chip ready for
7813 * packet processing. Invoked with tp->lock held.
7815 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
7817 tg3_switch_clocks(tp);
7819 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
7821 return tg3_reset_hw(tp, reset_phy);
/* Accumulate the 32-bit hardware counter register REG into the 64-bit
 * software counter *PSTAT, carrying into .high when .low wraps around.
 */
#define TG3_STAT_ADD32(PSTAT, REG)		\
do {	u32 __val = tr32(REG);			\
	(PSTAT)->low += __val;			\
	if ((PSTAT)->low < __val)		\
		(PSTAT)->high += 1;		\
} while (0)
7831 static void tg3_periodic_fetch_stats(struct tg3 *tp)
7833 struct tg3_hw_stats *sp = tp->hw_stats;
7835 if (!netif_carrier_ok(tp->dev))
7838 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
7839 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
7840 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
7841 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
7842 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
7843 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
7844 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
7845 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
7846 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
7847 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
7848 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
7849 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
7850 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
7852 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
7853 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
7854 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
7855 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
7856 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
7857 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
7858 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
7859 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
7860 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
7861 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
7862 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
7863 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
7864 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
7865 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
7867 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
7868 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
7869 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
/* Periodic maintenance timer, re-armed at the bottom with
 * tp->timer_offset.  Under tp->lock it: kicks a stalled interrupt when
 * tagged status is not in use, schedules the reset task if the write
 * DMA engine has died, polls link state and harvests hardware stats
 * once per second, and emits the ASF firmware heartbeat every other
 * second.
 * NOTE(review): this extract elides some original lines (braces,
 * declarations, early exits); code lines kept byte-identical.
 */
7872 static void tg3_timer(unsigned long __opaque)
7874 	struct tg3 *tp = (struct tg3 *) __opaque;
7879 	spin_lock(&tp->lock);
7881 	if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
7882 		/* All of this garbage is because when using non-tagged
7883 		 * IRQ status the mailbox/status_block protocol the chip
7884 		 * uses with the cpu is race prone.
7886 		if (tp->hw_status->status & SD_STATUS_UPDATED) {
7887 			tw32(GRC_LOCAL_CTRL,
7888 			     tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
7890 			tw32(HOSTCC_MODE, tp->coalesce_mode |
7891 			     (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
/* WDMAC disabled while running implies the chip wedged: hand off a
 * full restart to the reset work item outside this timer context.
 */
7894 		if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
7895 			tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
7896 			spin_unlock(&tp->lock);
7897 			schedule_work(&tp->reset_task);
7902 	/* This part only runs once per second. */
7903 	if (!--tp->timer_counter) {
7904 		if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
7905 			tg3_periodic_fetch_stats(tp);
7907 		if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
7911 			mac_stat = tr32(MAC_STATUS);
7914 			if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
7915 				if (mac_stat & MAC_STATUS_MI_INTERRUPT)
7917 			} else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
7921 				tg3_setup_phy(tp, 0);
7922 		} else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
7923 			u32 mac_stat = tr32(MAC_STATUS);
7926 			if (netif_carrier_ok(tp->dev) &&
7927 			    (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
7930 			if (! netif_carrier_ok(tp->dev) &&
7931 			    (mac_stat & (MAC_STATUS_PCS_SYNCED |
7932 					 MAC_STATUS_SIGNAL_DET))) {
7936 				if (!tp->serdes_counter) {
7939 					     ~MAC_MODE_PORT_MODE_MASK));
7941 					tw32_f(MAC_MODE, tp->mac_mode);
7944 				tg3_setup_phy(tp, 0);
7946 		} else if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
7947 			tg3_serdes_parallel_detect(tp);
7949 		tp->timer_counter = tp->timer_multiplier;
7952 	/* Heartbeat is only sent once every 2 seconds.
7954 	 * The heartbeat is to tell the ASF firmware that the host
7955 	 * driver is still alive.  In the event that the OS crashes,
7956 	 * ASF needs to reset the hardware to free up the FIFO space
7957 	 * that may be filled with rx packets destined for the host.
7958 	 * If the FIFO is full, ASF will no longer function properly.
7960 	 * Unintended resets have been reported on real time kernels
7961 	 * where the timer doesn't run on time.  Netpoll will also have
7964 	 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
7965 	 * to check the ring condition when the heartbeat is expiring
7966 	 * before doing the reset.  This will prevent most unintended
7969 	if (!--tp->asf_counter) {
7970 		if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) &&
7971 		    !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
7972 			tg3_wait_for_event_ack(tp);
7974 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
7975 				      FWCMD_NICDRV_ALIVE3);
7976 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
7977 			/* 5 seconds timeout */
7978 			tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 5);
7980 			tg3_generate_fw_event(tp);
7982 		tp->asf_counter = tp->asf_multiplier;
7985 	spin_unlock(&tp->lock);
/* Re-arm: this timer is self-perpetuating until del_timer_sync(). */
7988 	tp->timer.expires = jiffies + tp->timer_offset;
7989 	add_timer(&tp->timer);
/* Install the hardware interrupt handler.  The handler variant and
 * request flags depend on whether MSI is active (no sharing needed)
 * and whether tagged status / one-shot MSI are in use.  Returns the
 * request_irq() result (0 on success, negative errno on failure).
 * NOTE(review): the lines assigning the actual handler function
 * pointers are elided in this extract; code kept byte-identical.
 */
7992 static int tg3_request_irq(struct tg3 *tp)
7995 	unsigned long flags;
7996 	struct net_device *dev = tp->dev;
7998 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8000 		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
8002 		flags = IRQF_SAMPLE_RANDOM;
8005 		if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8006 			fn = tg3_interrupt_tagged;
/* Legacy INTx may be shared with other devices on the line. */
8007 		flags = IRQF_SHARED | IRQF_SAMPLE_RANDOM;
8009 	return (request_irq(tp->pdev->irq, fn, flags, dev->name, dev));
/* Verify that the chip can actually raise an interrupt: temporarily
 * swap in the tg3_test_isr handler, force an interrupt via the
 * coalescing NOW bit, then poll the interrupt mailbox / MISC host
 * control for evidence the IRQ fired before restoring the normal
 * handler.  NOTE(review): several lines (early returns, poll delay,
 * final success/failure return) are elided in this extract.
 */
8012 static int tg3_test_interrupt(struct tg3 *tp)
8014 	struct net_device *dev = tp->dev;
8015 	int err, i, intr_ok = 0;
8017 	if (!netif_running(dev))
8020 	tg3_disable_ints(tp);
8022 	free_irq(tp->pdev->irq, dev);
8024 	err = request_irq(tp->pdev->irq, tg3_test_isr,
8025 			  IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, dev);
8029 	tp->hw_status->status &= ~SD_STATUS_UPDATED;
8030 	tg3_enable_ints(tp);
/* Force the host coalescing engine to generate an interrupt now. */
8032 	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
8035 	for (i = 0; i < 5; i++) {
8036 		u32 int_mbox, misc_host_ctrl;
8038 		int_mbox = tr32_mailbox(MAILBOX_INTERRUPT_0 +
8040 		misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
8042 		if ((int_mbox != 0) ||
8043 		    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
8051 	tg3_disable_ints(tp);
8053 	free_irq(tp->pdev->irq, dev);
/* Put the regular production interrupt handler back in place. */
8055 	err = tg3_request_irq(tp);
8066 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
8067  * successfully restored
/* Runs the interrupt self-test with MSI enabled; on failure it falls
 * back to INTx, re-requests the IRQ and reinitializes the chip (the
 * failed MSI cycle may have ended in a Master Abort).
 * NOTE(review): several lines (declarations, early returns, label
 * lines) are elided in this extract; code kept byte-identical.
 */
8069 static int tg3_test_msi(struct tg3 *tp)
8071 	struct net_device *dev = tp->dev;
8075 	if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSI))
8078 	/* Turn off SERR reporting in case MSI terminates with Master
8081 	pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
8082 	pci_write_config_word(tp->pdev, PCI_COMMAND,
8083 			      pci_cmd & ~PCI_COMMAND_SERR);
8085 	err = tg3_test_interrupt(tp);
/* Restore the caller-visible PCI_COMMAND value before returning. */
8087 	pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
8092 	/* other failures */
8096 	/* MSI test failed, go back to INTx mode */
8097 	printk(KERN_WARNING PFX "%s: No interrupt was generated using MSI, "
8098 	       "switching to INTx mode. Please report this failure to "
8099 	       "the PCI maintainer and include system chipset information.\n",
8102 	free_irq(tp->pdev->irq, dev);
8103 	pci_disable_msi(tp->pdev);
8105 	tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8107 	err = tg3_request_irq(tp);
8111 	/* Need to reset the chip because the MSI cycle may have terminated
8112 	 * with Master Abort.
8114 	tg3_full_lock(tp, 1);
8116 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8117 	err = tg3_init_hw(tp, 1);
8119 	tg3_full_unlock(tp);
8122 	free_irq(tp->pdev->irq, dev);
/* net_device open hook: power the chip to D0, allocate the DMA-coherent
 * rings and status/stats blocks, enable MSI where supported, request
 * the IRQ, initialize the hardware, start the maintenance timer and
 * finally enable interrupts and the TX queue.
 * NOTE(review): error-path lines (returns, goto labels, cleanup
 * ordering) are elided in this extract; code kept byte-identical.
 */
8127 static int tg3_open(struct net_device *dev)
8129 	struct tg3 *tp = netdev_priv(dev);
8132 	netif_carrier_off(tp->dev);
8134 	err = tg3_set_power_state(tp, PCI_D0);
8138 	tg3_full_lock(tp, 0);
8140 	tg3_disable_ints(tp);
8141 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8143 	tg3_full_unlock(tp);
8145 	/* The placement of this call is tied
8146 	 * to the setup and use of Host TX descriptors.
8148 	err = tg3_alloc_consistent(tp);
8152 	if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSI) {
8153 		/* All MSI supporting chips should support tagged
8154 		 * status.  Assert that this is the case.
8156 		if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)) {
8157 			printk(KERN_WARNING PFX "%s: MSI without TAGGED? "
8158 			       "Not using MSI.\n", tp->dev->name);
8159 		} else if (pci_enable_msi(tp->pdev) == 0) {
8162 			msi_mode = tr32(MSGINT_MODE);
8163 			tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
8164 			tp->tg3_flags2 |= TG3_FLG2_USING_MSI;
8167 	err = tg3_request_irq(tp);
/* IRQ request failed: undo MSI and the consistent allocations. */
8170 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8171 		pci_disable_msi(tp->pdev);
8172 		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8174 	tg3_free_consistent(tp);
8178 	napi_enable(&tp->napi);
8180 	tg3_full_lock(tp, 0);
8182 	err = tg3_init_hw(tp, 1);
8184 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
/* Tagged status lets us coalesce harder, so poll less often. */
8187 	if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS)
8188 		tp->timer_offset = HZ;
8190 		tp->timer_offset = HZ / 10;
8192 	BUG_ON(tp->timer_offset > HZ);
8193 	tp->timer_counter = tp->timer_multiplier =
8194 		(HZ / tp->timer_offset);
8195 	tp->asf_counter = tp->asf_multiplier =
8196 		((HZ / tp->timer_offset) * 2);
8198 	init_timer(&tp->timer);
8199 	tp->timer.expires = jiffies + tp->timer_offset;
8200 	tp->timer.data = (unsigned long) tp;
8201 	tp->timer.function = tg3_timer;
8204 	tg3_full_unlock(tp);
/* Hardware init failed: tear everything back down. */
8207 	napi_disable(&tp->napi);
8208 	free_irq(tp->pdev->irq, dev);
8209 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8210 		pci_disable_msi(tp->pdev);
8211 		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8213 	tg3_free_consistent(tp);
/* Confirm MSI actually delivers before trusting it. */
8217 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8218 		err = tg3_test_msi(tp);
8221 		tg3_full_lock(tp, 0);
8223 		if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8224 			pci_disable_msi(tp->pdev);
8225 			tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
8227 		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8229 		tg3_free_consistent(tp);
8231 		tg3_full_unlock(tp);
8233 		napi_disable(&tp->napi);
8238 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8239 		if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI) {
8240 			u32 val = tr32(PCIE_TRANSACTION_CFG);
8242 			tw32(PCIE_TRANSACTION_CFG,
8243 			     val | PCIE_TRANS_CFG_1SHOT_MSI);
8250 	tg3_full_lock(tp, 0);
8252 	add_timer(&tp->timer);
8253 	tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
8254 	tg3_enable_ints(tp);
8256 	tg3_full_unlock(tp);
8258 	netif_start_queue(dev);
/* Debug-only register/state dump (normally compiled out; see the
 * commented-out "static").  Prints PCI status, every major control
 * block's MODE/STATUS registers, the host-coalescing addresses, the
 * host status and statistics blocks, mailbox producer indices, and
 * the NIC-side TX/RX descriptors read through the SRAM window.
 * NOTE(review): some lines (local declarations such as val16/i and a
 * few printk argument lines) are elided in this extract; code lines
 * kept byte-identical.
 */
8264 /*static*/ void tg3_dump_state(struct tg3 *tp)
8266 	u32 val32, val32_2, val32_3, val32_4, val32_5;
8270 	pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
8271 	pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
8272 	printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
8276 	printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
8277 	       tr32(MAC_MODE), tr32(MAC_STATUS));
8278 	printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
8279 	       tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
8280 	printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
8281 	       tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
8282 	printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
8283 	       tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
8285 	/* Send data initiator control block */
8286 	printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
8287 	       tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
8288 	printk("       SNDDATAI_STATSCTRL[%08x]\n",
8289 	       tr32(SNDDATAI_STATSCTRL));
8291 	/* Send data completion control block */
8292 	printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
8294 	/* Send BD ring selector block */
8295 	printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
8296 	       tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
8298 	/* Send BD initiator control block */
8299 	printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
8300 	       tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
8302 	/* Send BD completion control block */
8303 	printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
8305 	/* Receive list placement control block */
8306 	printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
8307 	       tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
8308 	printk("       RCVLPC_STATSCTRL[%08x]\n",
8309 	       tr32(RCVLPC_STATSCTRL));
8311 	/* Receive data and receive BD initiator control block */
8312 	printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
8313 	       tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
8315 	/* Receive data completion control block */
8316 	printk("DEBUG: RCVDCC_MODE[%08x]\n",
8319 	/* Receive BD initiator control block */
8320 	printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
8321 	       tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
8323 	/* Receive BD completion control block */
8324 	printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
8325 	       tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
8327 	/* Receive list selector control block */
8328 	printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
8329 	       tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
8331 	/* Mbuf cluster free block */
8332 	printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
8333 	       tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
8335 	/* Host coalescing control block */
8336 	printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
8337 	       tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
8338 	printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
8339 	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8340 	       tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8341 	printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
8342 	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
8343 	       tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
8344 	printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
8345 	       tr32(HOSTCC_STATS_BLK_NIC_ADDR));
8346 	printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
8347 	       tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
8349 	/* Memory arbiter control block */
8350 	printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
8351 	       tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
8353 	/* Buffer manager control block */
8354 	printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
8355 	       tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
8356 	printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
8357 	       tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
8358 	printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
8359 	       "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
8360 	       tr32(BUFMGR_DMA_DESC_POOL_ADDR),
8361 	       tr32(BUFMGR_DMA_DESC_POOL_SIZE));
8363 	/* Read DMA control block */
8364 	printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
8365 	       tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
8367 	/* Write DMA control block */
8368 	printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
8369 	       tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
8371 	/* DMA completion block */
8372 	printk("DEBUG: DMAC_MODE[%08x]\n",
8376 	printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
8377 	       tr32(GRC_MODE), tr32(GRC_MISC_CFG));
8378 	printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
8379 	       tr32(GRC_LOCAL_CTRL));
8382 	printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
8383 	       tr32(RCVDBDI_JUMBO_BD + 0x0),
8384 	       tr32(RCVDBDI_JUMBO_BD + 0x4),
8385 	       tr32(RCVDBDI_JUMBO_BD + 0x8),
8386 	       tr32(RCVDBDI_JUMBO_BD + 0xc));
8387 	printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
8388 	       tr32(RCVDBDI_STD_BD + 0x0),
8389 	       tr32(RCVDBDI_STD_BD + 0x4),
8390 	       tr32(RCVDBDI_STD_BD + 0x8),
8391 	       tr32(RCVDBDI_STD_BD + 0xc));
8392 	printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
8393 	       tr32(RCVDBDI_MINI_BD + 0x0),
8394 	       tr32(RCVDBDI_MINI_BD + 0x4),
8395 	       tr32(RCVDBDI_MINI_BD + 0x8),
8396 	       tr32(RCVDBDI_MINI_BD + 0xc));
/* Ring control blocks and status block read out of chip SRAM. */
8398 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
8399 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
8400 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
8401 	tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
8402 	printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
8403 	       val32, val32_2, val32_3, val32_4);
8405 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
8406 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
8407 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
8408 	tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
8409 	printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
8410 	       val32, val32_2, val32_3, val32_4);
8412 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
8413 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
8414 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
8415 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
8416 	tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
8417 	printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
8418 	       val32, val32_2, val32_3, val32_4, val32_5);
8420 	/* SW status block */
8421 	printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
8422 	       tp->hw_status->status,
8423 	       tp->hw_status->status_tag,
8424 	       tp->hw_status->rx_jumbo_consumer,
8425 	       tp->hw_status->rx_consumer,
8426 	       tp->hw_status->rx_mini_consumer,
8427 	       tp->hw_status->idx[0].rx_producer,
8428 	       tp->hw_status->idx[0].tx_consumer);
8430 	/* SW statistics block */
8431 	printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
8432 	       ((u32 *)tp->hw_stats)[0],
8433 	       ((u32 *)tp->hw_stats)[1],
8434 	       ((u32 *)tp->hw_stats)[2],
8435 	       ((u32 *)tp->hw_stats)[3]);
8438 	printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
8439 	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
8440 	       tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
8441 	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
8442 	       tr32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
8444 	/* NIC side send descriptors. */
8445 	for (i = 0; i < 6; i++) {
8448 		txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
8449 			+ (i * sizeof(struct tg3_tx_buffer_desc));
8450 		printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
8452 		       readl(txd + 0x0), readl(txd + 0x4),
8453 		       readl(txd + 0x8), readl(txd + 0xc));
8456 	/* NIC side RX descriptors. */
8457 	for (i = 0; i < 6; i++) {
8460 		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
8461 			+ (i * sizeof(struct tg3_rx_buffer_desc));
8462 		printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
8464 		       readl(rxd + 0x0), readl(rxd + 0x4),
8465 		       readl(rxd + 0x8), readl(rxd + 0xc));
8466 		rxd += (4 * sizeof(u32));
8467 		printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
8469 		       readl(rxd + 0x0), readl(rxd + 0x4),
8470 		       readl(rxd + 0x8), readl(rxd + 0xc));
8473 	for (i = 0; i < 6; i++) {
8476 		rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
8477 			+ (i * sizeof(struct tg3_rx_buffer_desc));
8478 		printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
8480 		       readl(rxd + 0x0), readl(rxd + 0x4),
8481 		       readl(rxd + 0x8), readl(rxd + 0xc));
8482 		rxd += (4 * sizeof(u32));
8483 		printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
8485 		       readl(rxd + 0x0), readl(rxd + 0x4),
8486 		       readl(rxd + 0x8), readl(rxd + 0xc));
8491 static struct net_device_stats *tg3_get_stats(struct net_device *);
8492 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
/* net_device stop hook: quiesce NAPI and the reset work item, stop the
 * TX queue and maintenance timer, halt the chip under the full lock,
 * release the IRQ/MSI, snapshot the statistics (the DMA-coherent stats
 * block is about to be freed) and power the chip down to D3hot.
 * NOTE(review): some lines (e.g. between tg3_halt and the flag clear)
 * are elided in this extract; code lines kept byte-identical.
 */
8494 static int tg3_close(struct net_device *dev)
8496 	struct tg3 *tp = netdev_priv(dev);
8498 	napi_disable(&tp->napi);
8499 	cancel_work_sync(&tp->reset_task);
8501 	netif_stop_queue(dev);
8503 	del_timer_sync(&tp->timer);
8505 	tg3_full_lock(tp, 1);
8510 	tg3_disable_ints(tp);
8512 	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
8514 	tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
8516 	tg3_full_unlock(tp);
8518 	free_irq(tp->pdev->irq, dev);
8519 	if (tp->tg3_flags2 & TG3_FLG2_USING_MSI) {
8520 		pci_disable_msi(tp->pdev);
8521 		tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI;
/* Preserve cumulative stats across close/open cycles before the
 * hardware stats memory goes away in tg3_free_consistent().
 */
8524 	memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
8525 	       sizeof(tp->net_stats_prev));
8526 	memcpy(&tp->estats_prev, tg3_get_estats(tp),
8527 	       sizeof(tp->estats_prev));
8529 	tg3_free_consistent(tp);
8531 	tg3_set_power_state(tp, PCI_D3hot);
8533 	netif_carrier_off(tp->dev);
/* Convert a 64-bit hardware stat counter to unsigned long for the
 * net_device_stats interface.  On 64-bit builds both halves are
 * combined; NOTE(review): the 32-bit branch's body is elided in this
 * extract — presumably it returns only the low word (unsigned long
 * cannot hold both halves there) — confirm against the full source.
 */
8538 static inline unsigned long get_stat64(tg3_stat64_t *val)
8542 #if (BITS_PER_LONG == 32)
8545 	ret = ((u64)val->high << 32) | ((u64)val->low);
8550 static inline u64 get_estat64(tg3_stat64_t *val)
8552 return ((u64)val->high << 32) | ((u64)val->low);
/* Return the cumulative receive CRC error count.  5700/5701 copper
 * parts keep this in a self-clearing PHY register (0x14) that must be
 * read under tp->lock with the CRC counter test mode enabled; all
 * other chips report it via the hardware rx_fcs_errors statistic.
 * NOTE(review): some lines (the u32 val declaration and the else
 * branch of the readphy check) are elided in this extract.
 */
8555 static unsigned long calc_crc_errors(struct tg3 *tp)
8557 	struct tg3_hw_stats *hw_stats = tp->hw_stats;
8559 	if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
8560 	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8561 	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
8564 		spin_lock_bh(&tp->lock);
8565 		if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
8566 			tg3_writephy(tp, MII_TG3_TEST1,
8567 				     val | MII_TG3_TEST1_CRC_EN);
8568 			tg3_readphy(tp, 0x14, &val);
8571 		spin_unlock_bh(&tp->lock);
/* PHY register 0x14 clears on read, so accumulate into software. */
8573 		tp->phy_crc_errors += val;
8575 		return tp->phy_crc_errors;
8578 	return get_stat64(&hw_stats->rx_fcs_errors);
/* Produce the cumulative ethtool statistic for @member: the snapshot
 * saved at the last close plus the current 64-bit hardware value.
 * Relies on 'estats', 'old_estats' and 'hw_stats' locals in the
 * enclosing function (tg3_get_estats).
 */
#define ESTAT_ADD(member) \
	estats->member = old_estats->member + \
	get_estat64(&hw_stats->member)
8585 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
8587 struct tg3_ethtool_stats *estats = &tp->estats;
8588 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
8589 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8594 ESTAT_ADD(rx_octets);
8595 ESTAT_ADD(rx_fragments);
8596 ESTAT_ADD(rx_ucast_packets);
8597 ESTAT_ADD(rx_mcast_packets);
8598 ESTAT_ADD(rx_bcast_packets);
8599 ESTAT_ADD(rx_fcs_errors);
8600 ESTAT_ADD(rx_align_errors);
8601 ESTAT_ADD(rx_xon_pause_rcvd);
8602 ESTAT_ADD(rx_xoff_pause_rcvd);
8603 ESTAT_ADD(rx_mac_ctrl_rcvd);
8604 ESTAT_ADD(rx_xoff_entered);
8605 ESTAT_ADD(rx_frame_too_long_errors);
8606 ESTAT_ADD(rx_jabbers);
8607 ESTAT_ADD(rx_undersize_packets);
8608 ESTAT_ADD(rx_in_length_errors);
8609 ESTAT_ADD(rx_out_length_errors);
8610 ESTAT_ADD(rx_64_or_less_octet_packets);
8611 ESTAT_ADD(rx_65_to_127_octet_packets);
8612 ESTAT_ADD(rx_128_to_255_octet_packets);
8613 ESTAT_ADD(rx_256_to_511_octet_packets);
8614 ESTAT_ADD(rx_512_to_1023_octet_packets);
8615 ESTAT_ADD(rx_1024_to_1522_octet_packets);
8616 ESTAT_ADD(rx_1523_to_2047_octet_packets);
8617 ESTAT_ADD(rx_2048_to_4095_octet_packets);
8618 ESTAT_ADD(rx_4096_to_8191_octet_packets);
8619 ESTAT_ADD(rx_8192_to_9022_octet_packets);
8621 ESTAT_ADD(tx_octets);
8622 ESTAT_ADD(tx_collisions);
8623 ESTAT_ADD(tx_xon_sent);
8624 ESTAT_ADD(tx_xoff_sent);
8625 ESTAT_ADD(tx_flow_control);
8626 ESTAT_ADD(tx_mac_errors);
8627 ESTAT_ADD(tx_single_collisions);
8628 ESTAT_ADD(tx_mult_collisions);
8629 ESTAT_ADD(tx_deferred);
8630 ESTAT_ADD(tx_excessive_collisions);
8631 ESTAT_ADD(tx_late_collisions);
8632 ESTAT_ADD(tx_collide_2times);
8633 ESTAT_ADD(tx_collide_3times);
8634 ESTAT_ADD(tx_collide_4times);
8635 ESTAT_ADD(tx_collide_5times);
8636 ESTAT_ADD(tx_collide_6times);
8637 ESTAT_ADD(tx_collide_7times);
8638 ESTAT_ADD(tx_collide_8times);
8639 ESTAT_ADD(tx_collide_9times);
8640 ESTAT_ADD(tx_collide_10times);
8641 ESTAT_ADD(tx_collide_11times);
8642 ESTAT_ADD(tx_collide_12times);
8643 ESTAT_ADD(tx_collide_13times);
8644 ESTAT_ADD(tx_collide_14times);
8645 ESTAT_ADD(tx_collide_15times);
8646 ESTAT_ADD(tx_ucast_packets);
8647 ESTAT_ADD(tx_mcast_packets);
8648 ESTAT_ADD(tx_bcast_packets);
8649 ESTAT_ADD(tx_carrier_sense_errors);
8650 ESTAT_ADD(tx_discards);
8651 ESTAT_ADD(tx_errors);
8653 ESTAT_ADD(dma_writeq_full);
8654 ESTAT_ADD(dma_write_prioq_full);
8655 ESTAT_ADD(rxbds_empty);
8656 ESTAT_ADD(rx_discards);
8657 ESTAT_ADD(rx_errors);
8658 ESTAT_ADD(rx_threshold_hit);
8660 ESTAT_ADD(dma_readq_full);
8661 ESTAT_ADD(dma_read_prioq_full);
8662 ESTAT_ADD(tx_comp_queue_full);
8664 ESTAT_ADD(ring_set_send_prod_index);
8665 ESTAT_ADD(ring_status_update);
8666 ESTAT_ADD(nic_irqs);
8667 ESTAT_ADD(nic_avoided_irqs);
8668 ESTAT_ADD(nic_tx_threshold_hit);
8673 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
8675 struct tg3 *tp = netdev_priv(dev);
8676 struct net_device_stats *stats = &tp->net_stats;
8677 struct net_device_stats *old_stats = &tp->net_stats_prev;
8678 struct tg3_hw_stats *hw_stats = tp->hw_stats;
8683 stats->rx_packets = old_stats->rx_packets +
8684 get_stat64(&hw_stats->rx_ucast_packets) +
8685 get_stat64(&hw_stats->rx_mcast_packets) +
8686 get_stat64(&hw_stats->rx_bcast_packets);
8688 stats->tx_packets = old_stats->tx_packets +
8689 get_stat64(&hw_stats->tx_ucast_packets) +
8690 get_stat64(&hw_stats->tx_mcast_packets) +
8691 get_stat64(&hw_stats->tx_bcast_packets);
8693 stats->rx_bytes = old_stats->rx_bytes +
8694 get_stat64(&hw_stats->rx_octets);
8695 stats->tx_bytes = old_stats->tx_bytes +
8696 get_stat64(&hw_stats->tx_octets);
8698 stats->rx_errors = old_stats->rx_errors +
8699 get_stat64(&hw_stats->rx_errors);
8700 stats->tx_errors = old_stats->tx_errors +
8701 get_stat64(&hw_stats->tx_errors) +
8702 get_stat64(&hw_stats->tx_mac_errors) +
8703 get_stat64(&hw_stats->tx_carrier_sense_errors) +
8704 get_stat64(&hw_stats->tx_discards);
8706 stats->multicast = old_stats->multicast +
8707 get_stat64(&hw_stats->rx_mcast_packets);
8708 stats->collisions = old_stats->collisions +
8709 get_stat64(&hw_stats->tx_collisions);
8711 stats->rx_length_errors = old_stats->rx_length_errors +
8712 get_stat64(&hw_stats->rx_frame_too_long_errors) +
8713 get_stat64(&hw_stats->rx_undersize_packets);
8715 stats->rx_over_errors = old_stats->rx_over_errors +
8716 get_stat64(&hw_stats->rxbds_empty);
8717 stats->rx_frame_errors = old_stats->rx_frame_errors +
8718 get_stat64(&hw_stats->rx_align_errors);
8719 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
8720 get_stat64(&hw_stats->tx_discards);
8721 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
8722 get_stat64(&hw_stats->tx_carrier_sense_errors);
8724 stats->rx_crc_errors = old_stats->rx_crc_errors +
8725 calc_crc_errors(tp);
8727 stats->rx_missed_errors = old_stats->rx_missed_errors +
8728 get_stat64(&hw_stats->rx_discards);
/* Compute the standard reflected (little-endian) CRC-32 of @buf:
 * polynomial 0xedb88320, initial value 0xffffffff, final bit
 * inversion — identical to zlib's crc32().  Used to hash multicast
 * MAC addresses into the MAC_HASH_REG_* filter registers.
 *
 * @buf: bytes to checksum (typically a 6-byte MAC address)
 * @len: number of bytes in @buf
 *
 * NOTE(review): the loop body was elided in the extract this was
 * rebuilt from; restored as the canonical bitwise CRC-32 matching
 * the two visible loop headers.
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
	u32 reg = 0xffffffff;
	int j, k;

	for (j = 0; j < len; j++) {
		reg ^= buf[j];

		for (k = 0; k < 8; k++) {
			u32 tmp = reg & 0x01;

			reg >>= 1;
			if (tmp)
				reg ^= 0xedb88320;
		}
	}

	return ~reg;
}
8758 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8760 /* accept or reject all multicast frames */
8761 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8762 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8763 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8764 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
/* Program the MAC RX mode (promiscuity, VLAN tag stripping) and the
 * multicast hash filter from dev->flags and the device multicast
 * list.  Caller holds the appropriate lock (see tg3_set_rx_mode).
 * NOTE(review): several lines are elided in this extract — the
 * rx_mode declaration, parts of the TG3_VLAN_TAG_USED conditional,
 * the multicast-loop locals and the bit-extraction statements between
 * the crc and regidx computations; code lines kept byte-identical.
 */
8767 static void __tg3_set_rx_mode(struct net_device *dev)
8769 	struct tg3 *tp = netdev_priv(dev);
8772 	rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8773 				  RX_MODE_KEEP_VLAN_TAG);
8775 	/* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8778 #if TG3_VLAN_TAG_USED
8780 	    !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8781 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8783 	/* By definition, VLAN is disabled always in this
8786 	if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
8787 		rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8790 	if (dev->flags & IFF_PROMISC) {
8791 		/* Promiscuous mode. */
8792 		rx_mode |= RX_MODE_PROMISC;
8793 	} else if (dev->flags & IFF_ALLMULTI) {
8794 		/* Accept all multicast. */
8795 		tg3_set_multi (tp, 1);
8796 	} else if (dev->mc_count < 1) {
8797 		/* Reject all multicast. */
8798 		tg3_set_multi (tp, 0);
8800 		/* Accept one or more multicast(s). */
8801 		struct dev_mc_list *mclist;
8803 		u32 mc_filter[4] = { 0, };
/* Hash each multicast address into one bit of the 128-bit filter. */
8808 		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
8809 		     i++, mclist = mclist->next) {
8811 			crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
8813 			regidx = (bit & 0x60) >> 5;
8815 			mc_filter[regidx] |= (1 << bit);
8818 		tw32(MAC_HASH_REG_0, mc_filter[0]);
8819 		tw32(MAC_HASH_REG_1, mc_filter[1]);
8820 		tw32(MAC_HASH_REG_2, mc_filter[2]);
8821 		tw32(MAC_HASH_REG_3, mc_filter[3]);
/* Only touch the hardware RX mode register when it actually changed. */
8824 	if (rx_mode != tp->rx_mode) {
8825 		tp->rx_mode = rx_mode;
8826 		tw32_f(MAC_RX_MODE, rx_mode);
/* net_device set_rx_mode hook: apply promiscuity/multicast settings
 * under the full lock.  Nothing to do while the interface is down —
 * the open path programs the RX mode when the chip comes up.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(dev))
		return;

	tg3_full_lock(tp, 0);
	__tg3_set_rx_mode(dev);
	tg3_full_unlock(tp);
}
/* Size in bytes of the register snapshot produced by tg3_get_regs(). */
#define TG3_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len hook. */
static int tg3_get_regs_len(struct net_device *dev)
{
	return TG3_REGDUMP_LEN;
}
/* ethtool get_regs hook: snapshot the chip's register blocks into the
 * caller-supplied TG3_REGDUMP_LEN buffer, each block at its natural
 * register offset (unread gaps stay zero from the memset).  Skipped
 * while the PHY is powered down.
 * NOTE(review): some lines (u32 *p / orig_p locals, the low-power
 * early return, #undef of __GET_REG32/GET_REG32_1) are elided in this
 * extract; code lines kept byte-identical.
 */
8850 static void tg3_get_regs(struct net_device *dev,
8851 		struct ethtool_regs *regs, void *_p)
8854 	struct tg3 *tp = netdev_priv(dev);
8860 	memset(p, 0, TG3_REGDUMP_LEN);
8862 	if (tp->link_config.phy_is_low_power)
8865 	tg3_full_lock(tp, 0);
8867 #define __GET_REG32(reg)	(*(p)++ = tr32(reg))
8868 #define GET_REG32_LOOP(base,len)		\
8869 do {	p = (u32 *)(orig_p + (base));		\
8870 	for (i = 0; i < len; i += 4)		\
8871 		__GET_REG32((base) + i);	\
8873 #define GET_REG32_1(reg)	\
8874 do {	p = (u32 *)(orig_p + (reg));	\
8875 	__GET_REG32((reg));		\
8878 	GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
8879 	GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
8880 	GET_REG32_LOOP(MAC_MODE, 0x4f0);
8881 	GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
8882 	GET_REG32_1(SNDDATAC_MODE);
8883 	GET_REG32_LOOP(SNDBDS_MODE, 0x80);
8884 	GET_REG32_LOOP(SNDBDI_MODE, 0x48);
8885 	GET_REG32_1(SNDBDC_MODE);
8886 	GET_REG32_LOOP(RCVLPC_MODE, 0x20);
8887 	GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
8888 	GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
8889 	GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
8890 	GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
8891 	GET_REG32_1(RCVDCC_MODE);
8892 	GET_REG32_LOOP(RCVBDI_MODE, 0x20);
8893 	GET_REG32_LOOP(RCVCC_MODE, 0x14);
8894 	GET_REG32_LOOP(RCVLSC_MODE, 0x08);
8895 	GET_REG32_1(MBFREE_MODE);
8896 	GET_REG32_LOOP(HOSTCC_MODE, 0x100);
8897 	GET_REG32_LOOP(MEMARB_MODE, 0x10);
8898 	GET_REG32_LOOP(BUFMGR_MODE, 0x58);
8899 	GET_REG32_LOOP(RDMAC_MODE, 0x08);
8900 	GET_REG32_LOOP(WDMAC_MODE, 0x08);
8901 	GET_REG32_1(RX_CPU_MODE);
8902 	GET_REG32_1(RX_CPU_STATE);
8903 	GET_REG32_1(RX_CPU_PGMCTR);
8904 	GET_REG32_1(RX_CPU_HWBKPT);
8905 	GET_REG32_1(TX_CPU_MODE);
8906 	GET_REG32_1(TX_CPU_STATE);
8907 	GET_REG32_1(TX_CPU_PGMCTR);
8908 	GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
8909 	GET_REG32_LOOP(FTQ_RESET, 0x120);
8910 	GET_REG32_LOOP(MSGINT_MODE, 0x0c);
8911 	GET_REG32_1(DMAC_MODE);
8912 	GET_REG32_LOOP(GRC_MODE, 0x4c);
/* NVRAM registers only exist on parts with a real NVRAM interface. */
8913 	if (tp->tg3_flags & TG3_FLAG_NVRAM)
8914 		GET_REG32_LOOP(NVRAM_CMD, 0x24);
8917 #undef GET_REG32_LOOP
8920 	tg3_full_unlock(tp);
/* ethtool get_eeprom_len: report the probed NVRAM size in bytes. */
8923 static int tg3_get_eeprom_len(struct net_device *dev)
8925 struct tg3 *tp = netdev_priv(dev);
8927 return tp->nvram_size;
8930 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
8931 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val);
8932 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val);
/* tg3_get_eeprom() - ethtool EEPROM read.  NVRAM is read in 4-byte words,
 * so the request is split into: an unaligned head (offset & 3), a run of
 * whole words, and an unaligned tail.  eeprom->len is advanced as bytes
 * are copied into 'data'.  Refused while the PHY is in low-power state.
 * NOTE(review): listing is line-sampled — local declarations (ret, val,
 * pd), error-return paths and the final return are missing from view.
 */
8934 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8936 struct tg3 *tp = netdev_priv(dev);
8939 u32 i, offset, len, b_offset, b_count;
8942 if (tp->link_config.phy_is_low_power)
8945 offset = eeprom->offset;
8949 eeprom->magic = TG3_EEPROM_MAGIC;
8952 /* adjustments to start on required 4 byte boundary */
8953 b_offset = offset & 3;
8954 b_count = 4 - b_offset;
8955 if (b_count > len) {
8956 /* i.e. offset=1 len=2 */
8959 ret = tg3_nvram_read_le(tp, offset-b_offset, &val);
/* Copy only the requested bytes out of the aligned word just read. */
8962 memcpy(data, ((char*)&val) + b_offset, b_count);
8965 eeprom->len += b_count;
8968 /* read bytes upto the last 4 byte boundary */
8969 pd = &data[eeprom->len];
8970 for (i = 0; i < (len - (len & 3)); i += 4) {
8971 ret = tg3_nvram_read_le(tp, offset + i, &val);
8976 memcpy(pd + i, &val, 4);
8981 /* read last bytes not ending on 4 byte boundary */
8982 pd = &data[eeprom->len];
8984 b_offset = offset + len - b_count;
8985 ret = tg3_nvram_read_le(tp, b_offset, &val);
8988 memcpy(pd, &val, b_count);
8989 eeprom->len += b_count;
8994 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
/* tg3_set_eeprom() - ethtool EEPROM write.  NVRAM writes are word-based:
 * when the request is unaligned at either end, the bordering words are
 * read back ('start'/'end'), a scratch buffer of the widened length is
 * built (read-modify-write), and the whole range is written in one call
 * to tg3_nvram_write_block().  Requires the caller-supplied magic.
 * NOTE(review): listing is line-sampled — 'buf' setup for the aligned
 * case, kfree/return paths and odd_len computation are missing from view.
 */
8996 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
8998 struct tg3 *tp = netdev_priv(dev);
9000 u32 offset, len, b_offset, odd_len;
9004 if (tp->link_config.phy_is_low_power)
9007 if (eeprom->magic != TG3_EEPROM_MAGIC)
9010 offset = eeprom->offset;
9013 if ((b_offset = (offset & 3))) {
9014 /* adjustments to start on required 4 byte boundary */
9015 ret = tg3_nvram_read_le(tp, offset-b_offset, &start);
9026 /* adjustments to end on required 4 byte boundary */
9028 len = (len + 3) & ~3;
9029 ret = tg3_nvram_read_le(tp, offset+len-4, &end);
9035 if (b_offset || odd_len) {
9036 buf = kmalloc(len, GFP_KERNEL);
/* Preserve the bytes surrounding the caller's range, then overlay it. */
9040 memcpy(buf, &start, 4);
9042 memcpy(buf+len-4, &end, 4);
9043 memcpy(buf + b_offset, data, eeprom->len);
9046 ret = tg3_nvram_write_block(tp, offset, len, buf);
/* tg3_get_settings() - ethtool get link settings.  When phylib manages
 * the PHY, delegate to phy_ethtool_gset(); otherwise build the supported/
 * advertising masks from the chip flags (10/100-only, serdes vs copper)
 * and report the active speed/duplex while the interface is running.
 * NOTE(review): listing is line-sampled; some mask bits and else-branches
 * are missing from view.
 */
9054 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9056 struct tg3 *tp = netdev_priv(dev);
9058 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9059 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9061 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9064 cmd->supported = (SUPPORTED_Autoneg);
9066 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9067 cmd->supported |= (SUPPORTED_1000baseT_Half |
9068 SUPPORTED_1000baseT_Full);
9070 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)) {
9071 cmd->supported |= (SUPPORTED_100baseT_Half |
9072 SUPPORTED_100baseT_Full |
9073 SUPPORTED_10baseT_Half |
9074 SUPPORTED_10baseT_Full |
9076 cmd->port = PORT_TP;
9078 cmd->supported |= SUPPORTED_FIBRE;
9079 cmd->port = PORT_FIBRE;
9082 cmd->advertising = tp->link_config.advertising;
9083 if (netif_running(dev)) {
9084 cmd->speed = tp->link_config.active_speed;
9085 cmd->duplex = tp->link_config.active_duplex;
9087 cmd->phy_address = PHY_ADDR;
9088 cmd->transceiver = 0;
9089 cmd->autoneg = tp->link_config.autoneg;
9095 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9097 struct tg3 *tp = netdev_priv(dev);
9099 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9100 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9102 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd);
9105 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) {
9106 /* These are the only valid advertisement bits allowed. */
9107 if (cmd->autoneg == AUTONEG_ENABLE &&
9108 (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
9109 ADVERTISED_1000baseT_Full |
9110 ADVERTISED_Autoneg |
9113 /* Fiber can only do SPEED_1000. */
9114 else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9115 (cmd->speed != SPEED_1000))
9117 /* Copper cannot force SPEED_1000. */
9118 } else if ((cmd->autoneg != AUTONEG_ENABLE) &&
9119 (cmd->speed == SPEED_1000))
9121 else if ((cmd->speed == SPEED_1000) &&
9122 (tp->tg3_flags & TG3_FLAG_10_100_ONLY))
9125 tg3_full_lock(tp, 0);
9127 tp->link_config.autoneg = cmd->autoneg;
9128 if (cmd->autoneg == AUTONEG_ENABLE) {
9129 tp->link_config.advertising = (cmd->advertising |
9130 ADVERTISED_Autoneg);
9131 tp->link_config.speed = SPEED_INVALID;
9132 tp->link_config.duplex = DUPLEX_INVALID;
9134 tp->link_config.advertising = 0;
9135 tp->link_config.speed = cmd->speed;
9136 tp->link_config.duplex = cmd->duplex;
9139 tp->link_config.orig_speed = tp->link_config.speed;
9140 tp->link_config.orig_duplex = tp->link_config.duplex;
9141 tp->link_config.orig_autoneg = tp->link_config.autoneg;
9143 if (netif_running(dev))
9144 tg3_setup_phy(tp, 1);
9146 tg3_full_unlock(tp);
/* ethtool get_drvinfo: report driver name/version, firmware version and
 * PCI bus address of the device.
 */
9151 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
9153 struct tg3 *tp = netdev_priv(dev);
9155 strcpy(info->driver, DRV_MODULE_NAME);
9156 strcpy(info->version, DRV_MODULE_VERSION);
9157 strcpy(info->fw_version, tp->fw_ver);
9158 strcpy(info->bus_info, pci_name(tp->pdev));
/* ethtool get_wol: WAKE_MAGIC is supported/enabled only when the chip is
 * WoL-capable AND the platform device can wake the system.  SecureOn
 * password is not supported, hence sopass is zeroed.
 * NOTE(review): listing is line-sampled; the else-branches that clear
 * wol->supported / wol->wolopts are missing from view.
 */
9161 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9163 struct tg3 *tp = netdev_priv(dev);
9165 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
9166 device_can_wakeup(&tp->pdev->dev))
9167 wol->supported = WAKE_MAGIC;
9171 if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) &&
9172 device_can_wakeup(&tp->pdev->dev))
9173 wol->wolopts = WAKE_MAGIC;
9174 memset(&wol->sopass, 0, sizeof(wol->sopass));
/* ethtool set_wol: only WAKE_MAGIC is accepted, and only on WoL-capable
 * hardware.  Updates the driver flag and the device wakeup state together
 * under tp->lock so they cannot diverge.
 */
9177 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
9179 struct tg3 *tp = netdev_priv(dev);
9180 struct device *dp = &tp->pdev->dev;
9182 if (wol->wolopts & ~WAKE_MAGIC)
9184 if ((wol->wolopts & WAKE_MAGIC) &&
9185 !((tp->tg3_flags & TG3_FLAG_WOL_CAP) && device_can_wakeup(dp)))
9188 spin_lock_bh(&tp->lock);
9189 if (wol->wolopts & WAKE_MAGIC) {
9190 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
9191 device_set_wakeup_enable(dp, true);
9193 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
9194 device_set_wakeup_enable(dp, false);
9196 spin_unlock_bh(&tp->lock);
/* ethtool get_msglevel: return the driver's message-enable bitmask. */
9201 static u32 tg3_get_msglevel(struct net_device *dev)
9203 struct tg3 *tp = netdev_priv(dev);
9204 return tp->msg_enable;
/* ethtool set_msglevel: store the new message-enable bitmask. */
9207 static void tg3_set_msglevel(struct net_device *dev, u32 value)
9209 struct tg3 *tp = netdev_priv(dev);
9210 tp->msg_enable = value;
/* ethtool set_tso: reject on non-TSO-capable chips; on HW_TSO_2 parts
 * (except 5906) also toggle TSO6, and enable TSO_ECN on 5761/5785 and
 * non-AX 5784 revisions.  Final feature flip is done by the generic
 * ethtool_op_set_tso().  NOTE(review): listing is line-sampled; the
 * value==0 checks guarding these branches are missing from view.
 */
9213 static int tg3_set_tso(struct net_device *dev, u32 value)
9215 struct tg3 *tp = netdev_priv(dev);
9217 if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
9222 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
9223 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906)) {
9225 dev->features |= NETIF_F_TSO6;
9226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9227 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9228 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9229 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9230 dev->features |= NETIF_F_TSO_ECN;
9232 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
9234 return ethtool_op_set_tso(dev, value);
/* ethtool nway_reset: restart autonegotiation.  Not applicable to serdes
 * PHYs; delegated to phy_start_aneg() under phylib.  For the internal
 * PHY, BMCR is read twice (first read flushes stale latched state) and
 * ANRESTART is written only if autoneg is enabled or parallel-detect is
 * active.  NOTE(review): listing is line-sampled; early returns and the
 * final return of 'r' are missing from view.
 */
9237 static int tg3_nway_reset(struct net_device *dev)
9239 struct tg3 *tp = netdev_priv(dev);
9242 if (!netif_running(dev))
9245 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
9248 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9249 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9251 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]);
9255 spin_lock_bh(&tp->lock);
9257 tg3_readphy(tp, MII_BMCR, &bmcr);
9258 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
9259 ((bmcr & BMCR_ANENABLE) ||
9260 (tp->tg3_flags2 & TG3_FLG2_PARALLEL_DETECT))) {
9261 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
9265 spin_unlock_bh(&tp->lock);
/* ethtool get_ringparam: report hardware ring limits and the currently
 * configured pending counts.  Jumbo values are only meaningful when the
 * jumbo ring is enabled; mini rings are unsupported (always 0).
 */
9271 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9273 struct tg3 *tp = netdev_priv(dev);
9275 ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
9276 ering->rx_mini_max_pending = 0;
9277 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9278 ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
9280 ering->rx_jumbo_max_pending = 0;
9282 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
9284 ering->rx_pending = tp->rx_pending;
9285 ering->rx_mini_pending = 0;
9286 if (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE)
9287 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
9289 ering->rx_jumbo_pending = 0;
9291 ering->tx_pending = tp->tx_pending;
/* ethtool set_ringparam: validate requested ring sizes against hardware
 * maxima (TX must also exceed MAX_SKB_FRAGS, x3 on TSO_BUG chips), then
 * apply under the full lock.  A running interface is halted and the
 * hardware restarted with the new sizes; MAX_RXPEND_64 chips are capped
 * at 63 RX descriptors.  NOTE(review): listing is line-sampled — the
 * netif-stop/irq_sync setup and final return are missing from view.
 */
9294 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
9296 struct tg3 *tp = netdev_priv(dev);
9297 int irq_sync = 0, err = 0;
9299 if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
9300 (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
9301 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
9302 (ering->tx_pending <= MAX_SKB_FRAGS) ||
9303 ((tp->tg3_flags2 & TG3_FLG2_TSO_BUG) &&
9304 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
9307 if (netif_running(dev)) {
9313 tg3_full_lock(tp, irq_sync);
9315 tp->rx_pending = ering->rx_pending;
9317 if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
9318 tp->rx_pending > 63)
9319 tp->rx_pending = 63;
9320 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
9321 tp->tx_pending = ering->tx_pending;
9323 if (netif_running(dev)) {
9324 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9325 err = tg3_restart_hw(tp, 1);
9327 tg3_netif_start(tp);
9330 tg3_full_unlock(tp);
9332 if (irq_sync && !err)
/* ethtool get_pauseparam: report pause autoneg and the currently active
 * RX/TX flow-control directions.
 */
9338 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9340 struct tg3 *tp = netdev_priv(dev);
9342 epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
9344 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_RX)
9345 epause->rx_pause = 1;
9347 epause->rx_pause = 0;
9349 if (tp->link_config.active_flowctrl & TG3_FLOW_CTRL_TX)
9350 epause->tx_pause = 1;
9352 epause->tx_pause = 0;
/* tg3_set_pauseparam() - ethtool set flow control.  Two paths:
 *  - phylib: translate rx/tx pause into ADVERTISED_Pause /
 *    ADVERTISED_Asym_Pause bits, restart aneg only if advertisement
 *    actually changed; forced mode just updates flowctrl and calls
 *    tg3_setup_flow_control().
 *  - native: update TG3_FLAG_PAUSE_AUTONEG and flowctrl flags under the
 *    full lock, then halt/restart the hardware if running.
 * NOTE(review): listing is line-sampled — the netif-stop block, several
 * else-branches and the final return are missing from view.
 */
9355 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
9357 struct tg3 *tp = netdev_priv(dev);
9360 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9361 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9364 if (epause->autoneg) {
9366 struct phy_device *phydev;
9368 phydev = tp->mdio_bus->phy_map[PHY_ADDR];
9370 if (epause->rx_pause) {
9371 if (epause->tx_pause)
9372 newadv = ADVERTISED_Pause;
9374 newadv = ADVERTISED_Pause |
9375 ADVERTISED_Asym_Pause;
9376 } else if (epause->tx_pause) {
9377 newadv = ADVERTISED_Asym_Pause;
9381 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
9382 u32 oldadv = phydev->advertising &
9384 ADVERTISED_Asym_Pause);
9385 if (oldadv != newadv) {
/* Only kick autoneg when the pause advertisement changed. */
9386 phydev->advertising &=
9387 ~(ADVERTISED_Pause |
9388 ADVERTISED_Asym_Pause);
9389 phydev->advertising |= newadv;
9390 err = phy_start_aneg(phydev);
9393 tp->link_config.advertising &=
9394 ~(ADVERTISED_Pause |
9395 ADVERTISED_Asym_Pause);
9396 tp->link_config.advertising |= newadv;
9399 if (epause->rx_pause)
9400 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9402 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9404 if (epause->tx_pause)
9405 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9407 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9409 if (netif_running(dev))
9410 tg3_setup_flow_control(tp, 0, 0);
9415 if (netif_running(dev)) {
9420 tg3_full_lock(tp, irq_sync);
9422 if (epause->autoneg)
9423 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
9425 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
9426 if (epause->rx_pause)
9427 tp->link_config.flowctrl |= TG3_FLOW_CTRL_RX;
9429 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_RX;
9430 if (epause->tx_pause)
9431 tp->link_config.flowctrl |= TG3_FLOW_CTRL_TX;
9433 tp->link_config.flowctrl &= ~TG3_FLOW_CTRL_TX;
9435 if (netif_running(dev)) {
9436 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9437 err = tg3_restart_hw(tp, 1);
9439 tg3_netif_start(tp);
9442 tg3_full_unlock(tp);
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
9448 static u32 tg3_get_rx_csum(struct net_device *dev)
9450 struct tg3 *tp = netdev_priv(dev);
9451 return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
/* ethtool set_rx_csum: toggle the RX checksum flag under tp->lock.
 * Chips with broken checksum hardware reject the request.
 * NOTE(review): listing is line-sampled; the 'data' test selecting
 * set vs. clear is missing from view.
 */
9454 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
9456 struct tg3 *tp = netdev_priv(dev);
9458 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9464 spin_lock_bh(&tp->lock);
9466 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
9468 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
9469 spin_unlock_bh(&tp->lock);
/* ethtool set_tx_csum: reject on broken-checksum chips; newer ASICs
 * (5755/5787/5784/5761/5785) support IPv6 TX checksumming, older ones
 * get IPv4-only via the generic helper.
 */
9474 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
9476 struct tg3 *tp = netdev_priv(dev);
9478 if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
9484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
9485 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
9486 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
9487 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9488 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9489 ethtool_op_set_tx_ipv6_csum(dev, data);
9491 ethtool_op_set_tx_csum(dev, data);
/* ethtool get_sset_count: number of self-test cases or statistics keys.
 * NOTE(review): listing is line-sampled; the switch on 'sset' and the
 * default -EOPNOTSUPP path are missing from view.
 */
9496 static int tg3_get_sset_count (struct net_device *dev, int sset)
9500 return TG3_NUM_TEST;
9502 return TG3_NUM_STATS;
/* ethtool get_strings: copy the stats or self-test key tables.
 * NOTE(review): "ðtool_stats_keys" / "ðtool_test_keys" are
 * mis-decoded text — 'ð' is the HTML entity Ð of the original
 * "&eth..." token; restore "&ethtool_stats_keys" and
 * "&ethtool_test_keys" when repairing this listing.
 */
9508 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
9510 switch (stringset) {
9512 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
9515 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
9518 WARN_ON(1); /* we need a WARN() */
/* tg3_phys_id() - ethtool LED identify.  Blinks the port LEDs for 'data'
 * seconds (0 means "effectively forever": UINT_MAX/2) by alternating the
 * LED override register every 500 ms; restores tp->led_ctrl on exit or
 * when the sleep is interrupted.  NOTE(review): listing is line-sampled;
 * the even/odd test selecting on vs. off within the loop is missing.
 */
9523 static int tg3_phys_id(struct net_device *dev, u32 data)
9525 struct tg3 *tp = netdev_priv(dev);
9528 if (!netif_running(tp->dev))
9532 data = UINT_MAX / 2;
9534 for (i = 0; i < (data * 2); i++) {
9536 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9537 LED_CTRL_1000MBPS_ON |
9538 LED_CTRL_100MBPS_ON |
9539 LED_CTRL_10MBPS_ON |
9540 LED_CTRL_TRAFFIC_OVERRIDE |
9541 LED_CTRL_TRAFFIC_BLINK |
9542 LED_CTRL_TRAFFIC_LED);
9545 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
9546 LED_CTRL_TRAFFIC_OVERRIDE);
9548 if (msleep_interruptible(500))
9551 tw32(MAC_LED_CTRL, tp->led_ctrl);
/* ethtool get_ethtool_stats: copy the freshly updated estats block into
 * the caller's u64 array.
 */
9555 static void tg3_get_ethtool_stats (struct net_device *dev,
9556 struct ethtool_stats *estats, u64 *tmp_stats)
9558 struct tg3 *tp = netdev_priv(dev);
9559 memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
9562 #define NVRAM_TEST_SIZE 0x100
9563 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
9564 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
9565 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
9566 #define NVRAM_SELFBOOT_HW_SIZE 0x20
9567 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
/* tg3_test_nvram() - NVRAM self-test.  Reads the magic word to choose the
 * region size (standard image, selfboot format-1 rev 0/2/3, or hardware
 * selfboot), reads that many little-endian words into a temp buffer, and
 * verifies integrity:
 *  - selfboot FW images: 8-bit additive checksum (rev 2 skips the MBA
 *    word at TG3_EEPROM_SB_F1R2_MBA_OFF);
 *  - hardware selfboot: per-byte odd-parity check after separating parity
 *    bits from data bytes (bytes 0 and 8 carry 7- and 6-bit parity runs);
 *  - standard image: CRC over the bootstrap area (0x00-0x0f, checksum at
 *    0x10) and the manufacturing block (0x74-0xfb, checksum at 0xfc).
 * NOTE(review): listing is line-sampled — error returns, the kfree/out
 * path and several loop bodies are missing from view.
 */
9569 static int tg3_test_nvram(struct tg3 *tp)
9573 int i, j, k, err = 0, size;
9575 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
9578 if (magic == TG3_EEPROM_MAGIC)
9579 size = NVRAM_TEST_SIZE;
9580 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
9581 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
9582 TG3_EEPROM_SB_FORMAT_1) {
9583 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
9584 case TG3_EEPROM_SB_REVISION_0:
9585 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
9587 case TG3_EEPROM_SB_REVISION_2:
9588 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
9590 case TG3_EEPROM_SB_REVISION_3:
9591 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
9598 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
9599 size = NVRAM_SELFBOOT_HW_SIZE;
9603 buf = kmalloc(size, GFP_KERNEL);
9608 for (i = 0, j = 0; i < size; i += 4, j++) {
9609 if ((err = tg3_nvram_read_le(tp, i, &buf[j])) != 0)
9615 /* Selfboot format */
9616 magic = swab32(le32_to_cpu(buf[0]));
9617 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
9618 TG3_EEPROM_MAGIC_FW) {
9619 u8 *buf8 = (u8 *) buf, csum8 = 0;
9621 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
9622 TG3_EEPROM_SB_REVISION_2) {
9623 /* For rev 2, the csum doesn't include the MBA. */
9624 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
9626 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
9629 for (i = 0; i < size; i++)
9642 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
9643 TG3_EEPROM_MAGIC_HW) {
9644 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
9645 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
9646 u8 *buf8 = (u8 *) buf;
9648 /* Separate the parity bits and the data bytes. */
9649 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
9650 if ((i == 0) || (i == 8)) {
9654 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
9655 parity[k++] = buf8[i] & msk;
9662 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
9663 parity[k++] = buf8[i] & msk;
9666 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
9667 parity[k++] = buf8[i] & msk;
9670 data[j++] = buf8[i];
/* Each data byte must have odd parity when its parity bit is counted. */
9674 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
9675 u8 hw8 = hweight8(data[i]);
9677 if ((hw8 & 0x1) && parity[i])
9679 else if (!(hw8 & 0x1) && !parity[i])
9686 /* Bootstrap checksum at offset 0x10 */
9687 csum = calc_crc((unsigned char *) buf, 0x10);
9688 if(csum != le32_to_cpu(buf[0x10/4]))
9691 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
9692 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
9693 if (csum != le32_to_cpu(buf[0xfc/4]))
9703 #define TG3_SERDES_TIMEOUT_SEC 2
9704 #define TG3_COPPER_TIMEOUT_SEC 6
/* tg3_test_link() - link self-test: poll netif_carrier_ok() once per
 * second for up to 2 s (serdes) or 6 s (copper).  Returns failure if the
 * interface is down or the carrier never comes up.
 * NOTE(review): listing is line-sampled; the success/failure return
 * statements are missing from view.
 */
9706 static int tg3_test_link(struct tg3 *tp)
9710 if (!netif_running(tp->dev))
9713 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
9714 max = TG3_SERDES_TIMEOUT_SEC;
9716 max = TG3_COPPER_TIMEOUT_SEC;
9718 for (i = 0; i < max; i++) {
9719 if (netif_carrier_ok(tp->dev))
9722 if (msleep_interruptible(1000))
9729 /* Only test the commonly used registers */
/* tg3_test_registers() - register self-test.  For each entry in the
 * static table (offset, applicability flags, read-only mask, read-write
 * mask), the original value is saved, zero and then all maskable ones are
 * written, and reads verify that read-only bits never change while
 * read-write bits take the written value.  Entries are filtered by chip
 * family via TG3_FL_5705 / NOT_5705 / NOT_5788 / NOT_5750.  The saved
 * value is restored whether the test passes or fails.
 * NOTE(review): listing is line-sampled — the struct declaration for the
 * table entries, several 'continue'/'goto out' lines and the 0/−EIO
 * returns are missing from view.
 */
9730 static int tg3_test_registers(struct tg3 *tp)
9732 int i, is_5705, is_5750;
9733 u32 offset, read_mask, write_mask, val, save_val, read_val;
9737 #define TG3_FL_5705 0x1
9738 #define TG3_FL_NOT_5705 0x2
9739 #define TG3_FL_NOT_5788 0x4
9740 #define TG3_FL_NOT_5750 0x8
9744 /* MAC Control Registers */
9745 { MAC_MODE, TG3_FL_NOT_5705,
9746 0x00000000, 0x00ef6f8c },
9747 { MAC_MODE, TG3_FL_5705,
9748 0x00000000, 0x01ef6b8c },
9749 { MAC_STATUS, TG3_FL_NOT_5705,
9750 0x03800107, 0x00000000 },
9751 { MAC_STATUS, TG3_FL_5705,
9752 0x03800100, 0x00000000 },
9753 { MAC_ADDR_0_HIGH, 0x0000,
9754 0x00000000, 0x0000ffff },
9755 { MAC_ADDR_0_LOW, 0x0000,
9756 0x00000000, 0xffffffff },
9757 { MAC_RX_MTU_SIZE, 0x0000,
9758 0x00000000, 0x0000ffff },
9759 { MAC_TX_MODE, 0x0000,
9760 0x00000000, 0x00000070 },
9761 { MAC_TX_LENGTHS, 0x0000,
9762 0x00000000, 0x00003fff },
9763 { MAC_RX_MODE, TG3_FL_NOT_5705,
9764 0x00000000, 0x000007fc },
9765 { MAC_RX_MODE, TG3_FL_5705,
9766 0x00000000, 0x000007dc },
9767 { MAC_HASH_REG_0, 0x0000,
9768 0x00000000, 0xffffffff },
9769 { MAC_HASH_REG_1, 0x0000,
9770 0x00000000, 0xffffffff },
9771 { MAC_HASH_REG_2, 0x0000,
9772 0x00000000, 0xffffffff },
9773 { MAC_HASH_REG_3, 0x0000,
9774 0x00000000, 0xffffffff },
9776 /* Receive Data and Receive BD Initiator Control Registers. */
9777 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
9778 0x00000000, 0xffffffff },
9779 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
9780 0x00000000, 0xffffffff },
9781 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
9782 0x00000000, 0x00000003 },
9783 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
9784 0x00000000, 0xffffffff },
9785 { RCVDBDI_STD_BD+0, 0x0000,
9786 0x00000000, 0xffffffff },
9787 { RCVDBDI_STD_BD+4, 0x0000,
9788 0x00000000, 0xffffffff },
9789 { RCVDBDI_STD_BD+8, 0x0000,
9790 0x00000000, 0xffff0002 },
9791 { RCVDBDI_STD_BD+0xc, 0x0000,
9792 0x00000000, 0xffffffff },
9794 /* Receive BD Initiator Control Registers. */
9795 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
9796 0x00000000, 0xffffffff },
9797 { RCVBDI_STD_THRESH, TG3_FL_5705,
9798 0x00000000, 0x000003ff },
9799 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
9800 0x00000000, 0xffffffff },
9802 /* Host Coalescing Control Registers. */
9803 { HOSTCC_MODE, TG3_FL_NOT_5705,
9804 0x00000000, 0x00000004 },
9805 { HOSTCC_MODE, TG3_FL_5705,
9806 0x00000000, 0x000000f6 },
9807 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
9808 0x00000000, 0xffffffff },
9809 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
9810 0x00000000, 0x000003ff },
9811 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
9812 0x00000000, 0xffffffff },
9813 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
9814 0x00000000, 0x000003ff },
9815 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
9816 0x00000000, 0xffffffff },
9817 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9818 0x00000000, 0x000000ff },
9819 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
9820 0x00000000, 0xffffffff },
9821 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
9822 0x00000000, 0x000000ff },
9823 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
9824 0x00000000, 0xffffffff },
9825 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
9826 0x00000000, 0xffffffff },
9827 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9828 0x00000000, 0xffffffff },
9829 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9830 0x00000000, 0x000000ff },
9831 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
9832 0x00000000, 0xffffffff },
9833 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
9834 0x00000000, 0x000000ff },
9835 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
9836 0x00000000, 0xffffffff },
9837 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
9838 0x00000000, 0xffffffff },
9839 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
9840 0x00000000, 0xffffffff },
9841 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
9842 0x00000000, 0xffffffff },
9843 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
9844 0x00000000, 0xffffffff },
9845 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
9846 0xffffffff, 0x00000000 },
9847 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
9848 0xffffffff, 0x00000000 },
9850 /* Buffer Manager Control Registers. */
9851 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
9852 0x00000000, 0x007fff80 },
9853 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
9854 0x00000000, 0x007fffff },
9855 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
9856 0x00000000, 0x0000003f },
9857 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
9858 0x00000000, 0x000001ff },
9859 { BUFMGR_MB_HIGH_WATER, 0x0000,
9860 0x00000000, 0x000001ff },
9861 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
9862 0xffffffff, 0x00000000 },
9863 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
9864 0xffffffff, 0x00000000 },
9866 /* Mailbox Registers */
9867 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
9868 0x00000000, 0x000001ff },
9869 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
9870 0x00000000, 0x000001ff },
9871 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
9872 0x00000000, 0x000007ff },
9873 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
9874 0x00000000, 0x000001ff },
9876 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
9879 is_5705 = is_5750 = 0;
9880 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
9882 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
9886 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
9887 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
9890 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
9893 if ((tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
9894 (reg_tbl[i].flags & TG3_FL_NOT_5788))
9897 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
9900 offset = (u32) reg_tbl[i].offset;
9901 read_mask = reg_tbl[i].read_mask;
9902 write_mask = reg_tbl[i].write_mask;
9904 /* Save the original register content */
9905 save_val = tr32(offset);
9907 /* Determine the read-only value. */
9908 read_val = save_val & read_mask;
9910 /* Write zero to the register, then make sure the read-only bits
9911 * are not changed and the read/write bits are all zeros.
9917 /* Test the read-only and read/write bits. */
9918 if (((val & read_mask) != read_val) || (val & write_mask))
9921 /* Write ones to all the bits defined by RdMask and WrMask, then
9922 * make sure the read-only bits are not changed and the
9923 * read/write bits are all ones.
9925 tw32(offset, read_mask | write_mask);
9929 /* Test the read-only bits. */
9930 if ((val & read_mask) != read_val)
9933 /* Test the read/write bits. */
9934 if ((val & write_mask) != write_mask)
9937 tw32(offset, save_val);
9943 if (netif_msg_hw(tp))
9944 printk(KERN_ERR PFX "Register test failed at offset %x\n",
9946 tw32(offset, save_val);
/* tg3_do_mem_test() - write each of three test patterns (all-zeros,
 * all-ones, alternating) to every word of [offset, offset+len) in NIC
 * memory and read it back; any mismatch fails the test.
 * NOTE(review): listing is line-sampled; the failure return (-EIO) and
 * success return are missing from view.
 */
9950 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
9952 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
9956 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
9957 for (j = 0; j < len; j += 4) {
9960 tg3_write_mem(tp, offset + j, test_pattern[i]);
9961 tg3_read_mem(tp, offset + j, &val);
9962 if (val != test_pattern[i])
/* tg3_test_memory() - NIC internal-memory self-test.  Selects the memory
 * map for the chip family (570x, 5705-class, 5755-class, or 5906), then
 * runs tg3_do_mem_test() over each {offset, len} region until the
 * 0xffffffff sentinel.  Returns the first non-zero error.
 */
9969 static int tg3_test_memory(struct tg3 *tp)
9971 static struct mem_entry {
9974 } mem_tbl_570x[] = {
9975 { 0x00000000, 0x00b50},
9976 { 0x00002000, 0x1c000},
9977 { 0xffffffff, 0x00000}
9978 }, mem_tbl_5705[] = {
9979 { 0x00000100, 0x0000c},
9980 { 0x00000200, 0x00008},
9981 { 0x00004000, 0x00800},
9982 { 0x00006000, 0x01000},
9983 { 0x00008000, 0x02000},
9984 { 0x00010000, 0x0e000},
9985 { 0xffffffff, 0x00000}
9986 }, mem_tbl_5755[] = {
9987 { 0x00000200, 0x00008},
9988 { 0x00004000, 0x00800},
9989 { 0x00006000, 0x00800},
9990 { 0x00008000, 0x02000},
9991 { 0x00010000, 0x0c000},
9992 { 0xffffffff, 0x00000}
9993 }, mem_tbl_5906[] = {
9994 { 0x00000200, 0x00008},
9995 { 0x00004000, 0x00400},
9996 { 0x00006000, 0x00400},
9997 { 0x00008000, 0x01000},
9998 { 0x00010000, 0x01000},
9999 { 0xffffffff, 0x00000}
10001 struct mem_entry *mem_tbl;
10005 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
10006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
10007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10008 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10009 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10010 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10011 mem_tbl = mem_tbl_5755;
10012 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10013 mem_tbl = mem_tbl_5906;
10015 mem_tbl = mem_tbl_5705;
10017 mem_tbl = mem_tbl_570x;
10019 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
10020 if ((err = tg3_do_mem_test(tp, mem_tbl[i].offset,
10021 mem_tbl[i].len)) != 0)
10028 #define TG3_MAC_LOOPBACK 0
10029 #define TG3_PHY_LOOPBACK 1
/* tg3_run_loopback() - single-packet loopback test in MAC or PHY mode.
 * Configures the MAC (and for PHY mode, the PHY in BMCR_LOOPBACK),
 * builds one frame addressed to ourselves with a counting payload,
 * queues it on the send ring, polls up to ~250 us for the TX consumer
 * and RX producer indices to advance, then validates the received
 * descriptor (ring, error bits, length) and compares the payload
 * byte-for-byte.  Returns 0 on success (err paths are via the 'out'
 * label on lines omitted here).
 * NOTE(review): listing is line-sampled — 5780 skip-return, tx_len
 * initialization, num_pkts increments and the 'out' label are missing
 * from view.
 */
10031 static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10033 u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
10035 struct sk_buff *skb, *rx_skb;
10038 int num_pkts, tx_len, rx_len, i, err;
10039 struct tg3_rx_buffer_desc *desc;
10041 if (loopback_mode == TG3_MAC_LOOPBACK) {
10042 /* HW errata - mac loopback fails in some cases on 5780.
10043 * Normal traffic and PHY loopback are not affected by
10046 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780)
10049 mac_mode = (tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK) |
10050 MAC_MODE_PORT_INT_LPBACK;
10051 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10052 mac_mode |= MAC_MODE_LINK_POLARITY;
10053 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
10054 mac_mode |= MAC_MODE_PORT_MODE_MII;
10056 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10057 tw32(MAC_MODE, mac_mode);
10058 } else if (loopback_mode == TG3_PHY_LOOPBACK) {
10061 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10064 if (!tg3_readphy(tp, MII_TG3_EPHY_TEST, &phytest)) {
/* 5906: clear a shadow-register bit before entering loopback. */
10067 tg3_writephy(tp, MII_TG3_EPHY_TEST,
10068 phytest | MII_TG3_EPHY_SHADOW_EN);
10069 if (!tg3_readphy(tp, 0x1b, &phy))
10070 tg3_writephy(tp, 0x1b, phy & ~0x20);
10071 tg3_writephy(tp, MII_TG3_EPHY_TEST, phytest);
10073 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
10075 val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;
10077 tg3_phy_toggle_automdix(tp, 0);
10079 tg3_writephy(tp, MII_BMCR, val);
10082 mac_mode = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
10083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
10084 tg3_writephy(tp, MII_TG3_EPHY_PTEST, 0x1800);
10085 mac_mode |= MAC_MODE_PORT_MODE_MII;
10087 mac_mode |= MAC_MODE_PORT_MODE_GMII;
10089 /* reset to prevent losing 1st rx packet intermittently */
10090 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES) {
10091 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10093 tw32_f(MAC_RX_MODE, tp->rx_mode);
10095 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
10096 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)
10097 mac_mode &= ~MAC_MODE_LINK_POLARITY;
10098 else if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411)
10099 mac_mode |= MAC_MODE_LINK_POLARITY;
10100 tg3_writephy(tp, MII_TG3_EXT_CTRL,
10101 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
10103 tw32(MAC_MODE, mac_mode);
10111 skb = netdev_alloc_skb(tp->dev, tx_len);
/* Frame: our own MAC as destination, zero padding, counting payload. */
10115 tx_data = skb_put(skb, tx_len);
10116 memcpy(tx_data, tp->dev->dev_addr, 6);
10117 memset(tx_data + 6, 0x0, 8);
10119 tw32(MAC_RX_MTU_SIZE, tx_len + 4);
10121 for (i = 14; i < tx_len; i++)
10122 tx_data[i] = (u8) (i & 0xff);
10124 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
10126 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10131 rx_start_idx = tp->hw_status->idx[0].rx_producer;
10135 tg3_set_txd(tp, tp->tx_prod, map, tx_len, 0, 1);
10140 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW,
10142 tr32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW);
10146 /* 250 usec to allow enough time on some 10/100 Mbps devices. */
10147 for (i = 0; i < 25; i++) {
10148 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10153 tx_idx = tp->hw_status->idx[0].tx_consumer;
10154 rx_idx = tp->hw_status->idx[0].rx_producer;
10155 if ((tx_idx == tp->tx_prod) &&
10156 (rx_idx == (rx_start_idx + num_pkts)))
10160 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
10161 dev_kfree_skb(skb);
10163 if (tx_idx != tp->tx_prod)
10166 if (rx_idx != rx_start_idx + num_pkts)
10169 desc = &tp->rx_rcb[rx_start_idx];
10170 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
10171 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
10172 if (opaque_key != RXD_OPAQUE_RING_STD)
10175 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
10176 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
10179 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4;
10180 if (rx_len != tx_len)
10183 rx_skb = tp->rx_std_buffers[desc_idx].skb;
10185 map = pci_unmap_addr(&tp->rx_std_buffers[desc_idx], mapping);
10186 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len, PCI_DMA_FROMDEVICE);
10188 for (i = 14; i < tx_len; i++) {
10189 if (*(rx_skb->data + i) != (u8) (i & 0xff))
10194 /* tg3_free_rings will unmap and free the rx_skb */
10199 #define TG3_MAC_LOOPBACK_FAILED 1
10200 #define TG3_PHY_LOOPBACK_FAILED 2
10201 #define TG3_LOOPBACK_FAILED (TG3_MAC_LOOPBACK_FAILED | \
10202 TG3_PHY_LOOPBACK_FAILED)
/* tg3_test_loopback() - run MAC loopback (and, for non-serdes non-phylib
 * PHYs, PHY loopback) after a hardware reset.  On 5784/5761/5785 the
 * CPMU hardware mutex is acquired (polled up to ~40 us) and link-based
 * power management is disabled around the MAC test, then restored and
 * the mutex released.  Returns a bitmask of TG3_*_LOOPBACK_FAILED.
 * NOTE(review): listing is line-sampled — 'err = 0' initialization,
 * udelay in the mutex poll loop and the final return are missing from
 * view.
 */
10204 static int tg3_test_loopback(struct tg3 *tp)
10209 if (!netif_running(tp->dev))
10210 return TG3_LOOPBACK_FAILED;
10212 err = tg3_reset_hw(tp, 1);
10214 return TG3_LOOPBACK_FAILED;
10216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10217 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10218 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10222 tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);
10224 /* Wait for up to 40 microseconds to acquire lock. */
10225 for (i = 0; i < 4; i++) {
10226 status = tr32(TG3_CPMU_MUTEX_GNT);
10227 if (status == CPMU_MUTEX_GNT_DRIVER)
10232 if (status != CPMU_MUTEX_GNT_DRIVER)
10233 return TG3_LOOPBACK_FAILED;
10235 /* Turn off link-based power management. */
10236 cpmuctrl = tr32(TG3_CPMU_CTRL);
10237 tw32(TG3_CPMU_CTRL,
10238 cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
10239 CPMU_CTRL_LINK_AWARE_MODE));
10242 if (tg3_run_loopback(tp, TG3_MAC_LOOPBACK))
10243 err |= TG3_MAC_LOOPBACK_FAILED;
10245 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10246 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
10247 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
10248 tw32(TG3_CPMU_CTRL, cpmuctrl);
10250 /* Release the mutex */
10251 tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
10254 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
10255 !(tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)) {
10256 if (tg3_run_loopback(tp, TG3_PHY_LOOPBACK))
10257 err |= TG3_PHY_LOOPBACK_FAILED;
/* tg3_self_test() - ethtool self-test entry point.  Always runs the
 * NVRAM and link tests (data[0]/data[1]); when ETH_TEST_FL_OFFLINE is
 * requested it additionally halts the device, runs register, memory,
 * loopback and interrupt tests (data[2..5]), then restarts the hardware
 * and restores the previous power state.  Each failure sets
 * ETH_TEST_FL_FAILED and the corresponding data[] slot.
 * NOTE(review): listing is line-sampled — the data[] assignments for
 * tests 0-3 and 5, and the serdes re-setup after halting, are missing
 * from view.
 */
10263 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
10266 struct tg3 *tp = netdev_priv(dev);
10268 if (tp->link_config.phy_is_low_power)
10269 tg3_set_power_state(tp, PCI_D0);
10271 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
10273 if (tg3_test_nvram(tp) != 0) {
10274 etest->flags |= ETH_TEST_FL_FAILED;
10277 if (tg3_test_link(tp) != 0) {
10278 etest->flags |= ETH_TEST_FL_FAILED;
10281 if (etest->flags & ETH_TEST_FL_OFFLINE) {
10282 int err, err2 = 0, irq_sync = 0;
10284 if (netif_running(dev)) {
10286 tg3_netif_stop(tp);
10290 tg3_full_lock(tp, irq_sync);
/* Quiesce the chip: halt, take NVRAM lock, stop the on-chip CPUs. */
10292 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
10293 err = tg3_nvram_lock(tp);
10294 tg3_halt_cpu(tp, RX_CPU_BASE);
10295 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
10296 tg3_halt_cpu(tp, TX_CPU_BASE);
10298 tg3_nvram_unlock(tp);
10300 if (tp->tg3_flags2 & TG3_FLG2_MII_SERDES)
10303 if (tg3_test_registers(tp) != 0) {
10304 etest->flags |= ETH_TEST_FL_FAILED;
10307 if (tg3_test_memory(tp) != 0) {
10308 etest->flags |= ETH_TEST_FL_FAILED;
10311 if ((data[4] = tg3_test_loopback(tp)) != 0)
10312 etest->flags |= ETH_TEST_FL_FAILED;
10314 tg3_full_unlock(tp);
10316 if (tg3_test_interrupt(tp) != 0) {
10317 etest->flags |= ETH_TEST_FL_FAILED;
10321 tg3_full_lock(tp, 0);
10323 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10324 if (netif_running(dev)) {
10325 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
10326 err2 = tg3_restart_hw(tp, 1);
10328 tg3_netif_start(tp);
10331 tg3_full_unlock(tp);
10333 if (irq_sync && !err2)
10336 if (tp->link_config.phy_is_low_power)
10337 tg3_set_power_state(tp, PCI_D3hot);
/* net_device ioctl handler for MII register access (SIOCGMIIPHY,
 * SIOCGMIIREG, SIOCSMIIREG).  Delegates to phylib when it owns the PHY;
 * otherwise reads/writes the PHY under tp->lock.
 * NOTE(review): intermediate source lines are elided in this excerpt.
 */
10341 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10343 struct mii_ioctl_data *data = if_mii(ifr);
10344 struct tg3 *tp = netdev_priv(dev);
/* phylib-managed PHYs are handled by the generic phy_mii_ioctl(). */
10347 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10348 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10350 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd);
10355 data->phy_id = PHY_ADDR;
10358 case SIOCGMIIREG: {
10361 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10362 break; /* We have no PHY */
10364 if (tp->link_config.phy_is_low_power)
/* Serialize MDIO access with the rest of the driver. */
10367 spin_lock_bh(&tp->lock);
10368 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
10369 spin_unlock_bh(&tp->lock);
10371 data->val_out = mii_regval;
10377 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
10378 break; /* We have no PHY */
/* Writing PHY registers is a privileged operation. */
10380 if (!capable(CAP_NET_ADMIN))
10383 if (tp->link_config.phy_is_low_power)
10386 spin_lock_bh(&tp->lock);
10387 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
10388 spin_unlock_bh(&tp->lock);
10396 return -EOPNOTSUPP;
10399 #if TG3_VLAN_TAG_USED
/* Register a VLAN group with the device: quiesce the NIC, record the
 * group (assignment elided in this excerpt), refresh the RX VLAN-tag
 * stripping mode, and resume.
 */
10400 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
10402 struct tg3 *tp = netdev_priv(dev);
10404 if (netif_running(dev))
10405 tg3_netif_stop(tp);
10407 tg3_full_lock(tp, 0);
10411 /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
10412 __tg3_set_rx_mode(dev);
10414 if (netif_running(dev))
10415 tg3_netif_start(tp);
10417 tg3_full_unlock(tp);
/* ethtool .get_coalesce: copy the cached coalescing parameters to @ec. */
10421 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10423 struct tg3 *tp = netdev_priv(dev);
10425 memcpy(ec, &tp->coal, sizeof(*ec));
/* ethtool .set_coalesce: validate the requested interrupt-coalescing
 * parameters against chip limits, store the supported subset in
 * tp->coal, and program the hardware if the interface is up.
 * NOTE(review): intermediate source lines are elided in this excerpt.
 */
10429 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
10431 struct tg3 *tp = netdev_priv(dev);
10432 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
10433 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
/* 5705+ chips lack the irq-tick and stats-coalescing knobs, so their
 * limits stay 0 and any nonzero request below is rejected. */
10435 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
10436 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
10437 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
10438 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
10439 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
10442 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
10443 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
10444 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
10445 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
10446 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
10447 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
10448 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
10449 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
10450 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
10451 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
10454 /* No rx interrupts will be generated if both are zero */
10455 if ((ec->rx_coalesce_usecs == 0) &&
10456 (ec->rx_max_coalesced_frames == 0))
10459 /* No tx interrupts will be generated if both are zero */
10460 if ((ec->tx_coalesce_usecs == 0) &&
10461 (ec->tx_max_coalesced_frames == 0))
10464 /* Only copy relevant parameters, ignore all others. */
10465 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
10466 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
10467 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
10468 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
10469 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
10470 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
10471 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
10472 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
10473 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
/* Apply to hardware immediately only when the interface is running. */
10475 if (netif_running(dev)) {
10476 tg3_full_lock(tp, 0);
10477 __tg3_set_coalesce(tp, &tp->coal);
10478 tg3_full_unlock(tp);
/* ethtool operations table wiring the tg3_* handlers (and a couple of
 * generic ethtool_op_* helpers) into the ethtool framework. */
10483 static const struct ethtool_ops tg3_ethtool_ops = {
10484 .get_settings = tg3_get_settings,
10485 .set_settings = tg3_set_settings,
10486 .get_drvinfo = tg3_get_drvinfo,
10487 .get_regs_len = tg3_get_regs_len,
10488 .get_regs = tg3_get_regs,
10489 .get_wol = tg3_get_wol,
10490 .set_wol = tg3_set_wol,
10491 .get_msglevel = tg3_get_msglevel,
10492 .set_msglevel = tg3_set_msglevel,
10493 .nway_reset = tg3_nway_reset,
10494 .get_link = ethtool_op_get_link,
10495 .get_eeprom_len = tg3_get_eeprom_len,
10496 .get_eeprom = tg3_get_eeprom,
10497 .set_eeprom = tg3_set_eeprom,
10498 .get_ringparam = tg3_get_ringparam,
10499 .set_ringparam = tg3_set_ringparam,
10500 .get_pauseparam = tg3_get_pauseparam,
10501 .set_pauseparam = tg3_set_pauseparam,
10502 .get_rx_csum = tg3_get_rx_csum,
10503 .set_rx_csum = tg3_set_rx_csum,
10504 .set_tx_csum = tg3_set_tx_csum,
10505 .set_sg = ethtool_op_set_sg,
10506 .set_tso = tg3_set_tso,
10507 .self_test = tg3_self_test,
10508 .get_strings = tg3_get_strings,
10509 .phys_id = tg3_phys_id,
10510 .get_ethtool_stats = tg3_get_ethtool_stats,
10511 .get_coalesce = tg3_get_coalesce,
10512 .set_coalesce = tg3_set_coalesce,
10513 .get_sset_count = tg3_get_sset_count,
/* Probe the size of a serial EEPROM by reading at increasing offsets
 * until the magic signature reappears (address wrap-around), then set
 * tp->nvram_size accordingly.  Leaves the default EEPROM_CHIP_SIZE if
 * no recognized magic is present at offset 0.
 */
10516 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
10518 u32 cursize, val, magic;
10520 tp->nvram_size = EEPROM_CHIP_SIZE;
10522 if (tg3_nvram_read_swab(tp, 0, &magic) != 0)
/* Bail out unless the part carries one of the known magic signatures. */
10525 if ((magic != TG3_EEPROM_MAGIC) &&
10526 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
10527 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
10531 * Size the chip by reading offsets at increasing powers of two.
10532 * When we encounter our validation signature, we know the addressing
10533 * has wrapped around, and thus have our chip size.
10537 while (cursize < tp->nvram_size) {
10538 if (tg3_nvram_read_swab(tp, cursize, &val) != 0)
10547 tp->nvram_size = cursize;
/* Determine tp->nvram_size: selfboot images fall back to the EEPROM
 * sizing probe; otherwise the size (in KB) is read from offset 0xf0,
 * defaulting to 512 KB when that word is unavailable/zero.
 * NOTE(review): some branch lines are elided in this excerpt.
 */
10550 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
10554 if (tg3_nvram_read_swab(tp, 0, &val) != 0)
10557 /* Selfboot format */
10558 if (val != TG3_EEPROM_MAGIC) {
10559 tg3_get_eeprom_size(tp);
10563 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
/* Upper 16 bits of the word at 0xf0 hold the size in kilobytes. */
10565 tp->nvram_size = (val >> 16) * 1024;
10569 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
/* Decode NVRAM_CFG1 for pre-5752 chips: set flash/buffered flags and,
 * for 5750/5780-class chips, map the vendor field to JEDEC id and page
 * size.  Non-flash or unrecognized parts default to a buffered Atmel
 * AT45DB0X1B layout.
 * NOTE(review): break statements are elided in this excerpt.
 */
10572 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
10576 nvcfg1 = tr32(NVRAM_CFG1);
10577 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
10578 tp->tg3_flags2 |= TG3_FLG2_FLASH;
/* No flash interface: disable compatibility bypass mode. */
10581 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10582 tw32(NVRAM_CFG1, nvcfg1);
10585 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) ||
10586 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
10587 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
10588 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
10589 tp->nvram_jedecnum = JEDEC_ATMEL;
10590 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10591 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10593 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
10594 tp->nvram_jedecnum = JEDEC_ATMEL;
10595 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
10597 case FLASH_VENDOR_ATMEL_EEPROM:
10598 tp->nvram_jedecnum = JEDEC_ATMEL;
10599 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10600 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10602 case FLASH_VENDOR_ST:
10603 tp->nvram_jedecnum = JEDEC_ST;
10604 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
10605 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10607 case FLASH_VENDOR_SAIFUN:
10608 tp->nvram_jedecnum = JEDEC_SAIFUN;
10609 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
10611 case FLASH_VENDOR_SST_SMALL:
10612 case FLASH_VENDOR_SST_LARGE:
10613 tp->nvram_jedecnum = JEDEC_SST;
10614 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
/* Default for other ASIC revs / unknown vendors. */
10619 tp->nvram_jedecnum = JEDEC_ATMEL;
10620 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
10621 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10625 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10629 nvcfg1 = tr32(NVRAM_CFG1);
10631 /* NVRAM protection for TPM */
10632 if (nvcfg1 & (1 << 27))
10633 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10635 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10636 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
10637 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
10638 tp->nvram_jedecnum = JEDEC_ATMEL;
10639 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10641 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10642 tp->nvram_jedecnum = JEDEC_ATMEL;
10643 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10644 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10646 case FLASH_5752VENDOR_ST_M45PE10:
10647 case FLASH_5752VENDOR_ST_M45PE20:
10648 case FLASH_5752VENDOR_ST_M45PE40:
10649 tp->nvram_jedecnum = JEDEC_ST;
10650 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10651 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10655 if (tp->tg3_flags2 & TG3_FLG2_FLASH) {
10656 switch (nvcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
10657 case FLASH_5752PAGE_SIZE_256:
10658 tp->nvram_pagesize = 256;
10660 case FLASH_5752PAGE_SIZE_512:
10661 tp->nvram_pagesize = 512;
10663 case FLASH_5752PAGE_SIZE_1K:
10664 tp->nvram_pagesize = 1024;
10666 case FLASH_5752PAGE_SIZE_2K:
10667 tp->nvram_pagesize = 2048;
10669 case FLASH_5752PAGE_SIZE_4K:
10670 tp->nvram_pagesize = 4096;
10672 case FLASH_5752PAGE_SIZE_264:
10673 tp->nvram_pagesize = 264;
10678 /* For eeprom, set pagesize to maximum eeprom size */
10679 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10681 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10682 tw32(NVRAM_CFG1, nvcfg1);
/* 5755 variant of NVRAM detection: map the vendor field to JEDEC id,
 * page size, and total size — choosing a reduced usable size when the
 * TPM protection bit (27) is set.
 * NOTE(review): break statements and the protect assignment are elided
 * in this excerpt.
 */
10686 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10688 u32 nvcfg1, protect = 0;
10690 nvcfg1 = tr32(NVRAM_CFG1);
10692 /* NVRAM protection for TPM */
10693 if (nvcfg1 & (1 << 27)) {
10694 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10698 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10700 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10701 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10702 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10703 case FLASH_5755VENDOR_ATMEL_FLASH_5:
10704 tp->nvram_jedecnum = JEDEC_ATMEL;
10705 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10706 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10707 tp->nvram_pagesize = 264;
/* Size depends on the exact Atmel part; protected parts reserve space. */
10708 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
10709 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
10710 tp->nvram_size = (protect ? 0x3e200 :
10711 TG3_NVRAM_SIZE_512KB);
10712 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
10713 tp->nvram_size = (protect ? 0x1f200 :
10714 TG3_NVRAM_SIZE_256KB);
10716 tp->nvram_size = (protect ? 0x1f200 :
10717 TG3_NVRAM_SIZE_128KB);
10719 case FLASH_5752VENDOR_ST_M45PE10:
10720 case FLASH_5752VENDOR_ST_M45PE20:
10721 case FLASH_5752VENDOR_ST_M45PE40:
10722 tp->nvram_jedecnum = JEDEC_ST;
10723 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10724 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10725 tp->nvram_pagesize = 256;
10726 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
10727 tp->nvram_size = (protect ?
10728 TG3_NVRAM_SIZE_64KB :
10729 TG3_NVRAM_SIZE_128KB);
10730 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
10731 tp->nvram_size = (protect ?
10732 TG3_NVRAM_SIZE_64KB :
10733 TG3_NVRAM_SIZE_256KB);
10735 tp->nvram_size = (protect ?
10736 TG3_NVRAM_SIZE_128KB :
10737 TG3_NVRAM_SIZE_512KB);
/* 5787 (also used for 5784/5785) NVRAM detection: map the vendor field
 * to JEDEC id, buffered/flash flags, and page size.
 * NOTE(review): break statements are elided in this excerpt.
 */
10742 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
10746 nvcfg1 = tr32(NVRAM_CFG1);
10748 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10749 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
10750 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
10751 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
10752 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
10753 tp->nvram_jedecnum = JEDEC_ATMEL;
10754 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10755 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
/* EEPROM parts: make sure compatibility bypass is off. */
10757 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
10758 tw32(NVRAM_CFG1, nvcfg1);
10760 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
10761 case FLASH_5755VENDOR_ATMEL_FLASH_1:
10762 case FLASH_5755VENDOR_ATMEL_FLASH_2:
10763 case FLASH_5755VENDOR_ATMEL_FLASH_3:
10764 tp->nvram_jedecnum = JEDEC_ATMEL;
10765 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10766 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10767 tp->nvram_pagesize = 264;
10769 case FLASH_5752VENDOR_ST_M45PE10:
10770 case FLASH_5752VENDOR_ST_M45PE20:
10771 case FLASH_5752VENDOR_ST_M45PE40:
10772 tp->nvram_jedecnum = JEDEC_ST;
10773 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10774 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10775 tp->nvram_pagesize = 256;
10780 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
10782 u32 nvcfg1, protect = 0;
10784 nvcfg1 = tr32(NVRAM_CFG1);
10786 /* NVRAM protection for TPM */
10787 if (nvcfg1 & (1 << 27)) {
10788 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM;
10792 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
10794 case FLASH_5761VENDOR_ATMEL_ADB021D:
10795 case FLASH_5761VENDOR_ATMEL_ADB041D:
10796 case FLASH_5761VENDOR_ATMEL_ADB081D:
10797 case FLASH_5761VENDOR_ATMEL_ADB161D:
10798 case FLASH_5761VENDOR_ATMEL_MDB021D:
10799 case FLASH_5761VENDOR_ATMEL_MDB041D:
10800 case FLASH_5761VENDOR_ATMEL_MDB081D:
10801 case FLASH_5761VENDOR_ATMEL_MDB161D:
10802 tp->nvram_jedecnum = JEDEC_ATMEL;
10803 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10804 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10805 tp->tg3_flags3 |= TG3_FLG3_NO_NVRAM_ADDR_TRANS;
10806 tp->nvram_pagesize = 256;
10808 case FLASH_5761VENDOR_ST_A_M45PE20:
10809 case FLASH_5761VENDOR_ST_A_M45PE40:
10810 case FLASH_5761VENDOR_ST_A_M45PE80:
10811 case FLASH_5761VENDOR_ST_A_M45PE16:
10812 case FLASH_5761VENDOR_ST_M_M45PE20:
10813 case FLASH_5761VENDOR_ST_M_M45PE40:
10814 case FLASH_5761VENDOR_ST_M_M45PE80:
10815 case FLASH_5761VENDOR_ST_M_M45PE16:
10816 tp->nvram_jedecnum = JEDEC_ST;
10817 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10818 tp->tg3_flags2 |= TG3_FLG2_FLASH;
10819 tp->nvram_pagesize = 256;
10824 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
10827 case FLASH_5761VENDOR_ATMEL_ADB161D:
10828 case FLASH_5761VENDOR_ATMEL_MDB161D:
10829 case FLASH_5761VENDOR_ST_A_M45PE16:
10830 case FLASH_5761VENDOR_ST_M_M45PE16:
10831 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
10833 case FLASH_5761VENDOR_ATMEL_ADB081D:
10834 case FLASH_5761VENDOR_ATMEL_MDB081D:
10835 case FLASH_5761VENDOR_ST_A_M45PE80:
10836 case FLASH_5761VENDOR_ST_M_M45PE80:
10837 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
10839 case FLASH_5761VENDOR_ATMEL_ADB041D:
10840 case FLASH_5761VENDOR_ATMEL_MDB041D:
10841 case FLASH_5761VENDOR_ST_A_M45PE40:
10842 case FLASH_5761VENDOR_ST_M_M45PE40:
10843 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
10845 case FLASH_5761VENDOR_ATMEL_ADB021D:
10846 case FLASH_5761VENDOR_ATMEL_MDB021D:
10847 case FLASH_5761VENDOR_ST_A_M45PE20:
10848 case FLASH_5761VENDOR_ST_M_M45PE20:
10849 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
/* 5906 NVRAM: always a buffered Atmel serial EEPROM — no probing. */
10855 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
10857 tp->nvram_jedecnum = JEDEC_ATMEL;
10858 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
10859 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
10862 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
10863 static void __devinit tg3_nvram_init(struct tg3 *tp)
10865 tw32_f(GRC_EEPROM_ADDR,
10866 (EEPROM_ADDR_FSM_RESET |
10867 (EEPROM_DEFAULT_CLOCK_PERIOD <<
10868 EEPROM_ADDR_CLKPERD_SHIFT)));
10872 /* Enable seeprom accesses. */
10873 tw32_f(GRC_LOCAL_CTRL,
10874 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
10877 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
10878 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
10879 tp->tg3_flags |= TG3_FLAG_NVRAM;
10881 if (tg3_nvram_lock(tp)) {
10882 printk(KERN_WARNING PFX "%s: Cannot get nvarm lock, "
10883 "tg3_nvram_init failed.\n", tp->dev->name);
10886 tg3_enable_nvram_access(tp);
10888 tp->nvram_size = 0;
10890 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
10891 tg3_get_5752_nvram_info(tp);
10892 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
10893 tg3_get_5755_nvram_info(tp);
10894 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
10895 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
10896 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
10897 tg3_get_5787_nvram_info(tp);
10898 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
10899 tg3_get_5761_nvram_info(tp);
10900 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
10901 tg3_get_5906_nvram_info(tp);
10903 tg3_get_nvram_info(tp);
10905 if (tp->nvram_size == 0)
10906 tg3_get_nvram_size(tp);
10908 tg3_disable_nvram_access(tp);
10909 tg3_nvram_unlock(tp);
10912 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
10914 tg3_get_eeprom_size(tp);
/* Read one 32-bit word at @offset via the GRC serial-EEPROM engine
 * (used on chips without the NVRAM interface).  Polls up to 1000
 * iterations for EEPROM_ADDR_COMPLETE; returns 0 on success.
 * NOTE(review): some return/delay lines are elided in this excerpt.
 */
10918 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
10919 u32 offset, u32 *val)
10924 if (offset > EEPROM_ADDR_ADDR_MASK ||
/* Preserve clock-period bits; clear address/devid/start fields. */
10928 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
10929 EEPROM_ADDR_DEVID_MASK |
10931 tw32(GRC_EEPROM_ADDR,
10933 (0 << EEPROM_ADDR_DEVID_SHIFT) |
10934 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
10935 EEPROM_ADDR_ADDR_MASK) |
10936 EEPROM_ADDR_READ | EEPROM_ADDR_START);
10938 for (i = 0; i < 1000; i++) {
10939 tmp = tr32(GRC_EEPROM_ADDR);
10941 if (tmp & EEPROM_ADDR_COMPLETE)
10945 if (!(tmp & EEPROM_ADDR_COMPLETE))
10948 *val = tr32(GRC_EEPROM_DATA);
10952 #define NVRAM_CMD_TIMEOUT 10000
/* Issue @nvram_cmd to the NVRAM engine and poll up to NVRAM_CMD_TIMEOUT
 * iterations for NVRAM_CMD_DONE; timing out is an error. */
10954 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
10958 tw32(NVRAM_CMD, nvram_cmd);
10959 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
10961 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
10966 if (i == NVRAM_CMD_TIMEOUT) {
/* Translate a linear NVRAM offset to the physical address expected by
 * buffered Atmel AT45DB0X1B-style flash, which addresses data as
 * (page number << page-position bits) + offset-within-page.  Other
 * parts (or parts flagged NO_NVRAM_ADDR_TRANS) use @addr unchanged. */
10972 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
10974 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10975 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10976 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10977 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10978 (tp->nvram_jedecnum == JEDEC_ATMEL))
10980 addr = ((addr / tp->nvram_pagesize) <<
10981 ATMEL_AT45DB0X1B_PAGE_POS) +
10982 (addr % tp->nvram_pagesize);
/* Inverse of tg3_nvram_phys_addr(): convert a buffered-Atmel physical
 * address back to a linear NVRAM offset.  Same gating conditions. */
10987 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
10989 if ((tp->tg3_flags & TG3_FLAG_NVRAM) &&
10990 (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
10991 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
10992 !(tp->tg3_flags3 & TG3_FLG3_NO_NVRAM_ADDR_TRANS) &&
10993 (tp->nvram_jedecnum == JEDEC_ATMEL))
10995 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
10996 tp->nvram_pagesize) +
10997 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
/* Read one 32-bit word at @offset from NVRAM.  Falls back to the GRC
 * EEPROM engine on chips without NVRAM; otherwise translates the
 * address, acquires the NVRAM lock, and issues a single-word read.
 * The result is byte-swapped before being stored in *val. */
11002 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
11006 if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
11007 return tg3_nvram_read_using_eeprom(tp, offset, val);
11009 offset = tg3_nvram_phys_addr(tp, offset);
11011 if (offset > NVRAM_ADDR_MSK)
11014 ret = tg3_nvram_lock(tp);
11018 tg3_enable_nvram_access(tp);
11020 tw32(NVRAM_ADDR, offset);
11021 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
11022 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
/* Hardware returns big-endian-ordered data; swab to host layout. */
11025 *val = swab32(tr32(NVRAM_RDDATA));
11027 tg3_disable_nvram_access(tp);
11029 tg3_nvram_unlock(tp);
/* Read one NVRAM word and store it in little-endian (__le32) form. */
11034 static int tg3_nvram_read_le(struct tg3 *tp, u32 offset, __le32 *val)
11037 int res = tg3_nvram_read(tp, offset, &v);
11039 *val = cpu_to_le32(v);
/* Read one NVRAM word and byte-swap it (undoing the swab done inside
 * tg3_nvram_read, yielding the raw on-chip byte order). */
11043 static int tg3_nvram_read_swab(struct tg3 *tp, u32 offset, u32 *val)
11048 err = tg3_nvram_read(tp, offset, &tmp);
11049 *val = swab32(tmp);
/* Write @len bytes (dword-aligned) from @buf via the GRC serial-EEPROM
 * engine, one 32-bit word per iteration, polling for completion after
 * each word.  Used on chips without the NVRAM interface.
 * NOTE(review): some address/delay lines are elided in this excerpt.
 */
11053 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
11054 u32 offset, u32 len, u8 *buf)
11059 for (i = 0; i < len; i += 4) {
11065 memcpy(&data, buf + i, 4);
11067 tw32(GRC_EEPROM_DATA, le32_to_cpu(data));
11069 val = tr32(GRC_EEPROM_ADDR);
/* Write COMPLETE back first to clear the (write-1-to-clear) flag. */
11070 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
11072 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
11074 tw32(GRC_EEPROM_ADDR, val |
11075 (0 << EEPROM_ADDR_DEVID_SHIFT) |
11076 (addr & EEPROM_ADDR_ADDR_MASK) |
11077 EEPROM_ADDR_START |
11078 EEPROM_ADDR_WRITE);
11080 for (j = 0; j < 1000; j++) {
11081 val = tr32(GRC_EEPROM_ADDR);
11083 if (val & EEPROM_ADDR_COMPLETE)
11087 if (!(val & EEPROM_ADDR_COMPLETE)) {
11096 /* offset and length are dword aligned */
/* Write to unbuffered flash (no on-chip page buffer): for each affected
 * flash page, read-modify-write — read the whole page into a kmalloc'd
 * bounce buffer, merge the caller's data, erase the page (WREN + ERASE
 * commands), then program it back one word at a time.  Offset and length
 * are dword-aligned by contract.
 * NOTE(review): intermediate source lines are elided in this excerpt.
 */
11097 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
11101 u32 pagesize = tp->nvram_pagesize;
11102 u32 pagemask = pagesize - 1;
11106 tmp = kmalloc(pagesize, GFP_KERNEL);
11112 u32 phy_addr, page_off, size;
11114 phy_addr = offset & ~pagemask;
/* Snapshot the current page contents before modifying them. */
11116 for (j = 0; j < pagesize; j += 4) {
11117 if ((ret = tg3_nvram_read_le(tp, phy_addr + j,
11118 (__le32 *) (tmp + j))))
11124 page_off = offset & pagemask;
11131 memcpy(tmp + page_off, buf, size);
11133 offset = offset + (pagesize - page_off);
11135 tg3_enable_nvram_access(tp);
11138 * Before we can erase the flash page, we need
11139 * to issue a special "write enable" command.
11141 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11143 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11146 /* Erase the target page */
11147 tw32(NVRAM_ADDR, phy_addr);
11149 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
11150 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
11152 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
11155 /* Issue another write enable to start the write. */
11156 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11158 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
/* Program the merged page back, marking the first and last words. */
11161 for (j = 0; j < pagesize; j += 4) {
11164 data = *((__be32 *) (tmp + j));
11165 /* swab32(le32_to_cpu(data)), actually */
11166 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11168 tw32(NVRAM_ADDR, phy_addr + j);
11170 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
11174 nvram_cmd |= NVRAM_CMD_FIRST;
11175 else if (j == (pagesize - 4))
11176 nvram_cmd |= NVRAM_CMD_LAST;
11178 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
/* Re-assert write-protect latch when done (best effort). */
11185 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
11186 tg3_nvram_exec_cmd(tp, nvram_cmd);
11193 /* offset and length are dword aligned */
/* Write to buffered flash or EEPROM: one 32-bit word per command, with
 * FIRST/LAST flags marking page boundaries and the overall transfer.
 * Offset and length are dword-aligned by contract.
 * NOTE(review): intermediate source lines are elided in this excerpt.
 */
11194 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
11199 for (i = 0; i < len; i += 4, offset += 4) {
11200 u32 page_off, phy_addr, nvram_cmd;
11203 memcpy(&data, buf + i, 4);
11204 tw32(NVRAM_WRDATA, be32_to_cpu(data));
11206 page_off = offset % tp->nvram_pagesize;
11208 phy_addr = tg3_nvram_phys_addr(tp, offset);
11210 tw32(NVRAM_ADDR, phy_addr);
11212 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
/* FIRST at page start or transfer start; LAST at page end or
 * transfer end. */
11214 if ((page_off == 0) || (i == 0))
11215 nvram_cmd |= NVRAM_CMD_FIRST;
11216 if (page_off == (tp->nvram_pagesize - 4))
11217 nvram_cmd |= NVRAM_CMD_LAST;
11219 if (i == (len - 4))
11220 nvram_cmd |= NVRAM_CMD_LAST;
/* Older chips with ST flash need an explicit WREN before each
 * page/transfer start. */
11222 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752) &&
11223 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5755) &&
11224 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5787) &&
11225 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784) &&
11226 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) &&
11227 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) &&
11228 (tp->nvram_jedecnum == JEDEC_ST) &&
11229 (nvram_cmd & NVRAM_CMD_FIRST)) {
11231 if ((ret = tg3_nvram_exec_cmd(tp,
11232 NVRAM_CMD_WREN | NVRAM_CMD_GO |
11237 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11238 /* We always do complete word writes to eeprom. */
11239 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
11242 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
11248 /* offset and length are dword aligned */
/* Top-level NVRAM write entry point: temporarily drop the hardware
 * write-protect GPIO if set, pick the EEPROM / buffered / unbuffered
 * write path, and restore protection afterwards.  Offset and length
 * are dword-aligned by contract.
 * NOTE(review): intermediate source lines are elided in this excerpt.
 */
11249 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
/* De-assert the write-protect GPIO while writing. */
11253 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11254 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
11255 ~GRC_LCLCTRL_GPIO_OUTPUT1);
11259 if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
11260 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
11265 ret = tg3_nvram_lock(tp);
11269 tg3_enable_nvram_access(tp);
11270 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11271 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM))
11272 tw32(NVRAM_WRITE1, 0x406);
/* Allow the NVRAM engine to accept write commands. */
11274 grc_mode = tr32(GRC_MODE);
11275 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
11277 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
11278 !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
11280 ret = tg3_nvram_write_block_buffered(tp, offset, len,
11284 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
11288 grc_mode = tr32(GRC_MODE);
11289 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
11291 tg3_disable_nvram_access(tp);
11292 tg3_nvram_unlock(tp);
/* Re-assert the write-protect GPIO. */
11295 if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
11296 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
/* Maps a PCI subsystem vendor/device id pair to the board's PHY id
 * (0 when the board has no copper PHY / is fiber). */
11303 struct subsys_tbl_ent {
11304 u16 subsys_vendor, subsys_devid;
/* Known board subsystem ids and their PHY ids; entries with PHY id 0
 * are fiber boards.  Consulted by lookup_by_subsys(). */
11308 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
11309 /* Broadcom boards. */
11310 { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
11311 { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
11312 { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
11313 { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 }, /* BCM95700A9 */
11314 { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
11315 { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
11316 { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 }, /* BCM95701A7 */
11317 { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
11318 { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
11319 { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
11320 { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
11323 { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
11324 { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
11325 { PCI_VENDOR_ID_3COM, 0x1004, 0 }, /* 3C996SX */
11326 { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
11327 { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
11330 { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
11331 { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
11332 { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
11333 { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
11335 /* Compaq boards. */
11336 { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
11337 { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
11338 { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 }, /* CHANGELING */
11339 { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
11340 { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
11343 { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
/* Linear search of subsys_id_to_phy_id[] for this device's PCI
 * subsystem vendor/device pair; NULL return path elided in this
 * excerpt but implied when no entry matches. */
11346 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
11350 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
11351 if ((subsys_id_to_phy_id[i].subsys_vendor ==
11352 tp->pdev->subsystem_vendor) &&
11353 (subsys_id_to_phy_id[i].subsys_devid ==
11354 tp->pdev->subsystem_device))
11355 return &subsys_id_to_phy_id[i];
11360 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
11365 /* On some early chips the SRAM cannot be accessed in D3hot state,
11366 * so need make sure we're in D0.
11368 pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
11369 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11370 pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
11373 /* Make sure register accesses (indirect or otherwise)
11374 * will function correctly.
11376 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11377 tp->misc_host_ctrl);
11379 /* The memory arbiter has to be enabled in order for SRAM accesses
11380 * to succeed. Normally on powerup the tg3 chip firmware will make
11381 * sure it is enabled, but other entities such as system netboot
11382 * code might disable it.
11384 val = tr32(MEMARB_MODE);
11385 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
11387 tp->phy_id = PHY_ID_INVALID;
11388 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11390 /* Assume an onboard device and WOL capable by default. */
11391 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT | TG3_FLAG_WOL_CAP;
11393 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
11394 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
11395 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11396 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11398 val = tr32(VCPU_CFGSHDW);
11399 if (val & VCPU_CFGSHDW_ASPM_DBNC)
11400 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11401 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
11402 (val & VCPU_CFGSHDW_WOL_MAGPKT) &&
11403 device_may_wakeup(&tp->pdev->dev))
11404 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11408 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
11409 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
11410 u32 nic_cfg, led_cfg;
11411 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
11412 int eeprom_phy_serdes = 0;
11414 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
11415 tp->nic_sram_data_cfg = nic_cfg;
11417 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
11418 ver >>= NIC_SRAM_DATA_VER_SHIFT;
11419 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
11420 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
11421 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
11422 (ver > 0) && (ver < 0x100))
11423 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
11425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
11426 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
11428 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
11429 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
11430 eeprom_phy_serdes = 1;
11432 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
11433 if (nic_phy_id != 0) {
11434 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
11435 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
11437 eeprom_phy_id = (id1 >> 16) << 10;
11438 eeprom_phy_id |= (id2 & 0xfc00) << 16;
11439 eeprom_phy_id |= (id2 & 0x03ff) << 0;
11443 tp->phy_id = eeprom_phy_id;
11444 if (eeprom_phy_serdes) {
11445 if (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)
11446 tp->tg3_flags2 |= TG3_FLG2_MII_SERDES;
11448 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11451 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11452 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
11453 SHASTA_EXT_LED_MODE_MASK);
11455 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
11459 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
11460 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11463 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
11464 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11467 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
11468 tp->led_ctrl = LED_CTRL_MODE_MAC;
11470 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
11471 * read on some older 5700/5701 bootcode.
11473 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
11475 GET_ASIC_REV(tp->pci_chip_rev_id) ==
11477 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11481 case SHASTA_EXT_LED_SHARED:
11482 tp->led_ctrl = LED_CTRL_MODE_SHARED;
11483 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
11484 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
11485 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11486 LED_CTRL_MODE_PHY_2);
11489 case SHASTA_EXT_LED_MAC:
11490 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
11493 case SHASTA_EXT_LED_COMBO:
11494 tp->led_ctrl = LED_CTRL_MODE_COMBO;
11495 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
11496 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
11497 LED_CTRL_MODE_PHY_2);
11502 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
11503 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
11504 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
11505 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
11507 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
11508 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
11510 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
11511 tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
11512 if ((tp->pdev->subsystem_vendor ==
11513 PCI_VENDOR_ID_ARIMA) &&
11514 (tp->pdev->subsystem_device == 0x205a ||
11515 tp->pdev->subsystem_device == 0x2063))
11516 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11518 tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
11519 tp->tg3_flags2 |= TG3_FLG2_IS_NIC;
11522 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
11523 tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
11524 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS)
11525 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
11528 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
11529 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
11530 tp->tg3_flags3 |= TG3_FLG3_ENABLE_APE;
11532 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES &&
11533 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
11534 tp->tg3_flags &= ~TG3_FLAG_WOL_CAP;
11536 if ((tp->tg3_flags & TG3_FLAG_WOL_CAP) &&
11537 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE))
11538 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
11540 if (cfg2 & (1 << 17))
11541 tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
11543 /* serdes signal pre-emphasis in register 0x590 set by */
11544 /* bootcode if bit 18 is set */
11545 if (cfg2 & (1 << 18))
11546 tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
11548 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
11551 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
11552 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
11553 tp->tg3_flags |= TG3_FLAG_ASPM_WORKAROUND;
11556 if (cfg4 & NIC_SRAM_RGMII_STD_IBND_DISABLE)
11557 tp->tg3_flags3 |= TG3_FLG3_RGMII_STD_IBND_DISABLE;
11558 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
11559 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_RX_EN;
11560 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
11561 tp->tg3_flags3 |= TG3_FLG3_RGMII_EXT_IBND_TX_EN;
11564 device_init_wakeup(&tp->pdev->dev, tp->tg3_flags & TG3_FLAG_WOL_CAP);
11565 device_set_wakeup_enable(&tp->pdev->dev,
11566 tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
/* Issue one command to the OTP (one-time-programmable) controller and
 * poll for completion.  Returns 0 on success, -EBUSY if the controller
 * never reports OTP_STATUS_CMD_DONE within the poll window.
 *
 * NOTE(review): this excerpt is missing source lines (the kernel line
 * numbers embedded below jump, e.g. 11575 -> 11577); the function's
 * opening brace, the declarations of `val`/`i`, the per-iteration delay
 * and the loop's `break` are not visible here -- confirm against the
 * full file before editing.
 */
11569 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
/* Arm the command with the START bit, then latch the command itself. */
11574 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
11575 tw32(OTP_CTRL, cmd);
11577 /* Wait for up to 1 ms for command to execute. */
11578 for (i = 0; i < 100; i++) {
11579 val = tr32(OTP_STATUS);
11580 if (val & OTP_STATUS_CMD_DONE)
/* `val` holds the last status read; done bit decides success. */
11585 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
11588 /* Read the gphy configuration from the OTP region of the chip. The gphy
11589 * configuration is a 32-bit value that straddles the alignment boundary.
11590 * We do two 32-bit reads and then shift and merge the results.
/* Returns the merged 32-bit gphy config, built from the low half of the
 * first OTP word and the high half of the second.
 *
 * NOTE(review): lines are missing from this excerpt (kernel line numbers
 * jump, e.g. 11598 -> 11601); the opening brace and the bodies of the
 * early-exit paths after each failed tg3_issue_otp_command() call (which
 * presumably return 0) are not visible -- verify against the full file.
 */
11592 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
11594 u32 bhalf_otp, thalf_otp;
/* Route OTP accesses through the GRC register window. */
11596 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
11598 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
/* First read: top half of the straddling config value. */
11601 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
11603 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11606 thalf_otp = tr32(OTP_READ_DATA);
/* Second read: bottom half from the following OTP word. */
11608 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
11610 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
11613 bhalf_otp = tr32(OTP_READ_DATA);
/* Merge: low 16 bits of the first word become the high half of the
 * result; high 16 bits of the second word become the low half. */
11615 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
/* Probe and initialize the PHY attached to this NIC.
 *
 * Order of attempts: (1) hand off entirely to phylib when
 * TG3_FLG3_USE_PHYLIB is set; (2) read MII_PHYSID1/2 from the hardware
 * unless ASF/APE firmware owns the PHY (then the ID is left invalid);
 * (3) fall back to the ID already loaded from EEPROM in
 * tg3_get_eeprom_hw_cfg(), and failing that a hard-coded subsystem
 * table.  For copper PHYs without ASF/APE it then makes sure full
 * 10/100(/1000) autonegotiation is advertised and restarts autoneg.
 *
 * NOTE(review): this excerpt has dropped lines throughout (embedded
 * kernel line numbers jump); closing braces, an `err` declaration, the
 * `skip_phy_reset:` label and several `else` keywords are not visible.
 * Treat the flow description above as reconstructed -- confirm against
 * the full file.
 */
11618 static int __devinit tg3_phy_probe(struct tg3 *tp)
11620 u32 hw_phy_id_1, hw_phy_id_2;
11621 u32 hw_phy_id, hw_phy_id_masked;
11624 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
11625 return tg3_phy_init(tp);
11627 /* Reading the PHY ID register can conflict with ASF
11628 * firmware access to the PHY hardware.
11631 if ((tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11632 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)) {
11633 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
11635 /* Now read the physical PHY_ID from the chip and verify
11636 * that it is sane. If it doesn't look good, we fall back
11637 * to either the hard-coded table based PHY_ID and failing
11638 * that the value found in the eeprom area.
11640 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
11641 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
/* Pack OUI + model + revision into the driver's PHY-ID layout
 * (same packing as the EEPROM path in tg3_get_eeprom_hw_cfg). */
11643 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
11644 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
11645 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
11647 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
11650 if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
11651 tp->phy_id = hw_phy_id;
/* BCM8002 is a SERDES part; everything else here is copper. */
11652 if (hw_phy_id_masked == PHY_ID_BCM8002)
11653 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
11655 tp->tg3_flags2 &= ~TG3_FLG2_PHY_SERDES;
11657 if (tp->phy_id != PHY_ID_INVALID) {
11658 /* Do nothing, phy ID already set up in
11659 * tg3_get_eeprom_hw_cfg().
11662 struct subsys_tbl_ent *p;
11664 /* No eeprom signature? Try the hardcoded
11665 * subsys device table.
11667 p = lookup_by_subsys(tp);
11671 tp->phy_id = p->phy_id;
11673 tp->phy_id == PHY_ID_BCM8002)
11674 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
/* Copper PHY, not firmware-managed: verify link / advertisement. */
11678 if (!(tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) &&
11679 !(tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) &&
11680 !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
11681 u32 bmsr, adv_reg, tg3_ctrl, mask;
/* BMSR link-status is latched-low; read twice so the second
 * read reflects the current state. If link is already up,
 * leave the PHY configuration alone. */
11683 tg3_readphy(tp, MII_BMSR, &bmsr);
11684 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
11685 (bmsr & BMSR_LSTATUS))
11686 goto skip_phy_reset;
11688 err = tg3_phy_reset(tp);
/* Advertise all 10/100 modes plus pause. */
11692 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
11693 ADVERTISE_100HALF | ADVERTISE_100FULL |
11694 ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
11696 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
11697 tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
11698 MII_TG3_CTRL_ADV_1000_FULL);
/* Early 5701 steppings must be forced to be autoneg master. */
11699 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
11700 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
11701 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
11702 MII_TG3_CTRL_ENABLE_AS_MASTER);
11705 mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
11706 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
11707 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
/* Only rewrite advertisement and restart autoneg when the PHY
 * is not already advertising everything we want. */
11708 if (!tg3_copper_is_advertising_all(tp, mask)) {
11709 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11711 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11712 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
11714 tg3_writephy(tp, MII_BMCR,
11715 BMCR_ANENABLE | BMCR_ANRESTART);
11717 tg3_phy_set_wirespeed(tp);
11719 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
11720 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
11721 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
/* BCM5401 needs its DSP coefficients loaded after reset; retried
 * once below on failure. */
11725 if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
11726 err = tg3_init_5401phy_dsp(tp);
11731 if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
11732 err = tg3_init_5401phy_dsp(tp);
/* SERDES links: restrict advertised modes to 1000 Mb/s. */
11735 if (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES)
11736 tp->link_config.advertising =
11737 (ADVERTISED_1000baseT_Half |
11738 ADVERTISED_1000baseT_Full |
11739 ADVERTISED_Autoneg |
11741 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
11742 tp->link_config.advertising &=
11743 ~(ADVERTISED_1000baseT_Half |
11744 ADVERTISED_1000baseT_Full);
/* Read the board part number string into tp->board_part_number.
 *
 * The raw VPD area is loaded either from NVRAM (when the EEPROM magic
 * matches) or via the PCI VPD capability registers, then parsed for a
 * "PN" keyword inside a read-only (0x90) VPD resource block.  Falls
 * back to "BCM95906" on 5906 parts, else "none".
 *
 * NOTE(review): lines are missing from this excerpt (kernel line
 * numbers jump); declarations of `magic`/`i`/`tmp`/`j`/`tmp16`/`v`,
 * the `out_not_found:` label, several closing braces and the `break`s
 * are not visible -- confirm against the full file.
 */
11749 static void __devinit tg3_read_partno(struct tg3 *tp)
11751 unsigned char vpd_data[256];
11755 if (tg3_nvram_read_swab(tp, 0x0, &magic))
11756 goto out_not_found;
/* Path 1: pull 256 bytes of VPD straight out of NVRAM at 0x100,
 * unpacking each 32-bit word little-endian into vpd_data[]. */
11758 if (magic == TG3_EEPROM_MAGIC) {
11759 for (i = 0; i < 256; i += 4) {
11762 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
11763 goto out_not_found;
11765 vpd_data[i + 0] = ((tmp >> 0) & 0xff);
11766 vpd_data[i + 1] = ((tmp >> 8) & 0xff);
11767 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
11768 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
/* Path 2: read VPD through the PCI capability -- write the address,
 * poll bit 15 of PCI_VPD_ADDR for completion, then read the data. */
11773 vpd_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_VPD);
11774 for (i = 0; i < 256; i += 4) {
11779 pci_write_config_word(tp->pdev, vpd_cap + PCI_VPD_ADDR,
11781 while (j++ < 100) {
11782 pci_read_config_word(tp->pdev, vpd_cap +
11783 PCI_VPD_ADDR, &tmp16);
11784 if (tmp16 & 0x8000)
/* Completion flag never set: give up on this board's VPD. */
11788 if (!(tmp16 & 0x8000))
11789 goto out_not_found;
11791 pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
11793 v = cpu_to_le32(tmp);
11794 memcpy(&vpd_data[i], &v, 4);
11798 /* Now parse and find the part number. */
/* Walk VPD resource descriptors: 0x82 = identifier string,
 * 0x91 = read-only data block (both carry a 16-bit LE length). */
11799 for (i = 0; i < 254; ) {
11800 unsigned char val = vpd_data[i];
11801 unsigned int block_end;
11803 if (val == 0x82 || val == 0x91) {
11806 (vpd_data[i + 2] << 8)));
11811 goto out_not_found;
11813 block_end = (i + 3 +
11815 (vpd_data[i + 2] << 8)));
/* Bounds-check the block against our 256-byte buffer. */
11818 if (block_end > 256)
11819 goto out_not_found;
/* Scan keyword entries inside the block for "PN" (part number). */
11821 while (i < (block_end - 2)) {
11822 if (vpd_data[i + 0] == 'P' &&
11823 vpd_data[i + 1] == 'N') {
11824 int partno_len = vpd_data[i + 2];
/* Part number must fit the 24-byte field and the buffer. */
11827 if (partno_len > 24 || (partno_len + i) > 256)
11828 goto out_not_found;
11830 memcpy(tp->board_part_number,
11831 &vpd_data[i], partno_len);
/* Skip this keyword entry: 3-byte header + payload. */
11836 i += 3 + vpd_data[i + 2];
11839 /* Part number not found. */
11840 goto out_not_found;
/* Fallbacks when no VPD part number could be read. */
11844 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11845 strcpy(tp->board_part_number, "BCM95906");
11847 strcpy(tp->board_part_number, "none");
/* Sanity-check a firmware image header in NVRAM at `offset`: the first
 * word must look like a valid start instruction (top bits 0x0c000000)
 * and the second word must also be readable.
 *
 * NOTE(review): this excerpt is truncated (kernel lines 11857+ are
 * missing) -- the remainder of the condition and the return
 * statement(s) are not visible here; presumably returns nonzero when
 * valid. Confirm against the full file.
 */
11850 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
11854 if (tg3_nvram_read_swab(tp, offset, &val) ||
11855 (val & 0xfc000000) != 0x0c000000 ||
11856 tg3_nvram_read_swab(tp, offset + 4, &val) ||
/* Read the bootcode firmware version string out of NVRAM into
 * tp->fw_ver, and -- when ASF is enabled without APE -- append the ASF
 * initialization firmware version after ", ".
 *
 * NOTE(review): lines are missing from this excerpt (kernel line
 * numbers jump); declarations of `ver_offset`/`i`/`v`/`bcnt`, several
 * early `return`s and closing braces are not visible -- confirm
 * against the full file.
 */
11863 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
11865 u32 val, offset, start;
/* Only EEPROMs carrying the tg3 magic have a version to read. */
11869 if (tg3_nvram_read_swab(tp, 0, &val))
11872 if (val != TG3_EEPROM_MAGIC)
/* Word 0xc = bootcode image offset, word 0x4 = load/start address. */
11875 if (tg3_nvram_read_swab(tp, 0xc, &offset) ||
11876 tg3_nvram_read_swab(tp, 0x4, &start))
11879 offset = tg3_nvram_logical_addr(tp, offset);
11881 if (!tg3_fw_img_is_valid(tp, offset) ||
11882 tg3_nvram_read_swab(tp, offset + 8, &ver_offset))
/* Copy up to 16 bytes of version text into tp->fw_ver. */
11885 offset = offset + ver_offset - start;
11886 for (i = 0; i < 16; i += 4) {
11888 if (tg3_nvram_read_le(tp, offset + i, &v))
11891 memcpy(tp->fw_ver + i, &v, 4);
/* ASF version only applies when ASF is on and APE is not. */
11894 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF) ||
11895 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))
/* Scan the NVRAM directory for the ASF-init firmware entry. */
11898 for (offset = TG3_NVM_DIR_START;
11899 offset < TG3_NVM_DIR_END;
11900 offset += TG3_NVM_DIRENT_SIZE) {
11901 if (tg3_nvram_read_swab(tp, offset, &val))
11904 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
11908 if (offset == TG3_NVM_DIR_END)
/* Pre-5705 parts use a fixed load address; later parts store it in
 * the directory entry just before this one. */
11911 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
11912 start = 0x08000000;
11913 else if (tg3_nvram_read_swab(tp, offset - 4, &start))
11916 if (tg3_nvram_read_swab(tp, offset + 4, &offset) ||
11917 !tg3_fw_img_is_valid(tp, offset) ||
11918 tg3_nvram_read_swab(tp, offset + 8, &val))
11921 offset += val - start;
/* Append ", " then up to 16 bytes of ASF version, clamped so the
 * string never overruns TG3_VER_SIZE. */
11923 bcnt = strlen(tp->fw_ver);
11925 tp->fw_ver[bcnt++] = ',';
11926 tp->fw_ver[bcnt++] = ' ';
11928 for (i = 0; i < 4; i++) {
11930 if (tg3_nvram_read_le(tp, offset, &v))
11933 offset += sizeof(v);
11935 if (bcnt > TG3_VER_SIZE - sizeof(v)) {
11936 memcpy(&tp->fw_ver[bcnt], &v, TG3_VER_SIZE - bcnt);
11940 memcpy(&tp->fw_ver[bcnt], &v, sizeof(v));
/* Guarantee NUL termination regardless of how much was copied. */
11944 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
11947 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
11949 static int __devinit tg3_get_invariants(struct tg3 *tp)
11951 static struct pci_device_id write_reorder_chipsets[] = {
11952 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11953 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
11954 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
11955 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
11956 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
11957 PCI_DEVICE_ID_VIA_8385_0) },
11961 u32 cacheline_sz_reg;
11962 u32 pci_state_reg, grc_misc_cfg;
11967 /* Force memory write invalidate off. If we leave it on,
11968 * then on 5700_BX chips we have to enable a workaround.
11969 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
11970 * to match the cacheline size. The Broadcom driver have this
11971 * workaround but turns MWI off all the times so never uses
11972 * it. This seems to suggest that the workaround is insufficient.
11974 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11975 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
11976 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11978 /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
11979 * has the register indirect write enable bit set before
11980 * we try to access any of the MMIO registers. It is also
11981 * critical that the PCI-X hw workaround situation is decided
11982 * before that as well.
11984 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
11987 tp->pci_chip_rev_id = (misc_ctrl_reg >>
11988 MISC_HOST_CTRL_CHIPREV_SHIFT);
11989 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
11990 u32 prod_id_asic_rev;
11992 pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
11993 &prod_id_asic_rev);
11994 tp->pci_chip_rev_id = prod_id_asic_rev & PROD_ID_ASIC_REV_MASK;
11997 /* Wrong chip ID in 5752 A0. This code can be removed later
11998 * as A0 is not in production.
12000 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
12001 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
12003 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
12004 * we need to disable memory and use config. cycles
12005 * only to access all registers. The 5702/03 chips
12006 * can mistakenly decode the special cycles from the
12007 * ICH chipsets as memory write cycles, causing corruption
12008 * of register and memory space. Only certain ICH bridges
12009 * will drive special cycles with non-zero data during the
12010 * address phase which can fall within the 5703's address
12011 * range. This is not an ICH bug as the PCI spec allows
12012 * non-zero address during special cycles. However, only
12013 * these ICH bridges are known to drive non-zero addresses
12014 * during special cycles.
12016 * Since special cycles do not cross PCI bridges, we only
12017 * enable this workaround if the 5703 is on the secondary
12018 * bus of these ICH bridges.
12020 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
12021 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
12022 static struct tg3_dev_id {
12026 } ich_chipsets[] = {
12027 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
12029 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
12031 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
12033 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
12037 struct tg3_dev_id *pci_id = &ich_chipsets[0];
12038 struct pci_dev *bridge = NULL;
12040 while (pci_id->vendor != 0) {
12041 bridge = pci_get_device(pci_id->vendor, pci_id->device,
12047 if (pci_id->rev != PCI_ANY_ID) {
12048 if (bridge->revision > pci_id->rev)
12051 if (bridge->subordinate &&
12052 (bridge->subordinate->number ==
12053 tp->pdev->bus->number)) {
12055 tp->tg3_flags2 |= TG3_FLG2_ICH_WORKAROUND;
12056 pci_dev_put(bridge);
12062 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
12063 static struct tg3_dev_id {
12066 } bridge_chipsets[] = {
12067 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
12068 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
12071 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
12072 struct pci_dev *bridge = NULL;
12074 while (pci_id->vendor != 0) {
12075 bridge = pci_get_device(pci_id->vendor,
12082 if (bridge->subordinate &&
12083 (bridge->subordinate->number <=
12084 tp->pdev->bus->number) &&
12085 (bridge->subordinate->subordinate >=
12086 tp->pdev->bus->number)) {
12087 tp->tg3_flags3 |= TG3_FLG3_5701_DMA_BUG;
12088 pci_dev_put(bridge);
12094 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
12095 * DMA addresses > 40-bit. This bridge may have other additional
12096 * 57xx devices behind it in some 4-port NIC designs for example.
12097 * Any tg3 device found behind the bridge will also need the 40-bit
12100 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
12101 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
12102 tp->tg3_flags2 |= TG3_FLG2_5780_CLASS;
12103 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12104 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
12107 struct pci_dev *bridge = NULL;
12110 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
12111 PCI_DEVICE_ID_SERVERWORKS_EPB,
12113 if (bridge && bridge->subordinate &&
12114 (bridge->subordinate->number <=
12115 tp->pdev->bus->number) &&
12116 (bridge->subordinate->subordinate >=
12117 tp->pdev->bus->number)) {
12118 tp->tg3_flags |= TG3_FLAG_40BIT_DMA_BUG;
12119 pci_dev_put(bridge);
12125 /* Initialize misc host control in PCI block. */
12126 tp->misc_host_ctrl |= (misc_ctrl_reg &
12127 MISC_HOST_CTRL_CHIPREV);
12128 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12129 tp->misc_host_ctrl);
12131 pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12132 &cacheline_sz_reg);
12134 tp->pci_cacheline_sz = (cacheline_sz_reg >> 0) & 0xff;
12135 tp->pci_lat_timer = (cacheline_sz_reg >> 8) & 0xff;
12136 tp->pci_hdr_type = (cacheline_sz_reg >> 16) & 0xff;
12137 tp->pci_bist = (cacheline_sz_reg >> 24) & 0xff;
12139 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12140 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714))
12141 tp->pdev_peer = tg3_find_peer(tp);
12143 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12144 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12145 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12147 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12149 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
12151 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12152 tp->tg3_flags2 |= TG3_FLG2_5750_PLUS;
12154 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
12155 (tp->tg3_flags2 & TG3_FLG2_5750_PLUS))
12156 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
12158 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12159 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12160 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
12161 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
12162 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
12163 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
12164 tp->pdev_peer == tp->pdev))
12165 tp->tg3_flags &= ~TG3_FLAG_SUPPORT_MSI;
12167 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12168 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12169 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12170 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12171 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12172 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12173 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12174 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12176 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12177 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12179 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12180 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12184 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12185 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12186 tp->tg3_flags2 |= TG3_FLG2_JUMBO_CAPABLE;
12188 pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
12189 if (pcie_cap != 0) {
12190 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12192 pcie_set_readrq(tp->pdev, 4096);
12194 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12197 pci_read_config_word(tp->pdev,
12198 pcie_cap + PCI_EXP_LNKCTL,
12200 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN)
12201 tp->tg3_flags2 &= ~TG3_FLG2_HW_TSO_2;
12203 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12204 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
12206 /* If we have an AMD 762 or VIA K8T800 chipset, write
12207 * reordering to the mailbox registers done by the host
12208 * controller can cause major troubles. We read back from
12209 * every mailbox register write to force the writes to be
12210 * posted to the chip in order.
12212 if (pci_dev_present(write_reorder_chipsets) &&
12213 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
12214 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
12216 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12217 tp->pci_lat_timer < 64) {
12218 tp->pci_lat_timer = 64;
12220 cacheline_sz_reg = ((tp->pci_cacheline_sz & 0xff) << 0);
12221 cacheline_sz_reg |= ((tp->pci_lat_timer & 0xff) << 8);
12222 cacheline_sz_reg |= ((tp->pci_hdr_type & 0xff) << 16);
12223 cacheline_sz_reg |= ((tp->pci_bist & 0xff) << 24);
12225 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
12229 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12230 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12231 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
12232 if (!tp->pcix_cap) {
12233 printk(KERN_ERR PFX "Cannot find PCI-X "
12234 "capability, aborting.\n");
12239 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12242 if (tp->pcix_cap && (pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
12243 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
12245 /* If this is a 5700 BX chipset, and we are in PCI-X
12246 * mode, enable register write workaround.
12248 * The workaround is to use indirect register accesses
12249 * for all chip writes not to mailbox registers.
12251 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
12254 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12256 /* The chip can have it's power management PCI config
12257 * space registers clobbered due to this bug.
12258 * So explicitly force the chip into D0 here.
12260 pci_read_config_dword(tp->pdev,
12261 tp->pm_cap + PCI_PM_CTRL,
12263 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
12264 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
12265 pci_write_config_dword(tp->pdev,
12266 tp->pm_cap + PCI_PM_CTRL,
12269 /* Also, force SERR#/PERR# in PCI command. */
12270 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12271 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
12272 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12276 /* 5700 BX chips need to have their TX producer index mailboxes
12277 * written twice to workaround a bug.
12279 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
12280 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
12282 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
12283 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
12284 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
12285 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
12287 /* Chip-specific fixup from Broadcom driver */
12288 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
12289 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
12290 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
12291 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
12294 /* Default fast path register access methods */
12295 tp->read32 = tg3_read32;
12296 tp->write32 = tg3_write32;
12297 tp->read32_mbox = tg3_read32;
12298 tp->write32_mbox = tg3_write32;
12299 tp->write32_tx_mbox = tg3_write32;
12300 tp->write32_rx_mbox = tg3_write32;
12302 /* Various workaround register access methods */
12303 if (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG)
12304 tp->write32 = tg3_write_indirect_reg32;
12305 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
12306 ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
12307 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
12309 * Back to back register writes can cause problems on these
12310 * chips, the workaround is to read back all reg writes
12311 * except those to mailbox regs.
12313 * See tg3_write_indirect_reg32().
12315 tp->write32 = tg3_write_flush_reg32;
12319 if ((tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG) ||
12320 (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)) {
12321 tp->write32_tx_mbox = tg3_write32_tx_mbox;
12322 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
12323 tp->write32_rx_mbox = tg3_write_flush_reg32;
12326 if (tp->tg3_flags2 & TG3_FLG2_ICH_WORKAROUND) {
12327 tp->read32 = tg3_read_indirect_reg32;
12328 tp->write32 = tg3_write_indirect_reg32;
12329 tp->read32_mbox = tg3_read_indirect_mbox;
12330 tp->write32_mbox = tg3_write_indirect_mbox;
12331 tp->write32_tx_mbox = tg3_write_indirect_mbox;
12332 tp->write32_rx_mbox = tg3_write_indirect_mbox;
12337 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
12338 pci_cmd &= ~PCI_COMMAND_MEMORY;
12339 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
12341 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12342 tp->read32_mbox = tg3_read32_mbox_5906;
12343 tp->write32_mbox = tg3_write32_mbox_5906;
12344 tp->write32_tx_mbox = tg3_write32_mbox_5906;
12345 tp->write32_rx_mbox = tg3_write32_mbox_5906;
12348 if (tp->write32 == tg3_write_indirect_reg32 ||
12349 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12350 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12351 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
12352 tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
12354 /* Get eeprom hw config before calling tg3_set_power_state().
12355 * In particular, the TG3_FLG2_IS_NIC flag must be
12356 * determined before calling tg3_set_power_state() so that
12357 * we know whether or not to switch out of Vaux power.
12358 * When the flag is set, it means that GPIO1 is used for eeprom
12359 * write protect and also implies that it is a LOM where GPIOs
12360 * are not used to switch power.
12362 tg3_get_eeprom_hw_cfg(tp);
12364 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
12365 /* Allow reads and writes to the
12366 * APE register and memory space.
12368 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
12369 PCISTATE_ALLOW_APE_SHMEM_WR;
12370 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
12374 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12375 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12376 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12377 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
12379 /* Set up tp->grc_local_ctrl before calling tg3_set_power_state().
12380 * GPIO1 driven high will bring 5700's external PHY out of reset.
12381 * It is also used as eeprom write protect on LOMs.
12383 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
12384 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12385 (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT))
12386 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
12387 GRC_LCLCTRL_GPIO_OUTPUT1);
12388 /* Unused GPIO3 must be driven as output on 5752 because there
12389 * are no pull-up resistors on unused GPIO pins.
12391 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12392 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
12394 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12395 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12397 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761) {
12398 /* Turn off the debug UART. */
12399 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
12400 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
12401 /* Keep VMain power. */
12402 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
12403 GRC_LCLCTRL_GPIO_OUTPUT0;
12406 /* Force the chip into D0. */
12407 err = tg3_set_power_state(tp, PCI_D0);
12409 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
12410 pci_name(tp->pdev));
12414 /* 5700 B0 chips do not support checksumming correctly due
12415 * to hardware bugs.
12417 if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
12418 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
12420 /* Derive initial jumbo mode from MTU assigned in
12421 * ether_setup() via the alloc_etherdev() call
12423 if (tp->dev->mtu > ETH_DATA_LEN &&
12424 !(tp->tg3_flags2 & TG3_FLG2_5780_CLASS))
12425 tp->tg3_flags |= TG3_FLAG_JUMBO_RING_ENABLE;
12427 /* Determine WakeOnLan speed to use. */
12428 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
12429 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
12430 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
12431 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
12432 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
12434 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
12437 /* A few boards don't want Ethernet@WireSpeed phy feature */
12438 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
12439 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
12440 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
12441 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
12442 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) ||
12443 (tp->tg3_flags2 & TG3_FLG2_ANY_SERDES))
12444 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
12446 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
12447 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
12448 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
12449 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
12450 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
12452 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
12453 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12454 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12455 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12456 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
12457 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
12458 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
12459 tp->tg3_flags2 |= TG3_FLG2_PHY_JITTER_BUG;
12460 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
12461 tp->tg3_flags2 |= TG3_FLG2_PHY_ADJUST_TRIM;
12462 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906 &&
12463 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
12464 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
12467 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12468 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
12469 tp->phy_otp = tg3_read_otp_phycfg(tp);
12470 if (tp->phy_otp == 0)
12471 tp->phy_otp = TG3_OTP_DEFAULT;
12474 if (tp->tg3_flags & TG3_FLAG_CPMU_PRESENT)
12475 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
12477 tp->mi_mode = MAC_MI_MODE_BASE;
12479 tp->coalesce_mode = 0;
12480 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
12481 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
12482 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
12484 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12485 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12487 err = tg3_mdio_init(tp);
12491 /* Initialize data/descriptor byte/word swapping. */
12492 val = tr32(GRC_MODE);
12493 val &= GRC_MODE_HOST_STACKUP;
12494 tw32(GRC_MODE, val | tp->grc_mode);
12496 tg3_switch_clocks(tp);
12498 /* Clear this out for sanity. */
12499 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
12501 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
12503 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
12504 (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
12505 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
12507 if (chiprevid == CHIPREV_ID_5701_A0 ||
12508 chiprevid == CHIPREV_ID_5701_B0 ||
12509 chiprevid == CHIPREV_ID_5701_B2 ||
12510 chiprevid == CHIPREV_ID_5701_B5) {
12511 void __iomem *sram_base;
12513 /* Write some dummy words into the SRAM status block
12514 * area, see if it reads back correctly. If the return
12515 * value is bad, force enable the PCIX workaround.
12517 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
12519 writel(0x00000000, sram_base);
12520 writel(0x00000000, sram_base + 4);
12521 writel(0xffffffff, sram_base + 4);
12522 if (readl(sram_base) != 0x00000000)
12523 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
12528 tg3_nvram_init(tp);
12530 grc_misc_cfg = tr32(GRC_MISC_CFG);
12531 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
12533 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12534 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
12535 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
12536 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
12538 if (!(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
12539 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700))
12540 tp->tg3_flags |= TG3_FLAG_TAGGED_STATUS;
12541 if (tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) {
12542 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
12543 HOSTCC_MODE_CLRTICK_TXBD);
12545 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
12546 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
12547 tp->misc_host_ctrl);
12550 /* Preserve the APE MAC_MODE bits */
12551 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
12552 tp->mac_mode = tr32(MAC_MODE) |
12553 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
12555 tp->mac_mode = TG3_DEF_MAC_MODE;
12557 /* these are limited to 10/100 only */
12558 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
12559 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
12560 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
12561 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12562 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
12563 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
12564 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
12565 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
12566 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
12567 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
12568 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
12569 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12570 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
12572 err = tg3_phy_probe(tp);
12574 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
12575 pci_name(tp->pdev), err);
12576 /* ... but do not return immediately ... */
12580 tg3_read_partno(tp);
12581 tg3_read_fw_ver(tp);
12583 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
12584 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12586 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12587 tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
12589 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
12592 /* 5700 {AX,BX} chips have a broken status block link
12593 * change bit implementation, so we must use the
12594 * status register in those cases.
12596 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
12597 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
12599 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
12601 /* The led_ctrl is set during tg3_phy_probe, here we might
12602 * have to force the link status polling mechanism based
12603 * upon subsystem IDs.
12605 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
12606 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12607 !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
12608 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
12609 TG3_FLAG_USE_LINKCHG_REG);
12612 /* For all SERDES we poll the MAC status register. */
12613 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
12614 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
12616 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
12618 /* All chips before 5787 can get confused if TX buffers
12619 * straddle the 4GB address boundary in some cases.
12621 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
12622 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12623 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12624 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
12625 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
12626 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12627 tp->dev->hard_start_xmit = tg3_start_xmit;
12629 tp->dev->hard_start_xmit = tg3_start_xmit_dma_bug;
12632 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
12633 (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
12636 tp->rx_std_max_post = TG3_RX_RING_SIZE;
12638 /* Increment the rx prod index on the rx std ring by at most
12639 * 8 for these chips to workaround hw errata.
12641 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12642 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
12643 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12644 tp->rx_std_max_post = 8;
12646 if (tp->tg3_flags & TG3_FLAG_ASPM_WORKAROUND)
12647 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
12648 PCIE_PWR_MGMT_L1_THRESH_MSK;
12653 #ifdef CONFIG_SPARC
/* Fetch the MAC address from the OpenFirmware device tree (SPARC only).
 * Looks up the "local-mac-address" property on this PCI device's OF node
 * and, when a 6-byte value is present, copies it into both dev->dev_addr
 * and dev->perm_addr.
 */
12654 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
12656 struct net_device *dev = tp->dev;
12657 struct pci_dev *pdev = tp->pdev;
12658 struct device_node *dp = pci_device_to_OF_node(pdev);
12659 const unsigned char *addr;
/* Require the property to be exactly 6 bytes (an Ethernet address)
 * before trusting it. */
12662 addr = of_get_property(dp, "local-mac-address", &len);
12663 if (addr && len == 6) {
12664 memcpy(dev->dev_addr, addr, 6);
12665 memcpy(dev->perm_addr, dev->dev_addr, 6);
/* Last-resort MAC address source on SPARC: copy the machine-wide IDPROM
 * Ethernet address into both dev->dev_addr and dev->perm_addr.  Used
 * when neither NVRAM, SRAM mailbox, MAC registers nor the OF device
 * node yielded a valid address.
 */
12671 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
12673 struct net_device *dev = tp->dev;
12675 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
12676 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
/* Determine the device's MAC address, trying sources in decreasing order
 * of reliability:
 *   1. OpenFirmware device tree (SPARC only),
 *   2. the SRAM MAC-address mailbox written by bootcode,
 *   3. NVRAM at a chip-dependent offset,
 *   4. the MAC_ADDR_0_{HIGH,LOW} hardware registers,
 *   5. the SPARC IDPROM as a final fallback.
 * The accepted address is mirrored into dev->perm_addr.
 */
12681 static int __devinit tg3_get_device_address(struct tg3 *tp)
12683 struct net_device *dev = tp->dev;
12684 u32 hi, lo, mac_offset;
12687 #ifdef CONFIG_SPARC
12688 if (!tg3_get_macaddr_sparc(tp))
/* Dual-port 5704 / 5780-class parts: the second function (selected by
 * DUAL_MAC_CTRL_ID) stores its MAC at a different NVRAM offset, and the
 * NVRAM interface is reset under the NVRAM lock before use. */
12693 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
12694 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
12695 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
12697 if (tg3_nvram_lock(tp))
12698 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET)
12700 tg3_nvram_unlock(tp);
12702 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12705 /* First try to get it from MAC address mailbox. */
12706 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
/* 0x484b is ASCII "HK" -- presumably the bootcode signature marking a
 * valid mailbox entry; verify against Broadcom bootcode docs. */
12707 if ((hi >> 16) == 0x484b) {
12708 dev->dev_addr[0] = (hi >> 8) & 0xff;
12709 dev->dev_addr[1] = (hi >> 0) & 0xff;
12711 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
12712 dev->dev_addr[2] = (lo >> 24) & 0xff;
12713 dev->dev_addr[3] = (lo >> 16) & 0xff;
12714 dev->dev_addr[4] = (lo >> 8) & 0xff;
12715 dev->dev_addr[5] = (lo >> 0) & 0xff;
12717 /* Some old bootcode may report a 0 MAC address in SRAM */
12718 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
12721 /* Next, try NVRAM.  Note the byte ordering here differs from
 * both the mailbox layout above and the register layout below. */
12722 if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
12723 !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
12724 dev->dev_addr[0] = ((hi >> 16) & 0xff);
12725 dev->dev_addr[1] = ((hi >> 24) & 0xff);
12726 dev->dev_addr[2] = ((lo >> 0) & 0xff);
12727 dev->dev_addr[3] = ((lo >> 8) & 0xff);
12728 dev->dev_addr[4] = ((lo >> 16) & 0xff);
12729 dev->dev_addr[5] = ((lo >> 24) & 0xff);
12731 /* Finally just fetch it out of the MAC control regs. */
12733 hi = tr32(MAC_ADDR_0_HIGH);
12734 lo = tr32(MAC_ADDR_0_LOW);
12736 dev->dev_addr[5] = lo & 0xff;
12737 dev->dev_addr[4] = (lo >> 8) & 0xff;
12738 dev->dev_addr[3] = (lo >> 16) & 0xff;
12739 dev->dev_addr[2] = (lo >> 24) & 0xff;
12740 dev->dev_addr[1] = hi & 0xff;
12741 dev->dev_addr[0] = (hi >> 8) & 0xff;
/* Nothing valid found: on SPARC fall back to the IDPROM address. */
12745 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
12746 #ifdef CONFIG_SPARC
12747 if (!tg3_get_default_macaddr_sparc(tp))
12752 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
12756 #define BOUNDARY_SINGLE_CACHELINE 1
12757 #define BOUNDARY_MULTI_CACHELINE 2
/* Compute the DMA read/write boundary bits to merge into the
 * DMA_RW_CTRL value @val, based on the PCI cache line size and the bus
 * type (plain PCI, PCI-X, or PCI Express).  A per-architecture "goal"
 * chooses between bursting within a single cache line or across
 * multiple cache lines.  Returns the updated register value.
 */
12759 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
12761 int cacheline_size;
/* Per PCI spec PCI_CACHE_LINE_SIZE is in 32-bit words, hence the *4
 * conversion to bytes below. */
12765 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
12767 cacheline_size = 1024;
12769 cacheline_size = (int) byte * 4;
12771 /* On 5703 and later chips, the boundary bits have no
 * effect except on PCI Express, so bail out early for them. */
12774 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12775 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12776 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
/* Architecture-specific burst policy. */
12779 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
12780 goal = BOUNDARY_MULTI_CACHELINE;
12782 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
12783 goal = BOUNDARY_SINGLE_CACHELINE;
12792 /* PCI controllers on most RISC systems tend to disconnect
12793 * when a device tries to burst across a cache-line boundary.
12794 * Therefore, letting tg3 do so just wastes PCI bandwidth.
12796 * Unfortunately, for PCI-E there are only limited
12797 * write-side controls for this, and thus for reads
12798 * we will still get the disconnects. We'll also waste
12799 * these PCI cycles for both read and write for chips
12800 * other than 5700 and 5701 which do not implement the
 * boundary bits for plain PCI. */
/* PCI-X: pick boundary encodings keyed off the cache line size. */
12803 if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
12804 !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
12805 switch (cacheline_size) {
12810 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12811 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
12812 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
12814 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12815 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
12820 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
12821 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
12825 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
12826 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
/* PCI Express: only the write-side boundary is controllable. */
12829 } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12830 switch (cacheline_size) {
12834 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12835 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12836 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
12842 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
12843 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
/* Plain PCI (5700/5701): boundary matches the cache line size. */
12847 switch (cacheline_size) {
12849 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12850 val |= (DMA_RWCTRL_READ_BNDRY_16 |
12851 DMA_RWCTRL_WRITE_BNDRY_16);
12856 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12857 val |= (DMA_RWCTRL_READ_BNDRY_32 |
12858 DMA_RWCTRL_WRITE_BNDRY_32);
12863 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12864 val |= (DMA_RWCTRL_READ_BNDRY_64 |
12865 DMA_RWCTRL_WRITE_BNDRY_64);
12870 if (goal == BOUNDARY_SINGLE_CACHELINE) {
12871 val |= (DMA_RWCTRL_READ_BNDRY_128 |
12872 DMA_RWCTRL_WRITE_BNDRY_128);
12877 val |= (DMA_RWCTRL_READ_BNDRY_256 |
12878 DMA_RWCTRL_WRITE_BNDRY_256);
12881 val |= (DMA_RWCTRL_READ_BNDRY_512 |
12882 DMA_RWCTRL_WRITE_BNDRY_512);
12886 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
12887 DMA_RWCTRL_WRITE_BNDRY_1024);
/* Run one host<->NIC DMA transaction for the DMA self-test.
 * Builds an internal buffer descriptor pointing at @buf/@buf_dma, writes
 * it into the NIC SRAM DMA descriptor pool via the PCI memory window,
 * kicks either the read-DMA engine (@to_device != 0, host -> NIC) or the
 * write-DMA engine (NIC -> host), then polls the completion FIFOs.
 */
12896 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
12898 struct tg3_internal_buffer_desc test_desc;
12899 u32 sram_dma_descs;
12902 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
/* Quiesce completion FIFOs, DMA status, buffer manager and FTQs
 * before the test so the poll below sees only our descriptor. */
12904 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
12905 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
12906 tw32(RDMAC_STATUS, 0);
12907 tw32(WDMAC_STATUS, 0);
12909 tw32(BUFMGR_MODE, 0);
12910 tw32(FTQ_RESET, 0);
12912 test_desc.addr_hi = ((u64) buf_dma) >> 32;
12913 test_desc.addr_lo = buf_dma & 0xffffffff;
12914 test_desc.nic_mbuf = 0x00002100;
12915 test_desc.len = size;
12918 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
12919 * the *second* time the tg3 driver was getting loaded after an
 * unload, hence the caution around DMA engine resets below.
12922 * Broadcom tells me:
12923 * ...the DMA engine is connected to the GRC block and a DMA
12924 * reset may affect the GRC block in some unpredictable way...
12925 * The behavior of resets to individual blocks has not been tested.
12927 * Broadcom noted the GRC reset will also reset all sub-components.
 */
/* cqid_sqid selects completion/submission queues; the pair differs
 * between the read and write DMA engines. */
12930 test_desc.cqid_sqid = (13 << 8) | 2;
12932 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
12935 test_desc.cqid_sqid = (16 << 8) | 7;
12937 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
12940 test_desc.flags = 0x00000005;
/* Copy the descriptor word-by-word into NIC SRAM through the
 * indirect PCI memory window, then close the window. */
12942 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
12945 val = *(((u32 *)&test_desc) + i);
12946 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
12947 sram_dma_descs + (i * sizeof(u32)));
12948 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
12950 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
/* Enqueue the descriptor to the appropriate high-priority DMA FTQ. */
12953 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
12955 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
/* Poll up to 40 times for our descriptor to show up on the
 * completion FIFO. */
12959 for (i = 0; i < 40; i++) {
12963 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
12965 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
12966 if ((val & 0xffff) == sram_dma_descs) {
12977 #define TEST_BUFFER_SIZE 0x2000
/* Probe-time DMA self-test and DMA_RW_CTRL tuning.
 * Allocates a coherent TEST_BUFFER_SIZE buffer, derives the chip- and
 * bus-specific dma_rwctrl value (watermarks, boundary bits, errata
 * workarounds), and on 5700/5701 actually DMAs the buffer to the card
 * and back, tightening the write boundary to 16 bytes if corruption is
 * detected or a known-problematic host bridge is present.
 */
12979 static int __devinit tg3_test_dma(struct tg3 *tp)
12981 dma_addr_t buf_dma;
12982 u32 *buf, saved_dma_rwctrl;
12985 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
/* Base PCI read/write command codes, then boundary bits. */
12991 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
12992 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
12994 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
/* Bus-type specific DMA watermark settings (magic values are
 * chip-errata driven; see inline comments). */
12996 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
12997 /* DMA read watermark not used on PCIE */
12998 tp->dma_rwctrl |= 0x00180000;
12999 } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
13000 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13001 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
13002 tp->dma_rwctrl |= 0x003f0000;
13004 tp->dma_rwctrl |= 0x003f000f;
13006 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13007 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
13008 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
13009 u32 read_water = 0x7;
13011 /* If the 5704 is behind the EPB bridge, we can
13012 * do the less restrictive ONE_DMA workaround for
13013 * better performance.
 */
13015 if ((tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) &&
13016 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13017 tp->dma_rwctrl |= 0x8000;
13018 else if (ccval == 0x6 || ccval == 0x7)
13019 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
13021 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
13023 /* Set bit 23 to enable PCIX hw bug fix */
13025 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
13026 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
13028 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
13029 /* 5780 always in PCIX mode */
13030 tp->dma_rwctrl |= 0x00144000;
13031 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13032 /* 5714 always in PCIX mode */
13033 tp->dma_rwctrl |= 0x00148000;
13035 tp->dma_rwctrl |= 0x001b000f;
/* 5703/5704: clear the low nibble (write watermark bits). */
13039 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
13040 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
13041 tp->dma_rwctrl &= 0xfffffff0;
13043 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13044 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13045 /* Remove this if it causes problems for some boards. */
13046 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
13048 /* On 5700/5701 chips, we need to set this bit.
13049 * Otherwise the chip will issue cacheline transactions
13050 * to streamable DMA memory with not all the byte
13051 * enables turned on. This is an error on several
13052 * RISC PCI controllers, in particular sparc64.
13054 * On 5703/5704 chips, this bit has been reassigned
13055 * a different meaning. In particular, it is used
13056 * on those chips to enable a PCI-X workaround.
 */
13058 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
13061 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13064 /* Unneeded, already done by tg3_get_invariants. */
13065 tg3_switch_clocks(tp);
/* Only 5700/5701 need the actual DMA round-trip test below. */
13069 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13070 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13073 /* It is best to perform DMA test with maximum write burst size
13074 * to expose the 5700/5701 write DMA bug.
 */
13076 saved_dma_rwctrl = tp->dma_rwctrl;
13077 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13078 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
/* Fill the buffer with a known pattern, DMA it to the card, and
 * verify the card-side copy via tg3_read_mem(). */
13083 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
13086 /* Send the buffer to the chip. */
13087 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
13089 printk(KERN_ERR "tg3_test_dma() Write the buffer failed %d\n", ret);
13094 /* validate data reached card RAM correctly. */
13095 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13097 tg3_read_mem(tp, 0x2100 + (i*4), &val);
13098 if (le32_to_cpu(val) != p[i]) {
13099 printk(KERN_ERR " tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
13100 /* ret = -ENODEV here? */
13105 /* Now read it back. */
13106 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
13108 printk(KERN_ERR "tg3_test_dma() Read the buffer failed %d\n", ret);
/* Compare the read-back data.  On mismatch, retry once with the
 * write boundary forced down to 16 bytes (the errata workaround);
 * if that is already in effect the corruption is fatal. */
13114 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
13118 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13119 DMA_RWCTRL_WRITE_BNDRY_16) {
13120 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13121 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13122 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13125 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
/* Loop completed without mismatch => test passed. */
13131 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
13137 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
13138 DMA_RWCTRL_WRITE_BNDRY_16) {
13139 static struct pci_device_id dma_wait_state_chipsets[] = {
13140 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
13141 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
13145 /* DMA test passed without adjusting DMA boundary,
13146 * now look for chipsets that are known to expose the
13147 * DMA bug without failing the test.
 */
13149 if (pci_dev_present(dma_wait_state_chipsets)) {
13150 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
13151 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
13154 /* Safe to use the calculated DMA boundary. */
13155 tp->dma_rwctrl = saved_dma_rwctrl;
13157 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
13161 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
/* Initialize tp->link_config to its probe-time defaults: advertise all
 * 10/100/1000 half/full modes with autonegotiation enabled, and mark
 * every speed/duplex field (current, active and original) as INVALID
 * until the first link negotiation fills them in.
 */
13166 static void __devinit tg3_init_link_config(struct tg3 *tp)
13168 tp->link_config.advertising =
13169 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
13170 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
13171 ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
13172 ADVERTISED_Autoneg | ADVERTISED_MII);
13173 tp->link_config.speed = SPEED_INVALID;
13174 tp->link_config.duplex = DUPLEX_INVALID;
13175 tp->link_config.autoneg = AUTONEG_ENABLE;
13176 tp->link_config.active_speed = SPEED_INVALID;
13177 tp->link_config.active_duplex = DUPLEX_INVALID;
13178 tp->link_config.phy_is_low_power = 0;
/* orig_* hold the pre-suspend settings to restore on resume. */
13179 tp->link_config.orig_speed = SPEED_INVALID;
13180 tp->link_config.orig_duplex = DUPLEX_INVALID;
13181 tp->link_config.orig_autoneg = AUTONEG_INVALID;
/* Set default buffer-manager watermarks.  5705-and-later chips use the
 * reduced _5705 values (further lowered on 5906), with jumbo watermarks
 * taken from the 5780 defaults; older chips use the original defaults.
 * DMA low/high watermarks are common to all chips.
 */
13184 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
13186 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13187 tp->bufmgr_config.mbuf_read_dma_low_water =
13188 DEFAULT_MB_RDMA_LOW_WATER_5705;
13189 tp->bufmgr_config.mbuf_mac_rx_low_water =
13190 DEFAULT_MB_MACRX_LOW_WATER_5705;
13191 tp->bufmgr_config.mbuf_high_water =
13192 DEFAULT_MB_HIGH_WATER_5705;
/* 5906 has less on-chip buffer memory; override two watermarks. */
13193 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13194 tp->bufmgr_config.mbuf_mac_rx_low_water =
13195 DEFAULT_MB_MACRX_LOW_WATER_5906;
13196 tp->bufmgr_config.mbuf_high_water =
13197 DEFAULT_MB_HIGH_WATER_5906;
13200 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13201 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
13202 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13203 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
13204 tp->bufmgr_config.mbuf_high_water_jumbo =
13205 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
13207 tp->bufmgr_config.mbuf_read_dma_low_water =
13208 DEFAULT_MB_RDMA_LOW_WATER;
13209 tp->bufmgr_config.mbuf_mac_rx_low_water =
13210 DEFAULT_MB_MACRX_LOW_WATER;
13211 tp->bufmgr_config.mbuf_high_water =
13212 DEFAULT_MB_HIGH_WATER;
13214 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
13215 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
13216 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
13217 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
13218 tp->bufmgr_config.mbuf_high_water_jumbo =
13219 DEFAULT_MB_HIGH_WATER_JUMBO;
13222 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
13223 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
/* Map the masked PHY id (tp->phy_id & PHY_ID_MASK) to a human-readable
 * chip name for the probe banner.  A phy_id of 0 denotes a SerDes-only
 * configuration; unrecognized ids report "unknown".
 */
13226 static char * __devinit tg3_phy_string(struct tg3 *tp)
13228 switch (tp->phy_id & PHY_ID_MASK) {
13229 case PHY_ID_BCM5400: return "5400";
13230 case PHY_ID_BCM5401: return "5401";
13231 case PHY_ID_BCM5411: return "5411";
13232 case PHY_ID_BCM5701: return "5701";
13233 case PHY_ID_BCM5703: return "5703";
13234 case PHY_ID_BCM5704: return "5704";
13235 case PHY_ID_BCM5705: return "5705";
13236 case PHY_ID_BCM5750: return "5750";
13237 case PHY_ID_BCM5752: return "5752";
13238 case PHY_ID_BCM5714: return "5714";
13239 case PHY_ID_BCM5780: return "5780";
13240 case PHY_ID_BCM5755: return "5755";
13241 case PHY_ID_BCM5787: return "5787";
13242 case PHY_ID_BCM5784: return "5784";
13243 case PHY_ID_BCM5756: return "5722/5756";
13244 case PHY_ID_BCM5906: return "5906";
13245 case PHY_ID_BCM5761: return "5761";
13246 case PHY_ID_BCM8002: return "8002/serdes";
13247 case 0: return "serdes";
13248 default: return "unknown";
/* Format a description of the device's bus attachment ("PCI Express",
 * "PCIX:133MHz", "PCI:33MHz:32-bit", ...) into caller-supplied @str and
 * return it.  @str is assumed large enough for the longest string
 * produced here -- TODO confirm against the caller's buffer size.
 */
13252 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
13254 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13255 strcpy(str, "PCI Express");
13257 } else if (tp->tg3_flags & TG3_FLAG_PCIX_MODE) {
/* Low 5 bits of CLOCK_CTRL encode the PCI-X bus speed. */
13258 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
13260 strcpy(str, "PCIX:");
13262 if ((clock_ctrl == 7) ||
13263 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
13264 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
13265 strcat(str, "133MHz");
13266 else if (clock_ctrl == 0)
13267 strcat(str, "33MHz");
13268 else if (clock_ctrl == 2)
13269 strcat(str, "50MHz");
13270 else if (clock_ctrl == 4)
13271 strcat(str, "66MHz");
13272 else if (clock_ctrl == 6)
13273 strcat(str, "100MHz");
/* Conventional PCI: speed and width come from feature flags. */
13275 strcpy(str, "PCI:");
13276 if (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED)
13277 strcat(str, "66MHz");
13279 strcat(str, "33MHz");
13281 if (tp->tg3_flags & TG3_FLAG_PCI_32BIT)
13282 strcat(str, ":32-bit");
13284 strcat(str, ":64-bit");
/* Find the "peer" PCI function of a dual-port device: scan the other
 * functions of the same slot (devfn with the function bits masked off)
 * for a device that is not ourselves.
 */
13288 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13290 struct pci_dev *peer;
13291 unsigned int func, devnr = tp->pdev->devfn & ~7;
13293 for (func = 0; func < 8; func++) {
13294 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13295 if (peer && peer != tp->pdev)
13299 /* 5704 can be configured in single-port mode, set peer to
13300 * tp->pdev in that case.
 */
13308 * We don't need to keep the refcount elevated; there's no way
13309 * to remove one half of this device without removing the other
 */
/* Fill tp->coal with the driver's default interrupt-coalescing
 * parameters.  Chips using CLRTICK coalesce mode get adjusted tick
 * values, and 5705-and-later chips zero the IRQ-context and stats
 * coalescing fields (unsupported there).
 */
13316 static void __devinit tg3_init_coal(struct tg3 *tp)
13318 struct ethtool_coalesce *ec = &tp->coal;
13320 memset(ec, 0, sizeof(*ec));
13321 ec->cmd = ETHTOOL_GCOALESCE;
13322 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
13323 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
13324 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
13325 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
13326 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
13327 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
13328 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
13329 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
13330 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
/* CLRTICK-on-BD mode uses different tick values. */
13332 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
13333 HOSTCC_MODE_CLRTICK_TXBD)) {
13334 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
13335 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
13336 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
13337 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
13340 if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
13341 ec->rx_coalesce_usecs_irq = 0;
13342 ec->tx_coalesce_usecs_irq = 0;
13343 ec->stats_block_coalesce_usecs = 0;
/* PCI probe entry point.  Enables and maps the device, allocates the
 * net_device, detects chip invariants, configures DMA masks, runs the
 * DMA self-test, obtains the MAC address, and registers the netdev.
 * Error paths unwind via the labelled goto chain at the bottom.
 */
13347 static int __devinit tg3_init_one(struct pci_dev *pdev,
13348 const struct pci_device_id *ent)
13350 static int tg3_version_printed = 0;
13351 resource_size_t tg3reg_len;
13352 struct net_device *dev;
13356 u64 dma_mask, persist_dma_mask;
/* Print the driver banner only once, on the first probe. */
13358 if (tg3_version_printed++ == 0)
13359 printk(KERN_INFO "%s", version);
13361 err = pci_enable_device(pdev);
13363 printk(KERN_ERR PFX "Cannot enable PCI device, "
13368 if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM)) {
13369 printk(KERN_ERR PFX "Cannot find proper PCI device "
13370 "base address, aborting.\n");
13372 goto err_out_disable_pdev;
13375 err = pci_request_regions(pdev, DRV_MODULE_NAME);
13377 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
13379 goto err_out_disable_pdev;
13382 pci_set_master(pdev);
13384 /* Find power-management capability. */
13385 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
13387 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
13390 goto err_out_free_res;
13393 dev = alloc_etherdev(sizeof(*tp));
13395 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
13397 goto err_out_free_res;
13400 SET_NETDEV_DEV(dev, &pdev->dev);
13402 #if TG3_VLAN_TAG_USED
13403 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
13404 dev->vlan_rx_register = tg3_vlan_rx_register;
13407 tp = netdev_priv(dev);
13410 tp->pm_cap = pm_cap;
13411 tp->rx_mode = TG3_DEF_RX_MODE;
13412 tp->tx_mode = TG3_DEF_TX_MODE;
13415 tp->msg_enable = tg3_debug;
13417 tp->msg_enable = TG3_DEF_MSG_ENABLE;
13419 /* The word/byte swap controls here control register access byte
13420 * swapping. DMA data byte swapping is controlled in the GRC_MODE
 * register below.
 */
13423 tp->misc_host_ctrl =
13424 MISC_HOST_CTRL_MASK_PCI_INT |
13425 MISC_HOST_CTRL_WORD_SWAP |
13426 MISC_HOST_CTRL_INDIR_ACCESS |
13427 MISC_HOST_CTRL_PCISTATE_RW;
13429 /* The NONFRM (non-frame) byte/word swap controls take effect
13430 * on descriptor entries, anything which isn't packet data.
13432 * The StrongARM chips on the board (one for tx, one for rx)
13433 * are running in big-endian mode.
 */
13435 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
13436 GRC_MODE_WSWAP_NONFRM_DATA);
13437 #ifdef __BIG_ENDIAN
13438 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
13440 spin_lock_init(&tp->lock);
13441 spin_lock_init(&tp->indirect_lock);
13442 INIT_WORK(&tp->reset_task, tg3_reset_task);
/* Map the register BAR. */
13444 dev->mem_start = pci_resource_start(pdev, BAR_0);
13445 tg3reg_len = pci_resource_len(pdev, BAR_0);
13446 dev->mem_end = dev->mem_start + tg3reg_len;
13448 tp->regs = ioremap_nocache(dev->mem_start, tg3reg_len);
13450 printk(KERN_ERR PFX "Cannot map device registers, "
13453 goto err_out_free_dev;
13456 tg3_init_link_config(tp);
13458 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13459 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13460 tp->tx_pending = TG3_DEF_TX_RING_PENDING;
/* Wire up the (pre-net_device_ops) netdev callbacks. */
13462 dev->open = tg3_open;
13463 dev->stop = tg3_close;
13464 dev->get_stats = tg3_get_stats;
13465 dev->set_multicast_list = tg3_set_rx_mode;
13466 dev->set_mac_address = tg3_set_mac_addr;
13467 dev->do_ioctl = tg3_ioctl;
13468 dev->tx_timeout = tg3_tx_timeout;
13469 netif_napi_add(dev, &tp->napi, tg3_poll, 64);
13470 dev->ethtool_ops = &tg3_ethtool_ops;
13471 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13472 dev->change_mtu = tg3_change_mtu;
13473 dev->irq = pdev->irq;
13474 #ifdef CONFIG_NET_POLL_CONTROLLER
13475 dev->poll_controller = tg3_poll_controller;
13478 err = tg3_get_invariants(tp);
13480 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
13482 goto err_out_iounmap;
13485 /* The EPB bridge inside 5714, 5715, and 5780 and any
13486 * device behind the EPB cannot support DMA addresses > 40-bit.
13487 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
13488 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
13489 * do DMA address check in tg3_start_xmit().
 */
13491 if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
13492 persist_dma_mask = dma_mask = DMA_32BIT_MASK;
13493 else if (tp->tg3_flags & TG3_FLAG_40BIT_DMA_BUG) {
13494 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
13495 #ifdef CONFIG_HIGHMEM
13496 dma_mask = DMA_64BIT_MASK;
13499 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
13501 /* Configure DMA attributes. */
13502 if (dma_mask > DMA_32BIT_MASK) {
13503 err = pci_set_dma_mask(pdev, dma_mask);
13505 dev->features |= NETIF_F_HIGHDMA;
13506 err = pci_set_consistent_dma_mask(pdev,
13509 printk(KERN_ERR PFX "Unable to obtain 64 bit "
13510 "DMA for consistent allocations\n");
13511 goto err_out_iounmap;
13515 if (err || dma_mask == DMA_32BIT_MASK) {
13516 err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
13518 printk(KERN_ERR PFX "No usable DMA configuration, "
13520 goto err_out_iounmap;
13524 tg3_init_bufmgr_config(tp);
/* Decide TSO capability: hardware TSO chips are always capable;
 * several older chips are excluded (firmware TSO bugs / ASF). */
13526 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13527 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
13529 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13530 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13531 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
13532 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13533 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
13534 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
13536 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
13539 /* TSO is on by default on chips that support hardware TSO.
13540 * Firmware TSO on older chips gives lower performance, so it
13541 * is off by default, but can be enabled using ethtool.
 */
13543 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
13544 dev->features |= NETIF_F_TSO;
13545 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) &&
13546 (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5906))
13547 dev->features |= NETIF_F_TSO6;
13548 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13549 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13550 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
13551 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13552 dev->features |= NETIF_F_TSO_ECN;
/* 5705 A1 on a slow bus without TSO: shrink rx ring (hw errata). */
13556 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
13557 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
13558 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
13559 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
13560 tp->rx_pending = 63;
13563 err = tg3_get_device_address(tp);
13565 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
13567 goto err_out_iounmap;
/* Devices with an APE (management processor) need BAR 2 mapped. */
13570 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
13571 if (!(pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM)) {
13572 printk(KERN_ERR PFX "Cannot find proper PCI device "
13573 "base address for APE, aborting.\n");
13575 goto err_out_iounmap;
13578 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
13579 if (!tp->aperegs) {
13580 printk(KERN_ERR PFX "Cannot map APE registers, "
13583 goto err_out_iounmap;
13586 tg3_ape_lock_init(tp);
13590 * Reset chip in case UNDI or EFI driver did not shutdown
13591 * DMA self test will enable WDMAC and we'll see (spurious)
13592 * pending DMA on the PCI bus at that point.
 */
13594 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
13595 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
13596 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
13597 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13600 err = tg3_test_dma(tp);
13602 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
13603 goto err_out_apeunmap;
13606 /* Tigon3 can do ipv4 only... and some chips have buggy
 * checksumming.
 */
13609 if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
13610 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
13611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13612 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13613 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13614 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13615 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13616 dev->features |= NETIF_F_IPV6_CSUM;
13618 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
13620 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
13622 /* flow control autonegotiation is default behavior */
13623 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
13624 tp->link_config.flowctrl = TG3_FLOW_CTRL_TX | TG3_FLOW_CTRL_RX;
13628 pci_set_drvdata(pdev, dev);
13630 err = register_netdev(dev);
13632 printk(KERN_ERR PFX "Cannot register net device, "
13634 goto err_out_apeunmap;
/* Probe succeeded: print the hardware summary banners. */
13637 printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
13639 tp->board_part_number,
13640 tp->pci_chip_rev_id,
13641 tg3_bus_string(tp, str),
13644 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)
13646 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
13648 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name,
13649 tp->mdio_bus->phy_map[PHY_ADDR]->dev.bus_id);
13652 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
13653 tp->dev->name, tg3_phy_string(tp),
13654 ((tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100Base-TX" :
13655 ((tp->tg3_flags2 & TG3_FLG2_ANY_SERDES) ? "1000Base-SX" :
13656 "10/100/1000Base-T")),
13657 (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0);
13659 printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
13661 (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
13662 (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
13663 (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
13664 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
13665 (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
13666 printk(KERN_INFO "%s: dma_rwctrl[%08x] dma_mask[%d-bit]\n",
13667 dev->name, tp->dma_rwctrl,
13668 (pdev->dma_mask == DMA_32BIT_MASK) ? 32 :
13669 (((u64) pdev->dma_mask == DMA_40BIT_MASK) ? 40 : 64));
/* --- error unwind chain: release in reverse acquisition order --- */
13675 iounmap(tp->aperegs);
13676 tp->aperegs = NULL;
13689 pci_release_regions(pdev);
13691 err_out_disable_pdev:
13692 pci_disable_device(pdev);
13693 pci_set_drvdata(pdev, NULL);
/* PCI remove entry point: flush pending work, unregister the netdev,
 * unmap APE and register BARs, free the device, and release/disable the
 * PCI device -- the reverse of tg3_init_one().
 */
13697 static void __devexit tg3_remove_one(struct pci_dev *pdev)
13699 struct net_device *dev = pci_get_drvdata(pdev);
13702 struct tg3 *tp = netdev_priv(dev);
/* Ensure the deferred reset_task is not running during teardown. */
13704 flush_scheduled_work();
13706 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
13711 unregister_netdev(dev);
13713 iounmap(tp->aperegs);
13714 tp->aperegs = NULL;
13721 pci_release_regions(pdev);
13722 pci_disable_device(pdev);
13723 pci_set_drvdata(pdev, NULL);
/* Legacy PM suspend hook: save PCI state, and if the interface is
 * running, stop NAPI/timers, halt the chip, and drop it into the
 * target low-power state.  On failure to power down, the error path
 * restarts the hardware and reattaches the interface.
 */
13727 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
13729 struct net_device *dev = pci_get_drvdata(pdev);
13730 struct tg3 *tp = netdev_priv(dev);
13731 pci_power_t target_state;
13734 /* PCI register 4 needs to be saved whether netif_running() or not.
13735 * MSI address and data need to be saved if using MSI and
 * netif_running().
 */
13738 pci_save_state(pdev);
13740 if (!netif_running(dev))
13743 flush_scheduled_work();
13745 tg3_netif_stop(tp);
13747 del_timer_sync(&tp->timer);
13749 tg3_full_lock(tp, 1);
13750 tg3_disable_ints(tp);
13751 tg3_full_unlock(tp);
13753 netif_device_detach(dev);
13755 tg3_full_lock(tp, 0);
13756 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13757 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
13758 tg3_full_unlock(tp);
13760 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot;
13762 err = tg3_set_power_state(tp, target_state);
/* Power-down failed: bring the hardware and netif back up. */
13766 tg3_full_lock(tp, 0);
13768 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13769 err2 = tg3_restart_hw(tp, 1);
13773 tp->timer.expires = jiffies + tp->timer_offset;
13774 add_timer(&tp->timer);
13776 netif_device_attach(dev);
13777 tg3_netif_start(tp);
13780 tg3_full_unlock(tp);
/* Legacy PM resume hook: restore PCI state, return the chip to full
 * power (D0), and if the interface was running, restart the hardware,
 * re-arm the timer and resume NAPI/tx.
 */
13789 static int tg3_resume(struct pci_dev *pdev)
13791 struct net_device *dev = pci_get_drvdata(pdev);
13792 struct tg3 *tp = netdev_priv(dev);
13795 pci_restore_state(tp->pdev);
13797 if (!netif_running(dev))
13800 err = tg3_set_power_state(tp, PCI_D0);
13804 netif_device_attach(dev);
13806 tg3_full_lock(tp, 0);
13808 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
13809 err = tg3_restart_hw(tp, 1);
13813 tp->timer.expires = jiffies + tp->timer_offset;
13814 add_timer(&tp->timer);
13816 tg3_netif_start(tp);
13819 tg3_full_unlock(tp);
/* PCI driver glue: probe/remove callbacks plus legacy suspend/resume
 * hooks, matched against tg3_pci_tbl. */
13827 static struct pci_driver tg3_driver = {
13828 .name = DRV_MODULE_NAME,
13829 .id_table = tg3_pci_tbl,
13830 .probe = tg3_init_one,
13831 .remove = __devexit_p(tg3_remove_one),
13832 .suspend = tg3_suspend,
13833 .resume = tg3_resume
13836 static int __init tg3_init(void)
13838 return pci_register_driver(&tg3_driver);
13841 static void __exit tg3_cleanup(void)
13843 pci_unregister_driver(&tg3_driver);
/* Register the module load/unload hooks. */
13846 module_init(tg3_init);
13847 module_exit(tg3_cleanup);