/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *	Derived from proprietary unpublished source code,
 *	Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *	Permission is hereby granted for the distribution of this firmware
 *	data in hexadecimal or equivalent format, provided this copyright
 *	notice is accompanying it.
 */
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
48 #include <net/checksum.h>
51 #include <asm/system.h>
53 #include <asm/byteorder.h>
54 #include <linux/uaccess.h>
57 #include <asm/idprom.h>
66 /* Functions & macros to verify TG3_FLAGS types */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
83 #define tg3_flag(tp, flag) \
84 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
85 #define tg3_flag_set(tp, flag) \
86 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
87 #define tg3_flag_clear(tp, flag) \
88 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
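/* Illustrative sketch (not part of the upstream driver): tp->tg3_flags is a
 * bitmap indexed by enum TG3_FLAGS, so feature tests and updates through the
 * wrappers above are plain bit operations, e.g.
 *
 *	if (tg3_flag(tp, ENABLE_APE))
 *		tg3_flag_set(tp, MDIOBUS_INITED);
 *	else
 *		tg3_flag_clear(tp, MDIOBUS_INITED);
 *
 * ENABLE_APE and MDIOBUS_INITED are real flags used later in this file; the
 * pairing here only demonstrates the calling convention.
 */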
#define DRV_MODULE_NAME		"tg3"
#define TG3_MAJ_NUM			3
#define TG3_MIN_NUM			122
#define DRV_MODULE_VERSION	\
	__stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE	"December 7, 2011"
97 #define RESET_KIND_SHUTDOWN 0
98 #define RESET_KIND_INIT 1
99 #define RESET_KIND_SUSPEND 2
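/* Illustrative note: these reset kinds are handed to the firmware/APE state
 * helpers (tg3_write_sig_pre_reset(), tg3_ape_driver_state_change(), ...) so
 * management firmware can distinguish a normal (re)initialization from a
 * shutdown or a suspend.  A hypothetical call sequence around a chip reset
 * would look roughly like
 *
 *	tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
 *	... reset and reprogram the chip ...
 *	tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
 */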
101 #define TG3_DEF_RX_MODE 0
102 #define TG3_DEF_TX_MODE 0
#define TG3_DEF_MSG_ENABLE	  \
	(NETIF_MSG_DRV		| \
	 NETIF_MSG_PROBE	| \
	 NETIF_MSG_LINK		| \
	 NETIF_MSG_TIMER	| \
	 NETIF_MSG_IFDOWN	| \
	 NETIF_MSG_IFUP		| \
	 NETIF_MSG_RX_ERR	| \
	 NETIF_MSG_TX_ERR)
113 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */
#define TG3_TX_TIMEOUT			(5 * HZ)
121 /* hardware minimum and maximum for a single frame's data payload */
122 #define TG3_MIN_MTU 60
123 #define TG3_MAX_MTU(tp) \
124 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
130 #define TG3_RX_STD_RING_SIZE(tp) \
131 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
132 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
133 #define TG3_DEF_RX_RING_PENDING 200
134 #define TG3_RX_JMB_RING_SIZE(tp) \
135 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
136 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
137 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */
146 #define TG3_TX_RING_SIZE 512
147 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
149 #define TG3_RX_STD_RING_BYTES(tp) \
150 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
151 #define TG3_RX_JMB_RING_BYTES(tp) \
152 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
153 #define TG3_RX_RCB_RING_BYTES(tp) \
154 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES	(sizeof(struct tg3_tx_buffer_desc) * \
				 TG3_TX_RING_SIZE)
157 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
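/* A minimal sketch of the shift-and-mask idiom described above: because
 * TG3_TX_RING_SIZE is a compile-time power of two, advancing a ring index
 * needs no divide instruction.  tg3_tx_ring_next() is a hypothetical helper
 * shown only for illustration; the driver itself uses NEXT_TX() directly.
 */
static inline u32 tg3_tx_ring_next(u32 entry)
{
	/* Equivalent to (entry + 1) % TG3_TX_RING_SIZE, but compiles down to
	 * an add and a bitwise AND.
	 */
	return (entry + 1) & (TG3_TX_RING_SIZE - 1);
}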
159 #define TG3_DMA_BYTE_ENAB 64
161 #define TG3_RX_STD_DMA_SZ 1536
162 #define TG3_RX_JMB_DMA_SZ 9046
164 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
166 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
167 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
169 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
170 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
172 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
173 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
186 #define TG3_RX_COPY_THRESHOLD 256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
#define TG3_RX_COPY_THRESH(tp)	TG3_RX_COPY_THRESHOLD
#else
#define TG3_RX_COPY_THRESH(tp)	((tp)->rx_copy_thresh)
#endif

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)	((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)	(NET_SKB_PAD)
#endif
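/* Illustrative sketch (assumed shape; the real test lives in the RX path
 * later in this file): frames no longer than TG3_RX_COPY_THRESH() are copied
 * into a freshly allocated skb instead of handing off and remapping the ring
 * buffer, which also hides the 5701 PCIX alignment erratum described above:
 *
 *	if (len <= TG3_RX_COPY_THRESH(tp))
 *		copy_into_new_skb(...);		// hypothetical helper
 *	else
 *		hand_off_ring_buffer(...);	// hypothetical helper
 */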
199 /* minimum number of free TX descriptors required to wake up TX process */
200 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
201 #define TG3_TX_BD_DMA_MAX_2K 2048
202 #define TG3_TX_BD_DMA_MAX_4K 4096
204 #define TG3_RAW_IP_ALIGN 2
206 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
207 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
209 #define FIRMWARE_TG3 "tigon/tg3.bin"
210 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
211 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
213 static char version[] __devinitdata =
214 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
216 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
217 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
218 MODULE_LICENSE("GPL");
219 MODULE_VERSION(DRV_MODULE_VERSION);
220 MODULE_FIRMWARE(FIRMWARE_TG3);
221 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
222 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
224 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
225 module_param(tg3_debug, int, 0);
226 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
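/* Usage note (illustrative): the mask is the standard NETIF_MSG_* bitmap, so
 * loading with e.g. "modprobe tg3 tg3_debug=0x7" enables DRV, PROBE and LINK
 * messages, while the default of -1 falls back to TG3_DEF_MSG_ENABLE.
 */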
228 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
229 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
230 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
231 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
232 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
233 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
234 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
257 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
258 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
260 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
261 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
264 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
265 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
272 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
278 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
286 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
287 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
288 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
289 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
292 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
302 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
303 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
304 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
305 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
306 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
307 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
308 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
309 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
313 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
315 static const struct {
316 const char string[ETH_GSTRING_LEN];
317 } ethtool_stats_keys[] = {
320 { "rx_ucast_packets" },
321 { "rx_mcast_packets" },
322 { "rx_bcast_packets" },
324 { "rx_align_errors" },
325 { "rx_xon_pause_rcvd" },
326 { "rx_xoff_pause_rcvd" },
327 { "rx_mac_ctrl_rcvd" },
328 { "rx_xoff_entered" },
329 { "rx_frame_too_long_errors" },
331 { "rx_undersize_packets" },
332 { "rx_in_length_errors" },
333 { "rx_out_length_errors" },
334 { "rx_64_or_less_octet_packets" },
335 { "rx_65_to_127_octet_packets" },
336 { "rx_128_to_255_octet_packets" },
337 { "rx_256_to_511_octet_packets" },
338 { "rx_512_to_1023_octet_packets" },
339 { "rx_1024_to_1522_octet_packets" },
340 { "rx_1523_to_2047_octet_packets" },
341 { "rx_2048_to_4095_octet_packets" },
342 { "rx_4096_to_8191_octet_packets" },
343 { "rx_8192_to_9022_octet_packets" },
350 { "tx_flow_control" },
352 { "tx_single_collisions" },
353 { "tx_mult_collisions" },
355 { "tx_excessive_collisions" },
356 { "tx_late_collisions" },
357 { "tx_collide_2times" },
358 { "tx_collide_3times" },
359 { "tx_collide_4times" },
360 { "tx_collide_5times" },
361 { "tx_collide_6times" },
362 { "tx_collide_7times" },
363 { "tx_collide_8times" },
364 { "tx_collide_9times" },
365 { "tx_collide_10times" },
366 { "tx_collide_11times" },
367 { "tx_collide_12times" },
368 { "tx_collide_13times" },
369 { "tx_collide_14times" },
370 { "tx_collide_15times" },
371 { "tx_ucast_packets" },
372 { "tx_mcast_packets" },
373 { "tx_bcast_packets" },
374 { "tx_carrier_sense_errors" },
378 { "dma_writeq_full" },
379 { "dma_write_prioq_full" },
383 { "rx_threshold_hit" },
385 { "dma_readq_full" },
386 { "dma_read_prioq_full" },
387 { "tx_comp_queue_full" },
389 { "ring_set_send_prod_index" },
390 { "ring_status_update" },
392 { "nic_avoided_irqs" },
393 { "nic_tx_threshold_hit" },
395 { "mbuf_lwm_thresh_hit" },
398 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
401 static const struct {
402 const char string[ETH_GSTRING_LEN];
403 } ethtool_test_keys[] = {
404 { "nvram test (online) " },
405 { "link test (online) " },
406 { "register test (offline)" },
407 { "memory test (offline)" },
408 { "mac loopback test (offline)" },
409 { "phy loopback test (offline)" },
410 { "ext loopback test (offline)" },
411 { "interrupt test (offline)" },
414 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
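/* Illustrative sketch (assumed shape): these string tables are what the
 * ethtool get_sset_count()/get_strings() callbacks later in the file report
 * and copy out, roughly
 *
 *	case ETH_SS_STATS:
 *		memcpy(buf, ethtool_stats_keys, sizeof(ethtool_stats_keys));
 *		break;
 *
 * so the ordering here must match the order in which the statistics and
 * self-test results are produced.
 */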
417 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
419 writel(val, tp->regs + off);
422 static u32 tg3_read32(struct tg3 *tp, u32 off)
424 return readl(tp->regs + off);
427 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
429 writel(val, tp->aperegs + off);
432 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
434 return readl(tp->aperegs + off);
437 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
441 spin_lock_irqsave(&tp->indirect_lock, flags);
442 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
443 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
444 spin_unlock_irqrestore(&tp->indirect_lock, flags);
447 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
449 writel(val, tp->regs + off);
450 readl(tp->regs + off);
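/* The read back above is the usual way to flush a posted PCI memory write:
 * readl() cannot complete until the preceding writel() has reached the
 * device.  A minimal sketch of the same idiom, assuming only an ioremap()ed
 * register window (tg3_write32_flush_demo() is a hypothetical name; the
 * driver itself uses tg3_write_flush_reg32()):
 */
static inline void tg3_write32_flush_demo(void __iomem *base, u32 off, u32 val)
{
	writel(val, base + off);
	readl(base + off);	/* force the posted write out to the device */
}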
453 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
458 spin_lock_irqsave(&tp->indirect_lock, flags);
459 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
460 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
461 spin_unlock_irqrestore(&tp->indirect_lock, flags);
465 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
469 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
470 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
471 TG3_64BIT_REG_LOW, val);
474 if (off == TG3_RX_STD_PROD_IDX_REG) {
475 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
476 TG3_64BIT_REG_LOW, val);
480 spin_lock_irqsave(&tp->indirect_lock, flags);
481 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
482 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
483 spin_unlock_irqrestore(&tp->indirect_lock, flags);
	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
495 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
500 spin_lock_irqsave(&tp->indirect_lock, flags);
501 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
502 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
503 spin_unlock_irqrestore(&tp->indirect_lock, flags);
/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
512 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
514 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
515 /* Non-posted methods */
516 tp->write32(tp, off, val);
519 tg3_write32(tp, off, val);
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
531 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
533 tp->write32_mbox(tp, off, val);
534 if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
535 tp->read32_mbox(tp, off);
538 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
540 void __iomem *mbox = tp->regs + off;
542 if (tg3_flag(tp, TXD_MBOX_HWBUG))
544 if (tg3_flag(tp, MBOX_WRITE_REORDER))
548 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
550 return readl(tp->regs + off + GRCMBOX_BASE);
553 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
555 writel(val, tp->regs + off + GRCMBOX_BASE);
558 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
559 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
560 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
561 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
562 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
564 #define tw32(reg, val) tp->write32(tp, reg, val)
565 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
566 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
567 #define tr32(reg) tp->read32(tp, reg)
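/* Illustrative sketch: all register traffic in the rest of the file goes
 * through these hooks, so a read-modify-write looks the same whether the
 * underlying access is direct, flushed or indirect, e.g.
 *
 *	u32 val = tr32(GRC_MISC_CFG);
 *	tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
 *
 * GRC_MISC_CFG and its EPHY_IDDQ bit are real definitions used later in this
 * file; the combination above is only meant to show the calling pattern.
 */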
569 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
573 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
574 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
577 spin_lock_irqsave(&tp->indirect_lock, flags);
578 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
579 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
580 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
582 /* Always leave this as zero. */
583 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
585 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
586 tw32_f(TG3PCI_MEM_WIN_DATA, val);
588 /* Always leave this as zero. */
589 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
591 spin_unlock_irqrestore(&tp->indirect_lock, flags);
594 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
598 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
599 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
604 spin_lock_irqsave(&tp->indirect_lock, flags);
605 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
606 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
607 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
609 /* Always leave this as zero. */
610 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
612 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
613 *val = tr32(TG3PCI_MEM_WIN_DATA);
615 /* Always leave this as zero. */
616 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
618 spin_unlock_irqrestore(&tp->indirect_lock, flags);
621 static void tg3_ape_lock_init(struct tg3 *tp)
626 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
627 regbase = TG3_APE_LOCK_GRANT;
629 regbase = TG3_APE_PER_LOCK_GRANT;
	/* Make sure the driver doesn't hold any stale locks. */
632 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
634 case TG3_APE_LOCK_PHY0:
635 case TG3_APE_LOCK_PHY1:
636 case TG3_APE_LOCK_PHY2:
637 case TG3_APE_LOCK_PHY3:
638 bit = APE_LOCK_GRANT_DRIVER;
642 bit = APE_LOCK_GRANT_DRIVER;
644 bit = 1 << tp->pci_fn;
646 tg3_ape_write32(tp, regbase + 4 * i, bit);
651 static int tg3_ape_lock(struct tg3 *tp, int locknum)
655 u32 status, req, gnt, bit;
657 if (!tg3_flag(tp, ENABLE_APE))
661 case TG3_APE_LOCK_GPIO:
662 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
664 case TG3_APE_LOCK_GRC:
665 case TG3_APE_LOCK_MEM:
667 bit = APE_LOCK_REQ_DRIVER;
669 bit = 1 << tp->pci_fn;
675 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
676 req = TG3_APE_LOCK_REQ;
677 gnt = TG3_APE_LOCK_GRANT;
679 req = TG3_APE_PER_LOCK_REQ;
680 gnt = TG3_APE_PER_LOCK_GRANT;
685 tg3_ape_write32(tp, req + off, bit);
687 /* Wait for up to 1 millisecond to acquire lock. */
688 for (i = 0; i < 100; i++) {
689 status = tg3_ape_read32(tp, gnt + off);
696 /* Revoke the lock request. */
697 tg3_ape_write32(tp, gnt + off, bit);
704 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
708 if (!tg3_flag(tp, ENABLE_APE))
712 case TG3_APE_LOCK_GPIO:
713 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
715 case TG3_APE_LOCK_GRC:
716 case TG3_APE_LOCK_MEM:
718 bit = APE_LOCK_GRANT_DRIVER;
720 bit = 1 << tp->pci_fn;
726 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
727 gnt = TG3_APE_LOCK_GRANT;
729 gnt = TG3_APE_PER_LOCK_GRANT;
731 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
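/* Illustrative sketch (hypothetical caller): every successful APE lock
 * acquisition is expected to be paired with an unlock of the same lock,
 * typically bracketing a short shared-memory or register access, e.g.
 *
 *	if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
 *		return;				// not granted within ~1 ms
 *	... touch APE shared memory ...
 *	tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
 *
 * tg3_ape_send_event() below follows exactly this pattern.
 */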
734 static void tg3_ape_send_event(struct tg3 *tp, u32 event)
739 /* NCSI does not support APE events */
740 if (tg3_flag(tp, APE_HAS_NCSI))
743 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
744 if (apedata != APE_SEG_SIG_MAGIC)
747 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
748 if (!(apedata & APE_FW_STATUS_READY))
751 /* Wait for up to 1 millisecond for APE to service previous event. */
752 for (i = 0; i < 10; i++) {
753 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
756 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
758 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
759 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
760 event | APE_EVENT_STATUS_EVENT_PENDING);
762 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
764 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
770 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
771 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
774 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
779 if (!tg3_flag(tp, ENABLE_APE))
783 case RESET_KIND_INIT:
784 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
785 APE_HOST_SEG_SIG_MAGIC);
786 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
787 APE_HOST_SEG_LEN_MAGIC);
788 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
789 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
790 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
791 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
792 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
793 APE_HOST_BEHAV_NO_PHYLOCK);
794 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
795 TG3_APE_HOST_DRVR_STATE_START);
797 event = APE_EVENT_STATUS_STATE_START;
799 case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
805 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
807 if (device_may_wakeup(&tp->pdev->dev) &&
808 tg3_flag(tp, WOL_ENABLE)) {
809 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
810 TG3_APE_HOST_WOL_SPEED_AUTO);
811 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
813 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
815 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
817 event = APE_EVENT_STATUS_STATE_UNLOAD;
819 case RESET_KIND_SUSPEND:
820 event = APE_EVENT_STATUS_STATE_SUSPEND;
826 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
828 tg3_ape_send_event(tp, event);
831 static void tg3_disable_ints(struct tg3 *tp)
835 tw32(TG3PCI_MISC_HOST_CTRL,
836 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
837 for (i = 0; i < tp->irq_max; i++)
838 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
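/* Illustrative note (paraphrasing the scheme used here): writing a value
 * with the low bit set to a vector's interrupt mailbox masks that vector, so
 * tg3_disable_ints() both sets the global MASK_PCI_INT bit and parks every
 * per-vector mailbox at 1.  The matching re-arm in tg3_enable_ints() below
 * writes the last seen status tag instead, e.g. for vector 0 only (sketch):
 *
 *	tw32_mailbox_f(tp->napi[0].int_mbox, tp->napi[0].last_tag << 24);
 */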
841 static void tg3_enable_ints(struct tg3 *tp)
848 tw32(TG3PCI_MISC_HOST_CTRL,
849 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
851 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
852 for (i = 0; i < tp->irq_cnt; i++) {
853 struct tg3_napi *tnapi = &tp->napi[i];
855 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
856 if (tg3_flag(tp, 1SHOT_MSI))
857 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
859 tp->coal_now |= tnapi->coal_now;
862 /* Force an initial interrupt */
863 if (!tg3_flag(tp, TAGGED_STATUS) &&
864 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
865 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
867 tw32(HOSTCC_MODE, tp->coal_now);
869 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
872 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
874 struct tg3 *tp = tnapi->tp;
875 struct tg3_hw_status *sblk = tnapi->hw_status;
876 unsigned int work_exists = 0;
878 /* check for phy events */
879 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
880 if (sblk->status & SD_STATUS_LINK_CHG)
883 /* check for RX/TX work to do */
884 if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
885 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
/* tg3_int_reenable()
 * similar to tg3_enable_ints, but it accurately determines whether there
 * is new work pending and can return without flushing the PIO write
 * which reenables interrupts
 */
896 static void tg3_int_reenable(struct tg3_napi *tnapi)
898 struct tg3 *tp = tnapi->tp;
900 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
907 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
908 tw32(HOSTCC_MODE, tp->coalesce_mode |
909 HOSTCC_MODE_ENABLE | tnapi->coal_now);
912 static void tg3_switch_clocks(struct tg3 *tp)
917 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
920 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
922 orig_clock_ctrl = clock_ctrl;
923 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
924 CLOCK_CTRL_CLKRUN_OENABLE |
926 tp->pci_clock_ctrl = clock_ctrl;
928 if (tg3_flag(tp, 5705_PLUS)) {
929 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
930 tw32_wait_f(TG3PCI_CLOCK_CTRL,
931 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
933 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
934 tw32_wait_f(TG3PCI_CLOCK_CTRL,
936 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
938 tw32_wait_f(TG3PCI_CLOCK_CTRL,
939 clock_ctrl | (CLOCK_CTRL_ALTCLK),
942 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
945 #define PHY_BUSY_LOOPS 5000
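/* Illustrative note: PHY register traffic goes through the MAC's MI (MDIO)
 * shuttle register.  Both accessors below build an MI_COM frame, start it,
 * and then poll MI_COM_BUSY for at most PHY_BUSY_LOOPS iterations; the rough
 * shape (simplified sketch, the real code also re-reads the completed frame
 * to pick up the data bits) is
 *
 *	tw32_f(MAC_MI_COM, frame_val);
 *	while (--loops && (tr32(MAC_MI_COM) & MI_COM_BUSY))
 *		udelay(10);
 */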
947 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
953 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
955 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
961 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
962 MI_COM_PHY_ADDR_MASK);
963 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
964 MI_COM_REG_ADDR_MASK);
965 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
967 tw32_f(MAC_MI_COM, frame_val);
969 loops = PHY_BUSY_LOOPS;
972 frame_val = tr32(MAC_MI_COM);
974 if ((frame_val & MI_COM_BUSY) == 0) {
976 frame_val = tr32(MAC_MI_COM);
984 *val = frame_val & MI_COM_DATA_MASK;
988 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
989 tw32_f(MAC_MI_MODE, tp->mi_mode);
996 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1002 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1003 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1006 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1008 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1012 frame_val = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1013 MI_COM_PHY_ADDR_MASK);
1014 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1015 MI_COM_REG_ADDR_MASK);
1016 frame_val |= (val & MI_COM_DATA_MASK);
1017 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1019 tw32_f(MAC_MI_COM, frame_val);
1021 loops = PHY_BUSY_LOOPS;
1022 while (loops != 0) {
1024 frame_val = tr32(MAC_MI_COM);
1025 if ((frame_val & MI_COM_BUSY) == 0) {
1027 frame_val = tr32(MAC_MI_COM);
1037 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1038 tw32_f(MAC_MI_MODE, tp->mi_mode);
1045 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1049 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1053 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1057 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1058 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1062 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1068 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1072 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1076 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1080 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1081 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1085 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1091 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1095 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1097 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1102 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1106 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1108 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1113 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1117 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1118 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1119 MII_TG3_AUXCTL_SHDWSEL_MISC);
1121 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1126 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1128 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1129 set |= MII_TG3_AUXCTL_MISC_WREN;
1131 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1134 #define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
1135 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1136 MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
1137 MII_TG3_AUXCTL_ACTL_TX_6DB)
1139 #define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
1140 tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
1141 MII_TG3_AUXCTL_ACTL_TX_6DB);
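/* Illustrative sketch (hypothetical caller): DSP accesses are normally
 * bracketed by these two helpers, e.g.
 *
 *	if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
 *		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
 *		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *	}
 *
 * which is exactly the pattern used by tg3_phy_eee_adjust() and
 * tg3_phy_reset() later in this file.
 */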
1143 static int tg3_bmcr_reset(struct tg3 *tp)
	/* OK, reset it, and poll the BMCR_RESET bit until it
	 * clears or we time out.
	 */
1151 phy_control = BMCR_RESET;
1152 err = tg3_writephy(tp, MII_BMCR, phy_control);
1158 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1162 if ((phy_control & BMCR_RESET) == 0) {
1174 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1176 struct tg3 *tp = bp->priv;
1179 spin_lock_bh(&tp->lock);
1181 if (tg3_readphy(tp, reg, &val))
1184 spin_unlock_bh(&tp->lock);
1189 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1191 struct tg3 *tp = bp->priv;
1194 spin_lock_bh(&tp->lock);
1196 if (tg3_writephy(tp, reg, val))
1199 spin_unlock_bh(&tp->lock);
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
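/* Illustrative note: these three callbacks are what tg3_mdio_init() below
 * plugs into the mii_bus structure, so the generic phylib core ends up
 * funnelling all of its MDIO traffic through tg3_readphy()/tg3_writephy()
 * under tp->lock.  Sketch of the wiring (repeated in tg3_mdio_init()):
 *
 *	tp->mdio_bus->read  = &tg3_mdio_read;
 *	tp->mdio_bus->write = &tg3_mdio_write;
 *	tp->mdio_bus->reset = &tg3_mdio_reset;
 */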
1209 static void tg3_mdio_config_5785(struct tg3 *tp)
1212 struct phy_device *phydev;
1214 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1215 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1216 case PHY_ID_BCM50610:
1217 case PHY_ID_BCM50610M:
1218 val = MAC_PHYCFG2_50610_LED_MODES;
1220 case PHY_ID_BCMAC131:
1221 val = MAC_PHYCFG2_AC131_LED_MODES;
1223 case PHY_ID_RTL8211C:
1224 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1226 case PHY_ID_RTL8201E:
1227 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1233 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1234 tw32(MAC_PHYCFG2, val);
1236 val = tr32(MAC_PHYCFG1);
1237 val &= ~(MAC_PHYCFG1_RGMII_INT |
1238 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1239 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1240 tw32(MAC_PHYCFG1, val);
1245 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1246 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1247 MAC_PHYCFG2_FMODE_MASK_MASK |
1248 MAC_PHYCFG2_GMODE_MASK_MASK |
1249 MAC_PHYCFG2_ACT_MASK_MASK |
1250 MAC_PHYCFG2_QUAL_MASK_MASK |
1251 MAC_PHYCFG2_INBAND_ENABLE;
1253 tw32(MAC_PHYCFG2, val);
1255 val = tr32(MAC_PHYCFG1);
1256 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1257 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1258 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1259 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1260 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1261 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1262 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1264 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1265 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1266 tw32(MAC_PHYCFG1, val);
1268 val = tr32(MAC_EXT_RGMII_MODE);
1269 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1270 MAC_RGMII_MODE_RX_QUALITY |
1271 MAC_RGMII_MODE_RX_ACTIVITY |
1272 MAC_RGMII_MODE_RX_ENG_DET |
1273 MAC_RGMII_MODE_TX_ENABLE |
1274 MAC_RGMII_MODE_TX_LOWPWR |
1275 MAC_RGMII_MODE_TX_RESET);
1276 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1277 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1278 val |= MAC_RGMII_MODE_RX_INT_B |
1279 MAC_RGMII_MODE_RX_QUALITY |
1280 MAC_RGMII_MODE_RX_ACTIVITY |
1281 MAC_RGMII_MODE_RX_ENG_DET;
1282 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1283 val |= MAC_RGMII_MODE_TX_ENABLE |
1284 MAC_RGMII_MODE_TX_LOWPWR |
1285 MAC_RGMII_MODE_TX_RESET;
1287 tw32(MAC_EXT_RGMII_MODE, val);
1290 static void tg3_mdio_start(struct tg3 *tp)
1292 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1293 tw32_f(MAC_MI_MODE, tp->mi_mode);
1296 if (tg3_flag(tp, MDIOBUS_INITED) &&
1297 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1298 tg3_mdio_config_5785(tp);
1301 static int tg3_mdio_init(struct tg3 *tp)
1305 struct phy_device *phydev;
1307 if (tg3_flag(tp, 5717_PLUS)) {
1310 tp->phy_addr = tp->pci_fn + 1;
1312 if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
1313 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1315 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1316 TG3_CPMU_PHY_STRAP_IS_SERDES;
1320 tp->phy_addr = TG3_PHY_MII_ADDR;
1324 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1327 tp->mdio_bus = mdiobus_alloc();
1328 if (tp->mdio_bus == NULL)
1331 tp->mdio_bus->name = "tg3 mdio bus";
1332 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1333 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1334 tp->mdio_bus->priv = tp;
1335 tp->mdio_bus->parent = &tp->pdev->dev;
1336 tp->mdio_bus->read = &tg3_mdio_read;
1337 tp->mdio_bus->write = &tg3_mdio_write;
1338 tp->mdio_bus->reset = &tg3_mdio_reset;
1339 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1340 tp->mdio_bus->irq = &tp->mdio_irq[0];
1342 for (i = 0; i < PHY_MAX_ADDR; i++)
1343 tp->mdio_bus->irq[i] = PHY_POLL;
	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state.
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);
1353 i = mdiobus_register(tp->mdio_bus);
1355 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1356 mdiobus_free(tp->mdio_bus);
1360 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1362 if (!phydev || !phydev->drv) {
1363 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1364 mdiobus_unregister(tp->mdio_bus);
1365 mdiobus_free(tp->mdio_bus);
1369 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1370 case PHY_ID_BCM57780:
1371 phydev->interface = PHY_INTERFACE_MODE_GMII;
1372 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1374 case PHY_ID_BCM50610:
1375 case PHY_ID_BCM50610M:
1376 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1377 PHY_BRCM_RX_REFCLK_UNUSED |
1378 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1379 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1380 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1381 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1382 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1383 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1384 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1385 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1387 case PHY_ID_RTL8211C:
1388 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1390 case PHY_ID_RTL8201E:
1391 case PHY_ID_BCMAC131:
1392 phydev->interface = PHY_INTERFACE_MODE_MII;
1393 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1394 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1398 tg3_flag_set(tp, MDIOBUS_INITED);
1400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1401 tg3_mdio_config_5785(tp);
1406 static void tg3_mdio_fini(struct tg3 *tp)
1408 if (tg3_flag(tp, MDIOBUS_INITED)) {
1409 tg3_flag_clear(tp, MDIOBUS_INITED);
1410 mdiobus_unregister(tp->mdio_bus);
1411 mdiobus_free(tp->mdio_bus);
1415 /* tp->lock is held. */
1416 static inline void tg3_generate_fw_event(struct tg3 *tp)
1420 val = tr32(GRC_RX_CPU_EVENT);
1421 val |= GRC_RX_CPU_DRIVER_EVENT;
1422 tw32_f(GRC_RX_CPU_EVENT, val);
1424 tp->last_event_jiffies = jiffies;
1427 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1429 /* tp->lock is held. */
1430 static void tg3_wait_for_event_ack(struct tg3 *tp)
1433 unsigned int delay_cnt;
1436 /* If enough time has passed, no wait is necessary. */
1437 time_remain = (long)(tp->last_event_jiffies + 1 +
1438 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1440 if (time_remain < 0)
1443 /* Check if we can shorten the wait time. */
1444 delay_cnt = jiffies_to_usecs(time_remain);
1445 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1446 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1447 delay_cnt = (delay_cnt >> 3) + 1;
1449 for (i = 0; i < delay_cnt; i++) {
1450 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1456 /* tp->lock is held. */
1457 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
	if (!tg3_readphy(tp, MII_BMCR, &reg))
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1491 /* tp->lock is held. */
1492 static void tg3_ump_link_report(struct tg3 *tp)
1496 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1499 tg3_phy_gather_ump_data(tp, data);
1501 tg3_wait_for_event_ack(tp);
1503 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1504 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1505 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1506 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1507 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1508 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1510 tg3_generate_fw_event(tp);
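/* Illustrative note: the sequence above is the general shape of every
 * driver-to-ASF-firmware command: wait for the previous event to be
 * acknowledged, write a FWCMD_* opcode (plus optional payload) into the NIC
 * SRAM command mailbox, then ring the RX CPU doorbell.  tg3_stop_fw() below
 * uses the same three steps with no payload:
 *
 *	tg3_wait_for_event_ack(tp);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 *	tg3_generate_fw_event(tp);
 */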
1513 /* tp->lock is held. */
1514 static void tg3_stop_fw(struct tg3 *tp)
1516 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1517 /* Wait for RX cpu to ACK the previous event. */
1518 tg3_wait_for_event_ack(tp);
1520 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1522 tg3_generate_fw_event(tp);
1524 /* Wait for RX cpu to ACK this event. */
1525 tg3_wait_for_event_ack(tp);
1529 /* tp->lock is held. */
1530 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1532 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1533 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1535 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1537 case RESET_KIND_INIT:
1538 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1542 case RESET_KIND_SHUTDOWN:
1543 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1547 case RESET_KIND_SUSPEND:
1548 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1557 if (kind == RESET_KIND_INIT ||
1558 kind == RESET_KIND_SUSPEND)
1559 tg3_ape_driver_state_change(tp, kind);
1562 /* tp->lock is held. */
1563 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1565 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1567 case RESET_KIND_INIT:
1568 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1569 DRV_STATE_START_DONE);
1572 case RESET_KIND_SHUTDOWN:
1573 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1574 DRV_STATE_UNLOAD_DONE);
1582 if (kind == RESET_KIND_SHUTDOWN)
1583 tg3_ape_driver_state_change(tp, kind);
1586 /* tp->lock is held. */
1587 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1589 if (tg3_flag(tp, ENABLE_ASF)) {
1591 case RESET_KIND_INIT:
1592 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1596 case RESET_KIND_SHUTDOWN:
1597 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1601 case RESET_KIND_SUSPEND:
1602 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1612 static int tg3_poll_fw(struct tg3 *tp)
1617 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1618 /* Wait up to 20ms for init done. */
1619 for (i = 0; i < 200; i++) {
1620 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1627 /* Wait for firmware initialization to complete. */
1628 for (i = 0; i < 100000; i++) {
1629 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1630 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
1640 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1641 tg3_flag_set(tp, NO_FWARE_REPORTED);
1643 netdev_info(tp->dev, "No firmware running\n");
1646 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
1656 static void tg3_link_report(struct tg3 *tp)
1658 if (!netif_carrier_ok(tp->dev)) {
1659 netif_info(tp, link, tp->dev, "Link is down\n");
1660 tg3_ump_link_report(tp);
1661 } else if (netif_msg_link(tp)) {
1662 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1663 (tp->link_config.active_speed == SPEED_1000 ?
1665 (tp->link_config.active_speed == SPEED_100 ?
1667 (tp->link_config.active_duplex == DUPLEX_FULL ?
1670 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1671 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1673 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1676 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1677 netdev_info(tp->dev, "EEE is %s\n",
1678 tp->setlpicnt ? "enabled" : "disabled");
1680 tg3_ump_link_report(tp);
1684 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1688 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1689 miireg = ADVERTISE_1000XPAUSE;
1690 else if (flow_ctrl & FLOW_CTRL_TX)
1691 miireg = ADVERTISE_1000XPSE_ASYM;
1692 else if (flow_ctrl & FLOW_CTRL_RX)
1693 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1700 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1704 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1705 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1706 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1707 if (lcladv & ADVERTISE_1000XPAUSE)
1709 if (rmtadv & ADVERTISE_1000XPAUSE)
1716 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1720 u32 old_rx_mode = tp->rx_mode;
1721 u32 old_tx_mode = tp->tx_mode;
1723 if (tg3_flag(tp, USE_PHYLIB))
1724 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1726 autoneg = tp->link_config.autoneg;
1728 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1729 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1730 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1732 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1734 flowctrl = tp->link_config.flowctrl;
1736 tp->link_config.active_flowctrl = flowctrl;
1738 if (flowctrl & FLOW_CTRL_RX)
1739 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1741 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1743 if (old_rx_mode != tp->rx_mode)
1744 tw32_f(MAC_RX_MODE, tp->rx_mode);
1746 if (flowctrl & FLOW_CTRL_TX)
1747 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1749 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1751 if (old_tx_mode != tp->tx_mode)
1752 tw32_f(MAC_TX_MODE, tp->tx_mode);
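/* Worked example (illustrative): if we advertise symmetric pause
 * (ADVERTISE_1000XPAUSE) and the link partner advertises both PAUSE and
 * ASYM_PAUSE, tg3_resolve_flowctrl_1000X() returns
 * FLOW_CTRL_TX | FLOW_CTRL_RX, so the function above ends up setting both
 * RX_MODE_FLOW_CTRL_ENABLE and TX_MODE_FLOW_CTRL_ENABLE.
 */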
1755 static void tg3_adjust_link(struct net_device *dev)
1757 u8 oldflowctrl, linkmesg = 0;
1758 u32 mac_mode, lcl_adv, rmt_adv;
1759 struct tg3 *tp = netdev_priv(dev);
1760 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1762 spin_lock_bh(&tp->lock);
1764 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1765 MAC_MODE_HALF_DUPLEX);
1767 oldflowctrl = tp->link_config.active_flowctrl;
1773 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1774 mac_mode |= MAC_MODE_PORT_MODE_MII;
1775 else if (phydev->speed == SPEED_1000 ||
1776 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1777 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1779 mac_mode |= MAC_MODE_PORT_MODE_MII;
1781 if (phydev->duplex == DUPLEX_HALF)
1782 mac_mode |= MAC_MODE_HALF_DUPLEX;
1784 lcl_adv = mii_advertise_flowctrl(
1785 tp->link_config.flowctrl);
1788 rmt_adv = LPA_PAUSE_CAP;
1789 if (phydev->asym_pause)
1790 rmt_adv |= LPA_PAUSE_ASYM;
1793 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1795 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1797 if (mac_mode != tp->mac_mode) {
1798 tp->mac_mode = mac_mode;
1799 tw32_f(MAC_MODE, tp->mac_mode);
1803 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1804 if (phydev->speed == SPEED_10)
1806 MAC_MI_STAT_10MBPS_MODE |
1807 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1809 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1812 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1813 tw32(MAC_TX_LENGTHS,
1814 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1815 (6 << TX_LENGTHS_IPG_SHIFT) |
1816 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1818 tw32(MAC_TX_LENGTHS,
1819 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1820 (6 << TX_LENGTHS_IPG_SHIFT) |
1821 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1823 if (phydev->link != tp->old_link ||
1824 phydev->speed != tp->link_config.active_speed ||
1825 phydev->duplex != tp->link_config.active_duplex ||
1826 oldflowctrl != tp->link_config.active_flowctrl)
1829 tp->old_link = phydev->link;
1830 tp->link_config.active_speed = phydev->speed;
1831 tp->link_config.active_duplex = phydev->duplex;
1833 spin_unlock_bh(&tp->lock);
1836 tg3_link_report(tp);
1839 static int tg3_phy_init(struct tg3 *tp)
1841 struct phy_device *phydev;
1843 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1846 /* Bring the PHY back to a known state. */
1849 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1851 /* Attach the MAC to the PHY. */
1852 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1853 phydev->dev_flags, phydev->interface);
1854 if (IS_ERR(phydev)) {
1855 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1856 return PTR_ERR(phydev);
1859 /* Mask with MAC supported features. */
1860 switch (phydev->interface) {
1861 case PHY_INTERFACE_MODE_GMII:
1862 case PHY_INTERFACE_MODE_RGMII:
1863 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1864 phydev->supported &= (PHY_GBIT_FEATURES |
1866 SUPPORTED_Asym_Pause);
1870 case PHY_INTERFACE_MODE_MII:
1871 phydev->supported &= (PHY_BASIC_FEATURES |
1873 SUPPORTED_Asym_Pause);
1876 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1880 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1882 phydev->advertising = phydev->supported;
1887 static void tg3_phy_start(struct tg3 *tp)
1889 struct phy_device *phydev;
1891 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1894 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1896 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1897 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1898 phydev->speed = tp->link_config.speed;
1899 phydev->duplex = tp->link_config.duplex;
1900 phydev->autoneg = tp->link_config.autoneg;
1901 phydev->advertising = tp->link_config.advertising;
1906 phy_start_aneg(phydev);
1909 static void tg3_phy_stop(struct tg3 *tp)
1911 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1914 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1917 static void tg3_phy_fini(struct tg3 *tp)
1919 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1920 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1921 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1925 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1930 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1933 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1934 /* Cannot do read-modify-write on 5401 */
1935 err = tg3_phy_auxctl_write(tp,
1936 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1937 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1942 err = tg3_phy_auxctl_read(tp,
1943 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1947 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1948 err = tg3_phy_auxctl_write(tp,
1949 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1955 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1959 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1962 tg3_writephy(tp, MII_TG3_FET_TEST,
1963 phytest | MII_TG3_FET_SHADOW_EN);
1964 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1966 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1968 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1969 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1971 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1975 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1979 if (!tg3_flag(tp, 5705_PLUS) ||
1980 (tg3_flag(tp, 5717_PLUS) &&
1981 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1984 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1985 tg3_phy_fet_toggle_apd(tp, enable);
1989 reg = MII_TG3_MISC_SHDW_WREN |
1990 MII_TG3_MISC_SHDW_SCR5_SEL |
1991 MII_TG3_MISC_SHDW_SCR5_LPED |
1992 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1993 MII_TG3_MISC_SHDW_SCR5_SDTL |
1994 MII_TG3_MISC_SHDW_SCR5_C125OE;
1995 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1996 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1998 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2001 reg = MII_TG3_MISC_SHDW_WREN |
2002 MII_TG3_MISC_SHDW_APD_SEL |
2003 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2005 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2007 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2010 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2014 if (!tg3_flag(tp, 5705_PLUS) ||
2015 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2018 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2021 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2022 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2024 tg3_writephy(tp, MII_TG3_FET_TEST,
2025 ephy | MII_TG3_FET_SHADOW_EN);
2026 if (!tg3_readphy(tp, reg, &phy)) {
2028 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2030 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2031 tg3_writephy(tp, reg, phy);
2033 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2038 ret = tg3_phy_auxctl_read(tp,
2039 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2042 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2044 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2045 tg3_phy_auxctl_write(tp,
2046 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2051 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2056 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2059 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2061 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2062 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2065 static void tg3_phy_apply_otp(struct tg3 *tp)
2074 if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2077 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2078 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2079 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2081 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2082 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2083 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2085 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2086 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2087 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2089 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2090 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2092 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2093 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2095 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2096 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2097 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2099 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2102 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2106 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2111 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2112 current_link_up == 1 &&
2113 tp->link_config.active_duplex == DUPLEX_FULL &&
2114 (tp->link_config.active_speed == SPEED_100 ||
2115 tp->link_config.active_speed == SPEED_1000)) {
2118 if (tp->link_config.active_speed == SPEED_1000)
2119 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2121 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2123 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2125 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2126 TG3_CL45_D7_EEERES_STAT, &val);
2128 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2129 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2133 if (!tp->setlpicnt) {
2134 if (current_link_up == 1 &&
2135 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2136 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2137 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2140 val = tr32(TG3_CPMU_EEE_MODE);
2141 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2145 static void tg3_phy_eee_enable(struct tg3 *tp)
2149 if (tp->link_config.active_speed == SPEED_1000 &&
2150 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2151 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2152 tg3_flag(tp, 57765_CLASS)) &&
2153 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2154 val = MII_TG3_DSP_TAP26_ALNOKO |
2155 MII_TG3_DSP_TAP26_RMRXSTO;
2156 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2157 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2160 val = tr32(TG3_CPMU_EEE_MODE);
2161 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2164 static int tg3_wait_macro_done(struct tg3 *tp)
2171 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2172 if ((tmp32 & 0x1000) == 0)
2182 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2184 static const u32 test_pat[4][6] = {
2185 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2186 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2187 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2188 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2192 for (chan = 0; chan < 4; chan++) {
2195 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2196 (chan * 0x2000) | 0x0200);
2197 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2199 for (i = 0; i < 6; i++)
2200 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2203 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2204 if (tg3_wait_macro_done(tp)) {
2209 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2210 (chan * 0x2000) | 0x0200);
2211 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2212 if (tg3_wait_macro_done(tp)) {
2217 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2218 if (tg3_wait_macro_done(tp)) {
2223 for (i = 0; i < 6; i += 2) {
2226 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2227 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2228 tg3_wait_macro_done(tp)) {
2234 if (low != test_pat[chan][i] ||
2235 high != test_pat[chan][i+1]) {
2236 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2237 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2238 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2248 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2252 for (chan = 0; chan < 4; chan++) {
2255 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2256 (chan * 0x2000) | 0x0200);
2257 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2258 for (i = 0; i < 6; i++)
2259 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2260 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2261 if (tg3_wait_macro_done(tp))
2268 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2270 u32 reg32, phy9_orig;
2271 int retries, do_phy_reset, err;
2277 err = tg3_bmcr_reset(tp);
2283 /* Disable transmitter and interrupt. */
	if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2288 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2290 /* Set full-duplex, 1000 mbps. */
2291 tg3_writephy(tp, MII_BMCR,
2292 BMCR_FULLDPLX | BMCR_SPEED1000);
2294 /* Set to master mode. */
2295 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2298 tg3_writephy(tp, MII_CTRL1000,
2299 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2301 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2305 /* Block the PHY control access. */
2306 tg3_phydsp_write(tp, 0x8005, 0x0800);
2308 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2311 } while (--retries);
2313 err = tg3_phy_reset_chanpat(tp);
2317 tg3_phydsp_write(tp, 0x8005, 0x0000);
2319 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2320 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2322 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2324 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2326 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2328 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2335 /* This will reset the tigon3 PHY unconditionally; callers use it
2336  * whenever the link needs to be renegotiated from scratch.
2338 static int tg3_phy_reset(struct tg3 *tp)
2343 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2344 val = tr32(GRC_MISC_CFG);
2345 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2348 err = tg3_readphy(tp, MII_BMSR, &val);
2349 err |= tg3_readphy(tp, MII_BMSR, &val);
2353 if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2354 netif_carrier_off(tp->dev);
2355 tg3_link_report(tp);
2358 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2359 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2360 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2361 err = tg3_phy_reset_5703_4_5(tp);
2368 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2369 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2370 cpmuctrl = tr32(TG3_CPMU_CTRL);
2371 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2373 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2376 err = tg3_bmcr_reset(tp);
2380 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2381 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2382 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2384 tw32(TG3_CPMU_CTRL, cpmuctrl);
2387 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2388 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2389 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2390 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2391 CPMU_LSPD_1000MB_MACCLK_12_5) {
2392 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2394 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2398 if (tg3_flag(tp, 5717_PLUS) &&
2399 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2402 tg3_phy_apply_otp(tp);
2404 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2405 tg3_phy_toggle_apd(tp, true);
2407 tg3_phy_toggle_apd(tp, false);
2410 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2411 !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2412 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2413 tg3_phydsp_write(tp, 0x000a, 0x0323);
2414 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2417 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2418 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2422 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2423 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2424 tg3_phydsp_write(tp, 0x000a, 0x310b);
2425 tg3_phydsp_write(tp, 0x201f, 0x9506);
2426 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2427 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2429 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2430 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2431 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2432 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2433 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2434 tg3_writephy(tp, MII_TG3_TEST1,
2435 MII_TG3_TEST1_TRIM_EN | 0x4);
2437 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2439 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2443 /* Set Extended packet length bit (bit 14) on all chips that */
2444 /* support jumbo frames */
2445 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2446 /* Cannot do read-modify-write on 5401 */
2447 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2448 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2449 /* Set bit 14 with read-modify-write to preserve other bits */
2450 err = tg3_phy_auxctl_read(tp,
2451 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2453 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2454 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2457 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2458 * jumbo frame transmission.
2460 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2461 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2462 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2463 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2466 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2467 /* adjust output voltage */
2468 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2471 tg3_phy_toggle_automdix(tp, 1);
2472 tg3_phy_set_wirespeed(tp);
2476 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2477 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2478 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2479 TG3_GPIO_MSG_NEED_VAUX)
2480 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2481 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2482 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2483 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2484 (TG3_GPIO_MSG_DRVR_PRES << 12))
2486 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2487 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2488 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2489 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2490 (TG3_GPIO_MSG_NEED_VAUX << 12))
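/* The power-status word reserves a 4-bit nibble per PCI function,
 * starting at TG3_APE_GPIO_MSG_SHIFT.  Within each nibble, bit 0
 * (DRVR_PRES) means "a driver is still attached to this function" and
 * bit 1 (NEED_VAUX) means "this function wants auxiliary power" (WOL,
 * ASF or APE firmware).  The ALL_* masks above OR the same bit from all
 * four nibbles, so a single test covers every function on the device.
 */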
2492 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2496 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2497 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2498 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2500 status = tr32(TG3_CPMU_DRV_STATUS);
2502 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2503 status &= ~(TG3_GPIO_MSG_MASK << shift);
2504 status |= (newstat << shift);
2506 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2507 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2508 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2510 tw32(TG3_CPMU_DRV_STATUS, status);
2512 return status >> TG3_APE_GPIO_MSG_SHIFT;
2515 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2517 if (!tg3_flag(tp, IS_NIC))
2520 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2521 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2522 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2523 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2526 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2528 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2529 TG3_GRC_LCLCTL_PWRSW_DELAY);
2531 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2533 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534 TG3_GRC_LCLCTL_PWRSW_DELAY);
2540 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2544 if (!tg3_flag(tp, IS_NIC) ||
2545 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2546 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2549 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2551 tw32_wait_f(GRC_LOCAL_CTRL,
2552 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2553 TG3_GRC_LCLCTL_PWRSW_DELAY);
2555 tw32_wait_f(GRC_LOCAL_CTRL,
2557 TG3_GRC_LCLCTL_PWRSW_DELAY);
2559 tw32_wait_f(GRC_LOCAL_CTRL,
2560 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2561 TG3_GRC_LCLCTL_PWRSW_DELAY);
2564 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2566 if (!tg3_flag(tp, IS_NIC))
2569 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2570 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2571 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2572 (GRC_LCLCTRL_GPIO_OE0 |
2573 GRC_LCLCTRL_GPIO_OE1 |
2574 GRC_LCLCTRL_GPIO_OE2 |
2575 GRC_LCLCTRL_GPIO_OUTPUT0 |
2576 GRC_LCLCTRL_GPIO_OUTPUT1),
2577 TG3_GRC_LCLCTL_PWRSW_DELAY);
2578 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2579 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2580 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2581 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2582 GRC_LCLCTRL_GPIO_OE1 |
2583 GRC_LCLCTRL_GPIO_OE2 |
2584 GRC_LCLCTRL_GPIO_OUTPUT0 |
2585 GRC_LCLCTRL_GPIO_OUTPUT1 |
2587 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2588 TG3_GRC_LCLCTL_PWRSW_DELAY);
2590 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2591 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2592 TG3_GRC_LCLCTL_PWRSW_DELAY);
2594 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2595 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2596 TG3_GRC_LCLCTL_PWRSW_DELAY);
2599 u32 grc_local_ctrl = 0;
2601 /* Workaround to prevent overdrawing Amps. */
2602 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2603 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2604 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2606 TG3_GRC_LCLCTL_PWRSW_DELAY);
2609 /* On 5753 and variants, GPIO2 cannot be used. */
2610 no_gpio2 = tp->nic_sram_data_cfg &
2611 NIC_SRAM_DATA_CFG_NO_GPIO2;
2613 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2614 GRC_LCLCTRL_GPIO_OE1 |
2615 GRC_LCLCTRL_GPIO_OE2 |
2616 GRC_LCLCTRL_GPIO_OUTPUT1 |
2617 GRC_LCLCTRL_GPIO_OUTPUT2;
2619 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2620 GRC_LCLCTRL_GPIO_OUTPUT2);
2622 tw32_wait_f(GRC_LOCAL_CTRL,
2623 tp->grc_local_ctrl | grc_local_ctrl,
2624 TG3_GRC_LCLCTL_PWRSW_DELAY);
2626 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2628 tw32_wait_f(GRC_LOCAL_CTRL,
2629 tp->grc_local_ctrl | grc_local_ctrl,
2630 TG3_GRC_LCLCTL_PWRSW_DELAY);
2633 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2634 tw32_wait_f(GRC_LOCAL_CTRL,
2635 tp->grc_local_ctrl | grc_local_ctrl,
2636 TG3_GRC_LCLCTL_PWRSW_DELAY);
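/* Aux power handling for the 5717/5719/5720 family.  The calling
 * function drops its own DRVR_PRES claim and advertises NEED_VAUX only
 * if ASF/APE firmware is running or WOL is armed.  If any other
 * function still has a driver present the power source is left alone
 * (that driver keeps managing it); otherwise the caller switches the
 * chip to Vaux if anybody still needs it, or lets it run from Vmain.
 * The whole exchange is serialized with the APE GPIO lock.
 */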
2641 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2645 /* Serialize power state transitions */
2646 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2649 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2650 msg = TG3_GPIO_MSG_NEED_VAUX;
2652 msg = tg3_set_function_status(tp, msg);
2654 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2657 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2658 tg3_pwrsrc_switch_to_vaux(tp);
2660 tg3_pwrsrc_die_with_vmain(tp);
2663 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2666 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2668 bool need_vaux = false;
2670 /* The GPIOs do something completely different on 57765. */
2671 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2674 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2675 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2676 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2677 tg3_frob_aux_power_5717(tp, include_wol ?
2678 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2682 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2683 struct net_device *dev_peer;
2685 dev_peer = pci_get_drvdata(tp->pdev_peer);
2687 /* remove_one() may have been run on the peer. */
2689 struct tg3 *tp_peer = netdev_priv(dev_peer);
2691 if (tg3_flag(tp_peer, INIT_COMPLETE))
2694 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2695 tg3_flag(tp_peer, ENABLE_ASF))
2700 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2701 tg3_flag(tp, ENABLE_ASF))
2705 tg3_pwrsrc_switch_to_vaux(tp);
2707 tg3_pwrsrc_die_with_vmain(tp);
2710 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2712 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2714 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2715 if (speed != SPEED_10)
2717 } else if (speed == SPEED_10)
2723 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2727 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2728 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2729 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2730 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2733 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2734 tw32(SG_DIG_CTRL, sg_dig_ctrl);
2735 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2740 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2742 val = tr32(GRC_MISC_CFG);
2743 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2746 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2748 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2751 tg3_writephy(tp, MII_ADVERTISE, 0);
2752 tg3_writephy(tp, MII_BMCR,
2753 BMCR_ANENABLE | BMCR_ANRESTART);
2755 tg3_writephy(tp, MII_TG3_FET_TEST,
2756 phytest | MII_TG3_FET_SHADOW_EN);
2757 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2758 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2760 MII_TG3_FET_SHDW_AUXMODE4,
2763 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2766 } else if (do_low_power) {
2767 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2768 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2770 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2771 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2772 MII_TG3_AUXCTL_PCTL_VREG_11V;
2773 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2776 /* The PHY should not be powered down on some chips because
2779 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2780 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2781 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2782 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2785 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2786 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2787 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2788 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2789 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2790 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2793 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
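/* NVRAM access is arbitrated between the driver and the chip's bootcode
 * through NVRAM_SWARB: the driver posts SWARB_REQ_SET1, polls up to 8000
 * times for SWARB_GNT1, and withdraws the request if the grant never
 * arrives.  nvram_lock_cnt lets the lock nest so helpers can take it
 * while a caller already holds it.
 */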
2796 /* tp->lock is held. */
2797 static int tg3_nvram_lock(struct tg3 *tp)
2799 if (tg3_flag(tp, NVRAM)) {
2802 if (tp->nvram_lock_cnt == 0) {
2803 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2804 for (i = 0; i < 8000; i++) {
2805 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2810 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2814 tp->nvram_lock_cnt++;
2819 /* tp->lock is held. */
2820 static void tg3_nvram_unlock(struct tg3 *tp)
2822 if (tg3_flag(tp, NVRAM)) {
2823 if (tp->nvram_lock_cnt > 0)
2824 tp->nvram_lock_cnt--;
2825 if (tp->nvram_lock_cnt == 0)
2826 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2830 /* tp->lock is held. */
2831 static void tg3_enable_nvram_access(struct tg3 *tp)
2833 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2834 u32 nvaccess = tr32(NVRAM_ACCESS);
2836 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2840 /* tp->lock is held. */
2841 static void tg3_disable_nvram_access(struct tg3 *tp)
2843 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2844 u32 nvaccess = tr32(NVRAM_ACCESS);
2846 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2850 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2851 u32 offset, u32 *val)
2856 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2859 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2860 EEPROM_ADDR_DEVID_MASK |
2862 tw32(GRC_EEPROM_ADDR,
2864 (0 << EEPROM_ADDR_DEVID_SHIFT) |
2865 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2866 EEPROM_ADDR_ADDR_MASK) |
2867 EEPROM_ADDR_READ | EEPROM_ADDR_START);
2869 for (i = 0; i < 1000; i++) {
2870 tmp = tr32(GRC_EEPROM_ADDR);
2872 if (tmp & EEPROM_ADDR_COMPLETE)
2876 if (!(tmp & EEPROM_ADDR_COMPLETE))
2879 tmp = tr32(GRC_EEPROM_DATA);
2882 * The data will always be opposite the native endian
2883 * format. Perform a blind byteswap to compensate.
2890 #define NVRAM_CMD_TIMEOUT 10000
2892 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2896 tw32(NVRAM_CMD, nvram_cmd);
2897 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2899 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2905 if (i == NVRAM_CMD_TIMEOUT)
2911 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2913 if (tg3_flag(tp, NVRAM) &&
2914 tg3_flag(tp, NVRAM_BUFFERED) &&
2915 tg3_flag(tp, FLASH) &&
2916 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2917 (tp->nvram_jedecnum == JEDEC_ATMEL))
2919 addr = ((addr / tp->nvram_pagesize) <<
2920 ATMEL_AT45DB0X1B_PAGE_POS) +
2921 (addr % tp->nvram_pagesize);
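/* Worked example of the translation above, assuming the common Atmel
 * AT45DB011-style part with 264-byte pages and ATMEL_AT45DB0X1B_PAGE_POS
 * of 9 (pages addressed on 512-byte boundaries):
 *
 *	linear offset 1000 -> page 1000 / 264 = 3, byte 1000 % 264 = 208
 *	physical address   = (3 << 9) + 208 = 1744
 *
 * tg3_nvram_logical_addr() below performs the inverse mapping.
 */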
2926 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2928 if (tg3_flag(tp, NVRAM) &&
2929 tg3_flag(tp, NVRAM_BUFFERED) &&
2930 tg3_flag(tp, FLASH) &&
2931 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932 (tp->nvram_jedecnum == JEDEC_ATMEL))
2934 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2935 tp->nvram_pagesize) +
2936 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2941 /* NOTE: Data read in from NVRAM is byteswapped according to
2942 * the byteswapping settings for all other register accesses.
2943 * tg3 devices are BE devices, so on a BE machine, the data
2944 * returned will be exactly as it is seen in NVRAM. On a LE
2945 * machine, the 32-bit value will be byteswapped.
2947 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2951 if (!tg3_flag(tp, NVRAM))
2952 return tg3_nvram_read_using_eeprom(tp, offset, val);
2954 offset = tg3_nvram_phys_addr(tp, offset);
2956 if (offset > NVRAM_ADDR_MSK)
2959 ret = tg3_nvram_lock(tp);
2963 tg3_enable_nvram_access(tp);
2965 tw32(NVRAM_ADDR, offset);
2966 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2967 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2970 *val = tr32(NVRAM_RDDATA);
2972 tg3_disable_nvram_access(tp);
2974 tg3_nvram_unlock(tp);
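/* Minimal usage sketch (hypothetical caller, not part of the driver):
 * read one word and keep it in NVRAM byte order regardless of host
 * endianness by going through tg3_nvram_read_be32() below:
 *
 *	__be32 word;
 *
 *	if (!tg3_nvram_read_be32(tp, offset, &word))
 *		memcpy(dst, &word, sizeof(word));
 *
 * A caller that wants the value as a host-order integer would instead
 * use tg3_nvram_read() and interpret the result per the NOTE above.
 */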
2979 /* Ensures NVRAM data is in bytestream format. */
2980 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2983 int res = tg3_nvram_read(tp, offset, &v);
2985 *val = cpu_to_be32(v);
2989 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2990 u32 offset, u32 len, u8 *buf)
2995 for (i = 0; i < len; i += 4) {
3001 memcpy(&data, buf + i, 4);
3004 * The SEEPROM interface expects the data to always be opposite
3005 * the native endian format. We accomplish this by reversing
3006 * all the operations that would have been performed on the
3007 * data from a call to tg3_nvram_read_be32().
3009 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3011 val = tr32(GRC_EEPROM_ADDR);
3012 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3014 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3016 tw32(GRC_EEPROM_ADDR, val |
3017 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3018 (addr & EEPROM_ADDR_ADDR_MASK) |
3022 for (j = 0; j < 1000; j++) {
3023 val = tr32(GRC_EEPROM_ADDR);
3025 if (val & EEPROM_ADDR_COMPLETE)
3029 if (!(val & EEPROM_ADDR_COMPLETE)) {
3038 /* offset and length are dword aligned */
3039 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3043 u32 pagesize = tp->nvram_pagesize;
3044 u32 pagemask = pagesize - 1;
3048 tmp = kmalloc(pagesize, GFP_KERNEL);
3054 u32 phy_addr, page_off, size;
3056 phy_addr = offset & ~pagemask;
3058 for (j = 0; j < pagesize; j += 4) {
3059 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3060 (__be32 *) (tmp + j));
3067 page_off = offset & pagemask;
3074 memcpy(tmp + page_off, buf, size);
3076 offset = offset + (pagesize - page_off);
3078 tg3_enable_nvram_access(tp);
3081 * Before we can erase the flash page, we need
3082 * to issue a special "write enable" command.
3084 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3086 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3089 /* Erase the target page */
3090 tw32(NVRAM_ADDR, phy_addr);
3092 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3093 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3095 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3098 /* Issue another write enable to start the write. */
3099 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3101 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3104 for (j = 0; j < pagesize; j += 4) {
3107 data = *((__be32 *) (tmp + j));
3109 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3111 tw32(NVRAM_ADDR, phy_addr + j);
3113 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3117 nvram_cmd |= NVRAM_CMD_FIRST;
3118 else if (j == (pagesize - 4))
3119 nvram_cmd |= NVRAM_CMD_LAST;
3121 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3129 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3130 tg3_nvram_exec_cmd(tp, nvram_cmd);
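/* Summary of the unbuffered path above: because this flash can only be
 * erased a page at a time, each affected page is read back in full,
 * merged with the caller's data, erased (after a WREN write-enable),
 * and then reprogrammed one dword at a time with NVRAM_CMD_FIRST/LAST
 * marking the page boundaries; a final WRDI drops write-enable.  The
 * buffered variant below skips the read/erase step and simply streams
 * dwords, tagging page boundaries with FIRST/LAST as it goes.
 */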
3137 /* offset and length are dword aligned */
3138 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3143 for (i = 0; i < len; i += 4, offset += 4) {
3144 u32 page_off, phy_addr, nvram_cmd;
3147 memcpy(&data, buf + i, 4);
3148 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3150 page_off = offset % tp->nvram_pagesize;
3152 phy_addr = tg3_nvram_phys_addr(tp, offset);
3154 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3156 if (page_off == 0 || i == 0)
3157 nvram_cmd |= NVRAM_CMD_FIRST;
3158 if (page_off == (tp->nvram_pagesize - 4))
3159 nvram_cmd |= NVRAM_CMD_LAST;
3162 nvram_cmd |= NVRAM_CMD_LAST;
3164 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3165 !tg3_flag(tp, FLASH) ||
3166 !tg3_flag(tp, 57765_PLUS))
3167 tw32(NVRAM_ADDR, phy_addr);
3169 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3170 !tg3_flag(tp, 5755_PLUS) &&
3171 (tp->nvram_jedecnum == JEDEC_ST) &&
3172 (nvram_cmd & NVRAM_CMD_FIRST)) {
3175 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3176 ret = tg3_nvram_exec_cmd(tp, cmd);
3180 if (!tg3_flag(tp, FLASH)) {
3181 /* We always do complete word writes to eeprom. */
3182 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3185 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3192 /* offset and length are dword aligned */
3193 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3197 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3198 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3199 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3203 if (!tg3_flag(tp, NVRAM)) {
3204 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3208 ret = tg3_nvram_lock(tp);
3212 tg3_enable_nvram_access(tp);
3213 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3214 tw32(NVRAM_WRITE1, 0x406);
3216 grc_mode = tr32(GRC_MODE);
3217 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3219 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3220 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3223 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3227 grc_mode = tr32(GRC_MODE);
3228 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3230 tg3_disable_nvram_access(tp);
3231 tg3_nvram_unlock(tp);
3234 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3235 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3242 #define RX_CPU_SCRATCH_BASE 0x30000
3243 #define RX_CPU_SCRATCH_SIZE 0x04000
3244 #define TX_CPU_SCRATCH_BASE 0x34000
3245 #define TX_CPU_SCRATCH_SIZE 0x04000
3247 /* tp->lock is held. */
3248 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3252 BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3255 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3257 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3260 if (offset == RX_CPU_BASE) {
3261 for (i = 0; i < 10000; i++) {
3262 tw32(offset + CPU_STATE, 0xffffffff);
3263 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3264 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3268 tw32(offset + CPU_STATE, 0xffffffff);
3269 tw32_f(offset + CPU_MODE, CPU_MODE_HALT);
3272 for (i = 0; i < 10000; i++) {
3273 tw32(offset + CPU_STATE, 0xffffffff);
3274 tw32(offset + CPU_MODE, CPU_MODE_HALT);
3275 if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3281 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3282 __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3286 /* Clear firmware's nvram arbitration. */
3287 if (tg3_flag(tp, NVRAM))
3288 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3293 unsigned int fw_base;
3294 unsigned int fw_len;
3295 const __be32 *fw_data;
3298 /* tp->lock is held. */
3299 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3300 u32 cpu_scratch_base, int cpu_scratch_size,
3301 struct fw_info *info)
3303 int err, lock_err, i;
3304 void (*write_op)(struct tg3 *, u32, u32);
3306 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3308 "%s: Trying to load TX cpu firmware on a 5705-plus chip\n",
3313 if (tg3_flag(tp, 5705_PLUS))
3314 write_op = tg3_write_mem;
3316 write_op = tg3_write_indirect_reg32;
3318 /* It is possible that bootcode is still loading at this point.
3319 * Get the nvram lock first before halting the cpu.
3321 lock_err = tg3_nvram_lock(tp);
3322 err = tg3_halt_cpu(tp, cpu_base);
3324 tg3_nvram_unlock(tp);
3328 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3329 write_op(tp, cpu_scratch_base + i, 0);
3330 tw32(cpu_base + CPU_STATE, 0xffffffff);
3331 tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3332 for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3333 write_op(tp, (cpu_scratch_base +
3334 (info->fw_base & 0xffff) +
3336 be32_to_cpu(info->fw_data[i]));
3344 /* tp->lock is held. */
3345 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3347 struct fw_info info;
3348 const __be32 *fw_data;
3351 fw_data = (void *)tp->fw->data;
3353 /* Firmware blob starts with version numbers, followed by
3354 start address and length. We are setting complete length.
3355 length = end_address_of_bss - start_address_of_text.
3356 Remainder is the blob to be loaded contiguously
3357 from start address. */
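/* Concretely: fw_data[0] is the version word, fw_data[1] the load
 * (start) address, fw_data[2] the declared length (end of bss minus
 * start of text, per the note above), and fw_data[3] onward is the
 * image itself.  info.fw_len is taken from the file size minus the
 * 12-byte header rather than from fw_data[2].
 */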
3359 info.fw_base = be32_to_cpu(fw_data[1]);
3360 info.fw_len = tp->fw->size - 12;
3361 info.fw_data = &fw_data[3];
3363 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3364 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3369 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3370 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3375 /* Now start up only the RX cpu. */
3376 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3377 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3379 for (i = 0; i < 5; i++) {
3380 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3382 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383 tw32(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3384 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3388 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3389 "should be %08x\n", __func__,
3390 tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3393 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3394 tw32_f(RX_CPU_BASE + CPU_MODE, 0x00000000);
3399 /* tp->lock is held. */
3400 static int tg3_load_tso_firmware(struct tg3 *tp)
3402 struct fw_info info;
3403 const __be32 *fw_data;
3404 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3407 if (tg3_flag(tp, HW_TSO_1) ||
3408 tg3_flag(tp, HW_TSO_2) ||
3409 tg3_flag(tp, HW_TSO_3))
3412 fw_data = (void *)tp->fw->data;
3414 /* Firmware blob starts with version numbers, followed by
3415 start address and length. We are setting complete length.
3416 length = end_address_of_bss - start_address_of_text.
3417 Remainder is the blob to be loaded contiguously
3418 from start address. */
3420 info.fw_base = be32_to_cpu(fw_data[1]);
3421 cpu_scratch_size = tp->fw_len;
3422 info.fw_len = tp->fw->size - 12;
3423 info.fw_data = &fw_data[3];
3425 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3426 cpu_base = RX_CPU_BASE;
3427 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3429 cpu_base = TX_CPU_BASE;
3430 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3431 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3434 err = tg3_load_firmware_cpu(tp, cpu_base,
3435 cpu_scratch_base, cpu_scratch_size,
3440 /* Now start up the cpu. */
3441 tw32(cpu_base + CPU_STATE, 0xffffffff);
3442 tw32_f(cpu_base + CPU_PC, info.fw_base);
3444 for (i = 0; i < 5; i++) {
3445 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3447 tw32(cpu_base + CPU_STATE, 0xffffffff);
3448 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3449 tw32_f(cpu_base + CPU_PC, info.fw_base);
3454 "%s fails to set CPU PC, is %08x should be %08x\n",
3455 __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3458 tw32(cpu_base + CPU_STATE, 0xffffffff);
3459 tw32_f(cpu_base + CPU_MODE, 0x00000000);
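/* MAC address programming: the hardware takes the station address as a
 * high word (bytes 0-1) and a low word (bytes 2-5).  The same address is
 * copied into four MAC_ADDR_<n> register pairs (pair 1 can be skipped so
 * that an address claimed by ASF management firmware is preserved), and
 * on 5703/5704 into twelve extended-address slots as well.  The sum of
 * the address bytes, masked to TX_BACKOFF_SEED_MASK, seeds the transmit
 * backoff generator.
 */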
3464 /* tp->lock is held. */
3465 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3467 u32 addr_high, addr_low;
3470 addr_high = ((tp->dev->dev_addr[0] << 8) |
3471 tp->dev->dev_addr[1]);
3472 addr_low = ((tp->dev->dev_addr[2] << 24) |
3473 (tp->dev->dev_addr[3] << 16) |
3474 (tp->dev->dev_addr[4] << 8) |
3475 (tp->dev->dev_addr[5] << 0));
3476 for (i = 0; i < 4; i++) {
3477 if (i == 1 && skip_mac_1)
3479 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3480 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3483 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3484 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3485 for (i = 0; i < 12; i++) {
3486 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3487 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3491 addr_high = (tp->dev->dev_addr[0] +
3492 tp->dev->dev_addr[1] +
3493 tp->dev->dev_addr[2] +
3494 tp->dev->dev_addr[3] +
3495 tp->dev->dev_addr[4] +
3496 tp->dev->dev_addr[5]) &
3497 TX_BACKOFF_SEED_MASK;
3498 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3501 static void tg3_enable_register_access(struct tg3 *tp)
3504 * Make sure register accesses (indirect or otherwise) will function
3507 pci_write_config_dword(tp->pdev,
3508 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3511 static int tg3_power_up(struct tg3 *tp)
3515 tg3_enable_register_access(tp);
3517 err = pci_set_power_state(tp->pdev, PCI_D0);
3519 /* Switch out of Vaux if it is a NIC */
3520 tg3_pwrsrc_switch_to_vmain(tp);
3522 netdev_err(tp->dev, "Transition to D0 failed\n");
3528 static int tg3_setup_phy(struct tg3 *, int);
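/* Rough shape of the suspend/power-off path below: mask PCI interrupts,
 * decide whether the device should stay armed for wakeup (WOL enabled
 * and the PCI core allows it), drop the PHY to a slow low-power
 * advertisement, hand a WOL signature to the firmware mailbox, program
 * the MAC for magic-packet reception, wind the core clocks down, power
 * the PHY off when nothing needs it, and finally rebalance aux power
 * and post the RESET_KIND_SHUTDOWN signature.
 */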
3530 static int tg3_power_down_prepare(struct tg3 *tp)
3533 bool device_should_wake, do_low_power;
3535 tg3_enable_register_access(tp);
3537 /* Restore the CLKREQ setting. */
3538 if (tg3_flag(tp, CLKREQ_BUG)) {
3541 pci_read_config_word(tp->pdev,
3542 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3544 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3545 pci_write_config_word(tp->pdev,
3546 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3550 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3551 tw32(TG3PCI_MISC_HOST_CTRL,
3552 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3554 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3555 tg3_flag(tp, WOL_ENABLE);
3557 if (tg3_flag(tp, USE_PHYLIB)) {
3558 do_low_power = false;
3559 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3560 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3561 struct phy_device *phydev;
3562 u32 phyid, advertising;
3564 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3566 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3568 tp->link_config.speed = phydev->speed;
3569 tp->link_config.duplex = phydev->duplex;
3570 tp->link_config.autoneg = phydev->autoneg;
3571 tp->link_config.advertising = phydev->advertising;
3573 advertising = ADVERTISED_TP |
3575 ADVERTISED_Autoneg |
3576 ADVERTISED_10baseT_Half;
3578 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3579 if (tg3_flag(tp, WOL_SPEED_100MB))
3581 ADVERTISED_100baseT_Half |
3582 ADVERTISED_100baseT_Full |
3583 ADVERTISED_10baseT_Full;
3585 advertising |= ADVERTISED_10baseT_Full;
3588 phydev->advertising = advertising;
3590 phy_start_aneg(phydev);
3592 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3593 if (phyid != PHY_ID_BCMAC131) {
3594 phyid &= PHY_BCM_OUI_MASK;
3595 if (phyid == PHY_BCM_OUI_1 ||
3596 phyid == PHY_BCM_OUI_2 ||
3597 phyid == PHY_BCM_OUI_3)
3598 do_low_power = true;
3602 do_low_power = true;
3604 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3605 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3607 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3608 tg3_setup_phy(tp, 0);
3611 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3614 val = tr32(GRC_VCPU_EXT_CTRL);
3615 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3616 } else if (!tg3_flag(tp, ENABLE_ASF)) {
3620 for (i = 0; i < 200; i++) {
3621 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3622 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3627 if (tg3_flag(tp, WOL_CAP))
3628 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3629 WOL_DRV_STATE_SHUTDOWN |
3633 if (device_should_wake) {
3636 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3638 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3639 tg3_phy_auxctl_write(tp,
3640 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3641 MII_TG3_AUXCTL_PCTL_WOL_EN |
3642 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3643 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3647 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3648 mac_mode = MAC_MODE_PORT_MODE_GMII;
3650 mac_mode = MAC_MODE_PORT_MODE_MII;
3652 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3653 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3655 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3656 SPEED_100 : SPEED_10;
3657 if (tg3_5700_link_polarity(tp, speed))
3658 mac_mode |= MAC_MODE_LINK_POLARITY;
3660 mac_mode &= ~MAC_MODE_LINK_POLARITY;
3663 mac_mode = MAC_MODE_PORT_MODE_TBI;
3666 if (!tg3_flag(tp, 5750_PLUS))
3667 tw32(MAC_LED_CTRL, tp->led_ctrl);
3669 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3670 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3671 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3672 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3674 if (tg3_flag(tp, ENABLE_APE))
3675 mac_mode |= MAC_MODE_APE_TX_EN |
3676 MAC_MODE_APE_RX_EN |
3677 MAC_MODE_TDE_ENABLE;
3679 tw32_f(MAC_MODE, mac_mode);
3682 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3686 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3687 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3688 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3691 base_val = tp->pci_clock_ctrl;
3692 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3693 CLOCK_CTRL_TXCLK_DISABLE);
3695 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3696 CLOCK_CTRL_PWRDOWN_PLL133, 40);
3697 } else if (tg3_flag(tp, 5780_CLASS) ||
3698 tg3_flag(tp, CPMU_PRESENT) ||
3699 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3701 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3702 u32 newbits1, newbits2;
3704 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3705 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3706 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3707 CLOCK_CTRL_TXCLK_DISABLE |
3709 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3710 } else if (tg3_flag(tp, 5705_PLUS)) {
3711 newbits1 = CLOCK_CTRL_625_CORE;
3712 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3714 newbits1 = CLOCK_CTRL_ALTCLK;
3715 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3718 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3721 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3724 if (!tg3_flag(tp, 5705_PLUS)) {
3727 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3728 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3729 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3730 CLOCK_CTRL_TXCLK_DISABLE |
3731 CLOCK_CTRL_44MHZ_CORE);
3733 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3736 tw32_wait_f(TG3PCI_CLOCK_CTRL,
3737 tp->pci_clock_ctrl | newbits3, 40);
3741 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3742 tg3_power_down_phy(tp, do_low_power);
3744 tg3_frob_aux_power(tp, true);
3746 /* Workaround for unstable PLL clock */
3747 if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3748 (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3749 u32 val = tr32(0x7d00);
3751 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3753 if (!tg3_flag(tp, ENABLE_ASF)) {
3756 err = tg3_nvram_lock(tp);
3757 tg3_halt_cpu(tp, RX_CPU_BASE);
3759 tg3_nvram_unlock(tp);
3763 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3768 static void tg3_power_down(struct tg3 *tp)
3770 tg3_power_down_prepare(tp);
3772 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3773 pci_set_power_state(tp->pdev, PCI_D3hot);
3776 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3778 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3779 case MII_TG3_AUX_STAT_10HALF:
3781 *duplex = DUPLEX_HALF;
3784 case MII_TG3_AUX_STAT_10FULL:
3786 *duplex = DUPLEX_FULL;
3789 case MII_TG3_AUX_STAT_100HALF:
3791 *duplex = DUPLEX_HALF;
3794 case MII_TG3_AUX_STAT_100FULL:
3796 *duplex = DUPLEX_FULL;
3799 case MII_TG3_AUX_STAT_1000HALF:
3800 *speed = SPEED_1000;
3801 *duplex = DUPLEX_HALF;
3804 case MII_TG3_AUX_STAT_1000FULL:
3805 *speed = SPEED_1000;
3806 *duplex = DUPLEX_FULL;
3810 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3811 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3813 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3817 *speed = SPEED_UNKNOWN;
3818 *duplex = DUPLEX_UNKNOWN;
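/* Autoneg advertisement setup: tg3_phy_autoneg_cfg() converts the
 * ethtool advertisement mask into MII_ADVERTISE (10/100 plus flow
 * control) and, unless the PHY is 10/100-only, MII_CTRL1000 for gigabit
 * (forcing master mode on 5701 A0/B0 as a workaround).  When the PHY is
 * EEE-capable it also clears the CPMU LPI enable and writes the
 * clause-45 EEE advertisement (MDIO_AN_EEE_ADV) for 100TX/1000T.
 */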
3823 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3828 new_adv = ADVERTISE_CSMA;
3829 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3830 new_adv |= mii_advertise_flowctrl(flowctrl);
3832 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3836 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3837 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3839 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3840 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3841 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3843 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3848 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3851 tw32(TG3_CPMU_EEE_MODE,
3852 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3854 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3859 /* Advertise 100-BaseTX EEE ability */
3860 if (advertise & ADVERTISED_100baseT_Full)
3861 val |= MDIO_AN_EEE_ADV_100TX;
3862 /* Advertise 1000-BaseT EEE ability */
3863 if (advertise & ADVERTISED_1000baseT_Full)
3864 val |= MDIO_AN_EEE_ADV_1000T;
3865 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3869 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3871 case ASIC_REV_57765:
3872 case ASIC_REV_57766:
3874 /* If we advertised any EEE abilities above... */
3876 val = MII_TG3_DSP_TAP26_ALNOKO |
3877 MII_TG3_DSP_TAP26_RMRXSTO |
3878 MII_TG3_DSP_TAP26_OPCSINPT;
3879 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3882 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3883 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3884 MII_TG3_DSP_CH34TP2_HIBW01);
3887 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3896 static void tg3_phy_copper_begin(struct tg3 *tp)
3898 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3899 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3902 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3903 adv = ADVERTISED_10baseT_Half |
3904 ADVERTISED_10baseT_Full;
3905 if (tg3_flag(tp, WOL_SPEED_100MB))
3906 adv |= ADVERTISED_100baseT_Half |
3907 ADVERTISED_100baseT_Full;
3909 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3911 adv = tp->link_config.advertising;
3912 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3913 adv &= ~(ADVERTISED_1000baseT_Half |
3914 ADVERTISED_1000baseT_Full);
3916 fc = tp->link_config.flowctrl;
3919 tg3_phy_autoneg_cfg(tp, adv, fc);
3921 tg3_writephy(tp, MII_BMCR,
3922 BMCR_ANENABLE | BMCR_ANRESTART);
3925 u32 bmcr, orig_bmcr;
3927 tp->link_config.active_speed = tp->link_config.speed;
3928 tp->link_config.active_duplex = tp->link_config.duplex;
3931 switch (tp->link_config.speed) {
3937 bmcr |= BMCR_SPEED100;
3941 bmcr |= BMCR_SPEED1000;
3945 if (tp->link_config.duplex == DUPLEX_FULL)
3946 bmcr |= BMCR_FULLDPLX;
3948 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3949 (bmcr != orig_bmcr)) {
3950 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3951 for (i = 0; i < 1500; i++) {
3955 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3956 tg3_readphy(tp, MII_BMSR, &tmp))
3958 if (!(tmp & BMSR_LSTATUS)) {
3963 tg3_writephy(tp, MII_BMCR, bmcr);
3969 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3973 /* Turn off tap power management. */
3974 /* Set Extended packet length bit */
3975 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3977 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3978 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3979 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3980 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3981 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
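/* The two helpers below let tg3_setup_copper_phy() trust an already
 * completed autonegotiation: tg3_phy_copper_an_config_ok() reads
 * MII_ADVERTISE/MII_CTRL1000 back from the PHY and checks that they
 * still match what we intend to advertise, while
 * tg3_phy_copper_fetch_rmtadv() collects the partner abilities from
 * MII_STAT1000/MII_LPA into tp->link_config.rmt_adv (ethtool form).
 */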
3988 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3990 u32 advmsk, tgtadv, advertising;
3992 advertising = tp->link_config.advertising;
3993 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3995 advmsk = ADVERTISE_ALL;
3996 if (tp->link_config.active_duplex == DUPLEX_FULL) {
3997 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3998 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4001 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4004 if ((*lcladv & advmsk) != tgtadv)
4007 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4010 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4012 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4016 (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4017 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4018 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4019 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4020 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4022 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4025 if (tg3_ctrl != tgtadv)
4032 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4036 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4039 if (tg3_readphy(tp, MII_STAT1000, &val))
4042 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4045 if (tg3_readphy(tp, MII_LPA, rmtadv))
4048 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4049 tp->link_config.rmt_adv = lpeth;
4054 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4056 int current_link_up;
4058 u32 lcl_adv, rmt_adv;
4066 (MAC_STATUS_SYNC_CHANGED |
4067 MAC_STATUS_CFG_CHANGED |
4068 MAC_STATUS_MI_COMPLETION |
4069 MAC_STATUS_LNKSTATE_CHANGED));
4072 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4074 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4078 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4080 /* Some third-party PHYs need to be reset on link going
4083 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4084 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4085 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4086 netif_carrier_ok(tp->dev)) {
4087 tg3_readphy(tp, MII_BMSR, &bmsr);
4088 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4089 !(bmsr & BMSR_LSTATUS))
4095 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4096 tg3_readphy(tp, MII_BMSR, &bmsr);
4097 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4098 !tg3_flag(tp, INIT_COMPLETE))
4101 if (!(bmsr & BMSR_LSTATUS)) {
4102 err = tg3_init_5401phy_dsp(tp);
4106 tg3_readphy(tp, MII_BMSR, &bmsr);
4107 for (i = 0; i < 1000; i++) {
4109 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4110 (bmsr & BMSR_LSTATUS)) {
4116 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4117 TG3_PHY_REV_BCM5401_B0 &&
4118 !(bmsr & BMSR_LSTATUS) &&
4119 tp->link_config.active_speed == SPEED_1000) {
4120 err = tg3_phy_reset(tp);
4122 err = tg3_init_5401phy_dsp(tp);
4127 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4128 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4129 /* 5701 {A0,B0} CRC bug workaround */
4130 tg3_writephy(tp, 0x15, 0x0a75);
4131 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4132 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4133 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4136 /* Clear pending interrupts... */
4137 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4138 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4140 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4141 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4142 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4143 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4145 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4146 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4147 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4148 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4149 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4151 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4154 current_link_up = 0;
4155 current_speed = SPEED_UNKNOWN;
4156 current_duplex = DUPLEX_UNKNOWN;
4157 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4158 tp->link_config.rmt_adv = 0;
4160 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4161 err = tg3_phy_auxctl_read(tp,
4162 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4164 if (!err && !(val & (1 << 10))) {
4165 tg3_phy_auxctl_write(tp,
4166 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4173 for (i = 0; i < 100; i++) {
4174 tg3_readphy(tp, MII_BMSR, &bmsr);
4175 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4176 (bmsr & BMSR_LSTATUS))
4181 if (bmsr & BMSR_LSTATUS) {
4184 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4185 for (i = 0; i < 2000; i++) {
4187 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4192 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4197 for (i = 0; i < 200; i++) {
4198 tg3_readphy(tp, MII_BMCR, &bmcr);
4199 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4201 if (bmcr && bmcr != 0x7fff)
4209 tp->link_config.active_speed = current_speed;
4210 tp->link_config.active_duplex = current_duplex;
4212 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4213 if ((bmcr & BMCR_ANENABLE) &&
4214 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4215 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4216 current_link_up = 1;
4218 if (!(bmcr & BMCR_ANENABLE) &&
4219 tp->link_config.speed == current_speed &&
4220 tp->link_config.duplex == current_duplex &&
4221 tp->link_config.flowctrl ==
4222 tp->link_config.active_flowctrl) {
4223 current_link_up = 1;
4227 if (current_link_up == 1 &&
4228 tp->link_config.active_duplex == DUPLEX_FULL) {
4231 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4232 reg = MII_TG3_FET_GEN_STAT;
4233 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4235 reg = MII_TG3_EXT_STAT;
4236 bit = MII_TG3_EXT_STAT_MDIX;
4239 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4240 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4242 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4247 if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4248 tg3_phy_copper_begin(tp);
4250 tg3_readphy(tp, MII_BMSR, &bmsr);
4251 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4252 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4253 current_link_up = 1;
4256 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4257 if (current_link_up == 1) {
4258 if (tp->link_config.active_speed == SPEED_100 ||
4259 tp->link_config.active_speed == SPEED_10)
4260 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4262 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4263 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4264 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4266 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4268 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4269 if (tp->link_config.active_duplex == DUPLEX_HALF)
4270 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4272 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4273 if (current_link_up == 1 &&
4274 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4275 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4277 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4280 /* ??? Without this setting Netgear GA302T PHY does not
4281 * ??? send/receive packets...
4283 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4284 tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4285 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4286 tw32_f(MAC_MI_MODE, tp->mi_mode);
4290 tw32_f(MAC_MODE, tp->mac_mode);
4293 tg3_phy_eee_adjust(tp, current_link_up);
4295 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4296 /* Polled via timer. */
4297 tw32_f(MAC_EVENT, 0);
4299 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4303 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4304 current_link_up == 1 &&
4305 tp->link_config.active_speed == SPEED_1000 &&
4306 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4309 (MAC_STATUS_SYNC_CHANGED |
4310 MAC_STATUS_CFG_CHANGED));
4313 NIC_SRAM_FIRMWARE_MBOX,
4314 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4317 /* Prevent send BD corruption. */
4318 if (tg3_flag(tp, CLKREQ_BUG)) {
4319 u16 oldlnkctl, newlnkctl;
4321 pci_read_config_word(tp->pdev,
4322 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4324 if (tp->link_config.active_speed == SPEED_100 ||
4325 tp->link_config.active_speed == SPEED_10)
4326 newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4328 newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4329 if (newlnkctl != oldlnkctl)
4330 pci_write_config_word(tp->pdev,
4331 pci_pcie_cap(tp->pdev) +
4332 PCI_EXP_LNKCTL, newlnkctl);
4335 if (current_link_up != netif_carrier_ok(tp->dev)) {
4336 if (current_link_up)
4337 netif_carrier_on(tp->dev);
4339 netif_carrier_off(tp->dev);
4340 tg3_link_report(tp);
4346 struct tg3_fiber_aneginfo {
4348 #define ANEG_STATE_UNKNOWN 0
4349 #define ANEG_STATE_AN_ENABLE 1
4350 #define ANEG_STATE_RESTART_INIT 2
4351 #define ANEG_STATE_RESTART 3
4352 #define ANEG_STATE_DISABLE_LINK_OK 4
4353 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4354 #define ANEG_STATE_ABILITY_DETECT 6
4355 #define ANEG_STATE_ACK_DETECT_INIT 7
4356 #define ANEG_STATE_ACK_DETECT 8
4357 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4358 #define ANEG_STATE_COMPLETE_ACK 10
4359 #define ANEG_STATE_IDLE_DETECT_INIT 11
4360 #define ANEG_STATE_IDLE_DETECT 12
4361 #define ANEG_STATE_LINK_OK 13
4362 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
4363 #define ANEG_STATE_NEXT_PAGE_WAIT 15
4366 #define MR_AN_ENABLE 0x00000001
4367 #define MR_RESTART_AN 0x00000002
4368 #define MR_AN_COMPLETE 0x00000004
4369 #define MR_PAGE_RX 0x00000008
4370 #define MR_NP_LOADED 0x00000010
4371 #define MR_TOGGLE_TX 0x00000020
4372 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
4373 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
4374 #define MR_LP_ADV_SYM_PAUSE 0x00000100
4375 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
4376 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4377 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4378 #define MR_LP_ADV_NEXT_PAGE 0x00001000
4379 #define MR_TOGGLE_RX 0x00002000
4380 #define MR_NP_RX 0x00004000
4382 #define MR_LINK_OK 0x80000000
4384 unsigned long link_time, cur_time;
4386 u32 ability_match_cfg;
4387 int ability_match_count;
4389 char ability_match, idle_match, ack_match;
4391 u32 txconfig, rxconfig;
4392 #define ANEG_CFG_NP 0x00000080
4393 #define ANEG_CFG_ACK 0x00000040
4394 #define ANEG_CFG_RF2 0x00000020
4395 #define ANEG_CFG_RF1 0x00000010
4396 #define ANEG_CFG_PS2 0x00000001
4397 #define ANEG_CFG_PS1 0x00008000
4398 #define ANEG_CFG_HD 0x00004000
4399 #define ANEG_CFG_FD 0x00002000
4400 #define ANEG_CFG_INVAL 0x00001f06
4405 #define ANEG_TIMER_ENAB 2
4406 #define ANEG_FAILED -1
4408 #define ANEG_STATE_SETTLE_TIME 10000
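/* The structure and constants above are a software implementation of
 * the 1000BASE-X autonegotiation arbitration state machine (IEEE 802.3
 * Clause 37), used by the "by hand" fiber setup path further below.
 * txconfig/rxconfig carry the 16-bit config words exchanged on the wire
 * (ANEG_CFG_* map to the ability/ACK/next-page bits), the MR_* flags
 * mirror the management variables the standard defines, and the
 * ANEG_STATE_* values follow the ability-detect -> ack-detect ->
 * complete-ack -> idle-detect -> link-ok progression driven one step at
 * a time by tg3_fiber_aneg_smachine().
 */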
4410 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4411 struct tg3_fiber_aneginfo *ap)
4414 unsigned long delta;
4418 if (ap->state == ANEG_STATE_UNKNOWN) {
4422 ap->ability_match_cfg = 0;
4423 ap->ability_match_count = 0;
4424 ap->ability_match = 0;
4430 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4431 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4433 if (rx_cfg_reg != ap->ability_match_cfg) {
4434 ap->ability_match_cfg = rx_cfg_reg;
4435 ap->ability_match = 0;
4436 ap->ability_match_count = 0;
4438 if (++ap->ability_match_count > 1) {
4439 ap->ability_match = 1;
4440 ap->ability_match_cfg = rx_cfg_reg;
4443 if (rx_cfg_reg & ANEG_CFG_ACK)
4451 ap->ability_match_cfg = 0;
4452 ap->ability_match_count = 0;
4453 ap->ability_match = 0;
4459 ap->rxconfig = rx_cfg_reg;
4462 switch (ap->state) {
4463 case ANEG_STATE_UNKNOWN:
4464 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4465 ap->state = ANEG_STATE_AN_ENABLE;
4468 case ANEG_STATE_AN_ENABLE:
4469 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4470 if (ap->flags & MR_AN_ENABLE) {
4473 ap->ability_match_cfg = 0;
4474 ap->ability_match_count = 0;
4475 ap->ability_match = 0;
4479 ap->state = ANEG_STATE_RESTART_INIT;
4481 ap->state = ANEG_STATE_DISABLE_LINK_OK;
4485 case ANEG_STATE_RESTART_INIT:
4486 ap->link_time = ap->cur_time;
4487 ap->flags &= ~(MR_NP_LOADED);
4489 tw32(MAC_TX_AUTO_NEG, 0);
4490 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4491 tw32_f(MAC_MODE, tp->mac_mode);
4494 ret = ANEG_TIMER_ENAB;
4495 ap->state = ANEG_STATE_RESTART;
4498 case ANEG_STATE_RESTART:
4499 delta = ap->cur_time - ap->link_time;
4500 if (delta > ANEG_STATE_SETTLE_TIME)
4501 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4503 ret = ANEG_TIMER_ENAB;
4506 case ANEG_STATE_DISABLE_LINK_OK:
4510 case ANEG_STATE_ABILITY_DETECT_INIT:
4511 ap->flags &= ~(MR_TOGGLE_TX);
4512 ap->txconfig = ANEG_CFG_FD;
4513 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4514 if (flowctrl & ADVERTISE_1000XPAUSE)
4515 ap->txconfig |= ANEG_CFG_PS1;
4516 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4517 ap->txconfig |= ANEG_CFG_PS2;
4518 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4519 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4520 tw32_f(MAC_MODE, tp->mac_mode);
4523 ap->state = ANEG_STATE_ABILITY_DETECT;
4526 case ANEG_STATE_ABILITY_DETECT:
4527 if (ap->ability_match != 0 && ap->rxconfig != 0)
4528 ap->state = ANEG_STATE_ACK_DETECT_INIT;
4531 case ANEG_STATE_ACK_DETECT_INIT:
4532 ap->txconfig |= ANEG_CFG_ACK;
4533 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4534 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4535 tw32_f(MAC_MODE, tp->mac_mode);
4538 ap->state = ANEG_STATE_ACK_DETECT;
4541 case ANEG_STATE_ACK_DETECT:
4542 if (ap->ack_match != 0) {
4543 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4544 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4545 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4547 ap->state = ANEG_STATE_AN_ENABLE;
4549 } else if (ap->ability_match != 0 &&
4550 ap->rxconfig == 0) {
4551 ap->state = ANEG_STATE_AN_ENABLE;
4555 case ANEG_STATE_COMPLETE_ACK_INIT:
4556 if (ap->rxconfig & ANEG_CFG_INVAL) {
4560 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4561 MR_LP_ADV_HALF_DUPLEX |
4562 MR_LP_ADV_SYM_PAUSE |
4563 MR_LP_ADV_ASYM_PAUSE |
4564 MR_LP_ADV_REMOTE_FAULT1 |
4565 MR_LP_ADV_REMOTE_FAULT2 |
4566 MR_LP_ADV_NEXT_PAGE |
4569 if (ap->rxconfig & ANEG_CFG_FD)
4570 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4571 if (ap->rxconfig & ANEG_CFG_HD)
4572 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4573 if (ap->rxconfig & ANEG_CFG_PS1)
4574 ap->flags |= MR_LP_ADV_SYM_PAUSE;
4575 if (ap->rxconfig & ANEG_CFG_PS2)
4576 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4577 if (ap->rxconfig & ANEG_CFG_RF1)
4578 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4579 if (ap->rxconfig & ANEG_CFG_RF2)
4580 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4581 if (ap->rxconfig & ANEG_CFG_NP)
4582 ap->flags |= MR_LP_ADV_NEXT_PAGE;
4584 ap->link_time = ap->cur_time;
4586 ap->flags ^= (MR_TOGGLE_TX);
4587 if (ap->rxconfig & 0x0008)
4588 ap->flags |= MR_TOGGLE_RX;
4589 if (ap->rxconfig & ANEG_CFG_NP)
4590 ap->flags |= MR_NP_RX;
4591 ap->flags |= MR_PAGE_RX;
4593 ap->state = ANEG_STATE_COMPLETE_ACK;
4594 ret = ANEG_TIMER_ENAB;
4597 case ANEG_STATE_COMPLETE_ACK:
4598 if (ap->ability_match != 0 &&
4599 ap->rxconfig == 0) {
4600 ap->state = ANEG_STATE_AN_ENABLE;
4603 delta = ap->cur_time - ap->link_time;
4604 if (delta > ANEG_STATE_SETTLE_TIME) {
4605 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4606 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4608 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4609 !(ap->flags & MR_NP_RX)) {
4610 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4618 case ANEG_STATE_IDLE_DETECT_INIT:
4619 ap->link_time = ap->cur_time;
4620 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4621 tw32_f(MAC_MODE, tp->mac_mode);
4624 ap->state = ANEG_STATE_IDLE_DETECT;
4625 ret = ANEG_TIMER_ENAB;
4628 case ANEG_STATE_IDLE_DETECT:
4629 if (ap->ability_match != 0 &&
4630 ap->rxconfig == 0) {
4631 ap->state = ANEG_STATE_AN_ENABLE;
4634 delta = ap->cur_time - ap->link_time;
4635 if (delta > ANEG_STATE_SETTLE_TIME) {
4636 /* XXX another gem from the Broadcom driver :( */
4637 ap->state = ANEG_STATE_LINK_OK;
4641 case ANEG_STATE_LINK_OK:
4642 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4646 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4647 /* ??? unimplemented */
4650 case ANEG_STATE_NEXT_PAGE_WAIT:
4651 /* ??? unimplemented */
4662 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4665 struct tg3_fiber_aneginfo aninfo;
4666 int status = ANEG_FAILED;
4670 tw32_f(MAC_TX_AUTO_NEG, 0);
4672 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4673 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4676 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4679 memset(&aninfo, 0, sizeof(aninfo));
4680 aninfo.flags |= MR_AN_ENABLE;
4681 aninfo.state = ANEG_STATE_UNKNOWN;
4682 aninfo.cur_time = 0;
4684 while (++tick < 195000) {
4685 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4686 if (status == ANEG_DONE || status == ANEG_FAILED)
4692 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4693 tw32_f(MAC_MODE, tp->mac_mode);
4696 *txflags = aninfo.txconfig;
4697 *rxflags = aninfo.flags;
4699 if (status == ANEG_DONE &&
4700 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4701 MR_LP_ADV_FULL_DUPLEX)))
4707 static void tg3_init_bcm8002(struct tg3 *tp)
4709 u32 mac_status = tr32(MAC_STATUS);
4712 /* Reset when initializing for the first time or when we have a link. */
4713 if (tg3_flag(tp, INIT_COMPLETE) &&
4714 !(mac_status & MAC_STATUS_PCS_SYNCED))
4717 /* Set PLL lock range. */
4718 tg3_writephy(tp, 0x16, 0x8007);
4721 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4723 /* Wait for reset to complete. */
4724 /* XXX schedule_timeout() ... */
4725 for (i = 0; i < 500; i++)
4728 /* Config mode; select PMA/Ch 1 regs. */
4729 tg3_writephy(tp, 0x10, 0x8411);
4731 /* Enable auto-lock and comdet, select txclk for tx. */
4732 tg3_writephy(tp, 0x11, 0x0a10);
4734 tg3_writephy(tp, 0x18, 0x00a0);
4735 tg3_writephy(tp, 0x16, 0x41ff);
4737 /* Assert and deassert POR. */
4738 tg3_writephy(tp, 0x13, 0x0400);
4740 tg3_writephy(tp, 0x13, 0x0000);
4742 tg3_writephy(tp, 0x11, 0x0a50);
4744 tg3_writephy(tp, 0x11, 0x0a10);
4746 /* Wait for signal to stabilize */
4747 /* XXX schedule_timeout() ... */
4748 for (i = 0; i < 15000; i++)
4751 /* Deselect the channel register so we can read the PHYID
4754 tg3_writephy(tp, 0x10, 0x8011);
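/* Hardware-assisted fiber autonegotiation: on chips with an SG_DIG block
 * the driver programs SG_DIG_CTRL with the pause bits it wants to
 * advertise and lets the SERDES run Clause 37 itself, then reads the
 * result from SG_DIG_STATUS.  If the link partner never sends config
 * words but PCS sync is achieved, the code falls back to parallel
 * detection once serdes_counter expires, mirroring what the software
 * state machine above does by hand.
 */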
4757 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4760 u32 sg_dig_ctrl, sg_dig_status;
4761 u32 serdes_cfg, expected_sg_dig_ctrl;
4762 int workaround, port_a;
4763 int current_link_up;
4766 expected_sg_dig_ctrl = 0;
4769 current_link_up = 0;
4771 if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4772 tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4774 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4777 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4778 /* preserve bits 20-23 for voltage regulator */
4779 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4782 sg_dig_ctrl = tr32(SG_DIG_CTRL);
4784 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4785 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4787 u32 val = serdes_cfg;
4793 tw32_f(MAC_SERDES_CFG, val);
4796 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4798 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4799 tg3_setup_flow_control(tp, 0, 0);
4800 current_link_up = 1;
4805 /* Want auto-negotiation. */
4806 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4808 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4809 if (flowctrl & ADVERTISE_1000XPAUSE)
4810 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4811 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4812 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4814 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4815 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4816 tp->serdes_counter &&
4817 ((mac_status & (MAC_STATUS_PCS_SYNCED |
4818 MAC_STATUS_RCVD_CFG)) ==
4819 MAC_STATUS_PCS_SYNCED)) {
4820 tp->serdes_counter--;
4821 current_link_up = 1;
4826 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4827 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4829 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4831 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4832 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4833 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4834 MAC_STATUS_SIGNAL_DET)) {
4835 sg_dig_status = tr32(SG_DIG_STATUS);
4836 mac_status = tr32(MAC_STATUS);
4838 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4839 (mac_status & MAC_STATUS_PCS_SYNCED)) {
4840 u32 local_adv = 0, remote_adv = 0;
4842 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4843 local_adv |= ADVERTISE_1000XPAUSE;
4844 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4845 local_adv |= ADVERTISE_1000XPSE_ASYM;
4847 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4848 remote_adv |= LPA_1000XPAUSE;
4849 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4850 remote_adv |= LPA_1000XPAUSE_ASYM;
4852 tp->link_config.rmt_adv =
4853 mii_adv_to_ethtool_adv_x(remote_adv);
4855 tg3_setup_flow_control(tp, local_adv, remote_adv);
4856 current_link_up = 1;
4857 tp->serdes_counter = 0;
4858 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4859 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4860 if (tp->serdes_counter)
4861 tp->serdes_counter--;
4864 u32 val = serdes_cfg;
4871 tw32_f(MAC_SERDES_CFG, val);
4874 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4877 /* Link parallel detection - link is up
4878  * only if we have PCS_SYNC and not
4879  * receiving config code words. */
4880 mac_status = tr32(MAC_STATUS);
4881 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4882 !(mac_status & MAC_STATUS_RCVD_CFG)) {
4883 tg3_setup_flow_control(tp, 0, 0);
4884 current_link_up = 1;
4886 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
4887 tp->serdes_counter =
4888 SERDES_PARALLEL_DET_TIMEOUT;
4890 goto restart_autoneg;
4894 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4895 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4899 return current_link_up;
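/* Software fallback used when hardware autoneg is unavailable: run the
 * fiber autoneg state machine (or force 1000FD when autoneg is off)
 * and derive flow control from the exchanged config words.  Returns
 * nonzero when the link is up.
 */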
4902 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4904 int current_link_up = 0;
4906 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4909 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4910 u32 txflags, rxflags;
4913 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4914 u32 local_adv = 0, remote_adv = 0;
4916 if (txflags & ANEG_CFG_PS1)
4917 local_adv |= ADVERTISE_1000XPAUSE;
4918 if (txflags & ANEG_CFG_PS2)
4919 local_adv |= ADVERTISE_1000XPSE_ASYM;
4921 if (rxflags & MR_LP_ADV_SYM_PAUSE)
4922 remote_adv |= LPA_1000XPAUSE;
4923 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4924 remote_adv |= LPA_1000XPAUSE_ASYM;
4926 tp->link_config.rmt_adv =
4927 mii_adv_to_ethtool_adv_x(remote_adv);
4929 tg3_setup_flow_control(tp, local_adv, remote_adv);
4931 current_link_up = 1;
4933 for (i = 0; i < 30; i++) {
4936 (MAC_STATUS_SYNC_CHANGED |
4937 MAC_STATUS_CFG_CHANGED));
4939 if ((tr32(MAC_STATUS) &
4940 (MAC_STATUS_SYNC_CHANGED |
4941 MAC_STATUS_CFG_CHANGED)) == 0)
4945 mac_status = tr32(MAC_STATUS);
4946 if (current_link_up == 0 &&
4947 (mac_status & MAC_STATUS_PCS_SYNCED) &&
4948 !(mac_status & MAC_STATUS_RCVD_CFG))
4949 current_link_up = 1;
4951 tg3_setup_flow_control(tp, 0, 0);
4953 /* Forcing 1000FD link up. */
4954 current_link_up = 1;
4956 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4959 tw32_f(MAC_MODE, tp->mac_mode);
4964 return current_link_up;
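/* Top-level link setup for TBI/fiber ports: program the MAC for TBI
 * mode, run hardware or by-hand autoneg, then update carrier state,
 * the link LED and the cached speed/duplex/flow-control settings.
 */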
4967 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4970 u16 orig_active_speed;
4971 u8 orig_active_duplex;
4973 int current_link_up;
4976 orig_pause_cfg = tp->link_config.active_flowctrl;
4977 orig_active_speed = tp->link_config.active_speed;
4978 orig_active_duplex = tp->link_config.active_duplex;
4980 if (!tg3_flag(tp, HW_AUTONEG) &&
4981 netif_carrier_ok(tp->dev) &&
4982 tg3_flag(tp, INIT_COMPLETE)) {
4983 mac_status = tr32(MAC_STATUS);
4984 mac_status &= (MAC_STATUS_PCS_SYNCED |
4985 MAC_STATUS_SIGNAL_DET |
4986 MAC_STATUS_CFG_CHANGED |
4987 MAC_STATUS_RCVD_CFG);
4988 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4989 MAC_STATUS_SIGNAL_DET)) {
4990 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4991 MAC_STATUS_CFG_CHANGED));
4996 tw32_f(MAC_TX_AUTO_NEG, 0);
4998 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4999 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5000 tw32_f(MAC_MODE, tp->mac_mode);
5003 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5004 tg3_init_bcm8002(tp);
5006 /* Enable link change event even when serdes polling. */
5007 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5010 current_link_up = 0;
5011 tp->link_config.rmt_adv = 0;
5012 mac_status = tr32(MAC_STATUS);
5014 if (tg3_flag(tp, HW_AUTONEG))
5015 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5017 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5019 tp->napi[0].hw_status->status =
5020 (SD_STATUS_UPDATED |
5021 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5023 for (i = 0; i < 100; i++) {
5024 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5025 MAC_STATUS_CFG_CHANGED));
5027 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5028 MAC_STATUS_CFG_CHANGED |
5029 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5033 mac_status = tr32(MAC_STATUS);
5034 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5035 current_link_up = 0;
5036 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5037 tp->serdes_counter == 0) {
5038 tw32_f(MAC_MODE, (tp->mac_mode |
5039 MAC_MODE_SEND_CONFIGS));
5041 tw32_f(MAC_MODE, tp->mac_mode);
5045 if (current_link_up == 1) {
5046 tp->link_config.active_speed = SPEED_1000;
5047 tp->link_config.active_duplex = DUPLEX_FULL;
5048 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5049 LED_CTRL_LNKLED_OVERRIDE |
5050 LED_CTRL_1000MBPS_ON));
5052 tp->link_config.active_speed = SPEED_UNKNOWN;
5053 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5054 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055 LED_CTRL_LNKLED_OVERRIDE |
5056 LED_CTRL_TRAFFIC_OVERRIDE));
5059 if (current_link_up != netif_carrier_ok(tp->dev)) {
5060 if (current_link_up)
5061 netif_carrier_on(tp->dev);
5063 netif_carrier_off(tp->dev);
5064 tg3_link_report(tp);
5066 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5067 if (orig_pause_cfg != now_pause_cfg ||
5068 orig_active_speed != tp->link_config.active_speed ||
5069 orig_active_duplex != tp->link_config.active_duplex)
5070 tg3_link_report(tp);
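/* Link setup for SerDes devices managed through an MII register
 * interface (e.g. 5714S/5780-class parts): program the 1000BASE-X
 * advertisement, restart or force the link, and resolve duplex and
 * flow control from the MII registers.
 */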
5076 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5078 int current_link_up, err = 0;
5082 u32 local_adv, remote_adv;
5084 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5085 tw32_f(MAC_MODE, tp->mac_mode);
5091 (MAC_STATUS_SYNC_CHANGED |
5092 MAC_STATUS_CFG_CHANGED |
5093 MAC_STATUS_MI_COMPLETION |
5094 MAC_STATUS_LNKSTATE_CHANGED));
5100 current_link_up = 0;
5101 current_speed = SPEED_UNKNOWN;
5102 current_duplex = DUPLEX_UNKNOWN;
5103 tp->link_config.rmt_adv = 0;
5105 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5106 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5107 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5108 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5109 bmsr |= BMSR_LSTATUS;
5111 bmsr &= ~BMSR_LSTATUS;
5114 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5116 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5117 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5118 /* do nothing, just check for link up at the end */
5119 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5122 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5123 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5124 ADVERTISE_1000XPAUSE |
5125 ADVERTISE_1000XPSE_ASYM |
5128 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5129 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5131 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5132 tg3_writephy(tp, MII_ADVERTISE, newadv);
5133 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5134 tg3_writephy(tp, MII_BMCR, bmcr);
5136 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5137 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5138 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5145 bmcr &= ~BMCR_SPEED1000;
5146 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5148 if (tp->link_config.duplex == DUPLEX_FULL)
5149 new_bmcr |= BMCR_FULLDPLX;
5151 if (new_bmcr != bmcr) {
5152 /* BMCR_SPEED1000 is a reserved bit that needs
5153 * to be set on write.
5155 new_bmcr |= BMCR_SPEED1000;
5157 /* Force a linkdown */
5158 if (netif_carrier_ok(tp->dev)) {
5161 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5162 adv &= ~(ADVERTISE_1000XFULL |
5163 ADVERTISE_1000XHALF |
5165 tg3_writephy(tp, MII_ADVERTISE, adv);
5166 tg3_writephy(tp, MII_BMCR, bmcr |
5170 netif_carrier_off(tp->dev);
5172 tg3_writephy(tp, MII_BMCR, new_bmcr);
5174 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5175 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5176 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5178 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5179 bmsr |= BMSR_LSTATUS;
5181 bmsr &= ~BMSR_LSTATUS;
5183 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5187 if (bmsr & BMSR_LSTATUS) {
5188 current_speed = SPEED_1000;
5189 current_link_up = 1;
5190 if (bmcr & BMCR_FULLDPLX)
5191 current_duplex = DUPLEX_FULL;
5193 current_duplex = DUPLEX_HALF;
5198 if (bmcr & BMCR_ANENABLE) {
5201 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5202 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5203 common = local_adv & remote_adv;
5204 if (common & (ADVERTISE_1000XHALF |
5205 ADVERTISE_1000XFULL)) {
5206 if (common & ADVERTISE_1000XFULL)
5207 current_duplex = DUPLEX_FULL;
5209 current_duplex = DUPLEX_HALF;
5211 tp->link_config.rmt_adv =
5212 mii_adv_to_ethtool_adv_x(remote_adv);
5213 } else if (!tg3_flag(tp, 5780_CLASS)) {
5214 /* Link is up via parallel detect */
5216 current_link_up = 0;
5221 if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5222 tg3_setup_flow_control(tp, local_adv, remote_adv);
5224 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5225 if (tp->link_config.active_duplex == DUPLEX_HALF)
5226 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5228 tw32_f(MAC_MODE, tp->mac_mode);
5231 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5233 tp->link_config.active_speed = current_speed;
5234 tp->link_config.active_duplex = current_duplex;
5236 if (current_link_up != netif_carrier_ok(tp->dev)) {
5237 if (current_link_up)
5238 netif_carrier_on(tp->dev);
5240 netif_carrier_off(tp->dev);
5241 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5243 tg3_link_report(tp);
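/* Called periodically once the autoneg timer has expired.  If autoneg
 * has not brought the link up but we see signal detect without config
 * code words, force 1000FD via parallel detection; if config code
 * words appear again later, turn autoneg back on.
 */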
5248 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5250 if (tp->serdes_counter) {
5251 /* Give autoneg time to complete. */
5252 tp->serdes_counter--;
5256 if (!netif_carrier_ok(tp->dev) &&
5257 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5260 tg3_readphy(tp, MII_BMCR, &bmcr);
5261 if (bmcr & BMCR_ANENABLE) {
5264 /* Select shadow register 0x1f */
5265 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5266 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5268 /* Select expansion interrupt status register */
5269 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5270 MII_TG3_DSP_EXP1_INT_STAT);
5271 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5272 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5274 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5275 /* We have signal detect and not receiving
5276 * config code words, link is up by parallel
5280 bmcr &= ~BMCR_ANENABLE;
5281 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5282 tg3_writephy(tp, MII_BMCR, bmcr);
5283 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5286 } else if (netif_carrier_ok(tp->dev) &&
5287 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5288 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5291 /* Select expansion interrupt status register */
5292 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5293 MII_TG3_DSP_EXP1_INT_STAT);
5294 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5298 /* Config code words received, turn on autoneg. */
5299 tg3_readphy(tp, MII_BMCR, &bmcr);
5300 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5302 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
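/* Dispatch to the fiber, MII-SerDes or copper link setup routine, then
 * adjust the clock prescaler, TX slot time, statistics coalescing and
 * ASPM power-management threshold to match the new link state.
 */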
5308 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5313 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5314 err = tg3_setup_fiber_phy(tp, force_reset);
5315 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5316 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5318 err = tg3_setup_copper_phy(tp, force_reset);
5320 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5323 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5324 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5326 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5331 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5332 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5333 tw32(GRC_MISC_CFG, val);
5336 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5337 (6 << TX_LENGTHS_IPG_SHIFT);
5338 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5339 val |= tr32(MAC_TX_LENGTHS) &
5340 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5341 TX_LENGTHS_CNT_DWN_VAL_MSK);
5343 if (tp->link_config.active_speed == SPEED_1000 &&
5344 tp->link_config.active_duplex == DUPLEX_HALF)
5345 tw32(MAC_TX_LENGTHS, val |
5346 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5348 tw32(MAC_TX_LENGTHS, val |
5349 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5351 if (!tg3_flag(tp, 5705_PLUS)) {
5352 if (netif_carrier_ok(tp->dev)) {
5353 tw32(HOSTCC_STAT_COAL_TICKS,
5354 tp->coal.stats_block_coalesce_usecs);
5356 tw32(HOSTCC_STAT_COAL_TICKS, 0);
5360 if (tg3_flag(tp, ASPM_WORKAROUND)) {
5361 val = tr32(PCIE_PWR_MGMT_THRESH);
5362 if (!netif_carrier_ok(tp->dev))
5363 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5366 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5367 tw32(PCIE_PWR_MGMT_THRESH, val);
5373 static inline int tg3_irq_sync(struct tg3 *tp)
5375 return tp->irq_sync;
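/* Copy 'len' bytes of registers starting at offset 'off' into the
 * matching offset of the register dump buffer.
 */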
5378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5382 dst = (u32 *)((u8 *)dst + off);
5383 for (i = 0; i < len; i += sizeof(u32))
5384 *dst++ = tr32(off + i);
5387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5389 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5390 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5391 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5392 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5393 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5394 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5395 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5396 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5397 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5398 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5399 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5400 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5401 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5402 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5403 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5404 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5405 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5406 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5407 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5409 if (tg3_flag(tp, SUPPORT_MSIX))
5410 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5412 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5413 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5414 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5415 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5416 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5417 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5418 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5419 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5421 if (!tg3_flag(tp, 5705_PLUS)) {
5422 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5423 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5424 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5427 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5428 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5429 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5430 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5431 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5433 if (tg3_flag(tp, NVRAM))
5434 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
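/* Dump the chip register block plus each vector's host status block
 * and NAPI bookkeeping to the kernel log for debugging.
 */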
5437 static void tg3_dump_state(struct tg3 *tp)
5442 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5444 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5448 if (tg3_flag(tp, PCI_EXPRESS)) {
5449 /* Read up to but not including private PCI registers */
5450 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5451 regs[i / sizeof(u32)] = tr32(i);
5453 tg3_dump_legacy_regs(tp, regs);
5455 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5456 if (!regs[i + 0] && !regs[i + 1] &&
5457 !regs[i + 2] && !regs[i + 3])
5460 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5462 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5467 for (i = 0; i < tp->irq_cnt; i++) {
5468 struct tg3_napi *tnapi = &tp->napi[i];
5470 /* SW status block */
5472 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5474 tnapi->hw_status->status,
5475 tnapi->hw_status->status_tag,
5476 tnapi->hw_status->rx_jumbo_consumer,
5477 tnapi->hw_status->rx_consumer,
5478 tnapi->hw_status->rx_mini_consumer,
5479 tnapi->hw_status->idx[0].rx_producer,
5480 tnapi->hw_status->idx[0].tx_consumer);
5483 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5485 tnapi->last_tag, tnapi->last_irq_tag,
5486 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5488 tnapi->prodring.rx_std_prod_idx,
5489 tnapi->prodring.rx_std_cons_idx,
5490 tnapi->prodring.rx_jmb_prod_idx,
5491 tnapi->prodring.rx_jmb_cons_idx);
5495 /* This is called whenever we suspect that the system chipset is re-
5496 * ordering the sequence of MMIO to the tx send mailbox. The symptom
5497 * is bogus tx completions. We try to recover by setting the
5498 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5501 static void tg3_tx_recover(struct tg3 *tp)
5503 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5504 tp->write32_tx_mbox == tg3_write_indirect_mbox);
5506 netdev_warn(tp->dev,
5507 "The system may be re-ordering memory-mapped I/O "
5508 "cycles to the network device, attempting to recover. "
5509 "Please report the problem to the driver maintainer "
5510 "and include system chipset information.\n");
5512 spin_lock(&tp->lock);
5513 tg3_flag_set(tp, TX_RECOVERY_PENDING);
5514 spin_unlock(&tp->lock);
5517 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5519 /* Tell compiler to fetch tx indices from memory. */
5521 return tnapi->tx_pending -
5522 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5525 /* Tigon3 never reports partial packet sends. So we do not
5526 * need special logic to handle SKBs that have not had all
5527 * of their frags sent yet, like SunGEM does.
5529 static void tg3_tx(struct tg3_napi *tnapi)
5531 struct tg3 *tp = tnapi->tp;
5532 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5533 u32 sw_idx = tnapi->tx_cons;
5534 struct netdev_queue *txq;
5535 int index = tnapi - tp->napi;
5536 unsigned int pkts_compl = 0, bytes_compl = 0;
5538 if (tg3_flag(tp, ENABLE_TSS))
5541 txq = netdev_get_tx_queue(tp->dev, index);
5543 while (sw_idx != hw_idx) {
5544 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5545 struct sk_buff *skb = ri->skb;
5548 if (unlikely(skb == NULL)) {
5553 pci_unmap_single(tp->pdev,
5554 dma_unmap_addr(ri, mapping),
5560 while (ri->fragmented) {
5561 ri->fragmented = false;
5562 sw_idx = NEXT_TX(sw_idx);
5563 ri = &tnapi->tx_buffers[sw_idx];
5566 sw_idx = NEXT_TX(sw_idx);
5568 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5569 ri = &tnapi->tx_buffers[sw_idx];
5570 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5573 pci_unmap_page(tp->pdev,
5574 dma_unmap_addr(ri, mapping),
5575 skb_frag_size(&skb_shinfo(skb)->frags[i]),
5578 while (ri->fragmented) {
5579 ri->fragmented = false;
5580 sw_idx = NEXT_TX(sw_idx);
5581 ri = &tnapi->tx_buffers[sw_idx];
5584 sw_idx = NEXT_TX(sw_idx);
5588 bytes_compl += skb->len;
5592 if (unlikely(tx_bug)) {
5598 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5600 tnapi->tx_cons = sw_idx;
5602 /* Need to make the tx_cons update visible to tg3_start_xmit()
5603 * before checking for netif_queue_stopped(). Without the
5604 * memory barrier, there is a small possibility that tg3_start_xmit()
5605 * will miss it and cause the queue to be stopped forever.
5609 if (unlikely(netif_tx_queue_stopped(txq) &&
5610 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5611 __netif_tx_lock(txq, smp_processor_id());
5612 if (netif_tx_queue_stopped(txq) &&
5613 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5614 netif_tx_wake_queue(txq);
5615 __netif_tx_unlock(txq);
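/* Unmap and release the data buffer referenced by an RX ring_info
 * entry.
 */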
5619 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5624 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5625 map_sz, PCI_DMA_FROMDEVICE);
5630 /* Returns size of skb allocated or < 0 on error.
5632 * We only need to fill in the address because the other members
5633 * of the RX descriptor are invariant, see tg3_init_rings.
5635 * Note the purposeful asymmetry of cpu vs. chip accesses. For
5636 * posting buffers we only dirty the first cache line of the RX
5637 * descriptor (containing the address). Whereas for the RX status
5638 * buffers the cpu only reads the last cacheline of the RX descriptor
5639 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5641 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5642 u32 opaque_key, u32 dest_idx_unmasked)
5644 struct tg3_rx_buffer_desc *desc;
5645 struct ring_info *map;
5648 int skb_size, data_size, dest_idx;
5650 switch (opaque_key) {
5651 case RXD_OPAQUE_RING_STD:
5652 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5653 desc = &tpr->rx_std[dest_idx];
5654 map = &tpr->rx_std_buffers[dest_idx];
5655 data_size = tp->rx_pkt_map_sz;
5658 case RXD_OPAQUE_RING_JUMBO:
5659 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5660 desc = &tpr->rx_jmb[dest_idx].std;
5661 map = &tpr->rx_jmb_buffers[dest_idx];
5662 data_size = TG3_RX_JMB_MAP_SZ;
5669 /* Do not overwrite any of the map or rp information
5670 * until we are sure we can commit to a new buffer.
5672 * Callers depend upon this behavior and assume that
5673 * we leave everything unchanged if we fail.
5675 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5676 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5677 data = kmalloc(skb_size, GFP_ATOMIC);
5681 mapping = pci_map_single(tp->pdev,
5682 data + TG3_RX_OFFSET(tp),
5684 PCI_DMA_FROMDEVICE);
5685 if (pci_dma_mapping_error(tp->pdev, mapping)) {
5691 dma_unmap_addr_set(map, mapping, mapping);
5693 desc->addr_hi = ((u64)mapping >> 32);
5694 desc->addr_lo = ((u64)mapping & 0xffffffff);
5699 /* We only need to copy the address over because the other
5700 * members of the RX descriptor are invariant. See notes above
5701 * tg3_alloc_rx_data for full details.
5703 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5704 struct tg3_rx_prodring_set *dpr,
5705 u32 opaque_key, int src_idx,
5706 u32 dest_idx_unmasked)
5708 struct tg3 *tp = tnapi->tp;
5709 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5710 struct ring_info *src_map, *dest_map;
5711 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5714 switch (opaque_key) {
5715 case RXD_OPAQUE_RING_STD:
5716 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5717 dest_desc = &dpr->rx_std[dest_idx];
5718 dest_map = &dpr->rx_std_buffers[dest_idx];
5719 src_desc = &spr->rx_std[src_idx];
5720 src_map = &spr->rx_std_buffers[src_idx];
5723 case RXD_OPAQUE_RING_JUMBO:
5724 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5725 dest_desc = &dpr->rx_jmb[dest_idx].std;
5726 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5727 src_desc = &spr->rx_jmb[src_idx].std;
5728 src_map = &spr->rx_jmb_buffers[src_idx];
5735 dest_map->data = src_map->data;
5736 dma_unmap_addr_set(dest_map, mapping,
5737 dma_unmap_addr(src_map, mapping));
5738 dest_desc->addr_hi = src_desc->addr_hi;
5739 dest_desc->addr_lo = src_desc->addr_lo;
5741 /* Ensure that the update to the skb happens after the physical
5742 * addresses have been transferred to the new BD location.
5746 src_map->data = NULL;
5749 /* The RX ring scheme is composed of multiple rings which post fresh
5750 * buffers to the chip, and one special ring the chip uses to report
5751 * status back to the host.
5753 * The special ring reports the status of received packets to the
5754 * host. The chip does not write into the original descriptor the
5755 * RX buffer was obtained from. The chip simply takes the original
5756 * descriptor as provided by the host, updates the status and length
5757 * field, then writes this into the next status ring entry.
5759 * Each ring the host uses to post buffers to the chip is described
5760 * by a TG3_BDINFO entry in the chip's SRAM area. When a packet arrives,
5761 * it is first placed into the on-chip ram. When the packet's length
5762 * is known, it walks down the TG3_BDINFO entries to select the ring.
5763 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
5764 * which is within the range of the new packet's length is chosen.
5766 * The "separate ring for rx status" scheme may sound queer, but it makes
5767 * sense from a cache coherency perspective. If only the host writes
5768 * to the buffer post rings, and only the chip writes to the rx status
5769 * rings, then cache lines never move beyond shared-modified state.
5770 * If both the host and chip were to write into the same ring, cache line
5771 * eviction could occur since both entities want it in an exclusive state.
5773 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5775 struct tg3 *tp = tnapi->tp;
5776 u32 work_mask, rx_std_posted = 0;
5777 u32 std_prod_idx, jmb_prod_idx;
5778 u32 sw_idx = tnapi->rx_rcb_ptr;
5781 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5783 hw_idx = *(tnapi->rx_rcb_prod_idx);
5785 * We need to order the read of hw_idx and the read of
5786 * the opaque cookie.
5791 std_prod_idx = tpr->rx_std_prod_idx;
5792 jmb_prod_idx = tpr->rx_jmb_prod_idx;
5793 while (sw_idx != hw_idx && budget > 0) {
5794 struct ring_info *ri;
5795 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5797 struct sk_buff *skb;
5798 dma_addr_t dma_addr;
5799 u32 opaque_key, desc_idx, *post_ptr;
5802 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5803 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5804 if (opaque_key == RXD_OPAQUE_RING_STD) {
5805 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5806 dma_addr = dma_unmap_addr(ri, mapping);
5808 post_ptr = &std_prod_idx;
5810 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5811 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5812 dma_addr = dma_unmap_addr(ri, mapping);
5814 post_ptr = &jmb_prod_idx;
5816 goto next_pkt_nopost;
5818 work_mask |= opaque_key;
5820 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5821 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5823 tg3_recycle_rx(tnapi, tpr, opaque_key,
5824 desc_idx, *post_ptr);
5826 /* Other statistics kept track of by card. */
5831 prefetch(data + TG3_RX_OFFSET(tp));
5832 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5835 if (len > TG3_RX_COPY_THRESH(tp)) {
5838 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5843 pci_unmap_single(tp->pdev, dma_addr, skb_size,
5844 PCI_DMA_FROMDEVICE);
5846 skb = build_skb(data);
5849 goto drop_it_no_recycle;
5851 skb_reserve(skb, TG3_RX_OFFSET(tp));
5852 /* Ensure that the update to the data happens
5853 * after the usage of the old DMA mapping.
5860 tg3_recycle_rx(tnapi, tpr, opaque_key,
5861 desc_idx, *post_ptr);
5863 skb = netdev_alloc_skb(tp->dev,
5864 len + TG3_RAW_IP_ALIGN);
5866 goto drop_it_no_recycle;
5868 skb_reserve(skb, TG3_RAW_IP_ALIGN);
5869 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5871 data + TG3_RX_OFFSET(tp),
5873 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5877 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5878 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5879 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5880 >> RXD_TCPCSUM_SHIFT) == 0xffff))
5881 skb->ip_summed = CHECKSUM_UNNECESSARY;
5883 skb_checksum_none_assert(skb);
5885 skb->protocol = eth_type_trans(skb, tp->dev);
5887 if (len > (tp->dev->mtu + ETH_HLEN) &&
5888 skb->protocol != htons(ETH_P_8021Q)) {
5890 goto drop_it_no_recycle;
5893 if (desc->type_flags & RXD_FLAG_VLAN &&
5894 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5895 __vlan_hwaccel_put_tag(skb,
5896 desc->err_vlan & RXD_VLAN_MASK);
5898 napi_gro_receive(&tnapi->napi, skb);
5906 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5907 tpr->rx_std_prod_idx = std_prod_idx &
5908 tp->rx_std_ring_mask;
5909 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5910 tpr->rx_std_prod_idx);
5911 work_mask &= ~RXD_OPAQUE_RING_STD;
5916 sw_idx &= tp->rx_ret_ring_mask;
5918 /* Refresh hw_idx to see if there is new work */
5919 if (sw_idx == hw_idx) {
5920 hw_idx = *(tnapi->rx_rcb_prod_idx);
5925 /* ACK the status ring. */
5926 tnapi->rx_rcb_ptr = sw_idx;
5927 tw32_rx_mbox(tnapi->consmbox, sw_idx);
5929 /* Refill RX ring(s). */
5930 if (!tg3_flag(tp, ENABLE_RSS)) {
5931 /* Sync BD data before updating mailbox */
5934 if (work_mask & RXD_OPAQUE_RING_STD) {
5935 tpr->rx_std_prod_idx = std_prod_idx &
5936 tp->rx_std_ring_mask;
5937 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5938 tpr->rx_std_prod_idx);
5940 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5941 tpr->rx_jmb_prod_idx = jmb_prod_idx &
5942 tp->rx_jmb_ring_mask;
5943 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5944 tpr->rx_jmb_prod_idx);
5947 } else if (work_mask) {
5948 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5949 * updated before the producer indices can be updated.
5953 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5954 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5956 if (tnapi != &tp->napi[1])
5957 napi_schedule(&tp->napi[1].napi);
5963 static void tg3_poll_link(struct tg3 *tp)
5965 /* handle link change and other phy events */
5966 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5967 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5969 if (sblk->status & SD_STATUS_LINK_CHG) {
5970 sblk->status = SD_STATUS_UPDATED |
5971 (sblk->status & ~SD_STATUS_LINK_CHG);
5972 spin_lock(&tp->lock);
5973 if (tg3_flag(tp, USE_PHYLIB)) {
5975 (MAC_STATUS_SYNC_CHANGED |
5976 MAC_STATUS_CFG_CHANGED |
5977 MAC_STATUS_MI_COMPLETION |
5978 MAC_STATUS_LNKSTATE_CHANGED));
5981 tg3_setup_phy(tp, 0);
5982 spin_unlock(&tp->lock);
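/* Move recycled standard and jumbo RX buffers from the source producer
 * ring set (spr) into the destination set (dpr), stopping early if a
 * destination slot is still occupied.  Memory barriers keep the buffer
 * updates ordered with respect to the index updates.
 */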
5987 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5988 struct tg3_rx_prodring_set *dpr,
5989 struct tg3_rx_prodring_set *spr)
5991 u32 si, di, cpycnt, src_prod_idx;
5995 src_prod_idx = spr->rx_std_prod_idx;
5997 /* Make sure updates to the rx_std_buffers[] entries and the
5998 * standard producer index are seen in the correct order.
6002 if (spr->rx_std_cons_idx == src_prod_idx)
6005 if (spr->rx_std_cons_idx < src_prod_idx)
6006 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6008 cpycnt = tp->rx_std_ring_mask + 1 -
6009 spr->rx_std_cons_idx;
6011 cpycnt = min(cpycnt,
6012 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6014 si = spr->rx_std_cons_idx;
6015 di = dpr->rx_std_prod_idx;
6017 for (i = di; i < di + cpycnt; i++) {
6018 if (dpr->rx_std_buffers[i].data) {
6028 /* Ensure that updates to the rx_std_buffers ring and the
6029 * shadowed hardware producer ring from tg3_recycle_skb() are
6030 * ordered correctly WRT the skb check above.
6034 memcpy(&dpr->rx_std_buffers[di],
6035 &spr->rx_std_buffers[si],
6036 cpycnt * sizeof(struct ring_info));
6038 for (i = 0; i < cpycnt; i++, di++, si++) {
6039 struct tg3_rx_buffer_desc *sbd, *dbd;
6040 sbd = &spr->rx_std[si];
6041 dbd = &dpr->rx_std[di];
6042 dbd->addr_hi = sbd->addr_hi;
6043 dbd->addr_lo = sbd->addr_lo;
6046 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6047 tp->rx_std_ring_mask;
6048 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6049 tp->rx_std_ring_mask;
6053 src_prod_idx = spr->rx_jmb_prod_idx;
6055 /* Make sure updates to the rx_jmb_buffers[] entries and
6056 * the jumbo producer index are seen in the correct order.
6060 if (spr->rx_jmb_cons_idx == src_prod_idx)
6063 if (spr->rx_jmb_cons_idx < src_prod_idx)
6064 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6066 cpycnt = tp->rx_jmb_ring_mask + 1 -
6067 spr->rx_jmb_cons_idx;
6069 cpycnt = min(cpycnt,
6070 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6072 si = spr->rx_jmb_cons_idx;
6073 di = dpr->rx_jmb_prod_idx;
6075 for (i = di; i < di + cpycnt; i++) {
6076 if (dpr->rx_jmb_buffers[i].data) {
6086 /* Ensure that updates to the rx_jmb_buffers ring and the
6087 * shadowed hardware producer ring from tg3_recycle_skb() are
6088 * ordered correctly WRT the skb check above.
6092 memcpy(&dpr->rx_jmb_buffers[di],
6093 &spr->rx_jmb_buffers[si],
6094 cpycnt * sizeof(struct ring_info));
6096 for (i = 0; i < cpycnt; i++, di++, si++) {
6097 struct tg3_rx_buffer_desc *sbd, *dbd;
6098 sbd = &spr->rx_jmb[si].std;
6099 dbd = &dpr->rx_jmb[di].std;
6100 dbd->addr_hi = sbd->addr_hi;
6101 dbd->addr_lo = sbd->addr_lo;
6104 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6105 tp->rx_jmb_ring_mask;
6106 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6107 tp->rx_jmb_ring_mask;
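/* Common NAPI work for all vectors: reclaim completed TX descriptors,
 * process received packets within the remaining budget and, when RSS
 * is enabled and this is vector 1, pull buffers recycled by the other
 * vectors back into the ring-0 producer rings and update the RX
 * mailboxes.
 */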
6113 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6115 struct tg3 *tp = tnapi->tp;
6117 /* run TX completion thread */
6118 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6120 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6124 /* run RX thread, within the bounds set by NAPI.
6125 * All RX "locking" is done by ensuring outside
6126 * code synchronizes with tg3->napi.poll()
6128 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6129 work_done += tg3_rx(tnapi, budget - work_done);
6131 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6132 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6134 u32 std_prod_idx = dpr->rx_std_prod_idx;
6135 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6137 for (i = 1; i < tp->irq_cnt; i++)
6138 err |= tg3_rx_prodring_xfer(tp, dpr,
6139 &tp->napi[i].prodring);
6143 if (std_prod_idx != dpr->rx_std_prod_idx)
6144 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6145 dpr->rx_std_prod_idx);
6147 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6148 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6149 dpr->rx_jmb_prod_idx);
6154 tw32_f(HOSTCC_MODE, tp->coal_now);
6160 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6162 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6163 schedule_work(&tp->reset_task);
6166 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6168 cancel_work_sync(&tp->reset_task);
6169 tg3_flag_clear(tp, RESET_TASK_PENDING);
6170 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
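/* NAPI poll handler for the extra MSI-X vectors.  These always use
 * tagged status, so the last status tag is written back to the
 * interrupt mailbox once all work is done to re-enable interrupts.
 */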
6173 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6175 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6176 struct tg3 *tp = tnapi->tp;
6178 struct tg3_hw_status *sblk = tnapi->hw_status;
6181 work_done = tg3_poll_work(tnapi, work_done, budget);
6183 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6186 if (unlikely(work_done >= budget))
6189 /* tp->last_tag is used in tg3_int_reenable() below
6190 * to tell the hw how much work has been processed,
6191 * so we must read it before checking for more work.
6193 tnapi->last_tag = sblk->status_tag;
6194 tnapi->last_irq_tag = tnapi->last_tag;
6197 /* check for RX/TX work to do */
6198 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6199 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6200 napi_complete(napi);
6201 /* Reenable interrupts. */
6202 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6211 /* work_done is guaranteed to be less than budget. */
6212 napi_complete(napi);
6213 tg3_reset_task_schedule(tp);
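/* Check the flow attention, MSI status and DMA status registers for
 * error conditions; if a real error is found, log it and schedule a
 * chip reset.
 */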
6217 static void tg3_process_error(struct tg3 *tp)
6220 bool real_error = false;
6222 if (tg3_flag(tp, ERROR_PROCESSED))
6225 /* Check Flow Attention register */
6226 val = tr32(HOSTCC_FLOW_ATTN);
6227 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6228 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
6232 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6233 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
6237 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6238 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
6247 tg3_flag_set(tp, ERROR_PROCESSED);
6248 tg3_reset_task_schedule(tp);
6251 static int tg3_poll(struct napi_struct *napi, int budget)
6253 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6254 struct tg3 *tp = tnapi->tp;
6256 struct tg3_hw_status *sblk = tnapi->hw_status;
6259 if (sblk->status & SD_STATUS_ERROR)
6260 tg3_process_error(tp);
6264 work_done = tg3_poll_work(tnapi, work_done, budget);
6266 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6269 if (unlikely(work_done >= budget))
6272 if (tg3_flag(tp, TAGGED_STATUS)) {
6273 /* tp->last_tag is used in tg3_int_reenable() below
6274 * to tell the hw how much work has been processed,
6275 * so we must read it before checking for more work.
6277 tnapi->last_tag = sblk->status_tag;
6278 tnapi->last_irq_tag = tnapi->last_tag;
6281 sblk->status &= ~SD_STATUS_UPDATED;
6283 if (likely(!tg3_has_work(tnapi))) {
6284 napi_complete(napi);
6285 tg3_int_reenable(tnapi);
6293 /* work_done is guaranteed to be less than budget. */
6294 napi_complete(napi);
6295 tg3_reset_task_schedule(tp);
6299 static void tg3_napi_disable(struct tg3 *tp)
6303 for (i = tp->irq_cnt - 1; i >= 0; i--)
6304 napi_disable(&tp->napi[i].napi);
6307 static void tg3_napi_enable(struct tg3 *tp)
6311 for (i = 0; i < tp->irq_cnt; i++)
6312 napi_enable(&tp->napi[i].napi);
6315 static void tg3_napi_init(struct tg3 *tp)
6319 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6320 for (i = 1; i < tp->irq_cnt; i++)
6321 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6324 static void tg3_napi_fini(struct tg3 *tp)
6328 for (i = 0; i < tp->irq_cnt; i++)
6329 netif_napi_del(&tp->napi[i].napi);
6332 static inline void tg3_netif_stop(struct tg3 *tp)
6334 tp->dev->trans_start = jiffies; /* prevent tx timeout */
6335 tg3_napi_disable(tp);
6336 netif_tx_disable(tp->dev);
6339 static inline void tg3_netif_start(struct tg3 *tp)
6341 /* NOTE: unconditional netif_tx_wake_all_queues is only
6342 * appropriate so long as all callers are assured to
6343 * have free tx slots (such as after tg3_init_hw)
6345 netif_tx_wake_all_queues(tp->dev);
6347 tg3_napi_enable(tp);
6348 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6349 tg3_enable_ints(tp);
6352 static void tg3_irq_quiesce(struct tg3 *tp)
6356 BUG_ON(tp->irq_sync);
6361 for (i = 0; i < tp->irq_cnt; i++)
6362 synchronize_irq(tp->napi[i].irq_vec);
6365 /* Fully shut down all tg3 driver activity elsewhere in the system.
6366 * If irq_sync is non-zero, the IRQ handlers are quiesced as well.
6367 * Most of the time, this is not necessary except when
6368 * shutting down the device.
6370 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6372 spin_lock_bh(&tp->lock);
6374 tg3_irq_quiesce(tp);
6377 static inline void tg3_full_unlock(struct tg3 *tp)
6379 spin_unlock_bh(&tp->lock);
6382 /* One-shot MSI handler - Chip automatically disables interrupt
6383 * after sending MSI so driver doesn't have to do it.
6385 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6387 struct tg3_napi *tnapi = dev_id;
6388 struct tg3 *tp = tnapi->tp;
6390 prefetch(tnapi->hw_status);
6392 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6394 if (likely(!tg3_irq_sync(tp)))
6395 napi_schedule(&tnapi->napi);
6400 /* MSI ISR - No need to check for interrupt sharing and no need to
6401 * flush status block and interrupt mailbox. PCI ordering rules
6402 * guarantee that MSI will arrive after the status block.
6404 static irqreturn_t tg3_msi(int irq, void *dev_id)
6406 struct tg3_napi *tnapi = dev_id;
6407 struct tg3 *tp = tnapi->tp;
6409 prefetch(tnapi->hw_status);
6411 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6413 * Writing any value to intr-mbox-0 clears PCI INTA# and
6414 * chip-internal interrupt pending events.
6415 * Writing non-zero to intr-mbox-0 additionally tells the
6416 * NIC to stop sending us irqs, engaging "in-intr-handler"
6419 tw32_mailbox(tnapi->int_mbox, 0x00000001);
6420 if (likely(!tg3_irq_sync(tp)))
6421 napi_schedule(&tnapi->napi);
6423 return IRQ_RETVAL(1);
6426 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6428 struct tg3_napi *tnapi = dev_id;
6429 struct tg3 *tp = tnapi->tp;
6430 struct tg3_hw_status *sblk = tnapi->hw_status;
6431 unsigned int handled = 1;
6433 /* In INTx mode, it is possible for the interrupt to arrive at
6434 * the CPU before the status block posted prior to the interrupt.
6435 * Reading the PCI State register will confirm whether the
6436 * interrupt is ours and will flush the status block.
6438 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6439 if (tg3_flag(tp, CHIP_RESETTING) ||
6440 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6447 * Writing any value to intr-mbox-0 clears PCI INTA# and
6448 * chip-internal interrupt pending events.
6449 * Writing non-zero to intr-mbox-0 additionally tells the
6450 * NIC to stop sending us irqs, engaging "in-intr-handler"
6453 * Flush the mailbox to de-assert the IRQ immediately to prevent
6454 * spurious interrupts. The flush impacts performance but
6455 * excessive spurious interrupts can be worse in some cases.
6457 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6458 if (tg3_irq_sync(tp))
6460 sblk->status &= ~SD_STATUS_UPDATED;
6461 if (likely(tg3_has_work(tnapi))) {
6462 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6463 napi_schedule(&tnapi->napi);
6465 /* No work, shared interrupt perhaps? re-enable
6466 * interrupts, and flush that PCI write
6468 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6472 return IRQ_RETVAL(handled);
6475 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6477 struct tg3_napi *tnapi = dev_id;
6478 struct tg3 *tp = tnapi->tp;
6479 struct tg3_hw_status *sblk = tnapi->hw_status;
6480 unsigned int handled = 1;
6482 /* In INTx mode, it is possible for the interrupt to arrive at
6483 * the CPU before the status block posted prior to the interrupt.
6484 * Reading the PCI State register will confirm whether the
6485 * interrupt is ours and will flush the status block.
6487 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6488 if (tg3_flag(tp, CHIP_RESETTING) ||
6489 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6496 * writing any value to intr-mbox-0 clears PCI INTA# and
6497 * chip-internal interrupt pending events.
6498 * writing non-zero to intr-mbox-0 additionally tells the
6499 * NIC to stop sending us irqs, engaging "in-intr-handler"
6502 * Flush the mailbox to de-assert the IRQ immediately to prevent
6503 * spurious interrupts. The flush impacts performance but
6504 * excessive spurious interrupts can be worse in some cases.
6506 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6509 * In a shared interrupt configuration, sometimes other devices'
6510 * interrupts will scream. We record the current status tag here
6511 * so that the above check can report that the screaming interrupts
6512 * are unhandled. Eventually they will be silenced.
6514 tnapi->last_irq_tag = sblk->status_tag;
6516 if (tg3_irq_sync(tp))
6519 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6521 napi_schedule(&tnapi->napi);
6524 return IRQ_RETVAL(handled);
6527 /* ISR for interrupt test */
6528 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6530 struct tg3_napi *tnapi = dev_id;
6531 struct tg3 *tp = tnapi->tp;
6532 struct tg3_hw_status *sblk = tnapi->hw_status;
6534 if ((sblk->status & SD_STATUS_UPDATED) ||
6535 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6536 tg3_disable_ints(tp);
6537 return IRQ_RETVAL(1);
6539 return IRQ_RETVAL(0);
6542 #ifdef CONFIG_NET_POLL_CONTROLLER
6543 static void tg3_poll_controller(struct net_device *dev)
6546 struct tg3 *tp = netdev_priv(dev);
6548 for (i = 0; i < tp->irq_cnt; i++)
6549 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6553 static void tg3_tx_timeout(struct net_device *dev)
6555 struct tg3 *tp = netdev_priv(dev);
6557 if (netif_msg_tx_err(tp)) {
6558 netdev_err(dev, "transmit timed out, resetting\n");
6562 tg3_reset_task_schedule(tp);
6565 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6566 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6568 u32 base = (u32) mapping & 0xffffffff;
6570 return (base > 0xffffdcc0) && (base + len + 8 < base);
6573 /* Test for DMA addresses > 40-bit */
6574 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6577 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6578 if (tg3_flag(tp, 40BIT_DMA_BUG))
6579 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6586 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6587 dma_addr_t mapping, u32 len, u32 flags,
6590 txbd->addr_hi = ((u64) mapping >> 32);
6591 txbd->addr_lo = ((u64) mapping & 0xffffffff);
6592 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6593 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
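/* Fill in one TX BD for the given DMA mapping, splitting it into
 * multiple BDs when it exceeds tp->dma_limit.  Returns true if the
 * mapping would trip one of the hardware DMA bugs (short-DMA, 4GB or
 * 40-bit boundary) so the caller can fall back to the workaround path.
 */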
6596 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6597 dma_addr_t map, u32 len, u32 flags,
6600 struct tg3 *tp = tnapi->tp;
6603 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6606 if (tg3_4g_overflow_test(map, len))
6609 if (tg3_40bit_overflow_test(tp, map, len))
6612 if (tp->dma_limit) {
6613 u32 prvidx = *entry;
6614 u32 tmp_flag = flags & ~TXD_FLAG_END;
6615 while (len > tp->dma_limit && *budget) {
6616 u32 frag_len = tp->dma_limit;
6617 len -= tp->dma_limit;
6619 /* Avoid the 8-byte DMA problem */
6621 len += tp->dma_limit / 2;
6622 frag_len = tp->dma_limit / 2;
6625 tnapi->tx_buffers[*entry].fragmented = true;
6627 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6628 frag_len, tmp_flag, mss, vlan);
6631 *entry = NEXT_TX(*entry);
6638 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6639 len, flags, mss, vlan);
6641 *entry = NEXT_TX(*entry);
6644 tnapi->tx_buffers[prvidx].fragmented = false;
6648 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6649 len, flags, mss, vlan);
6650 *entry = NEXT_TX(*entry);
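/* Unmap the head and all fragments of a previously posted skb,
 * starting at 'entry' and walking forward over any descriptors that
 * were split by tg3_tx_frag_set().
 */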
6656 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6659 struct sk_buff *skb;
6660 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6665 pci_unmap_single(tnapi->tp->pdev,
6666 dma_unmap_addr(txb, mapping),
6670 while (txb->fragmented) {
6671 txb->fragmented = false;
6672 entry = NEXT_TX(entry);
6673 txb = &tnapi->tx_buffers[entry];
6676 for (i = 0; i <= last; i++) {
6677 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6679 entry = NEXT_TX(entry);
6680 txb = &tnapi->tx_buffers[entry];
6682 pci_unmap_page(tnapi->tp->pdev,
6683 dma_unmap_addr(txb, mapping),
6684 skb_frag_size(frag), PCI_DMA_TODEVICE);
6686 while (txb->fragmented) {
6687 txb->fragmented = false;
6688 entry = NEXT_TX(entry);
6689 txb = &tnapi->tx_buffers[entry];
6694 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6695 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6696 struct sk_buff **pskb,
6697 u32 *entry, u32 *budget,
6698 u32 base_flags, u32 mss, u32 vlan)
6700 struct tg3 *tp = tnapi->tp;
6701 struct sk_buff *new_skb, *skb = *pskb;
6702 dma_addr_t new_addr = 0;
6705 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6706 new_skb = skb_copy(skb, GFP_ATOMIC);
6708 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6710 new_skb = skb_copy_expand(skb,
6711 skb_headroom(skb) + more_headroom,
6712 skb_tailroom(skb), GFP_ATOMIC);
6718 /* New SKB is guaranteed to be linear. */
6719 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6721 /* Make sure the mapping succeeded */
6722 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6723 dev_kfree_skb(new_skb);
6726 u32 save_entry = *entry;
6728 base_flags |= TXD_FLAG_END;
6730 tnapi->tx_buffers[*entry].skb = new_skb;
6731 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6734 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6735 new_skb->len, base_flags,
6737 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6738 dev_kfree_skb(new_skb);
6749 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6751 /* Use GSO to work around a rare TSO bug that may be triggered when the
6752 * TSO header is greater than 80 bytes.
6754 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6756 struct sk_buff *segs, *nskb;
6757 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6759 /* Estimate the number of fragments in the worst case */
6760 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6761 netif_stop_queue(tp->dev);
6763 /* netif_tx_stop_queue() must be done before checking
6764 * tx index in tg3_tx_avail() below, because in
6765 * tg3_tx(), we update tx index before checking for
6766 * netif_tx_queue_stopped().
6769 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6770 return NETDEV_TX_BUSY;
6772 netif_wake_queue(tp->dev);
6775 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6777 goto tg3_tso_bug_end;
6783 tg3_start_xmit(nskb, tp->dev);
6789 return NETDEV_TX_OK;
6792 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6793 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6795 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6797 struct tg3 *tp = netdev_priv(dev);
6798 u32 len, entry, base_flags, mss, vlan = 0;
6800 int i = -1, would_hit_hwbug;
6802 struct tg3_napi *tnapi;
6803 struct netdev_queue *txq;
6806 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6807 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6808 if (tg3_flag(tp, ENABLE_TSS))
6811 budget = tg3_tx_avail(tnapi);
6813 /* We are running in BH disabled context with netif_tx_lock
6814 * and TX reclaim runs via tp->napi.poll inside of a software
6815 * interrupt. Furthermore, IRQ processing runs lockless so we have
6816 * no IRQ context deadlocks to worry about either. Rejoice!
6818 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6819 if (!netif_tx_queue_stopped(txq)) {
6820 netif_tx_stop_queue(txq);
6822 /* This is a hard error, log it. */
6824 "BUG! Tx Ring full when queue awake!\n");
6826 return NETDEV_TX_BUSY;
6829 entry = tnapi->tx_prod;
6831 if (skb->ip_summed == CHECKSUM_PARTIAL)
6832 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6834 mss = skb_shinfo(skb)->gso_size;
6837 u32 tcp_opt_len, hdr_len;
6839 if (skb_header_cloned(skb) &&
6840 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6844 tcp_opt_len = tcp_optlen(skb);
6846 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6848 if (!skb_is_gso_v6(skb)) {
6850 iph->tot_len = htons(mss + hdr_len);
6853 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6854 tg3_flag(tp, TSO_BUG))
6855 return tg3_tso_bug(tp, skb);
6857 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6858 TXD_FLAG_CPU_POST_DMA);
6860 if (tg3_flag(tp, HW_TSO_1) ||
6861 tg3_flag(tp, HW_TSO_2) ||
6862 tg3_flag(tp, HW_TSO_3)) {
6863 tcp_hdr(skb)->check = 0;
6864 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6866 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6871 if (tg3_flag(tp, HW_TSO_3)) {
6872 mss |= (hdr_len & 0xc) << 12;
6874 base_flags |= 0x00000010;
6875 base_flags |= (hdr_len & 0x3e0) << 5;
6876 } else if (tg3_flag(tp, HW_TSO_2))
6877 mss |= hdr_len << 9;
6878 else if (tg3_flag(tp, HW_TSO_1) ||
6879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6880 if (tcp_opt_len || iph->ihl > 5) {
6883 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6884 mss |= (tsflags << 11);
6887 if (tcp_opt_len || iph->ihl > 5) {
6890 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6891 base_flags |= tsflags << 12;
6896 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6897 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6898 base_flags |= TXD_FLAG_JMB_PKT;
6900 if (vlan_tx_tag_present(skb)) {
6901 base_flags |= TXD_FLAG_VLAN;
6902 vlan = vlan_tx_tag_get(skb);
6905 len = skb_headlen(skb);
6907 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6908 if (pci_dma_mapping_error(tp->pdev, mapping))
6912 tnapi->tx_buffers[entry].skb = skb;
6913 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6915 would_hit_hwbug = 0;
6917 if (tg3_flag(tp, 5701_DMA_BUG))
6918 would_hit_hwbug = 1;
6920 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6921 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6923 would_hit_hwbug = 1;
6924 } else if (skb_shinfo(skb)->nr_frags > 0) {
6927 if (!tg3_flag(tp, HW_TSO_1) &&
6928 !tg3_flag(tp, HW_TSO_2) &&
6929 !tg3_flag(tp, HW_TSO_3))
6932 /* Now loop through additional data
6933 * fragments, and queue them.
6935 last = skb_shinfo(skb)->nr_frags - 1;
6936 for (i = 0; i <= last; i++) {
6937 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6939 len = skb_frag_size(frag);
6940 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6941 len, DMA_TO_DEVICE);
6943 tnapi->tx_buffers[entry].skb = NULL;
6944 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6946 if (dma_mapping_error(&tp->pdev->dev, mapping))
6950 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6952 ((i == last) ? TXD_FLAG_END : 0),
6954 would_hit_hwbug = 1;
6960 if (would_hit_hwbug) {
6961 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6963 /* If the workaround fails due to memory/mapping
6964 * failure, silently drop this packet.
6966 entry = tnapi->tx_prod;
6967 budget = tg3_tx_avail(tnapi);
6968 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6969 base_flags, mss, vlan))
6973 skb_tx_timestamp(skb);
6974 netdev_tx_sent_queue(txq, skb->len);
6976 /* Sync BD data before updating mailbox */
6979 /* Packets are ready, update Tx producer idx local and on card. */
6980 tw32_tx_mbox(tnapi->prodmbox, entry);
6982 tnapi->tx_prod = entry;
6983 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6984 netif_tx_stop_queue(txq);
6986 /* netif_tx_stop_queue() must be done before checking
6987 * tx index in tg3_tx_avail() below, because in
6988 * tg3_tx(), we update tx index before checking for
6989 * netif_tx_queue_stopped().
6992 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6993 netif_tx_wake_queue(txq);
6997 return NETDEV_TX_OK;
7000 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7001 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7006 return NETDEV_TX_OK;
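/* Enable or disable internal MAC loopback by programming MAC_MODE
 * (port mode, link polarity and the internal loopback bit).
 */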
7009 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7012 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7013 MAC_MODE_PORT_MODE_MASK);
7015 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7017 if (!tg3_flag(tp, 5705_PLUS))
7018 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7020 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7021 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7023 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7025 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7027 if (tg3_flag(tp, 5705_PLUS) ||
7028 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7029 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7030 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7033 tw32(MAC_MODE, tp->mac_mode);
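/* Place the PHY in loopback at the requested speed (optionally using
 * external loopback), apply the FET-specific test-register
 * workarounds, and set the MAC port mode to match.
 */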
7037 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7039 u32 val, bmcr, mac_mode, ptest = 0;
7041 tg3_phy_toggle_apd(tp, false);
7042 tg3_phy_toggle_automdix(tp, 0);
7044 if (extlpbk && tg3_phy_set_extloopbk(tp))
7047 bmcr = BMCR_FULLDPLX;
7052 bmcr |= BMCR_SPEED100;
7056 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7058 bmcr |= BMCR_SPEED100;
7061 bmcr |= BMCR_SPEED1000;
7066 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7067 tg3_readphy(tp, MII_CTRL1000, &val);
7068 val |= CTL1000_AS_MASTER |
7069 CTL1000_ENABLE_MASTER;
7070 tg3_writephy(tp, MII_CTRL1000, val);
7072 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7073 MII_TG3_FET_PTEST_TRIM_2;
7074 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7077 bmcr |= BMCR_LOOPBACK;
7079 tg3_writephy(tp, MII_BMCR, bmcr);
7081 /* The write needs to be flushed for the FETs */
7082 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7083 tg3_readphy(tp, MII_BMCR, &bmcr);
7087 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7088 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7089 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7090 MII_TG3_FET_PTEST_FRC_TX_LINK |
7091 MII_TG3_FET_PTEST_FRC_TX_LOCK);
7093 /* The write needs to be flushed for the AC131 */
7094 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7097 /* Reset to prevent losing 1st rx packet intermittently */
7098 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7099 tg3_flag(tp, 5780_CLASS)) {
7100 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7102 tw32_f(MAC_RX_MODE, tp->rx_mode);
7105 mac_mode = tp->mac_mode &
7106 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7107 if (speed == SPEED_1000)
7108 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7110 mac_mode |= MAC_MODE_PORT_MODE_MII;
7112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7113 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7115 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7116 mac_mode &= ~MAC_MODE_LINK_POLARITY;
7117 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7118 mac_mode |= MAC_MODE_LINK_POLARITY;
7120 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7121 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7124 tw32(MAC_MODE, mac_mode);
7130 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7132 struct tg3 *tp = netdev_priv(dev);
7134 if (features & NETIF_F_LOOPBACK) {
7135 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7138 spin_lock_bh(&tp->lock);
7139 tg3_mac_loopback(tp, true);
7140 netif_carrier_on(tp->dev);
7141 spin_unlock_bh(&tp->lock);
7142 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7144 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7147 spin_lock_bh(&tp->lock);
7148 tg3_mac_loopback(tp, false);
7149 /* Force link status check */
7150 tg3_setup_phy(tp, 1);
7151 spin_unlock_bh(&tp->lock);
7152 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
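/* On 5780-class devices TSO is not usable once the MTU exceeds the standard
 * Ethernet payload, so mask off all TSO feature bits in that case.
 */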
7156 static netdev_features_t tg3_fix_features(struct net_device *dev,
7157 netdev_features_t features)
7159 struct tg3 *tp = netdev_priv(dev);
7161 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7162 features &= ~NETIF_F_ALL_TSO;
7167 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7169 netdev_features_t changed = dev->features ^ features;
7171 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7172 tg3_set_loopback(dev, features);
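/* Free every rx buffer still posted to this producer ring set.  For a
 * per-vector ring set only the entries between the consumer and producer
 * indexes are walked; for the default ring set every entry is freed.
 */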
7177 static void tg3_rx_prodring_free(struct tg3 *tp,
7178 struct tg3_rx_prodring_set *tpr)
7182 if (tpr != &tp->napi[0].prodring) {
7183 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7184 i = (i + 1) & tp->rx_std_ring_mask)
7185 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7188 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7189 for (i = tpr->rx_jmb_cons_idx;
7190 i != tpr->rx_jmb_prod_idx;
7191 i = (i + 1) & tp->rx_jmb_ring_mask) {
7192 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7200 for (i = 0; i <= tp->rx_std_ring_mask; i++)
7201 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7204 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7205 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7206 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7211 /* Initialize rx rings for packet processing.
7213 * The chip has been shut down and the driver detached from
7214 * the networking, so no interrupts or new tx packets will
7215 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep. */
7218 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7219 struct tg3_rx_prodring_set *tpr)
7221 u32 i, rx_pkt_dma_sz;
7223 tpr->rx_std_cons_idx = 0;
7224 tpr->rx_std_prod_idx = 0;
7225 tpr->rx_jmb_cons_idx = 0;
7226 tpr->rx_jmb_prod_idx = 0;
7228 if (tpr != &tp->napi[0].prodring) {
7229 memset(&tpr->rx_std_buffers[0], 0,
7230 TG3_RX_STD_BUFF_RING_SIZE(tp));
7231 if (tpr->rx_jmb_buffers)
7232 memset(&tpr->rx_jmb_buffers[0], 0,
7233 TG3_RX_JMB_BUFF_RING_SIZE(tp));
7237 /* Zero out all descriptors. */
7238 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7240 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7241 if (tg3_flag(tp, 5780_CLASS) &&
7242 tp->dev->mtu > ETH_DATA_LEN)
7243 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7244 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7246 /* Initialize invariants of the rings; we only set this
7247 * stuff once. This works because the card does not
7248 * write into the rx buffer posting rings.
7250 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7251 struct tg3_rx_buffer_desc *rxd;
7253 rxd = &tpr->rx_std[i];
7254 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7255 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7256 rxd->opaque = (RXD_OPAQUE_RING_STD |
7257 (i << RXD_OPAQUE_INDEX_SHIFT));
7260 /* Now allocate fresh SKBs for each rx ring. */
7261 for (i = 0; i < tp->rx_pending; i++) {
7262 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7263 netdev_warn(tp->dev,
7264 "Using a smaller RX standard ring. Only "
7265 "%d out of %d buffers were allocated "
7266 "successfully\n", i, tp->rx_pending);
7274 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7277 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7279 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7282 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7283 struct tg3_rx_buffer_desc *rxd;
7285 rxd = &tpr->rx_jmb[i].std;
7286 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7287 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7289 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7290 (i << RXD_OPAQUE_INDEX_SHIFT));
7293 for (i = 0; i < tp->rx_jumbo_pending; i++) {
7294 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7295 netdev_warn(tp->dev,
7296 "Using a smaller RX jumbo ring. Only %d "
7297 "out of %d buffers were allocated "
7298 "successfully\n", i, tp->rx_jumbo_pending);
7301 tp->rx_jumbo_pending = i;
7310 tg3_rx_prodring_free(tp, tpr);
7314 static void tg3_rx_prodring_fini(struct tg3 *tp,
7315 struct tg3_rx_prodring_set *tpr)
7317 kfree(tpr->rx_std_buffers);
7318 tpr->rx_std_buffers = NULL;
7319 kfree(tpr->rx_jmb_buffers);
7320 tpr->rx_jmb_buffers = NULL;
7322 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7323 tpr->rx_std, tpr->rx_std_mapping);
7327 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7328 tpr->rx_jmb, tpr->rx_jmb_mapping);
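/* Allocate the host bookkeeping arrays and the DMA-coherent standard (and,
 * on capable chips, jumbo) rx descriptor rings for one producer ring set;
 * tg3_rx_prodring_fini() undoes the allocations on failure.
 */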
7333 static int tg3_rx_prodring_init(struct tg3 *tp,
7334 struct tg3_rx_prodring_set *tpr)
7336 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7338 if (!tpr->rx_std_buffers)
7341 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7342 TG3_RX_STD_RING_BYTES(tp),
7343 &tpr->rx_std_mapping,
7348 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7349 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7351 if (!tpr->rx_jmb_buffers)
7354 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7355 TG3_RX_JMB_RING_BYTES(tp),
7356 &tpr->rx_jmb_mapping,
7365 tg3_rx_prodring_fini(tp, tpr);
7369 /* Free up pending packets in all rx/tx rings.
7371 * The chip has been shut down and the driver detached from
7372 * the networking, so no interrupts or new tx packets will
7373 * end up in the driver. tp->{tx,}lock is not held and we are not
7374 * in an interrupt context and thus may sleep.
7376 static void tg3_free_rings(struct tg3 *tp)
7380 for (j = 0; j < tp->irq_cnt; j++) {
7381 struct tg3_napi *tnapi = &tp->napi[j];
7383 tg3_rx_prodring_free(tp, &tnapi->prodring);
7385 if (!tnapi->tx_buffers)
7388 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7389 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7394 tg3_tx_skb_unmap(tnapi, i,
7395 skb_shinfo(skb)->nr_frags - 1);
7397 dev_kfree_skb_any(skb);
7399 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7403 /* Initialize tx/rx rings for packet processing.
7405 * The chip has been shut down and the driver detached from
7406 * the networking, so no interrupts or new tx packets will
7407 * end up in the driver. tp->{tx,}lock are held and thus we may not sleep. */
7410 static int tg3_init_rings(struct tg3 *tp)
7414 /* Free up all the SKBs. */
7417 for (i = 0; i < tp->irq_cnt; i++) {
7418 struct tg3_napi *tnapi = &tp->napi[i];
7420 tnapi->last_tag = 0;
7421 tnapi->last_irq_tag = 0;
7422 tnapi->hw_status->status = 0;
7423 tnapi->hw_status->status_tag = 0;
7424 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7429 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7431 tnapi->rx_rcb_ptr = 0;
7433 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7435 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7445 * Must not be invoked with interrupt sources disabled and
7446 * the hardware shut down.
7448 static void tg3_free_consistent(struct tg3 *tp)
7452 for (i = 0; i < tp->irq_cnt; i++) {
7453 struct tg3_napi *tnapi = &tp->napi[i];
7455 if (tnapi->tx_ring) {
7456 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7457 tnapi->tx_ring, tnapi->tx_desc_mapping);
7458 tnapi->tx_ring = NULL;
7461 kfree(tnapi->tx_buffers);
7462 tnapi->tx_buffers = NULL;
7464 if (tnapi->rx_rcb) {
7465 dma_free_coherent(&tp->pdev->dev,
7466 TG3_RX_RCB_RING_BYTES(tp),
7468 tnapi->rx_rcb_mapping);
7469 tnapi->rx_rcb = NULL;
7472 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7474 if (tnapi->hw_status) {
7475 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7477 tnapi->status_mapping);
7478 tnapi->hw_status = NULL;
7483 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7484 tp->hw_stats, tp->stats_mapping);
7485 tp->hw_stats = NULL;
7490 * Must not be invoked with interrupt sources disabled and
7491 * the hardware shut down. Can sleep.
7493 static int tg3_alloc_consistent(struct tg3 *tp)
7497 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7498 sizeof(struct tg3_hw_stats),
7504 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7506 for (i = 0; i < tp->irq_cnt; i++) {
7507 struct tg3_napi *tnapi = &tp->napi[i];
7508 struct tg3_hw_status *sblk;
7510 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7512 &tnapi->status_mapping,
7514 if (!tnapi->hw_status)
7517 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7518 sblk = tnapi->hw_status;
7520 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7523 /* If multivector TSS is enabled, vector 0 does not handle
7524 * tx interrupts. Don't allocate any resources for it.
7526 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7527 (i && tg3_flag(tp, ENABLE_TSS))) {
7528 tnapi->tx_buffers = kzalloc(
7529 sizeof(struct tg3_tx_ring_info) *
7530 TG3_TX_RING_SIZE, GFP_KERNEL);
7531 if (!tnapi->tx_buffers)
7534 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7536 &tnapi->tx_desc_mapping,
7538 if (!tnapi->tx_ring)
7543 * When RSS is enabled, the status block format changes
7544 * slightly. The "rx_jumbo_consumer", "reserved",
7545 * and "rx_mini_consumer" members get mapped to the
7546 * other three rx return ring producer indexes.
7550 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7553 tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7556 tnapi->rx_rcb_prod_idx = &sblk->reserved;
7559 tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7564 * If multivector RSS is enabled, vector 0 does not handle
7565 * rx or tx interrupts. Don't allocate any resources for it.
7567 if (!i && tg3_flag(tp, ENABLE_RSS))
7570 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7571 TG3_RX_RCB_RING_BYTES(tp),
7572 &tnapi->rx_rcb_mapping,
7577 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7583 tg3_free_consistent(tp);
7587 #define MAX_WAIT_CNT 1000
7589 /* To stop a block, clear the enable bit and poll till it
7590 * clears. tp->lock is held.
7592 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7597 if (tg3_flag(tp, 5705_PLUS)) {
7604 /* We can't enable/disable these bits of the
7605 * 5705/5750; just say success.
7618 for (i = 0; i < MAX_WAIT_CNT; i++) {
7621 if ((val & enable_bit) == 0)
7625 if (i == MAX_WAIT_CNT && !silent) {
7626 dev_err(&tp->pdev->dev,
7627 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7635 /* tp->lock is held. */
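/* Quiesce the chip: mask interrupts, disable the receiver, stop each rx/tx
 * DMA and buffer-manager block in turn, wait for the transmitter to drain,
 * and finally clear every vector's status block.
 */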
7636 static int tg3_abort_hw(struct tg3 *tp, int silent)
7640 tg3_disable_ints(tp);
7642 tp->rx_mode &= ~RX_MODE_ENABLE;
7643 tw32_f(MAC_RX_MODE, tp->rx_mode);
7646 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7647 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7648 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7649 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7650 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7651 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7653 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7654 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7655 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7656 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7657 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7658 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7659 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7661 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7662 tw32_f(MAC_MODE, tp->mac_mode);
7665 tp->tx_mode &= ~TX_MODE_ENABLE;
7666 tw32_f(MAC_TX_MODE, tp->tx_mode);
7668 for (i = 0; i < MAX_WAIT_CNT; i++) {
7670 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7673 if (i >= MAX_WAIT_CNT) {
7674 dev_err(&tp->pdev->dev,
7675 "%s timed out, TX_MODE_ENABLE will not clear "
7676 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7680 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7681 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7682 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7684 tw32(FTQ_RESET, 0xffffffff);
7685 tw32(FTQ_RESET, 0x00000000);
7687 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7688 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7690 for (i = 0; i < tp->irq_cnt; i++) {
7691 struct tg3_napi *tnapi = &tp->napi[i];
7692 if (tnapi->hw_status)
7693 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7699 /* Save PCI command register before chip reset */
7700 static void tg3_save_pci_state(struct tg3 *tp)
7702 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7705 /* Restore PCI state after chip reset */
7706 static void tg3_restore_pci_state(struct tg3 *tp)
7710 /* Re-enable indirect register accesses. */
7711 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7712 tp->misc_host_ctrl);
7714 /* Set MAX PCI retry to zero. */
7715 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7716 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7717 tg3_flag(tp, PCIX_MODE))
7718 val |= PCISTATE_RETRY_SAME_DMA;
7719 /* Allow reads and writes to the APE register and memory space. */
7720 if (tg3_flag(tp, ENABLE_APE))
7721 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7722 PCISTATE_ALLOW_APE_SHMEM_WR |
7723 PCISTATE_ALLOW_APE_PSPACE_WR;
7724 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7726 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7728 if (!tg3_flag(tp, PCI_EXPRESS)) {
7729 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7730 tp->pci_cacheline_sz);
7731 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7735 /* Make sure PCI-X relaxed ordering bit is clear. */
7736 if (tg3_flag(tp, PCIX_MODE)) {
7739 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7741 pcix_cmd &= ~PCI_X_CMD_ERO;
7742 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7746 if (tg3_flag(tp, 5780_CLASS)) {
7748 /* Chip reset on 5780 will reset MSI enable bit,
7749 * so we need to restore it.
7751 if (tg3_flag(tp, USING_MSI)) {
7754 pci_read_config_word(tp->pdev,
7755 tp->msi_cap + PCI_MSI_FLAGS,
7757 pci_write_config_word(tp->pdev,
7758 tp->msi_cap + PCI_MSI_FLAGS,
7759 ctrl | PCI_MSI_FLAGS_ENABLE);
7760 val = tr32(MSGINT_MODE);
7761 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7766 /* tp->lock is held. */
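/* Issue a GRC core-clock reset of the whole chip, then restore the PCI and
 * PCIe configuration, MSI state and MAC mode, and re-probe the ASF settings
 * that the reset wiped out.
 */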
7767 static int tg3_chip_reset(struct tg3 *tp)
7770 void (*write_op)(struct tg3 *, u32, u32);
7775 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7777 /* No matching tg3_nvram_unlock() after this because
7778 * chip reset below will undo the nvram lock.
7780 tp->nvram_lock_cnt = 0;
7782 /* GRC_MISC_CFG core clock reset will clear the memory
7783 * enable bit in PCI register 4 and the MSI enable bit
7784 * on some chips, so we save relevant registers here.
7786 tg3_save_pci_state(tp);
7788 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7789 tg3_flag(tp, 5755_PLUS))
7790 tw32(GRC_FASTBOOT_PC, 0);
7793 * We must avoid the readl() that normally takes place.
7794 * It locks machines, causes machine checks, and other
7795 * fun things. So, temporarily disable the 5701
7796 * hardware workaround, while we do the reset.
7798 write_op = tp->write32;
7799 if (write_op == tg3_write_flush_reg32)
7800 tp->write32 = tg3_write32;
7802 /* Prevent the irq handler from reading or writing PCI registers
7803 * during chip reset when the memory enable bit in the PCI command
7804 * register may be cleared. The chip does not generate interrupts
7805 * at this time, but the irq handler may still be called due to irq
7806 * sharing or irqpoll.
7808 tg3_flag_set(tp, CHIP_RESETTING);
7809 for (i = 0; i < tp->irq_cnt; i++) {
7810 struct tg3_napi *tnapi = &tp->napi[i];
7811 if (tnapi->hw_status) {
7812 tnapi->hw_status->status = 0;
7813 tnapi->hw_status->status_tag = 0;
7815 tnapi->last_tag = 0;
7816 tnapi->last_irq_tag = 0;
7820 for (i = 0; i < tp->irq_cnt; i++)
7821 synchronize_irq(tp->napi[i].irq_vec);
7823 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7824 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7825 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7829 val = GRC_MISC_CFG_CORECLK_RESET;
7831 if (tg3_flag(tp, PCI_EXPRESS)) {
7832 /* Force PCIe 1.0a mode */
7833 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7834 !tg3_flag(tp, 57765_PLUS) &&
7835 tr32(TG3_PCIE_PHY_TSTCTL) ==
7836 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7837 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7839 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7840 tw32(GRC_MISC_CFG, (1 << 29));
7845 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7846 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7847 tw32(GRC_VCPU_EXT_CTRL,
7848 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7851 /* Manage gphy power for all CPMU absent PCIe devices. */
7852 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7853 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7855 tw32(GRC_MISC_CFG, val);
7857 /* restore 5701 hardware bug workaround write method */
7858 tp->write32 = write_op;
7860 /* Unfortunately, we have to delay before the PCI read back.
7861 * Some 575X chips will not even respond to a PCI cfg access
7862 * when the reset command is given to the chip.
7864 * How do these hardware designers expect things to work
7865 * properly if the PCI write is posted for a long period
7866 * of time? It is always necessary to have some method by
7867 * which a register read back can occur to push the write
7868 * out which does the reset.
7870 * For most tg3 variants the trick below was working.
7875 /* Flush PCI posted writes. The normal MMIO registers
7876 * are inaccessible at this time so this is the only
7877 * way to do this reliably (actually, this is no longer
7878 * the case, see above). I tried to use indirect
7879 * register read/write but this upset some 5701 variants.
7881 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7885 if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7888 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7892 /* Wait for link training to complete. */
7893 for (i = 0; i < 5000; i++)
7896 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7897 pci_write_config_dword(tp->pdev, 0xc4,
7898 cfg_val | (1 << 15));
7901 /* Clear the "no snoop" and "relaxed ordering" bits. */
7902 pci_read_config_word(tp->pdev,
7903 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7905 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7906 PCI_EXP_DEVCTL_NOSNOOP_EN);
7908 * Older PCIe devices only support the 128 byte
7909 * MPS setting. Enforce the restriction.
7911 if (!tg3_flag(tp, CPMU_PRESENT))
7912 val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7913 pci_write_config_word(tp->pdev,
7914 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7917 /* Clear error status */
7918 pci_write_config_word(tp->pdev,
7919 pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7920 PCI_EXP_DEVSTA_CED |
7921 PCI_EXP_DEVSTA_NFED |
7922 PCI_EXP_DEVSTA_FED |
7923 PCI_EXP_DEVSTA_URD);
7926 tg3_restore_pci_state(tp);
7928 tg3_flag_clear(tp, CHIP_RESETTING);
7929 tg3_flag_clear(tp, ERROR_PROCESSED);
7932 if (tg3_flag(tp, 5780_CLASS))
7933 val = tr32(MEMARB_MODE);
7934 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7936 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7938 tw32(0x5000, 0x400);
7941 tw32(GRC_MODE, tp->grc_mode);
7943 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7946 tw32(0xc4, val | (1 << 15));
7949 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7950 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7951 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7952 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7953 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7954 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7957 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7958 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7960 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7961 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7966 tw32_f(MAC_MODE, val);
7969 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7971 err = tg3_poll_fw(tp);
7977 if (tg3_flag(tp, PCI_EXPRESS) &&
7978 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7979 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7980 !tg3_flag(tp, 57765_PLUS)) {
7983 tw32(0x7c00, val | (1 << 25));
7986 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7987 val = tr32(TG3_CPMU_CLCK_ORIDE);
7988 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7991 /* Reprobe ASF enable state. */
7992 tg3_flag_clear(tp, ENABLE_ASF);
7993 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7994 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7995 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7998 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7999 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8000 tg3_flag_set(tp, ENABLE_ASF);
8001 tp->last_event_jiffies = jiffies;
8002 if (tg3_flag(tp, 5750_PLUS))
8003 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8010 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8011 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8013 /* tp->lock is held. */
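/* Bring the device down cleanly: signal the firmware, abort the hardware,
 * reset the chip, restore the MAC address and carry the statistics over
 * the reset so the counters are not lost.
 */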
8014 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8020 tg3_write_sig_pre_reset(tp, kind);
8022 tg3_abort_hw(tp, silent);
8023 err = tg3_chip_reset(tp);
8025 __tg3_set_mac_addr(tp, 0);
8027 tg3_write_sig_legacy(tp, kind);
8028 tg3_write_sig_post_reset(tp, kind);
8031 /* Save the stats across chip resets... */
8032 tg3_get_nstats(tp, &tp->net_stats_prev);
8033 tg3_get_estats(tp, &tp->estats_prev);
8035 /* And make sure the next sample is new data */
8036 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8045 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8047 struct tg3 *tp = netdev_priv(dev);
8048 struct sockaddr *addr = p;
8049 int err = 0, skip_mac_1 = 0;
8051 if (!is_valid_ether_addr(addr->sa_data))
8052 return -EADDRNOTAVAIL;
8054 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8056 if (!netif_running(dev))
8059 if (tg3_flag(tp, ENABLE_ASF)) {
8060 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8062 addr0_high = tr32(MAC_ADDR_0_HIGH);
8063 addr0_low = tr32(MAC_ADDR_0_LOW);
8064 addr1_high = tr32(MAC_ADDR_1_HIGH);
8065 addr1_low = tr32(MAC_ADDR_1_LOW);
8067 /* Skip MAC addr 1 if ASF is using it. */
8068 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8069 !(addr1_high == 0 && addr1_low == 0))
8072 spin_lock_bh(&tp->lock);
8073 __tg3_set_mac_addr(tp, skip_mac_1);
8074 spin_unlock_bh(&tp->lock);
8079 /* tp->lock is held. */
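/* Write one TG3_BDINFO block into NIC SRAM: the 64-bit host DMA address of
 * the ring, its maxlen/flags word and, on pre-5705 chips, the NIC-local
 * descriptor address.
 */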
8080 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8081 dma_addr_t mapping, u32 maxlen_flags,
8085 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8086 ((u64) mapping >> 32));
8088 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8089 ((u64) mapping & 0xffffffff));
8091 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8094 if (!tg3_flag(tp, 5705_PLUS))
8096 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8100 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8104 if (!tg3_flag(tp, ENABLE_TSS)) {
8105 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8106 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8107 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8109 tw32(HOSTCC_TXCOL_TICKS, 0);
8110 tw32(HOSTCC_TXMAX_FRAMES, 0);
8111 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8114 if (!tg3_flag(tp, ENABLE_RSS)) {
8115 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8116 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8117 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8119 tw32(HOSTCC_RXCOL_TICKS, 0);
8120 tw32(HOSTCC_RXMAX_FRAMES, 0);
8121 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8124 if (!tg3_flag(tp, 5705_PLUS)) {
8125 u32 val = ec->stats_block_coalesce_usecs;
8127 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8128 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8130 if (!netif_carrier_ok(tp->dev))
8133 tw32(HOSTCC_STAT_COAL_TICKS, val);
8136 for (i = 0; i < tp->irq_cnt - 1; i++) {
8139 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8140 tw32(reg, ec->rx_coalesce_usecs);
8141 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8142 tw32(reg, ec->rx_max_coalesced_frames);
8143 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8144 tw32(reg, ec->rx_max_coalesced_frames_irq);
8146 if (tg3_flag(tp, ENABLE_TSS)) {
8147 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8148 tw32(reg, ec->tx_coalesce_usecs);
8149 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8150 tw32(reg, ec->tx_max_coalesced_frames);
8151 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8152 tw32(reg, ec->tx_max_coalesced_frames_irq);
8156 for (; i < tp->irq_max - 1; i++) {
8157 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8158 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8159 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8161 if (tg3_flag(tp, ENABLE_TSS)) {
8162 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8163 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8164 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8169 /* tp->lock is held. */
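/* Disable every send and receive-return ring except the first, zero the
 * interrupt and producer/consumer mailboxes, and re-program the status
 * block address and BD info for each vector that is in use.
 */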
8170 static void tg3_rings_reset(struct tg3 *tp)
8173 u32 stblk, txrcb, rxrcb, limit;
8174 struct tg3_napi *tnapi = &tp->napi[0];
8176 /* Disable all transmit rings but the first. */
8177 if (!tg3_flag(tp, 5705_PLUS))
8178 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8179 else if (tg3_flag(tp, 5717_PLUS))
8180 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8181 else if (tg3_flag(tp, 57765_CLASS))
8182 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8184 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8186 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8187 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8188 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8189 BDINFO_FLAGS_DISABLED);
8192 /* Disable all receive return rings but the first. */
8193 if (tg3_flag(tp, 5717_PLUS))
8194 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8195 else if (!tg3_flag(tp, 5705_PLUS))
8196 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8197 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8198 tg3_flag(tp, 57765_CLASS))
8199 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8201 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8203 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8204 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8205 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8206 BDINFO_FLAGS_DISABLED);
8208 /* Disable interrupts */
8209 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8210 tp->napi[0].chk_msi_cnt = 0;
8211 tp->napi[0].last_rx_cons = 0;
8212 tp->napi[0].last_tx_cons = 0;
8214 /* Zero mailbox registers. */
8215 if (tg3_flag(tp, SUPPORT_MSIX)) {
8216 for (i = 1; i < tp->irq_max; i++) {
8217 tp->napi[i].tx_prod = 0;
8218 tp->napi[i].tx_cons = 0;
8219 if (tg3_flag(tp, ENABLE_TSS))
8220 tw32_mailbox(tp->napi[i].prodmbox, 0);
8221 tw32_rx_mbox(tp->napi[i].consmbox, 0);
8222 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8223 tp->napi[i].chk_msi_cnt = 0;
8224 tp->napi[i].last_rx_cons = 0;
8225 tp->napi[i].last_tx_cons = 0;
8227 if (!tg3_flag(tp, ENABLE_TSS))
8228 tw32_mailbox(tp->napi[0].prodmbox, 0);
8230 tp->napi[0].tx_prod = 0;
8231 tp->napi[0].tx_cons = 0;
8232 tw32_mailbox(tp->napi[0].prodmbox, 0);
8233 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8236 /* Make sure the NIC-based send BD rings are disabled. */
8237 if (!tg3_flag(tp, 5705_PLUS)) {
8238 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8239 for (i = 0; i < 16; i++)
8240 tw32_tx_mbox(mbox + i * 8, 0);
8243 txrcb = NIC_SRAM_SEND_RCB;
8244 rxrcb = NIC_SRAM_RCV_RET_RCB;
8246 /* Clear status block in ram. */
8247 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8249 /* Set status block DMA address */
8250 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8251 ((u64) tnapi->status_mapping >> 32));
8252 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8253 ((u64) tnapi->status_mapping & 0xffffffff));
8255 if (tnapi->tx_ring) {
8256 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8257 (TG3_TX_RING_SIZE <<
8258 BDINFO_FLAGS_MAXLEN_SHIFT),
8259 NIC_SRAM_TX_BUFFER_DESC);
8260 txrcb += TG3_BDINFO_SIZE;
8263 if (tnapi->rx_rcb) {
8264 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8265 (tp->rx_ret_ring_mask + 1) <<
8266 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8267 rxrcb += TG3_BDINFO_SIZE;
8270 stblk = HOSTCC_STATBLCK_RING1;
8272 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8273 u64 mapping = (u64)tnapi->status_mapping;
8274 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8275 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8277 /* Clear status block in ram. */
8278 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8280 if (tnapi->tx_ring) {
8281 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8282 (TG3_TX_RING_SIZE <<
8283 BDINFO_FLAGS_MAXLEN_SHIFT),
8284 NIC_SRAM_TX_BUFFER_DESC);
8285 txrcb += TG3_BDINFO_SIZE;
8288 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8289 ((tp->rx_ret_ring_mask + 1) <<
8290 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8293 rxrcb += TG3_BDINFO_SIZE;
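/* Program the rx BD replenish thresholds.  The standard-ring threshold is
 * the smaller of half the on-chip BD cache (bounded by rx_std_max_post)
 * and one eighth of the pending host ring; 57765+ parts also get a
 * replenish low-water mark equal to the full cache size.  The jumbo ring,
 * where present, is handled the same way.
 */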
8297 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8299 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8301 if (!tg3_flag(tp, 5750_PLUS) ||
8302 tg3_flag(tp, 5780_CLASS) ||
8303 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8304 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8305 tg3_flag(tp, 57765_PLUS))
8306 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8307 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8308 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8309 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8311 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8313 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8314 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8316 val = min(nic_rep_thresh, host_rep_thresh);
8317 tw32(RCVBDI_STD_THRESH, val);
8319 if (tg3_flag(tp, 57765_PLUS))
8320 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8322 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8325 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8327 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8329 val = min(bdcache_maxcnt / 2, host_rep_thresh);
8330 tw32(RCVBDI_JUMBO_THRESH, val);
8332 if (tg3_flag(tp, 57765_PLUS))
8333 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
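/* Compute the CRC used to hash multicast addresses into the
 * MAC_HASH_REG_* filter registers (see __tg3_set_rx_mode() below).
 */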
8336 static inline u32 calc_crc(unsigned char *buf, int len)
8344 for (j = 0; j < len; j++) {
8347 for (k = 0; k < 8; k++) {
8360 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8362 /* accept or reject all multicast frames */
8363 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8364 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8365 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8366 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8369 static void __tg3_set_rx_mode(struct net_device *dev)
8371 struct tg3 *tp = netdev_priv(dev);
8374 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8375 RX_MODE_KEEP_VLAN_TAG);
8377 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8378 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG flag clear. */
8381 if (!tg3_flag(tp, ENABLE_ASF))
8382 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8385 if (dev->flags & IFF_PROMISC) {
8386 /* Promiscuous mode. */
8387 rx_mode |= RX_MODE_PROMISC;
8388 } else if (dev->flags & IFF_ALLMULTI) {
8389 /* Accept all multicast. */
8390 tg3_set_multi(tp, 1);
8391 } else if (netdev_mc_empty(dev)) {
8392 /* Reject all multicast. */
8393 tg3_set_multi(tp, 0);
8395 /* Accept one or more multicast(s). */
8396 struct netdev_hw_addr *ha;
8397 u32 mc_filter[4] = { 0, };
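/* Hash each multicast address into a 128-bit filter: part of the CRC
 * selects one of the four 32-bit hash registers (regidx) and the rest
 * selects a bit within it.
 */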
8402 netdev_for_each_mc_addr(ha, dev) {
8403 crc = calc_crc(ha->addr, ETH_ALEN);
8405 regidx = (bit & 0x60) >> 5;
8407 mc_filter[regidx] |= (1 << bit);
8410 tw32(MAC_HASH_REG_0, mc_filter[0]);
8411 tw32(MAC_HASH_REG_1, mc_filter[1]);
8412 tw32(MAC_HASH_REG_2, mc_filter[2]);
8413 tw32(MAC_HASH_REG_3, mc_filter[3]);
8416 if (rx_mode != tp->rx_mode) {
8417 tp->rx_mode = rx_mode;
8418 tw32_f(MAC_RX_MODE, rx_mode);
8423 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8427 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8428 tp->rss_ind_tbl[i] =
8429 ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8432 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8436 if (!tg3_flag(tp, SUPPORT_MSIX))
8439 if (tp->irq_cnt <= 2) {
8440 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8444 /* Validate table against current IRQ count */
8445 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8446 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8450 if (i != TG3_RSS_INDIR_TBL_SIZE)
8451 tg3_rss_init_dflt_indir_tbl(tp);
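/* Copy the RSS indirection table into the MAC_RSS_INDIR_TBL_* registers,
 * packing several table entries into each 32-bit register write.
 */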
8454 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8457 u32 reg = MAC_RSS_INDIR_TBL_0;
8459 while (i < TG3_RSS_INDIR_TBL_SIZE) {
8460 u32 val = tp->rss_ind_tbl[i];
8462 for (; i % 8; i++) {
8464 val |= tp->rss_ind_tbl[i];
8471 /* tp->lock is held. */
8472 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8474 u32 val, rdmac_mode;
8476 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8478 tg3_disable_ints(tp);
8482 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8484 if (tg3_flag(tp, INIT_COMPLETE))
8485 tg3_abort_hw(tp, 1);
8487 /* Enable MAC control of LPI */
8488 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8489 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8490 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8491 TG3_CPMU_EEE_LNKIDL_UART_IDL);
8493 tw32_f(TG3_CPMU_EEE_CTRL,
8494 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8496 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8497 TG3_CPMU_EEEMD_LPI_IN_TX |
8498 TG3_CPMU_EEEMD_LPI_IN_RX |
8499 TG3_CPMU_EEEMD_EEE_ENABLE;
8501 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8502 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8504 if (tg3_flag(tp, ENABLE_APE))
8505 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8507 tw32_f(TG3_CPMU_EEE_MODE, val);
8509 tw32_f(TG3_CPMU_EEE_DBTMR1,
8510 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8511 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8513 tw32_f(TG3_CPMU_EEE_DBTMR2,
8514 TG3_CPMU_DBTMR2_APE_TX_2047US |
8515 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8521 err = tg3_chip_reset(tp);
8525 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8527 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8528 val = tr32(TG3_CPMU_CTRL);
8529 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8530 tw32(TG3_CPMU_CTRL, val);
8532 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8533 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8534 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8535 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8537 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8538 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8539 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8540 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8542 val = tr32(TG3_CPMU_HST_ACC);
8543 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8544 val |= CPMU_HST_ACC_MACCLK_6_25;
8545 tw32(TG3_CPMU_HST_ACC, val);
8548 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8549 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8550 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8551 PCIE_PWR_MGMT_L1_THRESH_4MS;
8552 tw32(PCIE_PWR_MGMT_THRESH, val);
8554 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8555 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8557 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8559 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8560 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8563 if (tg3_flag(tp, L1PLLPD_EN)) {
8564 u32 grc_mode = tr32(GRC_MODE);
8566 /* Access the lower 1K of PL PCIE block registers. */
8567 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8568 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8570 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8571 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8572 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8574 tw32(GRC_MODE, grc_mode);
8577 if (tg3_flag(tp, 57765_CLASS)) {
8578 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8579 u32 grc_mode = tr32(GRC_MODE);
8581 /* Access the lower 1K of PL PCIE block registers. */
8582 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8583 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8585 val = tr32(TG3_PCIE_TLDLPL_PORT +
8586 TG3_PCIE_PL_LO_PHYCTL5);
8587 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8588 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8590 tw32(GRC_MODE, grc_mode);
8593 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8594 u32 grc_mode = tr32(GRC_MODE);
8596 /* Access the lower 1K of DL PCIE block registers. */
8597 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8598 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8600 val = tr32(TG3_PCIE_TLDLPL_PORT +
8601 TG3_PCIE_DL_LO_FTSMAX);
8602 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8603 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8604 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8606 tw32(GRC_MODE, grc_mode);
8609 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8610 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8611 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8612 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8615 /* This works around an issue with Athlon chipsets on
8616 * B3 tigon3 silicon. This bit has no effect on any
8617 * other revision. But do not set this on PCI Express
8618 * chips and don't even touch the clocks if the CPMU is present.
8620 if (!tg3_flag(tp, CPMU_PRESENT)) {
8621 if (!tg3_flag(tp, PCI_EXPRESS))
8622 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8623 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8626 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8627 tg3_flag(tp, PCIX_MODE)) {
8628 val = tr32(TG3PCI_PCISTATE);
8629 val |= PCISTATE_RETRY_SAME_DMA;
8630 tw32(TG3PCI_PCISTATE, val);
8633 if (tg3_flag(tp, ENABLE_APE)) {
8634 /* Allow reads and writes to the
8635 * APE register and memory space.
8637 val = tr32(TG3PCI_PCISTATE);
8638 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8639 PCISTATE_ALLOW_APE_SHMEM_WR |
8640 PCISTATE_ALLOW_APE_PSPACE_WR;
8641 tw32(TG3PCI_PCISTATE, val);
8644 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8645 /* Enable some hw fixes. */
8646 val = tr32(TG3PCI_MSI_DATA);
8647 val |= (1 << 26) | (1 << 28) | (1 << 29);
8648 tw32(TG3PCI_MSI_DATA, val);
8651 /* Descriptor ring init may make accesses to the
8652 * NIC SRAM area to set up the TX descriptors, so we
8653 * can only do this after the hardware has been
8654 * successfully reset.
8656 err = tg3_init_rings(tp);
8660 if (tg3_flag(tp, 57765_PLUS)) {
8661 val = tr32(TG3PCI_DMA_RW_CTRL) &
8662 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8663 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8664 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8665 if (!tg3_flag(tp, 57765_CLASS) &&
8666 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8667 val |= DMA_RWCTRL_TAGGED_STAT_WA;
8668 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8669 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8670 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8671 /* This value is determined during the probe time DMA
8672 * engine test, tg3_test_dma.
8674 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8677 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8678 GRC_MODE_4X_NIC_SEND_RINGS |
8679 GRC_MODE_NO_TX_PHDR_CSUM |
8680 GRC_MODE_NO_RX_PHDR_CSUM);
8681 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8683 /* Pseudo-header checksum is done by hardware logic and not
8684 * the offload processors, so make the chip do the pseudo-
8685 * header checksums on receive. For transmit it is more
8686 * convenient to do the pseudo-header checksum in software
8687 * as Linux does that on transmit for us in all cases.
8689 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8693 (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8695 /* Setup the timer prescaler register. Clock is always 66 MHz. */
8696 val = tr32(GRC_MISC_CFG);
8698 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8699 tw32(GRC_MISC_CFG, val);
8701 /* Initialize MBUF/DESC pool. */
8702 if (tg3_flag(tp, 5750_PLUS)) {
8704 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8705 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8706 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8707 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8709 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8710 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8711 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8712 } else if (tg3_flag(tp, TSO_CAPABLE)) {
8715 fw_len = tp->fw_len;
8716 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8717 tw32(BUFMGR_MB_POOL_ADDR,
8718 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8719 tw32(BUFMGR_MB_POOL_SIZE,
8720 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8723 if (tp->dev->mtu <= ETH_DATA_LEN) {
8724 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8725 tp->bufmgr_config.mbuf_read_dma_low_water);
8726 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8727 tp->bufmgr_config.mbuf_mac_rx_low_water);
8728 tw32(BUFMGR_MB_HIGH_WATER,
8729 tp->bufmgr_config.mbuf_high_water);
8731 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8732 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8733 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8734 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8735 tw32(BUFMGR_MB_HIGH_WATER,
8736 tp->bufmgr_config.mbuf_high_water_jumbo);
8738 tw32(BUFMGR_DMA_LOW_WATER,
8739 tp->bufmgr_config.dma_low_water);
8740 tw32(BUFMGR_DMA_HIGH_WATER,
8741 tp->bufmgr_config.dma_high_water);
8743 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8744 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8745 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8746 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8747 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8748 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8749 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8750 tw32(BUFMGR_MODE, val);
8751 for (i = 0; i < 2000; i++) {
8752 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8757 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8761 if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8762 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8764 tg3_setup_rxbd_thresholds(tp);
8766 /* Initialize TG3_BDINFO's at:
8767 * RCVDBDI_STD_BD: standard eth size rx ring
8768 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
8769 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
8772 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
8773 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
8774 * ring attribute flags
8775 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
8777 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8778 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8780 * The size of each ring is fixed in the firmware, but the location is configurable. */
8783 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8784 ((u64) tpr->rx_std_mapping >> 32));
8785 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8786 ((u64) tpr->rx_std_mapping & 0xffffffff));
8787 if (!tg3_flag(tp, 5717_PLUS))
8788 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8789 NIC_SRAM_RX_BUFFER_DESC);
8791 /* Disable the mini ring */
8792 if (!tg3_flag(tp, 5705_PLUS))
8793 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8794 BDINFO_FLAGS_DISABLED);
8796 /* Program the jumbo buffer descriptor ring control
8797 * blocks on those devices that have them.
8799 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8800 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8802 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8803 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8804 ((u64) tpr->rx_jmb_mapping >> 32));
8805 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8806 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8807 val = TG3_RX_JMB_RING_SIZE(tp) <<
8808 BDINFO_FLAGS_MAXLEN_SHIFT;
8809 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8810 val | BDINFO_FLAGS_USE_EXT_RECV);
8811 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8812 tg3_flag(tp, 57765_CLASS))
8813 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8814 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8816 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8817 BDINFO_FLAGS_DISABLED);
8820 if (tg3_flag(tp, 57765_PLUS)) {
8821 val = TG3_RX_STD_RING_SIZE(tp);
8822 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8823 val |= (TG3_RX_STD_DMA_SZ << 2);
8825 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8827 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8829 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8831 tpr->rx_std_prod_idx = tp->rx_pending;
8832 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8834 tpr->rx_jmb_prod_idx =
8835 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8836 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8838 tg3_rings_reset(tp);
8840 /* Initialize MAC address and backoff seed. */
8841 __tg3_set_mac_addr(tp, 0);
8843 /* MTU + ethernet header + FCS + optional VLAN tag */
8844 tw32(MAC_RX_MTU_SIZE,
8845 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8847 /* The slot time is changed by tg3_setup_phy if we
8848 * run at gigabit with half duplex.
8850 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8851 (6 << TX_LENGTHS_IPG_SHIFT) |
8852 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8854 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8855 val |= tr32(MAC_TX_LENGTHS) &
8856 (TX_LENGTHS_JMB_FRM_LEN_MSK |
8857 TX_LENGTHS_CNT_DWN_VAL_MSK);
8859 tw32(MAC_TX_LENGTHS, val);
8861 /* Receive rules. */
8862 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8863 tw32(RCVLPC_CONFIG, 0x0181);
8865 /* Calculate RDMAC_MODE setting early; we need it to determine
8866 * the RCVLPC_STATE_ENABLE mask.
8868 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8869 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8870 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8871 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8872 RDMAC_MODE_LNGREAD_ENAB);
8874 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8875 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8877 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8878 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8879 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8880 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8881 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8882 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8884 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8885 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8886 if (tg3_flag(tp, TSO_CAPABLE) &&
8887 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8888 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8889 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8890 !tg3_flag(tp, IS_5788)) {
8891 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8895 if (tg3_flag(tp, PCI_EXPRESS))
8896 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8898 if (tg3_flag(tp, HW_TSO_1) ||
8899 tg3_flag(tp, HW_TSO_2) ||
8900 tg3_flag(tp, HW_TSO_3))
8901 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8903 if (tg3_flag(tp, 57765_PLUS) ||
8904 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8905 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8906 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8909 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8911 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8912 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8913 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8914 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8915 tg3_flag(tp, 57765_PLUS)) {
8916 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8917 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8918 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8919 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8920 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8921 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8922 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8923 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8924 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8926 tw32(TG3_RDMA_RSRVCTRL_REG,
8927 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8931 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8932 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8933 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8934 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8935 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8938 /* Receive/send statistics. */
8939 if (tg3_flag(tp, 5750_PLUS)) {
8940 val = tr32(RCVLPC_STATS_ENABLE);
8941 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8942 tw32(RCVLPC_STATS_ENABLE, val);
8943 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8944 tg3_flag(tp, TSO_CAPABLE)) {
8945 val = tr32(RCVLPC_STATS_ENABLE);
8946 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8947 tw32(RCVLPC_STATS_ENABLE, val);
8949 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8951 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8952 tw32(SNDDATAI_STATSENAB, 0xffffff);
8953 tw32(SNDDATAI_STATSCTRL,
8954 (SNDDATAI_SCTRL_ENABLE |
8955 SNDDATAI_SCTRL_FASTUPD));
8957 /* Setup host coalescing engine. */
8958 tw32(HOSTCC_MODE, 0);
8959 for (i = 0; i < 2000; i++) {
8960 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8965 __tg3_set_coalesce(tp, &tp->coal);
8967 if (!tg3_flag(tp, 5705_PLUS)) {
8968 /* Status/statistics block address. See tg3_timer,
8969 * the tg3_periodic_fetch_stats call there, and
8970 * tg3_get_stats to see how this works for 5705/5750 chips.
8972 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8973 ((u64) tp->stats_mapping >> 32));
8974 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8975 ((u64) tp->stats_mapping & 0xffffffff));
8976 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8978 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8980 /* Clear statistics and status block memory areas */
8981 for (i = NIC_SRAM_STATS_BLK;
8982 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8984 tg3_write_mem(tp, i, 0);
8989 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8991 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8992 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8993 if (!tg3_flag(tp, 5705_PLUS))
8994 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8996 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8997 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8998 /* reset to prevent losing 1st rx packet intermittently */
8999 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9003 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9004 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9005 MAC_MODE_FHDE_ENABLE;
9006 if (tg3_flag(tp, ENABLE_APE))
9007 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9008 if (!tg3_flag(tp, 5705_PLUS) &&
9009 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9010 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9011 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9012 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9015 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9016 * If TG3_FLAG_IS_NIC is zero, we should read the
9017 * register to preserve the GPIO settings for LOMs. The GPIOs,
9018 * whether used as inputs or outputs, are set by boot code after reset. */
9021 if (!tg3_flag(tp, IS_NIC)) {
9024 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9025 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9026 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9028 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9029 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9030 GRC_LCLCTRL_GPIO_OUTPUT3;
9032 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9033 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9035 tp->grc_local_ctrl &= ~gpio_mask;
9036 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9038 /* GPIO1 must be driven high for eeprom write protect */
9039 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9040 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9041 GRC_LCLCTRL_GPIO_OUTPUT1);
9043 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9046 if (tg3_flag(tp, USING_MSIX)) {
9047 val = tr32(MSGINT_MODE);
9048 val |= MSGINT_MODE_ENABLE;
9049 if (tp->irq_cnt > 1)
9050 val |= MSGINT_MODE_MULTIVEC_EN;
9051 if (!tg3_flag(tp, 1SHOT_MSI))
9052 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9053 tw32(MSGINT_MODE, val);
9056 if (!tg3_flag(tp, 5705_PLUS)) {
9057 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9061 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9062 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9063 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9064 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9065 WDMAC_MODE_LNGREAD_ENAB);
9067 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9068 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9069 if (tg3_flag(tp, TSO_CAPABLE) &&
9070 (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9071 tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9073 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9074 !tg3_flag(tp, IS_5788)) {
9075 val |= WDMAC_MODE_RX_ACCEL;
9079 /* Enable host coalescing bug fix */
9080 if (tg3_flag(tp, 5755_PLUS))
9081 val |= WDMAC_MODE_STATUS_TAG_FIX;
9083 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9084 val |= WDMAC_MODE_BURST_ALL_DATA;
9086 tw32_f(WDMAC_MODE, val);
9089 if (tg3_flag(tp, PCIX_MODE)) {
9092 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9094 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9095 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9096 pcix_cmd |= PCI_X_CMD_READ_2K;
9097 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9098 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9099 pcix_cmd |= PCI_X_CMD_READ_2K;
9101 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9105 tw32_f(RDMAC_MODE, rdmac_mode);
9108 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9109 if (!tg3_flag(tp, 5705_PLUS))
9110 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9112 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9114 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9116 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9118 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9119 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9120 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9121 if (tg3_flag(tp, LRG_PROD_RING_CAP))
9122 val |= RCVDBDI_MODE_LRG_RING_SZ;
9123 tw32(RCVDBDI_MODE, val);
9124 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9125 if (tg3_flag(tp, HW_TSO_1) ||
9126 tg3_flag(tp, HW_TSO_2) ||
9127 tg3_flag(tp, HW_TSO_3))
9128 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9129 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9130 if (tg3_flag(tp, ENABLE_TSS))
9131 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9132 tw32(SNDBDI_MODE, val);
9133 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9135 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9136 err = tg3_load_5701_a0_firmware_fix(tp);
9141 if (tg3_flag(tp, TSO_CAPABLE)) {
9142 err = tg3_load_tso_firmware(tp);
9147 tp->tx_mode = TX_MODE_ENABLE;
9149 if (tg3_flag(tp, 5755_PLUS) ||
9150 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9151 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9153 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9154 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9155 tp->tx_mode &= ~val;
9156 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9159 tw32_f(MAC_TX_MODE, tp->tx_mode);
9162 if (tg3_flag(tp, ENABLE_RSS)) {
9163 tg3_rss_write_indir_tbl(tp);
9165 /* Setup the "secret" hash key. */
9166 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9167 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9168 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9169 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9170 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9171 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9172 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9173 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9174 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9175 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9178 tp->rx_mode = RX_MODE_ENABLE;
9179 if (tg3_flag(tp, 5755_PLUS))
9180 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9182 if (tg3_flag(tp, ENABLE_RSS))
9183 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9184 RX_MODE_RSS_ITBL_HASH_BITS_7 |
9185 RX_MODE_RSS_IPV6_HASH_EN |
9186 RX_MODE_RSS_TCP_IPV6_HASH_EN |
9187 RX_MODE_RSS_IPV4_HASH_EN |
9188 RX_MODE_RSS_TCP_IPV4_HASH_EN;
9190 tw32_f(MAC_RX_MODE, tp->rx_mode);
9193 tw32(MAC_LED_CTRL, tp->led_ctrl);
9195 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9196 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9197 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9200 tw32_f(MAC_RX_MODE, tp->rx_mode);
9203 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9204 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9205 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9206 /* Set drive transmission level to 1.2V */
9207 /* only if the signal pre-emphasis bit is not set */
9208 val = tr32(MAC_SERDES_CFG);
9211 tw32(MAC_SERDES_CFG, val);
9213 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9214 tw32(MAC_SERDES_CFG, 0x616000);
9217 /* Prevent chip from dropping frames when flow control is enabled.
9220 if (tg3_flag(tp, 57765_CLASS))
9224 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9226 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9227 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9228 /* Use hardware link auto-negotiation */
9229 tg3_flag_set(tp, HW_AUTONEG);
9232 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9233 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9236 tmp = tr32(SERDES_RX_CTRL);
9237 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9238 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9239 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9240 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9243 if (!tg3_flag(tp, USE_PHYLIB)) {
9244 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9245 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9247 err = tg3_setup_phy(tp, 0);
9251 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9252 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9255 /* Clear CRC stats. */
9256 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9257 tg3_writephy(tp, MII_TG3_TEST1,
9258 tmp | MII_TG3_TEST1_CRC_EN);
9259 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9264 __tg3_set_rx_mode(tp->dev);
9266 /* Initialize receive rules. */
9267 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
9268 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9269 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
9270 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
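/* Rules 0 and 1 above are the only ones actually programmed; each rule is a
 * control/value register pair.  The remaining rules, up to a limit that
 * depends on the chip family (and, it appears from the ENABLE_ASF check
 * below, fewer when ASF firmware keeps some for itself), are cleared.
 */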
9272 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9276 if (tg3_flag(tp, ENABLE_ASF))
9280 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
9282 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
9284 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
9286 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
9288 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
9290 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
9292 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
9294 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
9296 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
9298 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
9300 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
9302 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
9304 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
9306 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
9314 if (tg3_flag(tp, ENABLE_APE))
9315 /* Write our heartbeat update interval to APE. */
9316 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9317 APE_HOST_HEARTBEAT_INT_DISABLE);
9319 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9324 /* Called at device open time to get the chip ready for
9325 * packet processing. Invoked with tp->lock held.
9327 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9329 tg3_switch_clocks(tp);
9331 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9333 return tg3_reset_hw(tp, reset_phy);
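/* Accumulate a 32-bit hardware counter into a 64-bit (low/high) software
 * counter.  The carry is detected by the low word wrapping: e.g. if low is
 * 0xffffff00 and the register reads 0x200, low wraps to 0x100, which is
 * smaller than the reading, so high is incremented.
 */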
9336 #define TG3_STAT_ADD32(PSTAT, REG) \
9337 do { u32 __val = tr32(REG); \
9338 (PSTAT)->low += __val; \
9339 if ((PSTAT)->low < __val) \
9340 (PSTAT)->high += 1; \
9343 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9345 struct tg3_hw_stats *sp = tp->hw_stats;
9347 if (!netif_carrier_ok(tp->dev))
9350 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9351 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9352 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9353 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9354 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9355 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9356 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9357 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9358 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9359 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9360 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9361 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9362 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9364 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9365 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9366 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9367 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9368 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9369 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9370 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9371 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9372 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9373 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9374 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9375 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9376 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9377 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9379 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9380 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9381 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9382 tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9383 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9385 u32 val = tr32(HOSTCC_FLOW_ATTN);
9386 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9388 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9389 sp->rx_discards.low += val;
9390 if (sp->rx_discards.low < val)
9391 sp->rx_discards.high += 1;
9393 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9395 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
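/* Check for MSIs that may have been lost: a vector that still has work
 * pending while its rx/tx consumer indices have not moved since the last
 * timer tick is suspicious.  The first such tick is only counted; a stall
 * that persists into the next tick is kicked back into service.
 */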
9398 static void tg3_chk_missed_msi(struct tg3 *tp)
9402 for (i = 0; i < tp->irq_cnt; i++) {
9403 struct tg3_napi *tnapi = &tp->napi[i];
9405 if (tg3_has_work(tnapi)) {
9406 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9407 tnapi->last_tx_cons == tnapi->tx_cons) {
9408 if (tnapi->chk_msi_cnt < 1) {
9409 tnapi->chk_msi_cnt++;
9415 tnapi->chk_msi_cnt = 0;
9416 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9417 tnapi->last_tx_cons = tnapi->tx_cons;
9421 static void tg3_timer(unsigned long __opaque)
9423 struct tg3 *tp = (struct tg3 *) __opaque;
9425 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9428 spin_lock(&tp->lock);
9430 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9431 tg3_flag(tp, 57765_CLASS))
9432 tg3_chk_missed_msi(tp);
9434 if (!tg3_flag(tp, TAGGED_STATUS)) {
9435 /* All of this garbage is needed because, when using non-tagged
9436 * IRQ status, the mailbox/status_block protocol the chip
9437 * uses with the CPU is race prone.
9439 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9440 tw32(GRC_LOCAL_CTRL,
9441 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9443 tw32(HOSTCC_MODE, tp->coalesce_mode |
9444 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9447 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9448 spin_unlock(&tp->lock);
9449 tg3_reset_task_schedule(tp);
9454 /* This part only runs once per second. */
9455 if (!--tp->timer_counter) {
9456 if (tg3_flag(tp, 5705_PLUS))
9457 tg3_periodic_fetch_stats(tp);
9459 if (tp->setlpicnt && !--tp->setlpicnt)
9460 tg3_phy_eee_enable(tp);
9462 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9466 mac_stat = tr32(MAC_STATUS);
9469 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9470 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9472 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9476 tg3_setup_phy(tp, 0);
9477 } else if (tg3_flag(tp, POLL_SERDES)) {
9478 u32 mac_stat = tr32(MAC_STATUS);
9481 if (netif_carrier_ok(tp->dev) &&
9482 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9485 if (!netif_carrier_ok(tp->dev) &&
9486 (mac_stat & (MAC_STATUS_PCS_SYNCED |
9487 MAC_STATUS_SIGNAL_DET))) {
9491 if (!tp->serdes_counter) {
9494 ~MAC_MODE_PORT_MODE_MASK));
9496 tw32_f(MAC_MODE, tp->mac_mode);
9499 tg3_setup_phy(tp, 0);
9501 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9502 tg3_flag(tp, 5780_CLASS)) {
9503 tg3_serdes_parallel_detect(tp);
9506 tp->timer_counter = tp->timer_multiplier;
9509 /* Heartbeat is only sent once every 2 seconds.
9511 * The heartbeat is to tell the ASF firmware that the host
9512 * driver is still alive. In the event that the OS crashes,
9513 * ASF needs to reset the hardware to free up the FIFO space
9514 * that may be filled with rx packets destined for the host.
9515 * If the FIFO is full, ASF will no longer function properly.
9517 * Unintended resets have been reported on real time kernels
9518 * where the timer doesn't run on time. Netpoll will also have the same problem.
9521 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9522 * to check the ring condition when the heartbeat is expiring
9523 * before doing the reset. This will prevent most unintended resets.
9526 if (!--tp->asf_counter) {
9527 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9528 tg3_wait_for_event_ack(tp);
9530 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9531 FWCMD_NICDRV_ALIVE3);
9532 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9533 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9534 TG3_FW_UPDATE_TIMEOUT_SEC);
9536 tg3_generate_fw_event(tp);
9538 tp->asf_counter = tp->asf_multiplier;
9541 spin_unlock(&tp->lock);
9544 tp->timer.expires = jiffies + tp->timer_offset;
9545 add_timer(&tp->timer);
9548 static void __devinit tg3_timer_init(struct tg3 *tp)
9550 if (tg3_flag(tp, TAGGED_STATUS) &&
9551 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9552 !tg3_flag(tp, 57765_CLASS))
9553 tp->timer_offset = HZ;
9555 tp->timer_offset = HZ / 10;
9557 BUG_ON(tp->timer_offset > HZ);
9559 tp->timer_multiplier = (HZ / tp->timer_offset);
9560 tp->asf_multiplier = (HZ / tp->timer_offset) *
9561 TG3_FW_UPDATE_FREQ_SEC;
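/* e.g. with a 10 Hz timer (timer_offset = HZ / 10) the once-per-second work
 * in tg3_timer() runs every timer_multiplier = 10 ticks, and the ASF
 * heartbeat goes out every asf_multiplier ticks, i.e. roughly every
 * TG3_FW_UPDATE_FREQ_SEC seconds.
 */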
9563 init_timer(&tp->timer);
9564 tp->timer.data = (unsigned long) tp;
9565 tp->timer.function = tg3_timer;
9568 static void tg3_timer_start(struct tg3 *tp)
9570 tp->asf_counter = tp->asf_multiplier;
9571 tp->timer_counter = tp->timer_multiplier;
9573 tp->timer.expires = jiffies + tp->timer_offset;
9574 add_timer(&tp->timer);
9577 static void tg3_timer_stop(struct tg3 *tp)
9579 del_timer_sync(&tp->timer);
9582 /* Restart hardware after configuration changes, self-test, etc.
9583 * Invoked with tp->lock held.
9585 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9586 __releases(tp->lock)
9587 __acquires(tp->lock)
9591 err = tg3_init_hw(tp, reset_phy);
9594 "Failed to re-initialize device, aborting\n");
9595 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9596 tg3_full_unlock(tp);
9599 tg3_napi_enable(tp);
9601 tg3_full_lock(tp, 0);
9606 static void tg3_reset_task(struct work_struct *work)
9608 struct tg3 *tp = container_of(work, struct tg3, reset_task);
9611 tg3_full_lock(tp, 0);
9613 if (!netif_running(tp->dev)) {
9614 tg3_flag_clear(tp, RESET_TASK_PENDING);
9615 tg3_full_unlock(tp);
9619 tg3_full_unlock(tp);
9625 tg3_full_lock(tp, 1);
9627 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9628 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9629 tp->write32_rx_mbox = tg3_write_flush_reg32;
9630 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9631 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9634 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9635 err = tg3_init_hw(tp, 1);
9639 tg3_netif_start(tp);
9642 tg3_full_unlock(tp);
9647 tg3_flag_clear(tp, RESET_TASK_PENDING);
9650 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9653 unsigned long flags;
9655 struct tg3_napi *tnapi = &tp->napi[irq_num];
9657 if (tp->irq_cnt == 1)
9658 name = tp->dev->name;
9660 name = &tnapi->irq_lbl[0];
9661 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9662 name[IFNAMSIZ-1] = 0;
9665 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9667 if (tg3_flag(tp, 1SHOT_MSI))
9672 if (tg3_flag(tp, TAGGED_STATUS))
9673 fn = tg3_interrupt_tagged;
9674 flags = IRQF_SHARED;
9677 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9680 static int tg3_test_interrupt(struct tg3 *tp)
9682 struct tg3_napi *tnapi = &tp->napi[0];
9683 struct net_device *dev = tp->dev;
9684 int err, i, intr_ok = 0;
9687 if (!netif_running(dev))
9690 tg3_disable_ints(tp);
9692 free_irq(tnapi->irq_vec, tnapi);
9695 * Turn off MSI one shot mode. Otherwise this test has no
9696 * observable way to know whether the interrupt was delivered.
9698 if (tg3_flag(tp, 57765_PLUS)) {
9699 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9700 tw32(MSGINT_MODE, val);
9703 err = request_irq(tnapi->irq_vec, tg3_test_isr,
9704 IRQF_SHARED, dev->name, tnapi);
9708 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9709 tg3_enable_ints(tp);
9711 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9714 for (i = 0; i < 5; i++) {
9715 u32 int_mbox, misc_host_ctrl;
9717 int_mbox = tr32_mailbox(tnapi->int_mbox);
9718 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9720 if ((int_mbox != 0) ||
9721 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9726 if (tg3_flag(tp, 57765_PLUS) &&
9727 tnapi->hw_status->status_tag != tnapi->last_tag)
9728 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9733 tg3_disable_ints(tp);
9735 free_irq(tnapi->irq_vec, tnapi);
9737 err = tg3_request_irq(tp, 0);
9743 /* Reenable MSI one shot mode. */
9744 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9745 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9746 tw32(MSGINT_MODE, val);
9754 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
9755 * successfully restored
9757 static int tg3_test_msi(struct tg3 *tp)
9762 if (!tg3_flag(tp, USING_MSI))
9765 /* Turn off SERR reporting in case MSI terminates with Master Abort.
9768 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9769 pci_write_config_word(tp->pdev, PCI_COMMAND,
9770 pci_cmd & ~PCI_COMMAND_SERR);
9772 err = tg3_test_interrupt(tp);
9774 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9779 /* other failures */
9783 /* MSI test failed, go back to INTx mode */
9784 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9785 "to INTx mode. Please report this failure to the PCI "
9786 "maintainer and include system chipset information\n");
9788 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9790 pci_disable_msi(tp->pdev);
9792 tg3_flag_clear(tp, USING_MSI);
9793 tp->napi[0].irq_vec = tp->pdev->irq;
9795 err = tg3_request_irq(tp, 0);
9799 /* Need to reset the chip because the MSI cycle may have terminated
9800 * with Master Abort.
9802 tg3_full_lock(tp, 1);
9804 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9805 err = tg3_init_hw(tp, 1);
9807 tg3_full_unlock(tp);
9810 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9815 static int tg3_request_firmware(struct tg3 *tp)
9817 const __be32 *fw_data;
9819 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9820 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9825 fw_data = (void *)tp->fw->data;
9827 /* Firmware blob starts with version numbers, followed by
9828 * start address and _full_ length including BSS sections
9829 * (which must be longer than the actual data, of course).
9832 tp->fw_len = be32_to_cpu(fw_data[2]); /* includes bss */
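/* Per the comment above, the blob header is assumed to be three big-endian
 * words: version, start address, and the full (BSS-inclusive) length, so
 * everything past byte 12 is payload (hence the fw->size - 12 check below).
 */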
9833 if (tp->fw_len < (tp->fw->size - 12)) {
9834 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9835 tp->fw_len, tp->fw_needed);
9836 release_firmware(tp->fw);
9841 /* We no longer need firmware; we have it. */
9842 tp->fw_needed = NULL;
9846 static bool tg3_enable_msix(struct tg3 *tp)
9849 struct msix_entry msix_ent[tp->irq_max];
9851 tp->irq_cnt = num_online_cpus();
9852 if (tp->irq_cnt > 1) {
9853 /* We want as many rx rings enabled as there are cpus.
9854 * In multiqueue MSI-X mode, the first MSI-X vector
9855 * only deals with link interrupts, etc, so we add
9856 * one to the number of vectors we are requesting.
9858 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
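/* e.g. with four online CPUs this requests min(4 + 1, tp->irq_max)
 * vectors: one for link/status plus one per rx ring.
 */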
9861 for (i = 0; i < tp->irq_max; i++) {
9862 msix_ent[i].entry = i;
9863 msix_ent[i].vector = 0;
9866 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9869 } else if (rc != 0) {
9870 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9872 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9877 for (i = 0; i < tp->irq_max; i++)
9878 tp->napi[i].irq_vec = msix_ent[i].vector;
9880 netif_set_real_num_tx_queues(tp->dev, 1);
9881 rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9882 if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9883 pci_disable_msix(tp->pdev);
9887 if (tp->irq_cnt > 1) {
9888 tg3_flag_set(tp, ENABLE_RSS);
9890 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9891 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9892 tg3_flag_set(tp, ENABLE_TSS);
9893 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9900 static void tg3_ints_init(struct tg3 *tp)
9902 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9903 !tg3_flag(tp, TAGGED_STATUS)) {
9904 /* All MSI supporting chips should support tagged
9905 * status. Assert that this is the case.
9907 netdev_warn(tp->dev,
9908 "MSI without TAGGED_STATUS? Not using MSI\n");
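/* Interrupt mode selection falls back in order: MSI-X (multi-vector) first,
 * then single MSI, and finally legacy INTx on tp->pdev->irq.
 */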
9912 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9913 tg3_flag_set(tp, USING_MSIX);
9914 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9915 tg3_flag_set(tp, USING_MSI);
9917 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9918 u32 msi_mode = tr32(MSGINT_MODE);
9919 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9920 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9921 if (!tg3_flag(tp, 1SHOT_MSI))
9922 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9923 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9926 if (!tg3_flag(tp, USING_MSIX)) {
9928 tp->napi[0].irq_vec = tp->pdev->irq;
9929 netif_set_real_num_tx_queues(tp->dev, 1);
9930 netif_set_real_num_rx_queues(tp->dev, 1);
9934 static void tg3_ints_fini(struct tg3 *tp)
9936 if (tg3_flag(tp, USING_MSIX))
9937 pci_disable_msix(tp->pdev);
9938 else if (tg3_flag(tp, USING_MSI))
9939 pci_disable_msi(tp->pdev);
9940 tg3_flag_clear(tp, USING_MSI);
9941 tg3_flag_clear(tp, USING_MSIX);
9942 tg3_flag_clear(tp, ENABLE_RSS);
9943 tg3_flag_clear(tp, ENABLE_TSS);
9946 static int tg3_open(struct net_device *dev)
9948 struct tg3 *tp = netdev_priv(dev);
9951 if (tp->fw_needed) {
9952 err = tg3_request_firmware(tp);
9953 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9957 netdev_warn(tp->dev, "TSO capability disabled\n");
9958 tg3_flag_clear(tp, TSO_CAPABLE);
9959 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9960 netdev_notice(tp->dev, "TSO capability restored\n");
9961 tg3_flag_set(tp, TSO_CAPABLE);
9965 netif_carrier_off(tp->dev);
9967 err = tg3_power_up(tp);
9971 tg3_full_lock(tp, 0);
9973 tg3_disable_ints(tp);
9974 tg3_flag_clear(tp, INIT_COMPLETE);
9976 tg3_full_unlock(tp);
9979 * Setup interrupts first so we know how
9980 * many NAPI resources to allocate
9984 tg3_rss_check_indir_tbl(tp);
9986 /* The placement of this call is tied
9987 * to the setup and use of Host TX descriptors.
9989 err = tg3_alloc_consistent(tp);
9995 tg3_napi_enable(tp);
9997 for (i = 0; i < tp->irq_cnt; i++) {
9998 struct tg3_napi *tnapi = &tp->napi[i];
9999 err = tg3_request_irq(tp, i);
10001 for (i--; i >= 0; i--) {
10002 tnapi = &tp->napi[i];
10003 free_irq(tnapi->irq_vec, tnapi);
10009 tg3_full_lock(tp, 0);
10011 err = tg3_init_hw(tp, 1);
10013 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10014 tg3_free_rings(tp);
10017 tg3_full_unlock(tp);
10022 if (tg3_flag(tp, USING_MSI)) {
10023 err = tg3_test_msi(tp);
10026 tg3_full_lock(tp, 0);
10027 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10028 tg3_free_rings(tp);
10029 tg3_full_unlock(tp);
10034 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10035 u32 val = tr32(PCIE_TRANSACTION_CFG);
10037 tw32(PCIE_TRANSACTION_CFG,
10038 val | PCIE_TRANS_CFG_1SHOT_MSI);
10044 tg3_full_lock(tp, 0);
10046 tg3_timer_start(tp);
10047 tg3_flag_set(tp, INIT_COMPLETE);
10048 tg3_enable_ints(tp);
10050 tg3_full_unlock(tp);
10052 netif_tx_start_all_queues(dev);
10055 * Reset the loopback feature if it was turned on while the device was down,
10056 * to make sure that it's installed properly now.
10058 if (dev->features & NETIF_F_LOOPBACK)
10059 tg3_set_loopback(dev, dev->features);
10064 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10065 struct tg3_napi *tnapi = &tp->napi[i];
10066 free_irq(tnapi->irq_vec, tnapi);
10070 tg3_napi_disable(tp);
10072 tg3_free_consistent(tp);
10076 tg3_frob_aux_power(tp, false);
10077 pci_set_power_state(tp->pdev, PCI_D3hot);
10081 static int tg3_close(struct net_device *dev)
10084 struct tg3 *tp = netdev_priv(dev);
10086 tg3_napi_disable(tp);
10087 tg3_reset_task_cancel(tp);
10089 netif_tx_stop_all_queues(dev);
10091 tg3_timer_stop(tp);
10095 tg3_full_lock(tp, 1);
10097 tg3_disable_ints(tp);
10099 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10100 tg3_free_rings(tp);
10101 tg3_flag_clear(tp, INIT_COMPLETE);
10103 tg3_full_unlock(tp);
10105 for (i = tp->irq_cnt - 1; i >= 0; i--) {
10106 struct tg3_napi *tnapi = &tp->napi[i];
10107 free_irq(tnapi->irq_vec, tnapi);
10112 /* Clear stats across close / open calls */
10113 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10114 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10118 tg3_free_consistent(tp);
10120 tg3_power_down(tp);
10122 netif_carrier_off(tp->dev);
10127 static inline u64 get_stat64(tg3_stat64_t *val)
10129 return ((u64)val->high << 32) | ((u64)val->low);
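/* On 5700/5701 copper devices the CRC error count lives in the PHY
 * (MII_TG3_TEST1 enables the counter, MII_TG3_RXR_COUNTERS reads it) and is
 * accumulated in tp->phy_crc_errors; everything else reports it through the
 * MAC statistics block (rx_fcs_errors).
 */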
10132 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10134 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10136 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10137 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10138 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10141 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10142 tg3_writephy(tp, MII_TG3_TEST1,
10143 val | MII_TG3_TEST1_CRC_EN);
10144 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10148 tp->phy_crc_errors += val;
10150 return tp->phy_crc_errors;
10153 return get_stat64(&hw_stats->rx_fcs_errors);
10156 #define ESTAT_ADD(member) \
10157 estats->member = old_estats->member + \
10158 get_stat64(&hw_stats->member)
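/* Each ethtool stat is the live hardware counter folded on top of
 * estats_prev, the total accumulated before the most recent chip reset.
 */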
10160 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10162 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10163 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10165 ESTAT_ADD(rx_octets);
10166 ESTAT_ADD(rx_fragments);
10167 ESTAT_ADD(rx_ucast_packets);
10168 ESTAT_ADD(rx_mcast_packets);
10169 ESTAT_ADD(rx_bcast_packets);
10170 ESTAT_ADD(rx_fcs_errors);
10171 ESTAT_ADD(rx_align_errors);
10172 ESTAT_ADD(rx_xon_pause_rcvd);
10173 ESTAT_ADD(rx_xoff_pause_rcvd);
10174 ESTAT_ADD(rx_mac_ctrl_rcvd);
10175 ESTAT_ADD(rx_xoff_entered);
10176 ESTAT_ADD(rx_frame_too_long_errors);
10177 ESTAT_ADD(rx_jabbers);
10178 ESTAT_ADD(rx_undersize_packets);
10179 ESTAT_ADD(rx_in_length_errors);
10180 ESTAT_ADD(rx_out_length_errors);
10181 ESTAT_ADD(rx_64_or_less_octet_packets);
10182 ESTAT_ADD(rx_65_to_127_octet_packets);
10183 ESTAT_ADD(rx_128_to_255_octet_packets);
10184 ESTAT_ADD(rx_256_to_511_octet_packets);
10185 ESTAT_ADD(rx_512_to_1023_octet_packets);
10186 ESTAT_ADD(rx_1024_to_1522_octet_packets);
10187 ESTAT_ADD(rx_1523_to_2047_octet_packets);
10188 ESTAT_ADD(rx_2048_to_4095_octet_packets);
10189 ESTAT_ADD(rx_4096_to_8191_octet_packets);
10190 ESTAT_ADD(rx_8192_to_9022_octet_packets);
10192 ESTAT_ADD(tx_octets);
10193 ESTAT_ADD(tx_collisions);
10194 ESTAT_ADD(tx_xon_sent);
10195 ESTAT_ADD(tx_xoff_sent);
10196 ESTAT_ADD(tx_flow_control);
10197 ESTAT_ADD(tx_mac_errors);
10198 ESTAT_ADD(tx_single_collisions);
10199 ESTAT_ADD(tx_mult_collisions);
10200 ESTAT_ADD(tx_deferred);
10201 ESTAT_ADD(tx_excessive_collisions);
10202 ESTAT_ADD(tx_late_collisions);
10203 ESTAT_ADD(tx_collide_2times);
10204 ESTAT_ADD(tx_collide_3times);
10205 ESTAT_ADD(tx_collide_4times);
10206 ESTAT_ADD(tx_collide_5times);
10207 ESTAT_ADD(tx_collide_6times);
10208 ESTAT_ADD(tx_collide_7times);
10209 ESTAT_ADD(tx_collide_8times);
10210 ESTAT_ADD(tx_collide_9times);
10211 ESTAT_ADD(tx_collide_10times);
10212 ESTAT_ADD(tx_collide_11times);
10213 ESTAT_ADD(tx_collide_12times);
10214 ESTAT_ADD(tx_collide_13times);
10215 ESTAT_ADD(tx_collide_14times);
10216 ESTAT_ADD(tx_collide_15times);
10217 ESTAT_ADD(tx_ucast_packets);
10218 ESTAT_ADD(tx_mcast_packets);
10219 ESTAT_ADD(tx_bcast_packets);
10220 ESTAT_ADD(tx_carrier_sense_errors);
10221 ESTAT_ADD(tx_discards);
10222 ESTAT_ADD(tx_errors);
10224 ESTAT_ADD(dma_writeq_full);
10225 ESTAT_ADD(dma_write_prioq_full);
10226 ESTAT_ADD(rxbds_empty);
10227 ESTAT_ADD(rx_discards);
10228 ESTAT_ADD(rx_errors);
10229 ESTAT_ADD(rx_threshold_hit);
10231 ESTAT_ADD(dma_readq_full);
10232 ESTAT_ADD(dma_read_prioq_full);
10233 ESTAT_ADD(tx_comp_queue_full);
10235 ESTAT_ADD(ring_set_send_prod_index);
10236 ESTAT_ADD(ring_status_update);
10237 ESTAT_ADD(nic_irqs);
10238 ESTAT_ADD(nic_avoided_irqs);
10239 ESTAT_ADD(nic_tx_threshold_hit);
10241 ESTAT_ADD(mbuf_lwm_thresh_hit);
10244 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10246 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10247 struct tg3_hw_stats *hw_stats = tp->hw_stats;
10249 stats->rx_packets = old_stats->rx_packets +
10250 get_stat64(&hw_stats->rx_ucast_packets) +
10251 get_stat64(&hw_stats->rx_mcast_packets) +
10252 get_stat64(&hw_stats->rx_bcast_packets);
10254 stats->tx_packets = old_stats->tx_packets +
10255 get_stat64(&hw_stats->tx_ucast_packets) +
10256 get_stat64(&hw_stats->tx_mcast_packets) +
10257 get_stat64(&hw_stats->tx_bcast_packets);
10259 stats->rx_bytes = old_stats->rx_bytes +
10260 get_stat64(&hw_stats->rx_octets);
10261 stats->tx_bytes = old_stats->tx_bytes +
10262 get_stat64(&hw_stats->tx_octets);
10264 stats->rx_errors = old_stats->rx_errors +
10265 get_stat64(&hw_stats->rx_errors);
10266 stats->tx_errors = old_stats->tx_errors +
10267 get_stat64(&hw_stats->tx_errors) +
10268 get_stat64(&hw_stats->tx_mac_errors) +
10269 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10270 get_stat64(&hw_stats->tx_discards);
10272 stats->multicast = old_stats->multicast +
10273 get_stat64(&hw_stats->rx_mcast_packets);
10274 stats->collisions = old_stats->collisions +
10275 get_stat64(&hw_stats->tx_collisions);
10277 stats->rx_length_errors = old_stats->rx_length_errors +
10278 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10279 get_stat64(&hw_stats->rx_undersize_packets);
10281 stats->rx_over_errors = old_stats->rx_over_errors +
10282 get_stat64(&hw_stats->rxbds_empty);
10283 stats->rx_frame_errors = old_stats->rx_frame_errors +
10284 get_stat64(&hw_stats->rx_align_errors);
10285 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10286 get_stat64(&hw_stats->tx_discards);
10287 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10288 get_stat64(&hw_stats->tx_carrier_sense_errors);
10290 stats->rx_crc_errors = old_stats->rx_crc_errors +
10291 tg3_calc_crc_errors(tp);
10293 stats->rx_missed_errors = old_stats->rx_missed_errors +
10294 get_stat64(&hw_stats->rx_discards);
10296 stats->rx_dropped = tp->rx_dropped;
10297 stats->tx_dropped = tp->tx_dropped;
10300 static int tg3_get_regs_len(struct net_device *dev)
10302 return TG3_REG_BLK_SIZE;
10305 static void tg3_get_regs(struct net_device *dev,
10306 struct ethtool_regs *regs, void *_p)
10308 struct tg3 *tp = netdev_priv(dev);
10312 memset(_p, 0, TG3_REG_BLK_SIZE);
10314 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10317 tg3_full_lock(tp, 0);
10319 tg3_dump_legacy_regs(tp, (u32 *)_p);
10321 tg3_full_unlock(tp);
10324 static int tg3_get_eeprom_len(struct net_device *dev)
10326 struct tg3 *tp = netdev_priv(dev);
10328 return tp->nvram_size;
10331 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10333 struct tg3 *tp = netdev_priv(dev);
10336 u32 i, offset, len, b_offset, b_count;
10339 if (tg3_flag(tp, NO_NVRAM))
10342 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10345 offset = eeprom->offset;
10349 eeprom->magic = TG3_EEPROM_MAGIC;
10352 /* adjustments to start on required 4 byte boundary */
10353 b_offset = offset & 3;
10354 b_count = 4 - b_offset;
10355 if (b_count > len) {
10356 /* i.e. offset=1 len=2 */
10359 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10362 memcpy(data, ((char *)&val) + b_offset, b_count);
10365 eeprom->len += b_count;
10368 /* read bytes up to the last 4 byte boundary */
10369 pd = &data[eeprom->len];
10370 for (i = 0; i < (len - (len & 3)); i += 4) {
10371 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10376 memcpy(pd + i, &val, 4);
10381 /* read last bytes not ending on 4 byte boundary */
10382 pd = &data[eeprom->len];
10384 b_offset = offset + len - b_count;
10385 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10388 memcpy(pd, &val, b_count);
10389 eeprom->len += b_count;
10394 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10396 struct tg3 *tp = netdev_priv(dev);
10398 u32 offset, len, b_offset, odd_len;
10402 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10405 if (tg3_flag(tp, NO_NVRAM) ||
10406 eeprom->magic != TG3_EEPROM_MAGIC)
10409 offset = eeprom->offset;
10412 if ((b_offset = (offset & 3))) {
10413 /* adjustments to start on required 4 byte boundary */
10414 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10425 /* adjustments to end on required 4 byte boundary */
10427 len = (len + 3) & ~3;
10428 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10434 if (b_offset || odd_len) {
10435 buf = kmalloc(len, GFP_KERNEL);
10439 memcpy(buf, &start, 4);
10441 memcpy(buf+len-4, &end, 4);
10442 memcpy(buf + b_offset, data, eeprom->len);
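/* The head and tail words read back above are merged with the caller's data
 * so that a write which does not start or end on a 4-byte boundary leaves
 * the neighbouring NVRAM bytes untouched.
 */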
10445 ret = tg3_nvram_write_block(tp, offset, len, buf);
10453 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10455 struct tg3 *tp = netdev_priv(dev);
10457 if (tg3_flag(tp, USE_PHYLIB)) {
10458 struct phy_device *phydev;
10459 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10461 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10462 return phy_ethtool_gset(phydev, cmd);
10465 cmd->supported = (SUPPORTED_Autoneg);
10467 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10468 cmd->supported |= (SUPPORTED_1000baseT_Half |
10469 SUPPORTED_1000baseT_Full);
10471 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10472 cmd->supported |= (SUPPORTED_100baseT_Half |
10473 SUPPORTED_100baseT_Full |
10474 SUPPORTED_10baseT_Half |
10475 SUPPORTED_10baseT_Full |
10477 cmd->port = PORT_TP;
10479 cmd->supported |= SUPPORTED_FIBRE;
10480 cmd->port = PORT_FIBRE;
10483 cmd->advertising = tp->link_config.advertising;
10484 if (tg3_flag(tp, PAUSE_AUTONEG)) {
10485 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10486 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10487 cmd->advertising |= ADVERTISED_Pause;
10489 cmd->advertising |= ADVERTISED_Pause |
10490 ADVERTISED_Asym_Pause;
10492 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10493 cmd->advertising |= ADVERTISED_Asym_Pause;
10496 if (netif_running(dev) && netif_carrier_ok(dev)) {
10497 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10498 cmd->duplex = tp->link_config.active_duplex;
10499 cmd->lp_advertising = tp->link_config.rmt_adv;
10500 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10501 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10502 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10504 cmd->eth_tp_mdix = ETH_TP_MDI;
10507 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10508 cmd->duplex = DUPLEX_UNKNOWN;
10509 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10511 cmd->phy_address = tp->phy_addr;
10512 cmd->transceiver = XCVR_INTERNAL;
10513 cmd->autoneg = tp->link_config.autoneg;
10519 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10521 struct tg3 *tp = netdev_priv(dev);
10522 u32 speed = ethtool_cmd_speed(cmd);
10524 if (tg3_flag(tp, USE_PHYLIB)) {
10525 struct phy_device *phydev;
10526 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10528 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10529 return phy_ethtool_sset(phydev, cmd);
10532 if (cmd->autoneg != AUTONEG_ENABLE &&
10533 cmd->autoneg != AUTONEG_DISABLE)
10536 if (cmd->autoneg == AUTONEG_DISABLE &&
10537 cmd->duplex != DUPLEX_FULL &&
10538 cmd->duplex != DUPLEX_HALF)
10541 if (cmd->autoneg == AUTONEG_ENABLE) {
10542 u32 mask = ADVERTISED_Autoneg |
10544 ADVERTISED_Asym_Pause;
10546 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10547 mask |= ADVERTISED_1000baseT_Half |
10548 ADVERTISED_1000baseT_Full;
10550 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10551 mask |= ADVERTISED_100baseT_Half |
10552 ADVERTISED_100baseT_Full |
10553 ADVERTISED_10baseT_Half |
10554 ADVERTISED_10baseT_Full |
10557 mask |= ADVERTISED_FIBRE;
10559 if (cmd->advertising & ~mask)
10562 mask &= (ADVERTISED_1000baseT_Half |
10563 ADVERTISED_1000baseT_Full |
10564 ADVERTISED_100baseT_Half |
10565 ADVERTISED_100baseT_Full |
10566 ADVERTISED_10baseT_Half |
10567 ADVERTISED_10baseT_Full);
10569 cmd->advertising &= mask;
10571 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10572 if (speed != SPEED_1000)
10575 if (cmd->duplex != DUPLEX_FULL)
10578 if (speed != SPEED_100 &&
10584 tg3_full_lock(tp, 0);
10586 tp->link_config.autoneg = cmd->autoneg;
10587 if (cmd->autoneg == AUTONEG_ENABLE) {
10588 tp->link_config.advertising = (cmd->advertising |
10589 ADVERTISED_Autoneg);
10590 tp->link_config.speed = SPEED_UNKNOWN;
10591 tp->link_config.duplex = DUPLEX_UNKNOWN;
10593 tp->link_config.advertising = 0;
10594 tp->link_config.speed = speed;
10595 tp->link_config.duplex = cmd->duplex;
10598 if (netif_running(dev))
10599 tg3_setup_phy(tp, 1);
10601 tg3_full_unlock(tp);
10606 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10608 struct tg3 *tp = netdev_priv(dev);
10610 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10611 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10612 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10613 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10616 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10618 struct tg3 *tp = netdev_priv(dev);
10620 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10621 wol->supported = WAKE_MAGIC;
10623 wol->supported = 0;
10625 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10626 wol->wolopts = WAKE_MAGIC;
10627 memset(&wol->sopass, 0, sizeof(wol->sopass));
10630 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10632 struct tg3 *tp = netdev_priv(dev);
10633 struct device *dp = &tp->pdev->dev;
10635 if (wol->wolopts & ~WAKE_MAGIC)
10637 if ((wol->wolopts & WAKE_MAGIC) &&
10638 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10641 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10643 spin_lock_bh(&tp->lock);
10644 if (device_may_wakeup(dp))
10645 tg3_flag_set(tp, WOL_ENABLE);
10647 tg3_flag_clear(tp, WOL_ENABLE);
10648 spin_unlock_bh(&tp->lock);
10653 static u32 tg3_get_msglevel(struct net_device *dev)
10655 struct tg3 *tp = netdev_priv(dev);
10656 return tp->msg_enable;
10659 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10661 struct tg3 *tp = netdev_priv(dev);
10662 tp->msg_enable = value;
10665 static int tg3_nway_reset(struct net_device *dev)
10667 struct tg3 *tp = netdev_priv(dev);
10670 if (!netif_running(dev))
10673 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10676 if (tg3_flag(tp, USE_PHYLIB)) {
10677 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10679 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10683 spin_lock_bh(&tp->lock);
10685 tg3_readphy(tp, MII_BMCR, &bmcr);
10686 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10687 ((bmcr & BMCR_ANENABLE) ||
10688 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10689 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10693 spin_unlock_bh(&tp->lock);
10699 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10701 struct tg3 *tp = netdev_priv(dev);
10703 ering->rx_max_pending = tp->rx_std_ring_mask;
10704 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10705 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10707 ering->rx_jumbo_max_pending = 0;
10709 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10711 ering->rx_pending = tp->rx_pending;
10712 if (tg3_flag(tp, JUMBO_RING_ENABLE))
10713 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10715 ering->rx_jumbo_pending = 0;
10717 ering->tx_pending = tp->napi[0].tx_pending;
10720 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10722 struct tg3 *tp = netdev_priv(dev);
10723 int i, irq_sync = 0, err = 0;
10725 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10726 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10727 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10728 (ering->tx_pending <= MAX_SKB_FRAGS) ||
10729 (tg3_flag(tp, TSO_BUG) &&
10730 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10733 if (netif_running(dev)) {
10735 tg3_netif_stop(tp);
10739 tg3_full_lock(tp, irq_sync);
10741 tp->rx_pending = ering->rx_pending;
10743 if (tg3_flag(tp, MAX_RXPEND_64) &&
10744 tp->rx_pending > 63)
10745 tp->rx_pending = 63;
10746 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10748 for (i = 0; i < tp->irq_max; i++)
10749 tp->napi[i].tx_pending = ering->tx_pending;
10751 if (netif_running(dev)) {
10752 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10753 err = tg3_restart_hw(tp, 1);
10755 tg3_netif_start(tp);
10758 tg3_full_unlock(tp);
10760 if (irq_sync && !err)
10766 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10768 struct tg3 *tp = netdev_priv(dev);
10770 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10772 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10773 epause->rx_pause = 1;
10775 epause->rx_pause = 0;
10777 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10778 epause->tx_pause = 1;
10780 epause->tx_pause = 0;
10783 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10785 struct tg3 *tp = netdev_priv(dev);
10788 if (tg3_flag(tp, USE_PHYLIB)) {
10790 struct phy_device *phydev;
10792 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10794 if (!(phydev->supported & SUPPORTED_Pause) ||
10795 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10796 (epause->rx_pause != epause->tx_pause)))
10799 tp->link_config.flowctrl = 0;
10800 if (epause->rx_pause) {
10801 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10803 if (epause->tx_pause) {
10804 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10805 newadv = ADVERTISED_Pause;
10807 newadv = ADVERTISED_Pause |
10808 ADVERTISED_Asym_Pause;
10809 } else if (epause->tx_pause) {
10810 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10811 newadv = ADVERTISED_Asym_Pause;
10815 if (epause->autoneg)
10816 tg3_flag_set(tp, PAUSE_AUTONEG);
10818 tg3_flag_clear(tp, PAUSE_AUTONEG);
10820 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10821 u32 oldadv = phydev->advertising &
10822 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10823 if (oldadv != newadv) {
10824 phydev->advertising &=
10825 ~(ADVERTISED_Pause |
10826 ADVERTISED_Asym_Pause);
10827 phydev->advertising |= newadv;
10828 if (phydev->autoneg) {
10830 * Always renegotiate the link to
10831 * inform our link partner of our
10832 * flow control settings, even if the
10833 * flow control is forced. Let
10834 * tg3_adjust_link() do the final
10835 * flow control setup.
10837 return phy_start_aneg(phydev);
10841 if (!epause->autoneg)
10842 tg3_setup_flow_control(tp, 0, 0);
10844 tp->link_config.advertising &=
10845 ~(ADVERTISED_Pause |
10846 ADVERTISED_Asym_Pause);
10847 tp->link_config.advertising |= newadv;
10852 if (netif_running(dev)) {
10853 tg3_netif_stop(tp);
10857 tg3_full_lock(tp, irq_sync);
10859 if (epause->autoneg)
10860 tg3_flag_set(tp, PAUSE_AUTONEG);
10862 tg3_flag_clear(tp, PAUSE_AUTONEG);
10863 if (epause->rx_pause)
10864 tp->link_config.flowctrl |= FLOW_CTRL_RX;
10866 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10867 if (epause->tx_pause)
10868 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10870 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10872 if (netif_running(dev)) {
10873 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10874 err = tg3_restart_hw(tp, 1);
10876 tg3_netif_start(tp);
10879 tg3_full_unlock(tp);
10885 static int tg3_get_sset_count(struct net_device *dev, int sset)
10889 return TG3_NUM_TEST;
10891 return TG3_NUM_STATS;
10893 return -EOPNOTSUPP;
10897 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10898 u32 *rules __always_unused)
10900 struct tg3 *tp = netdev_priv(dev);
10902 if (!tg3_flag(tp, SUPPORT_MSIX))
10903 return -EOPNOTSUPP;
10905 switch (info->cmd) {
10906 case ETHTOOL_GRXRINGS:
10907 if (netif_running(tp->dev))
10908 info->data = tp->irq_cnt;
10910 info->data = num_online_cpus();
10911 if (info->data > TG3_IRQ_MAX_VECS_RSS)
10912 info->data = TG3_IRQ_MAX_VECS_RSS;
10915 /* The first interrupt vector only
10916 * handles link interrupts.
10922 return -EOPNOTSUPP;
10926 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10929 struct tg3 *tp = netdev_priv(dev);
10931 if (tg3_flag(tp, SUPPORT_MSIX))
10932 size = TG3_RSS_INDIR_TBL_SIZE;
10937 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10939 struct tg3 *tp = netdev_priv(dev);
10942 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10943 indir[i] = tp->rss_ind_tbl[i];
10948 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10950 struct tg3 *tp = netdev_priv(dev);
10953 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10954 tp->rss_ind_tbl[i] = indir[i];
10956 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10959 /* It is legal to write the indirection
10960 * table while the device is running.
10962 tg3_full_lock(tp, 0);
10963 tg3_rss_write_indir_tbl(tp);
10964 tg3_full_unlock(tp);
10969 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10971 switch (stringset) {
10973 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10976 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10979 WARN_ON(1); /* we need a WARN() */
10984 static int tg3_set_phys_id(struct net_device *dev,
10985 enum ethtool_phys_id_state state)
10987 struct tg3 *tp = netdev_priv(dev);
10989 if (!netif_running(tp->dev))
10993 case ETHTOOL_ID_ACTIVE:
10994 return 1; /* cycle on/off once per second */
10996 case ETHTOOL_ID_ON:
10997 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10998 LED_CTRL_1000MBPS_ON |
10999 LED_CTRL_100MBPS_ON |
11000 LED_CTRL_10MBPS_ON |
11001 LED_CTRL_TRAFFIC_OVERRIDE |
11002 LED_CTRL_TRAFFIC_BLINK |
11003 LED_CTRL_TRAFFIC_LED);
11006 case ETHTOOL_ID_OFF:
11007 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11008 LED_CTRL_TRAFFIC_OVERRIDE);
11011 case ETHTOOL_ID_INACTIVE:
11012 tw32(MAC_LED_CTRL, tp->led_ctrl);
11019 static void tg3_get_ethtool_stats(struct net_device *dev,
11020 struct ethtool_stats *estats, u64 *tmp_stats)
11022 struct tg3 *tp = netdev_priv(dev);
11025 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11027 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11030 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11034 u32 offset = 0, len = 0;
11037 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11040 if (magic == TG3_EEPROM_MAGIC) {
11041 for (offset = TG3_NVM_DIR_START;
11042 offset < TG3_NVM_DIR_END;
11043 offset += TG3_NVM_DIRENT_SIZE) {
11044 if (tg3_nvram_read(tp, offset, &val))
11047 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11048 TG3_NVM_DIRTYPE_EXTVPD)
11052 if (offset != TG3_NVM_DIR_END) {
11053 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11054 if (tg3_nvram_read(tp, offset + 4, &offset))
11057 offset = tg3_nvram_logical_addr(tp, offset);
11061 if (!offset || !len) {
11062 offset = TG3_NVM_VPD_OFF;
11063 len = TG3_NVM_VPD_LEN;
11066 buf = kmalloc(len, GFP_KERNEL);
11070 if (magic == TG3_EEPROM_MAGIC) {
11071 for (i = 0; i < len; i += 4) {
11072 /* The data is in little-endian format in NVRAM.
11073 * Use the big-endian read routines to preserve
11074 * the byte order as it exists in NVRAM.
11076 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11082 unsigned int pos = 0;
11084 ptr = (u8 *)&buf[0];
11085 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11086 cnt = pci_read_vpd(tp->pdev, pos,
11088 if (cnt == -ETIMEDOUT || cnt == -EINTR)
11106 #define NVRAM_TEST_SIZE 0x100
11107 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
11108 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
11109 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
11110 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
11111 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
11112 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
11113 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11114 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11116 static int tg3_test_nvram(struct tg3 *tp)
11118 u32 csum, magic, len;
11120 int i, j, k, err = 0, size;
11122 if (tg3_flag(tp, NO_NVRAM))
11125 if (tg3_nvram_read(tp, 0, &magic) != 0)
11128 if (magic == TG3_EEPROM_MAGIC)
11129 size = NVRAM_TEST_SIZE;
11130 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11131 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11132 TG3_EEPROM_SB_FORMAT_1) {
11133 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11134 case TG3_EEPROM_SB_REVISION_0:
11135 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11137 case TG3_EEPROM_SB_REVISION_2:
11138 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11140 case TG3_EEPROM_SB_REVISION_3:
11141 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11143 case TG3_EEPROM_SB_REVISION_4:
11144 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11146 case TG3_EEPROM_SB_REVISION_5:
11147 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11149 case TG3_EEPROM_SB_REVISION_6:
11150 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11157 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11158 size = NVRAM_SELFBOOT_HW_SIZE;
11162 buf = kmalloc(size, GFP_KERNEL);
11167 for (i = 0, j = 0; i < size; i += 4, j++) {
11168 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11175 /* Selfboot format */
11176 magic = be32_to_cpu(buf[0]);
11177 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11178 TG3_EEPROM_MAGIC_FW) {
11179 u8 *buf8 = (u8 *) buf, csum8 = 0;
11181 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11182 TG3_EEPROM_SB_REVISION_2) {
11183 /* For rev 2, the csum doesn't include the MBA. */
11184 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11186 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11189 for (i = 0; i < size; i++)
11202 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11203 TG3_EEPROM_MAGIC_HW) {
11204 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11205 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11206 u8 *buf8 = (u8 *) buf;
11208 /* Separate the parity bits and the data bytes. */
11209 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11210 if ((i == 0) || (i == 8)) {
11214 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11215 parity[k++] = buf8[i] & msk;
11217 } else if (i == 16) {
11221 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11222 parity[k++] = buf8[i] & msk;
11225 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11226 parity[k++] = buf8[i] & msk;
11229 data[j++] = buf8[i];
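/* The check below expects odd parity overall: a data byte with an even
 * number of set bits must have its parity bit set, and one with an odd
 * number must have it clear.
 */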
11233 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11234 u8 hw8 = hweight8(data[i]);
11236 if ((hw8 & 0x1) && parity[i])
11238 else if (!(hw8 & 0x1) && !parity[i])
11247 /* Bootstrap checksum at offset 0x10 */
11248 csum = calc_crc((unsigned char *) buf, 0x10);
11249 if (csum != le32_to_cpu(buf[0x10/4]))
11252 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11253 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11254 if (csum != le32_to_cpu(buf[0xfc/4]))
11259 buf = tg3_vpd_readblock(tp, &len);
11263 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11265 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11269 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11272 i += PCI_VPD_LRDT_TAG_SIZE;
11273 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11274 PCI_VPD_RO_KEYWORD_CHKSUM);
11278 j += PCI_VPD_INFO_FLD_HDR_SIZE;
11280 for (i = 0; i <= j; i++)
11281 csum8 += ((u8 *)buf)[i];
11295 #define TG3_SERDES_TIMEOUT_SEC 2
11296 #define TG3_COPPER_TIMEOUT_SEC 6
11298 static int tg3_test_link(struct tg3 *tp)
11302 if (!netif_running(tp->dev))
11305 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11306 max = TG3_SERDES_TIMEOUT_SEC;
11308 max = TG3_COPPER_TIMEOUT_SEC;
11310 for (i = 0; i < max; i++) {
11311 if (netif_carrier_ok(tp->dev))
11314 if (msleep_interruptible(1000))
11321 /* Only test the commonly used registers */
11322 static int tg3_test_registers(struct tg3 *tp)
11324 int i, is_5705, is_5750;
11325 u32 offset, read_mask, write_mask, val, save_val, read_val;
11329 #define TG3_FL_5705 0x1
11330 #define TG3_FL_NOT_5705 0x2
11331 #define TG3_FL_NOT_5788 0x4
11332 #define TG3_FL_NOT_5750 0x8
11336 /* MAC Control Registers */
11337 { MAC_MODE, TG3_FL_NOT_5705,
11338 0x00000000, 0x00ef6f8c },
11339 { MAC_MODE, TG3_FL_5705,
11340 0x00000000, 0x01ef6b8c },
11341 { MAC_STATUS, TG3_FL_NOT_5705,
11342 0x03800107, 0x00000000 },
11343 { MAC_STATUS, TG3_FL_5705,
11344 0x03800100, 0x00000000 },
11345 { MAC_ADDR_0_HIGH, 0x0000,
11346 0x00000000, 0x0000ffff },
11347 { MAC_ADDR_0_LOW, 0x0000,
11348 0x00000000, 0xffffffff },
11349 { MAC_RX_MTU_SIZE, 0x0000,
11350 0x00000000, 0x0000ffff },
11351 { MAC_TX_MODE, 0x0000,
11352 0x00000000, 0x00000070 },
11353 { MAC_TX_LENGTHS, 0x0000,
11354 0x00000000, 0x00003fff },
11355 { MAC_RX_MODE, TG3_FL_NOT_5705,
11356 0x00000000, 0x000007fc },
11357 { MAC_RX_MODE, TG3_FL_5705,
11358 0x00000000, 0x000007dc },
11359 { MAC_HASH_REG_0, 0x0000,
11360 0x00000000, 0xffffffff },
11361 { MAC_HASH_REG_1, 0x0000,
11362 0x00000000, 0xffffffff },
11363 { MAC_HASH_REG_2, 0x0000,
11364 0x00000000, 0xffffffff },
11365 { MAC_HASH_REG_3, 0x0000,
11366 0x00000000, 0xffffffff },
11368 /* Receive Data and Receive BD Initiator Control Registers. */
11369 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11370 0x00000000, 0xffffffff },
11371 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11372 0x00000000, 0xffffffff },
11373 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11374 0x00000000, 0x00000003 },
11375 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11376 0x00000000, 0xffffffff },
11377 { RCVDBDI_STD_BD+0, 0x0000,
11378 0x00000000, 0xffffffff },
11379 { RCVDBDI_STD_BD+4, 0x0000,
11380 0x00000000, 0xffffffff },
11381 { RCVDBDI_STD_BD+8, 0x0000,
11382 0x00000000, 0xffff0002 },
11383 { RCVDBDI_STD_BD+0xc, 0x0000,
11384 0x00000000, 0xffffffff },
11386 /* Receive BD Initiator Control Registers. */
11387 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11388 0x00000000, 0xffffffff },
11389 { RCVBDI_STD_THRESH, TG3_FL_5705,
11390 0x00000000, 0x000003ff },
11391 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11392 0x00000000, 0xffffffff },
11394 /* Host Coalescing Control Registers. */
11395 { HOSTCC_MODE, TG3_FL_NOT_5705,
11396 0x00000000, 0x00000004 },
11397 { HOSTCC_MODE, TG3_FL_5705,
11398 0x00000000, 0x000000f6 },
11399 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11400 0x00000000, 0xffffffff },
11401 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11402 0x00000000, 0x000003ff },
11403 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11404 0x00000000, 0xffffffff },
11405 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11406 0x00000000, 0x000003ff },
11407 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11408 0x00000000, 0xffffffff },
11409 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11410 0x00000000, 0x000000ff },
11411 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11412 0x00000000, 0xffffffff },
11413 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11414 0x00000000, 0x000000ff },
11415 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11416 0x00000000, 0xffffffff },
11417 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11418 0x00000000, 0xffffffff },
11419 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11420 0x00000000, 0xffffffff },
11421 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11422 0x00000000, 0x000000ff },
11423 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11424 0x00000000, 0xffffffff },
11425 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11426 0x00000000, 0x000000ff },
11427 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11428 0x00000000, 0xffffffff },
11429 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11430 0x00000000, 0xffffffff },
11431 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11432 0x00000000, 0xffffffff },
11433 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11434 0x00000000, 0xffffffff },
11435 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11436 0x00000000, 0xffffffff },
11437 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11438 0xffffffff, 0x00000000 },
11439 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11440 0xffffffff, 0x00000000 },
11442 /* Buffer Manager Control Registers. */
11443 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11444 0x00000000, 0x007fff80 },
11445 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11446 0x00000000, 0x007fffff },
11447 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11448 0x00000000, 0x0000003f },
11449 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11450 0x00000000, 0x000001ff },
11451 { BUFMGR_MB_HIGH_WATER, 0x0000,
11452 0x00000000, 0x000001ff },
11453 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11454 0xffffffff, 0x00000000 },
11455 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11456 0xffffffff, 0x00000000 },
11458 /* Mailbox Registers */
11459 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11460 0x00000000, 0x000001ff },
11461 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11462 0x00000000, 0x000001ff },
11463 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11464 0x00000000, 0x000007ff },
11465 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11466 0x00000000, 0x000001ff },
11468 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11471 is_5705 = is_5750 = 0;
11472 if (tg3_flag(tp, 5705_PLUS)) {
11474 if (tg3_flag(tp, 5750_PLUS))
11478 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11479 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11482 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11485 if (tg3_flag(tp, IS_5788) &&
11486 (reg_tbl[i].flags & TG3_FL_NOT_5788))
11489 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11492 offset = (u32) reg_tbl[i].offset;
11493 read_mask = reg_tbl[i].read_mask;
11494 write_mask = reg_tbl[i].write_mask;
11496 /* Save the original register content */
11497 save_val = tr32(offset);
11499 /* Determine the read-only value. */
11500 read_val = save_val & read_mask;
11502 /* Write zero to the register, then make sure the read-only bits
11503 * are not changed and the read/write bits are all zeros.
11507 val = tr32(offset);
11509 /* Test the read-only and read/write bits. */
11510 if (((val & read_mask) != read_val) || (val & write_mask))
11513 /* Write ones to all the bits defined by RdMask and WrMask, then
11514 * make sure the read-only bits are not changed and the
11515 * read/write bits are all ones.
11517 tw32(offset, read_mask | write_mask);
11519 val = tr32(offset);
11521 /* Test the read-only bits. */
11522 if ((val & read_mask) != read_val)
11525 /* Test the read/write bits. */
11526 if ((val & write_mask) != write_mask)
11529 tw32(offset, save_val);
11535 if (netif_msg_hw(tp))
11536 netdev_err(tp->dev,
11537 "Register test failed at offset %x\n", offset);
11538 tw32(offset, save_val);
11542 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11544 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11548 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11549 for (j = 0; j < len; j += 4) {
11552 tg3_write_mem(tp, offset + j, test_pattern[i]);
11553 tg3_read_mem(tp, offset + j, &val);
11554 if (val != test_pattern[i])
11561 static int tg3_test_memory(struct tg3 *tp)
11563 static struct mem_entry {
11566 } mem_tbl_570x[] = {
11567 { 0x00000000, 0x00b50},
11568 { 0x00002000, 0x1c000},
11569 { 0xffffffff, 0x00000}
11570 }, mem_tbl_5705[] = {
11571 { 0x00000100, 0x0000c},
11572 { 0x00000200, 0x00008},
11573 { 0x00004000, 0x00800},
11574 { 0x00006000, 0x01000},
11575 { 0x00008000, 0x02000},
11576 { 0x00010000, 0x0e000},
11577 { 0xffffffff, 0x00000}
11578 }, mem_tbl_5755[] = {
11579 { 0x00000200, 0x00008},
11580 { 0x00004000, 0x00800},
11581 { 0x00006000, 0x00800},
11582 { 0x00008000, 0x02000},
11583 { 0x00010000, 0x0c000},
11584 { 0xffffffff, 0x00000}
11585 }, mem_tbl_5906[] = {
11586 { 0x00000200, 0x00008},
11587 { 0x00004000, 0x00400},
11588 { 0x00006000, 0x00400},
11589 { 0x00008000, 0x01000},
11590 { 0x00010000, 0x01000},
11591 { 0xffffffff, 0x00000}
11592 }, mem_tbl_5717[] = {
11593 { 0x00000200, 0x00008},
11594 { 0x00010000, 0x0a000},
11595 { 0x00020000, 0x13c00},
11596 { 0xffffffff, 0x00000}
11597 }, mem_tbl_57765[] = {
11598 { 0x00000200, 0x00008},
11599 { 0x00004000, 0x00800},
11600 { 0x00006000, 0x09800},
11601 { 0x00010000, 0x0a000},
11602 { 0xffffffff, 0x00000}
11604 struct mem_entry *mem_tbl;
11608 if (tg3_flag(tp, 5717_PLUS))
11609 mem_tbl = mem_tbl_5717;
11610 else if (tg3_flag(tp, 57765_CLASS))
11611 mem_tbl = mem_tbl_57765;
11612 else if (tg3_flag(tp, 5755_PLUS))
11613 mem_tbl = mem_tbl_5755;
11614 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11615 mem_tbl = mem_tbl_5906;
11616 else if (tg3_flag(tp, 5705_PLUS))
11617 mem_tbl = mem_tbl_5705;
11619 mem_tbl = mem_tbl_570x;
11621 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11622 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11630 #define TG3_TSO_MSS 500
11632 #define TG3_TSO_IP_HDR_LEN 20
11633 #define TG3_TSO_TCP_HDR_LEN 20
11634 #define TG3_TSO_TCP_OPT_LEN 12
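/* tg3_tso_header below is a canned template for the test frame past the
 * MAC addresses: the EtherType, a 20-byte IPv4 header, a 20-byte TCP
 * header and 12 bytes of TCP options.  The TSO loopback test copies it
 * in right after the two MAC addresses and then patches the IP total
 * length field for the chosen MSS.
 */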
11636 static const u8 tg3_tso_header[] = {
11638 0x45, 0x00, 0x00, 0x00,
11639 0x00, 0x00, 0x40, 0x00,
11640 0x40, 0x06, 0x00, 0x00,
11641 0x0a, 0x00, 0x00, 0x01,
11642 0x0a, 0x00, 0x00, 0x02,
11643 0x0d, 0x00, 0xe0, 0x00,
11644 0x00, 0x00, 0x01, 0x00,
11645 0x00, 0x00, 0x02, 0x00,
11646 0x80, 0x10, 0x10, 0x00,
11647 0x14, 0x09, 0x00, 0x00,
11648 0x01, 0x01, 0x08, 0x0a,
11649 0x11, 0x11, 0x11, 0x11,
11650 0x11, 0x11, 0x11, 0x11,
11653 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11655 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11656 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11658 struct sk_buff *skb;
11659 u8 *tx_data, *rx_data;
11661 int num_pkts, tx_len, rx_len, i, err;
11662 struct tg3_rx_buffer_desc *desc;
11663 struct tg3_napi *tnapi, *rnapi;
11664 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11666 tnapi = &tp->napi[0];
11667 rnapi = &tp->napi[0];
11668 if (tp->irq_cnt > 1) {
11669 if (tg3_flag(tp, ENABLE_RSS))
11670 rnapi = &tp->napi[1];
11671 if (tg3_flag(tp, ENABLE_TSS))
11672 tnapi = &tp->napi[1];
11674 coal_now = tnapi->coal_now | rnapi->coal_now;
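/* The rest of this routine builds a single self-addressed test frame,
 * posts it on the chosen TX ring, forces a coalescence interrupt, waits
 * for the TX consumer and RX producer indices to advance, and then
 * checks the received descriptor and payload byte for byte.
 */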
11679 skb = netdev_alloc_skb(tp->dev, tx_len);
11683 tx_data = skb_put(skb, tx_len);
11684 memcpy(tx_data, tp->dev->dev_addr, 6);  /* dest MAC: loop back to ourselves */
11685 memset(tx_data + 6, 0x0, 8);  /* zero the source MAC and EtherType */
11687 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11689 if (tso_loopback) {
11690 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11692 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11693 TG3_TSO_TCP_OPT_LEN;
11695 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11696 sizeof(tg3_tso_header));
11699 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11700 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11702 /* Set the total length field in the IP header */
11703 iph->tot_len = htons((u16)(mss + hdr_len));
11705 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11706 TXD_FLAG_CPU_POST_DMA);
11708 if (tg3_flag(tp, HW_TSO_1) ||
11709 tg3_flag(tp, HW_TSO_2) ||
11710 tg3_flag(tp, HW_TSO_3)) {
11712 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11713 th = (struct tcphdr *)&tx_data[val];
11716 base_flags |= TXD_FLAG_TCPUDP_CSUM;
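/* For the hardware-TSO generations the adjusted header length also has
 * to be handed to the chip: depending on the HW_TSO_1/2/3 flavor it is
 * packed into spare bits of the mss word and/or base_flags below, with
 * the exact bit layout differing per generation.
 */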
11718 if (tg3_flag(tp, HW_TSO_3)) {
11719 mss |= (hdr_len & 0xc) << 12;
11720 if (hdr_len & 0x10)
11721 base_flags |= 0x00000010;
11722 base_flags |= (hdr_len & 0x3e0) << 5;
11723 } else if (tg3_flag(tp, HW_TSO_2))
11724 mss |= hdr_len << 9;
11725 else if (tg3_flag(tp, HW_TSO_1) ||
11726 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11727 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11729 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11732 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11735 data_off = ETH_HLEN;
11737 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11738 tx_len > VLAN_ETH_FRAME_LEN)
11739 base_flags |= TXD_FLAG_JMB_PKT;
11742 for (i = data_off; i < tx_len; i++)
11743 tx_data[i] = (u8) (i & 0xff);
11745 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11746 if (pci_dma_mapping_error(tp->pdev, map)) {
11747 dev_kfree_skb(skb);
11751 val = tnapi->tx_prod;
11752 tnapi->tx_buffers[val].skb = skb;
11753 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11755 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11760 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11762 budget = tg3_tx_avail(tnapi);
11763 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11764 base_flags | TXD_FLAG_END, mss, 0)) {
11765 tnapi->tx_buffers[val].skb = NULL;
11766 dev_kfree_skb(skb);
11772 /* Sync BD data before updating mailbox */
11775 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11776 tr32_mailbox(tnapi->prodmbox);
11780 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
11781 for (i = 0; i < 35; i++) {
11782 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11787 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11788 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11789 if ((tx_idx == tnapi->tx_prod) &&
11790 (rx_idx == (rx_start_idx + num_pkts)))
11794 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11795 dev_kfree_skb(skb);
11797 if (tx_idx != tnapi->tx_prod)
11800 if (rx_idx != rx_start_idx + num_pkts)
11804 while (rx_idx != rx_start_idx) {
11805 desc = &rnapi->rx_rcb[rx_start_idx++];
11806 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11807 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11809 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11810 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11813 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11816 if (!tso_loopback) {
11817 if (rx_len != tx_len)
11820 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11821 if (opaque_key != RXD_OPAQUE_RING_STD)
11824 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11827 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11828 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11829 >> RXD_TCPCSUM_SHIFT != 0xffff) {
11833 if (opaque_key == RXD_OPAQUE_RING_STD) {
11834 rx_data = tpr->rx_std_buffers[desc_idx].data;
11835 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11837 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11838 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11839 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11844 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11845 PCI_DMA_FROMDEVICE);
11847 rx_data += TG3_RX_OFFSET(tp);
11848 for (i = data_off; i < rx_len; i++, val++) {
11849 if (*(rx_data + i) != (u8) (val & 0xff))
11856 /* tg3_free_rings will unmap and free the rx_data */
11861 #define TG3_STD_LOOPBACK_FAILED 1
11862 #define TG3_JMB_LOOPBACK_FAILED 2
11863 #define TG3_TSO_LOOPBACK_FAILED 4
11864 #define TG3_LOOPBACK_FAILED \
11865 (TG3_STD_LOOPBACK_FAILED | \
11866 TG3_JMB_LOOPBACK_FAILED | \
11867 TG3_TSO_LOOPBACK_FAILED)
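/* tg3_test_loopback() fills three result words: data[0] for MAC-level
 * loopback, data[1] for internal PHY loopback, and data[2] for external
 * PHY loopback (only when requested).  Each word is a bitmask of the
 * TG3_*_LOOPBACK_FAILED flags above, covering the standard, TSO and
 * jumbo-frame sub-tests.
 */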
11869 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11873 u32 jmb_pkt_sz = 9000;
11876 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11878 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11879 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11881 if (!netif_running(tp->dev)) {
11882 data[0] = TG3_LOOPBACK_FAILED;
11883 data[1] = TG3_LOOPBACK_FAILED;
11885 data[2] = TG3_LOOPBACK_FAILED;
11889 err = tg3_reset_hw(tp, 1);
11891 data[0] = TG3_LOOPBACK_FAILED;
11892 data[1] = TG3_LOOPBACK_FAILED;
11894 data[2] = TG3_LOOPBACK_FAILED;
11898 if (tg3_flag(tp, ENABLE_RSS)) {
11901 /* Reroute all rx packets to the 1st queue */
11902 for (i = MAC_RSS_INDIR_TBL_0;
11903 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11907 /* HW errata - mac loopback fails in some cases on 5780.
11908 * Normal traffic and PHY loopback are not affected by
11909 * errata. Also, the MAC loopback test is deprecated for
11910 * all newer ASIC revisions.
11912 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11913 !tg3_flag(tp, CPMU_PRESENT)) {
11914 tg3_mac_loopback(tp, true);
11916 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11917 data[0] |= TG3_STD_LOOPBACK_FAILED;
11919 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11920 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11921 data[0] |= TG3_JMB_LOOPBACK_FAILED;
11923 tg3_mac_loopback(tp, false);
11926 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11927 !tg3_flag(tp, USE_PHYLIB)) {
11930 tg3_phy_lpbk_set(tp, 0, false);
11932 /* Wait for link */
11933 for (i = 0; i < 100; i++) {
11934 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11939 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11940 data[1] |= TG3_STD_LOOPBACK_FAILED;
11941 if (tg3_flag(tp, TSO_CAPABLE) &&
11942 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11943 data[1] |= TG3_TSO_LOOPBACK_FAILED;
11944 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11945 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11946 data[1] |= TG3_JMB_LOOPBACK_FAILED;
11949 tg3_phy_lpbk_set(tp, 0, true);
11951 /* All link indications report up, but the hardware
11952 * isn't really ready for about 20 msec. Double it to be sure.
11957 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11958 data[2] |= TG3_STD_LOOPBACK_FAILED;
11959 if (tg3_flag(tp, TSO_CAPABLE) &&
11960 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11961 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11962 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11963 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11964 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11967 /* Re-enable gphy autopowerdown. */
11968 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11969 tg3_phy_toggle_apd(tp, true);
11972 err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11975 tp->phy_flags |= eee_cap;
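/* ethtool self-test entry point.  Userspace reaches this through the
 * ETHTOOL_TEST ioctl; with the ethtool utility that is, for example
 * (illustrative invocation, any interface name):
 *
 *	ethtool -t eth0 offline
 *
 * The online tests (NVRAM checksum, link) run first; the offline tests
 * additionally halt the device and exercise the registers, internal
 * memory, the loopback paths and the interrupt path before restarting
 * the hardware.
 */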
11980 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11983 struct tg3 *tp = netdev_priv(dev);
11984 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11986 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11987 tg3_power_up(tp)) {
11988 etest->flags |= ETH_TEST_FL_FAILED;
11989 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11993 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11995 if (tg3_test_nvram(tp) != 0) {
11996 etest->flags |= ETH_TEST_FL_FAILED;
11999 if (!doextlpbk && tg3_test_link(tp)) {
12000 etest->flags |= ETH_TEST_FL_FAILED;
12003 if (etest->flags & ETH_TEST_FL_OFFLINE) {
12004 int err, err2 = 0, irq_sync = 0;
12006 if (netif_running(dev)) {
12008 tg3_netif_stop(tp);
12012 tg3_full_lock(tp, irq_sync);
12014 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12015 err = tg3_nvram_lock(tp);
12016 tg3_halt_cpu(tp, RX_CPU_BASE);
12017 if (!tg3_flag(tp, 5705_PLUS))
12018 tg3_halt_cpu(tp, TX_CPU_BASE);
12020 tg3_nvram_unlock(tp);
12022 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12025 if (tg3_test_registers(tp) != 0) {
12026 etest->flags |= ETH_TEST_FL_FAILED;
12030 if (tg3_test_memory(tp) != 0) {
12031 etest->flags |= ETH_TEST_FL_FAILED;
12036 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12038 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12039 etest->flags |= ETH_TEST_FL_FAILED;
12041 tg3_full_unlock(tp);
12043 if (tg3_test_interrupt(tp) != 0) {
12044 etest->flags |= ETH_TEST_FL_FAILED;
12048 tg3_full_lock(tp, 0);
12050 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12051 if (netif_running(dev)) {
12052 tg3_flag_set(tp, INIT_COMPLETE);
12053 err2 = tg3_restart_hw(tp, 1);
12055 tg3_netif_start(tp);
12058 tg3_full_unlock(tp);
12060 if (irq_sync && !err2)
12063 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12064 tg3_power_down(tp);
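/* MII ioctl handler.  With phylib attached (USE_PHYLIB) the request is
 * forwarded to phy_mii_ioctl(); otherwise SIOCGMIIREG/SIOCSMIIREG are
 * serviced directly through tg3_readphy()/tg3_writephy() under
 * tp->lock, and are rejected on SerDes devices that have no MII PHY.
 */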
12068 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12070 struct mii_ioctl_data *data = if_mii(ifr);
12071 struct tg3 *tp = netdev_priv(dev);
12074 if (tg3_flag(tp, USE_PHYLIB)) {
12075 struct phy_device *phydev;
12076 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12078 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12079 return phy_mii_ioctl(phydev, ifr, cmd);
12084 data->phy_id = tp->phy_addr;
12087 case SIOCGMIIREG: {
12090 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12091 break; /* We have no PHY */
12093 if (!netif_running(dev))
12096 spin_lock_bh(&tp->lock);
12097 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12098 spin_unlock_bh(&tp->lock);
12100 data->val_out = mii_regval;
12106 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12107 break; /* We have no PHY */
12109 if (!netif_running(dev))
12112 spin_lock_bh(&tp->lock);
12113 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12114 spin_unlock_bh(&tp->lock);
12122 return -EOPNOTSUPP;
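/* Interrupt coalescing: tg3_get_coalesce() simply copies the cached
 * tp->coal parameters, while tg3_set_coalesce() range-checks the request
 * against the chip limits (the *_irq and stats-block knobs only exist on
 * pre-5705 parts), rejects settings that would leave rx or tx with both
 * usecs and max-frames at zero (no interrupts would ever fire), and then
 * programs the new values if the interface is running.  A typical
 * adjustment from userspace (illustrative):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 */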
12125 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12127 struct tg3 *tp = netdev_priv(dev);
12129 memcpy(ec, &tp->coal, sizeof(*ec));
12133 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12135 struct tg3 *tp = netdev_priv(dev);
12136 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12137 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12139 if (!tg3_flag(tp, 5705_PLUS)) {
12140 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12141 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12142 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12143 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12146 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12147 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12148 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12149 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12150 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12151 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12152 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12153 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12154 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12155 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12158 /* No rx interrupts will be generated if both are zero */
12159 if ((ec->rx_coalesce_usecs == 0) &&
12160 (ec->rx_max_coalesced_frames == 0))
12163 /* No tx interrupts will be generated if both are zero */
12164 if ((ec->tx_coalesce_usecs == 0) &&
12165 (ec->tx_max_coalesced_frames == 0))
12168 /* Only copy relevant parameters, ignore all others. */
12169 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12170 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12171 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12172 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12173 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12174 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12175 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12176 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12177 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12179 if (netif_running(dev)) {
12180 tg3_full_lock(tp, 0);
12181 __tg3_set_coalesce(tp, &tp->coal);
12182 tg3_full_unlock(tp);
12187 static const struct ethtool_ops tg3_ethtool_ops = {
12188 .get_settings = tg3_get_settings,
12189 .set_settings = tg3_set_settings,
12190 .get_drvinfo = tg3_get_drvinfo,
12191 .get_regs_len = tg3_get_regs_len,
12192 .get_regs = tg3_get_regs,
12193 .get_wol = tg3_get_wol,
12194 .set_wol = tg3_set_wol,
12195 .get_msglevel = tg3_get_msglevel,
12196 .set_msglevel = tg3_set_msglevel,
12197 .nway_reset = tg3_nway_reset,
12198 .get_link = ethtool_op_get_link,
12199 .get_eeprom_len = tg3_get_eeprom_len,
12200 .get_eeprom = tg3_get_eeprom,
12201 .set_eeprom = tg3_set_eeprom,
12202 .get_ringparam = tg3_get_ringparam,
12203 .set_ringparam = tg3_set_ringparam,
12204 .get_pauseparam = tg3_get_pauseparam,
12205 .set_pauseparam = tg3_set_pauseparam,
12206 .self_test = tg3_self_test,
12207 .get_strings = tg3_get_strings,
12208 .set_phys_id = tg3_set_phys_id,
12209 .get_ethtool_stats = tg3_get_ethtool_stats,
12210 .get_coalesce = tg3_get_coalesce,
12211 .set_coalesce = tg3_set_coalesce,
12212 .get_sset_count = tg3_get_sset_count,
12213 .get_rxnfc = tg3_get_rxnfc,
12214 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
12215 .get_rxfh_indir = tg3_get_rxfh_indir,
12216 .set_rxfh_indir = tg3_set_rxfh_indir,
12219 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12220 struct rtnl_link_stats64 *stats)
12222 struct tg3 *tp = netdev_priv(dev);
12225 return &tp->net_stats_prev;
12227 spin_lock_bh(&tp->lock);
12228 tg3_get_nstats(tp, stats);
12229 spin_unlock_bh(&tp->lock);
12234 static void tg3_set_rx_mode(struct net_device *dev)
12236 struct tg3 *tp = netdev_priv(dev);
12238 if (!netif_running(dev))
12241 tg3_full_lock(tp, 0);
12242 __tg3_set_rx_mode(dev);
12243 tg3_full_unlock(tp);
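/* On 5780-class chips jumbo frames and TSO are mutually exclusive, so
 * tg3_set_mtu() below clears TSO_CAPABLE when the MTU rises above
 * ETH_DATA_LEN (and re-enables it when it drops back), toggling the
 * jumbo producer ring to match.
 */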
12246 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12249 dev->mtu = new_mtu;
12251 if (new_mtu > ETH_DATA_LEN) {
12252 if (tg3_flag(tp, 5780_CLASS)) {
12253 netdev_update_features(dev);
12254 tg3_flag_clear(tp, TSO_CAPABLE);
12256 tg3_flag_set(tp, JUMBO_RING_ENABLE);
12259 if (tg3_flag(tp, 5780_CLASS)) {
12260 tg3_flag_set(tp, TSO_CAPABLE);
12261 netdev_update_features(dev);
12263 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12267 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12269 struct tg3 *tp = netdev_priv(dev);
12270 int err, reset_phy = 0;
12272 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12275 if (!netif_running(dev)) {
12276 /* We'll just catch it later when the interface is up and running. */
12279 tg3_set_mtu(dev, tp, new_mtu);
12285 tg3_netif_stop(tp);
12287 tg3_full_lock(tp, 1);
12289 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12291 tg3_set_mtu(dev, tp, new_mtu);
12293 /* Reset the PHY, otherwise the read DMA engine will be left in a mode
12294 * that breaks all requests down to 256 bytes.
12296 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12299 err = tg3_restart_hw(tp, reset_phy);
12302 tg3_netif_start(tp);
12304 tg3_full_unlock(tp);
12312 static const struct net_device_ops tg3_netdev_ops = {
12313 .ndo_open = tg3_open,
12314 .ndo_stop = tg3_close,
12315 .ndo_start_xmit = tg3_start_xmit,
12316 .ndo_get_stats64 = tg3_get_stats64,
12317 .ndo_validate_addr = eth_validate_addr,
12318 .ndo_set_rx_mode = tg3_set_rx_mode,
12319 .ndo_set_mac_address = tg3_set_mac_addr,
12320 .ndo_do_ioctl = tg3_ioctl,
12321 .ndo_tx_timeout = tg3_tx_timeout,
12322 .ndo_change_mtu = tg3_change_mtu,
12323 .ndo_fix_features = tg3_fix_features,
12324 .ndo_set_features = tg3_set_features,
12325 #ifdef CONFIG_NET_POLL_CONTROLLER
12326 .ndo_poll_controller = tg3_poll_controller,
12330 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12332 u32 cursize, val, magic;
12334 tp->nvram_size = EEPROM_CHIP_SIZE;
12336 if (tg3_nvram_read(tp, 0, &magic) != 0)
12339 if ((magic != TG3_EEPROM_MAGIC) &&
12340 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12341 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12345 * Size the chip by reading offsets at increasing powers of two.
12346 * When we encounter our validation signature, we know the addressing
12347 * has wrapped around, and thus have our chip size.
12351 while (cursize < tp->nvram_size) {
12352 if (tg3_nvram_read(tp, cursize, &val) != 0)
12361 tp->nvram_size = cursize;
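/* Illustrative sizing walk (assuming a 128 KB EEPROM): reads at
 * successively doubled offsets return ordinary data until the offset
 * reaches 0x20000, where the address wraps back to 0 and the validation
 * signature from offset 0 is read again, so nvram_size ends up 0x20000.
 */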
12364 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12368 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12371 /* Selfboot format */
12372 if (val != TG3_EEPROM_MAGIC) {
12373 tg3_get_eeprom_size(tp);
12377 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12379 /* This is confusing. We want to operate on the
12380 * 16-bit value at offset 0xf2. The tg3_nvram_read()
12381 * call will read from NVRAM and byteswap the data
12382 * according to the byteswapping settings for all
12383 * other register accesses. This ensures the data we
12384 * want will always reside in the lower 16-bits.
12385 * However, the data in NVRAM is in LE format, which
12386 * means the data from the NVRAM read will always be
12387 * opposite the endianness of the CPU. The 16-bit
12388 * byteswap then brings the data to CPU endianness.
12390 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12394 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12397 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12401 nvcfg1 = tr32(NVRAM_CFG1);
12402 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12403 tg3_flag_set(tp, FLASH);
12405 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12406 tw32(NVRAM_CFG1, nvcfg1);
12409 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12410 tg3_flag(tp, 5780_CLASS)) {
12411 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12412 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12413 tp->nvram_jedecnum = JEDEC_ATMEL;
12414 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12415 tg3_flag_set(tp, NVRAM_BUFFERED);
12417 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12418 tp->nvram_jedecnum = JEDEC_ATMEL;
12419 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12421 case FLASH_VENDOR_ATMEL_EEPROM:
12422 tp->nvram_jedecnum = JEDEC_ATMEL;
12423 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12424 tg3_flag_set(tp, NVRAM_BUFFERED);
12426 case FLASH_VENDOR_ST:
12427 tp->nvram_jedecnum = JEDEC_ST;
12428 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12429 tg3_flag_set(tp, NVRAM_BUFFERED);
12431 case FLASH_VENDOR_SAIFUN:
12432 tp->nvram_jedecnum = JEDEC_SAIFUN;
12433 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12435 case FLASH_VENDOR_SST_SMALL:
12436 case FLASH_VENDOR_SST_LARGE:
12437 tp->nvram_jedecnum = JEDEC_SST;
12438 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12442 tp->nvram_jedecnum = JEDEC_ATMEL;
12443 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12444 tg3_flag_set(tp, NVRAM_BUFFERED);
12448 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12450 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12451 case FLASH_5752PAGE_SIZE_256:
12452 tp->nvram_pagesize = 256;
12454 case FLASH_5752PAGE_SIZE_512:
12455 tp->nvram_pagesize = 512;
12457 case FLASH_5752PAGE_SIZE_1K:
12458 tp->nvram_pagesize = 1024;
12460 case FLASH_5752PAGE_SIZE_2K:
12461 tp->nvram_pagesize = 2048;
12463 case FLASH_5752PAGE_SIZE_4K:
12464 tp->nvram_pagesize = 4096;
12466 case FLASH_5752PAGE_SIZE_264:
12467 tp->nvram_pagesize = 264;
12469 case FLASH_5752PAGE_SIZE_528:
12470 tp->nvram_pagesize = 528;
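/* The tg3_get_*_nvram_info() helpers below all follow one pattern:
 * decode NVRAM_CFG1 to identify the attached part (JEDEC vendor), mark
 * whether it is buffered flash or a plain EEPROM, record the page size
 * and, for the families that encode it, the total size.  Bit 27 of
 * NVRAM_CFG1 marks TPM-protected NVRAM on the parts that support it.
 */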
12475 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12479 nvcfg1 = tr32(NVRAM_CFG1);
12481 /* NVRAM protection for TPM */
12482 if (nvcfg1 & (1 << 27))
12483 tg3_flag_set(tp, PROTECTED_NVRAM);
12485 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12486 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12487 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12488 tp->nvram_jedecnum = JEDEC_ATMEL;
12489 tg3_flag_set(tp, NVRAM_BUFFERED);
12491 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12492 tp->nvram_jedecnum = JEDEC_ATMEL;
12493 tg3_flag_set(tp, NVRAM_BUFFERED);
12494 tg3_flag_set(tp, FLASH);
12496 case FLASH_5752VENDOR_ST_M45PE10:
12497 case FLASH_5752VENDOR_ST_M45PE20:
12498 case FLASH_5752VENDOR_ST_M45PE40:
12499 tp->nvram_jedecnum = JEDEC_ST;
12500 tg3_flag_set(tp, NVRAM_BUFFERED);
12501 tg3_flag_set(tp, FLASH);
12505 if (tg3_flag(tp, FLASH)) {
12506 tg3_nvram_get_pagesize(tp, nvcfg1);
12508 /* For eeprom, set pagesize to maximum eeprom size */
12509 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12511 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12512 tw32(NVRAM_CFG1, nvcfg1);
12516 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12518 u32 nvcfg1, protect = 0;
12520 nvcfg1 = tr32(NVRAM_CFG1);
12522 /* NVRAM protection for TPM */
12523 if (nvcfg1 & (1 << 27)) {
12524 tg3_flag_set(tp, PROTECTED_NVRAM);
12528 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12530 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12531 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12532 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12533 case FLASH_5755VENDOR_ATMEL_FLASH_5:
12534 tp->nvram_jedecnum = JEDEC_ATMEL;
12535 tg3_flag_set(tp, NVRAM_BUFFERED);
12536 tg3_flag_set(tp, FLASH);
12537 tp->nvram_pagesize = 264;
12538 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12539 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12540 tp->nvram_size = (protect ? 0x3e200 :
12541 TG3_NVRAM_SIZE_512KB);
12542 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12543 tp->nvram_size = (protect ? 0x1f200 :
12544 TG3_NVRAM_SIZE_256KB);
12546 tp->nvram_size = (protect ? 0x1f200 :
12547 TG3_NVRAM_SIZE_128KB);
12549 case FLASH_5752VENDOR_ST_M45PE10:
12550 case FLASH_5752VENDOR_ST_M45PE20:
12551 case FLASH_5752VENDOR_ST_M45PE40:
12552 tp->nvram_jedecnum = JEDEC_ST;
12553 tg3_flag_set(tp, NVRAM_BUFFERED);
12554 tg3_flag_set(tp, FLASH);
12555 tp->nvram_pagesize = 256;
12556 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12557 tp->nvram_size = (protect ?
12558 TG3_NVRAM_SIZE_64KB :
12559 TG3_NVRAM_SIZE_128KB);
12560 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12561 tp->nvram_size = (protect ?
12562 TG3_NVRAM_SIZE_64KB :
12563 TG3_NVRAM_SIZE_256KB);
12565 tp->nvram_size = (protect ?
12566 TG3_NVRAM_SIZE_128KB :
12567 TG3_NVRAM_SIZE_512KB);
12572 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12576 nvcfg1 = tr32(NVRAM_CFG1);
12578 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12579 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12580 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12581 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12582 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12583 tp->nvram_jedecnum = JEDEC_ATMEL;
12584 tg3_flag_set(tp, NVRAM_BUFFERED);
12585 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12587 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12588 tw32(NVRAM_CFG1, nvcfg1);
12590 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12591 case FLASH_5755VENDOR_ATMEL_FLASH_1:
12592 case FLASH_5755VENDOR_ATMEL_FLASH_2:
12593 case FLASH_5755VENDOR_ATMEL_FLASH_3:
12594 tp->nvram_jedecnum = JEDEC_ATMEL;
12595 tg3_flag_set(tp, NVRAM_BUFFERED);
12596 tg3_flag_set(tp, FLASH);
12597 tp->nvram_pagesize = 264;
12599 case FLASH_5752VENDOR_ST_M45PE10:
12600 case FLASH_5752VENDOR_ST_M45PE20:
12601 case FLASH_5752VENDOR_ST_M45PE40:
12602 tp->nvram_jedecnum = JEDEC_ST;
12603 tg3_flag_set(tp, NVRAM_BUFFERED);
12604 tg3_flag_set(tp, FLASH);
12605 tp->nvram_pagesize = 256;
12610 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12612 u32 nvcfg1, protect = 0;
12614 nvcfg1 = tr32(NVRAM_CFG1);
12616 /* NVRAM protection for TPM */
12617 if (nvcfg1 & (1 << 27)) {
12618 tg3_flag_set(tp, PROTECTED_NVRAM);
12622 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12624 case FLASH_5761VENDOR_ATMEL_ADB021D:
12625 case FLASH_5761VENDOR_ATMEL_ADB041D:
12626 case FLASH_5761VENDOR_ATMEL_ADB081D:
12627 case FLASH_5761VENDOR_ATMEL_ADB161D:
12628 case FLASH_5761VENDOR_ATMEL_MDB021D:
12629 case FLASH_5761VENDOR_ATMEL_MDB041D:
12630 case FLASH_5761VENDOR_ATMEL_MDB081D:
12631 case FLASH_5761VENDOR_ATMEL_MDB161D:
12632 tp->nvram_jedecnum = JEDEC_ATMEL;
12633 tg3_flag_set(tp, NVRAM_BUFFERED);
12634 tg3_flag_set(tp, FLASH);
12635 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12636 tp->nvram_pagesize = 256;
12638 case FLASH_5761VENDOR_ST_A_M45PE20:
12639 case FLASH_5761VENDOR_ST_A_M45PE40:
12640 case FLASH_5761VENDOR_ST_A_M45PE80:
12641 case FLASH_5761VENDOR_ST_A_M45PE16:
12642 case FLASH_5761VENDOR_ST_M_M45PE20:
12643 case FLASH_5761VENDOR_ST_M_M45PE40:
12644 case FLASH_5761VENDOR_ST_M_M45PE80:
12645 case FLASH_5761VENDOR_ST_M_M45PE16:
12646 tp->nvram_jedecnum = JEDEC_ST;
12647 tg3_flag_set(tp, NVRAM_BUFFERED);
12648 tg3_flag_set(tp, FLASH);
12649 tp->nvram_pagesize = 256;
12654 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12657 case FLASH_5761VENDOR_ATMEL_ADB161D:
12658 case FLASH_5761VENDOR_ATMEL_MDB161D:
12659 case FLASH_5761VENDOR_ST_A_M45PE16:
12660 case FLASH_5761VENDOR_ST_M_M45PE16:
12661 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12663 case FLASH_5761VENDOR_ATMEL_ADB081D:
12664 case FLASH_5761VENDOR_ATMEL_MDB081D:
12665 case FLASH_5761VENDOR_ST_A_M45PE80:
12666 case FLASH_5761VENDOR_ST_M_M45PE80:
12667 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12669 case FLASH_5761VENDOR_ATMEL_ADB041D:
12670 case FLASH_5761VENDOR_ATMEL_MDB041D:
12671 case FLASH_5761VENDOR_ST_A_M45PE40:
12672 case FLASH_5761VENDOR_ST_M_M45PE40:
12673 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12675 case FLASH_5761VENDOR_ATMEL_ADB021D:
12676 case FLASH_5761VENDOR_ATMEL_MDB021D:
12677 case FLASH_5761VENDOR_ST_A_M45PE20:
12678 case FLASH_5761VENDOR_ST_M_M45PE20:
12679 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12685 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12687 tp->nvram_jedecnum = JEDEC_ATMEL;
12688 tg3_flag_set(tp, NVRAM_BUFFERED);
12689 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12692 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12696 nvcfg1 = tr32(NVRAM_CFG1);
12698 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12699 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12700 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12701 tp->nvram_jedecnum = JEDEC_ATMEL;
12702 tg3_flag_set(tp, NVRAM_BUFFERED);
12703 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12705 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12706 tw32(NVRAM_CFG1, nvcfg1);
12708 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12709 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12710 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12711 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12712 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12713 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12714 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12715 tp->nvram_jedecnum = JEDEC_ATMEL;
12716 tg3_flag_set(tp, NVRAM_BUFFERED);
12717 tg3_flag_set(tp, FLASH);
12719 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12720 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12721 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12722 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12723 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12725 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12726 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12727 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12729 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12730 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12731 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12735 case FLASH_5752VENDOR_ST_M45PE10:
12736 case FLASH_5752VENDOR_ST_M45PE20:
12737 case FLASH_5752VENDOR_ST_M45PE40:
12738 tp->nvram_jedecnum = JEDEC_ST;
12739 tg3_flag_set(tp, NVRAM_BUFFERED);
12740 tg3_flag_set(tp, FLASH);
12742 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12743 case FLASH_5752VENDOR_ST_M45PE10:
12744 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12746 case FLASH_5752VENDOR_ST_M45PE20:
12747 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12749 case FLASH_5752VENDOR_ST_M45PE40:
12750 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12755 tg3_flag_set(tp, NO_NVRAM);
12759 tg3_nvram_get_pagesize(tp, nvcfg1);
12760 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12761 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12765 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12769 nvcfg1 = tr32(NVRAM_CFG1);
12771 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12772 case FLASH_5717VENDOR_ATMEL_EEPROM:
12773 case FLASH_5717VENDOR_MICRO_EEPROM:
12774 tp->nvram_jedecnum = JEDEC_ATMEL;
12775 tg3_flag_set(tp, NVRAM_BUFFERED);
12776 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12778 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12779 tw32(NVRAM_CFG1, nvcfg1);
12781 case FLASH_5717VENDOR_ATMEL_MDB011D:
12782 case FLASH_5717VENDOR_ATMEL_ADB011B:
12783 case FLASH_5717VENDOR_ATMEL_ADB011D:
12784 case FLASH_5717VENDOR_ATMEL_MDB021D:
12785 case FLASH_5717VENDOR_ATMEL_ADB021B:
12786 case FLASH_5717VENDOR_ATMEL_ADB021D:
12787 case FLASH_5717VENDOR_ATMEL_45USPT:
12788 tp->nvram_jedecnum = JEDEC_ATMEL;
12789 tg3_flag_set(tp, NVRAM_BUFFERED);
12790 tg3_flag_set(tp, FLASH);
12792 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12793 case FLASH_5717VENDOR_ATMEL_MDB021D:
12794 /* Detect size with tg3_nvram_get_size() */
12796 case FLASH_5717VENDOR_ATMEL_ADB021B:
12797 case FLASH_5717VENDOR_ATMEL_ADB021D:
12798 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12801 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12805 case FLASH_5717VENDOR_ST_M_M25PE10:
12806 case FLASH_5717VENDOR_ST_A_M25PE10:
12807 case FLASH_5717VENDOR_ST_M_M45PE10:
12808 case FLASH_5717VENDOR_ST_A_M45PE10:
12809 case FLASH_5717VENDOR_ST_M_M25PE20:
12810 case FLASH_5717VENDOR_ST_A_M25PE20:
12811 case FLASH_5717VENDOR_ST_M_M45PE20:
12812 case FLASH_5717VENDOR_ST_A_M45PE20:
12813 case FLASH_5717VENDOR_ST_25USPT:
12814 case FLASH_5717VENDOR_ST_45USPT:
12815 tp->nvram_jedecnum = JEDEC_ST;
12816 tg3_flag_set(tp, NVRAM_BUFFERED);
12817 tg3_flag_set(tp, FLASH);
12819 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12820 case FLASH_5717VENDOR_ST_M_M25PE20:
12821 case FLASH_5717VENDOR_ST_M_M45PE20:
12822 /* Detect size with tg3_nvram_get_size() */
12824 case FLASH_5717VENDOR_ST_A_M25PE20:
12825 case FLASH_5717VENDOR_ST_A_M45PE20:
12826 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12829 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12834 tg3_flag_set(tp, NO_NVRAM);
12838 tg3_nvram_get_pagesize(tp, nvcfg1);
12839 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12840 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12843 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12845 u32 nvcfg1, nvmpinstrp;
12847 nvcfg1 = tr32(NVRAM_CFG1);
12848 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12850 switch (nvmpinstrp) {
12851 case FLASH_5720_EEPROM_HD:
12852 case FLASH_5720_EEPROM_LD:
12853 tp->nvram_jedecnum = JEDEC_ATMEL;
12854 tg3_flag_set(tp, NVRAM_BUFFERED);
12856 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12857 tw32(NVRAM_CFG1, nvcfg1);
12858 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12859 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12861 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12863 case FLASH_5720VENDOR_M_ATMEL_DB011D:
12864 case FLASH_5720VENDOR_A_ATMEL_DB011B:
12865 case FLASH_5720VENDOR_A_ATMEL_DB011D:
12866 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12867 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12868 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12869 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12870 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12871 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12872 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12873 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12874 case FLASH_5720VENDOR_ATMEL_45USPT:
12875 tp->nvram_jedecnum = JEDEC_ATMEL;
12876 tg3_flag_set(tp, NVRAM_BUFFERED);
12877 tg3_flag_set(tp, FLASH);
12879 switch (nvmpinstrp) {
12880 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12881 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12882 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12883 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12885 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12886 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12887 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12888 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12890 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12891 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12892 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12895 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12899 case FLASH_5720VENDOR_M_ST_M25PE10:
12900 case FLASH_5720VENDOR_M_ST_M45PE10:
12901 case FLASH_5720VENDOR_A_ST_M25PE10:
12902 case FLASH_5720VENDOR_A_ST_M45PE10:
12903 case FLASH_5720VENDOR_M_ST_M25PE20:
12904 case FLASH_5720VENDOR_M_ST_M45PE20:
12905 case FLASH_5720VENDOR_A_ST_M25PE20:
12906 case FLASH_5720VENDOR_A_ST_M45PE20:
12907 case FLASH_5720VENDOR_M_ST_M25PE40:
12908 case FLASH_5720VENDOR_M_ST_M45PE40:
12909 case FLASH_5720VENDOR_A_ST_M25PE40:
12910 case FLASH_5720VENDOR_A_ST_M45PE40:
12911 case FLASH_5720VENDOR_M_ST_M25PE80:
12912 case FLASH_5720VENDOR_M_ST_M45PE80:
12913 case FLASH_5720VENDOR_A_ST_M25PE80:
12914 case FLASH_5720VENDOR_A_ST_M45PE80:
12915 case FLASH_5720VENDOR_ST_25USPT:
12916 case FLASH_5720VENDOR_ST_45USPT:
12917 tp->nvram_jedecnum = JEDEC_ST;
12918 tg3_flag_set(tp, NVRAM_BUFFERED);
12919 tg3_flag_set(tp, FLASH);
12921 switch (nvmpinstrp) {
12922 case FLASH_5720VENDOR_M_ST_M25PE20:
12923 case FLASH_5720VENDOR_M_ST_M45PE20:
12924 case FLASH_5720VENDOR_A_ST_M25PE20:
12925 case FLASH_5720VENDOR_A_ST_M45PE20:
12926 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12928 case FLASH_5720VENDOR_M_ST_M25PE40:
12929 case FLASH_5720VENDOR_M_ST_M45PE40:
12930 case FLASH_5720VENDOR_A_ST_M25PE40:
12931 case FLASH_5720VENDOR_A_ST_M45PE40:
12932 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12934 case FLASH_5720VENDOR_M_ST_M25PE80:
12935 case FLASH_5720VENDOR_M_ST_M45PE80:
12936 case FLASH_5720VENDOR_A_ST_M25PE80:
12937 case FLASH_5720VENDOR_A_ST_M45PE80:
12938 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12941 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12946 tg3_flag_set(tp, NO_NVRAM);
12950 tg3_nvram_get_pagesize(tp, nvcfg1);
12951 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12952 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12955 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12956 static void __devinit tg3_nvram_init(struct tg3 *tp)
12958 tw32_f(GRC_EEPROM_ADDR,
12959 (EEPROM_ADDR_FSM_RESET |
12960 (EEPROM_DEFAULT_CLOCK_PERIOD <<
12961 EEPROM_ADDR_CLKPERD_SHIFT)));
12965 /* Enable seeprom accesses. */
12966 tw32_f(GRC_LOCAL_CTRL,
12967 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12970 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12971 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12972 tg3_flag_set(tp, NVRAM);
12974 if (tg3_nvram_lock(tp)) {
12975 netdev_warn(tp->dev,
12976 "Cannot get nvram lock, %s failed\n",
12980 tg3_enable_nvram_access(tp);
12982 tp->nvram_size = 0;
12984 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12985 tg3_get_5752_nvram_info(tp);
12986 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12987 tg3_get_5755_nvram_info(tp);
12988 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12989 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12990 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12991 tg3_get_5787_nvram_info(tp);
12992 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12993 tg3_get_5761_nvram_info(tp);
12994 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12995 tg3_get_5906_nvram_info(tp);
12996 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12997 tg3_flag(tp, 57765_CLASS))
12998 tg3_get_57780_nvram_info(tp);
12999 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13000 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13001 tg3_get_5717_nvram_info(tp);
13002 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13003 tg3_get_5720_nvram_info(tp);
13005 tg3_get_nvram_info(tp);
13007 if (tp->nvram_size == 0)
13008 tg3_get_nvram_size(tp);
13010 tg3_disable_nvram_access(tp);
13011 tg3_nvram_unlock(tp);
13014 tg3_flag_clear(tp, NVRAM);
13015 tg3_flag_clear(tp, NVRAM_BUFFERED);
13017 tg3_get_eeprom_size(tp);
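/* Fallback table mapping PCI subsystem vendor/device IDs to PHY IDs.
 * tg3_phy_probe() consults it, via tg3_lookup_by_subsys(), when neither
 * the PHY registers nor the EEPROM configuration yield a usable PHY ID.
 */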
13021 struct subsys_tbl_ent {
13022 u16 subsys_vendor, subsys_devid;
13026 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13027 /* Broadcom boards. */
13028 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13029 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13030 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13031 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13032 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13033 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13034 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13035 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13036 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13037 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13038 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13039 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13040 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13041 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13042 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13043 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13044 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13045 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13046 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13047 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13048 { TG3PCI_SUBVENDOR_ID_BROADCOM,
13049 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13052 { TG3PCI_SUBVENDOR_ID_3COM,
13053 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13054 { TG3PCI_SUBVENDOR_ID_3COM,
13055 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13056 { TG3PCI_SUBVENDOR_ID_3COM,
13057 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13058 { TG3PCI_SUBVENDOR_ID_3COM,
13059 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13060 { TG3PCI_SUBVENDOR_ID_3COM,
13061 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13064 { TG3PCI_SUBVENDOR_ID_DELL,
13065 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13066 { TG3PCI_SUBVENDOR_ID_DELL,
13067 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13068 { TG3PCI_SUBVENDOR_ID_DELL,
13069 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13070 { TG3PCI_SUBVENDOR_ID_DELL,
13071 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13073 /* Compaq boards. */
13074 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13075 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13076 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13077 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13078 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13079 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13080 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13081 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13082 { TG3PCI_SUBVENDOR_ID_COMPAQ,
13083 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13086 { TG3PCI_SUBVENDOR_ID_IBM,
13087 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13090 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13094 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13095 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13096 tp->pdev->subsystem_vendor) &&
13097 (subsys_id_to_phy_id[i].subsys_devid ==
13098 tp->pdev->subsystem_device))
13099 return &subsys_id_to_phy_id[i];
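/* Parse the hardware configuration block that bootcode leaves in NIC
 * shared memory.  If the NIC_SRAM_DATA_SIG magic checks out, the block
 * supplies the PHY ID (and whether it is a SerDes), LED mode, WOL and
 * ASF/APE enables, the ASPM workaround and RGMII strapping; otherwise
 * the conservative defaults set at the top of the function remain in
 * effect.
 */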
13104 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13108 tp->phy_id = TG3_PHY_ID_INVALID;
13109 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13111 /* Assume an onboard device and WOL capable by default. */
13112 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13113 tg3_flag_set(tp, WOL_CAP);
13115 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13116 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13117 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13118 tg3_flag_set(tp, IS_NIC);
13120 val = tr32(VCPU_CFGSHDW);
13121 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13122 tg3_flag_set(tp, ASPM_WORKAROUND);
13123 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13124 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13125 tg3_flag_set(tp, WOL_ENABLE);
13126 device_set_wakeup_enable(&tp->pdev->dev, true);
13131 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13132 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13133 u32 nic_cfg, led_cfg;
13134 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13135 int eeprom_phy_serdes = 0;
13137 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13138 tp->nic_sram_data_cfg = nic_cfg;
13140 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13141 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13142 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13143 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13144 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13145 (ver > 0) && (ver < 0x100))
13146 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13148 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13149 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13151 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13152 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13153 eeprom_phy_serdes = 1;
13155 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13156 if (nic_phy_id != 0) {
13157 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13158 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13160 eeprom_phy_id = (id1 >> 16) << 10;
13161 eeprom_phy_id |= (id2 & 0xfc00) << 16;
13162 eeprom_phy_id |= (id2 & 0x03ff) << 0;
13166 tp->phy_id = eeprom_phy_id;
13167 if (eeprom_phy_serdes) {
13168 if (!tg3_flag(tp, 5705_PLUS))
13169 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13171 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13174 if (tg3_flag(tp, 5750_PLUS))
13175 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13176 SHASTA_EXT_LED_MODE_MASK);
13178 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13182 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13183 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13186 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13187 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13190 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13191 tp->led_ctrl = LED_CTRL_MODE_MAC;
13193 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13194 * read on some older 5700/5701 bootcode.
13196 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13198 GET_ASIC_REV(tp->pci_chip_rev_id) ==
13200 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13204 case SHASTA_EXT_LED_SHARED:
13205 tp->led_ctrl = LED_CTRL_MODE_SHARED;
13206 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13207 tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13208 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13209 LED_CTRL_MODE_PHY_2);
13212 case SHASTA_EXT_LED_MAC:
13213 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13216 case SHASTA_EXT_LED_COMBO:
13217 tp->led_ctrl = LED_CTRL_MODE_COMBO;
13218 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13219 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13220 LED_CTRL_MODE_PHY_2);
13225 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13227 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13228 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13230 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13231 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13233 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13234 tg3_flag_set(tp, EEPROM_WRITE_PROT);
13235 if ((tp->pdev->subsystem_vendor ==
13236 PCI_VENDOR_ID_ARIMA) &&
13237 (tp->pdev->subsystem_device == 0x205a ||
13238 tp->pdev->subsystem_device == 0x2063))
13239 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13241 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13242 tg3_flag_set(tp, IS_NIC);
13245 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13246 tg3_flag_set(tp, ENABLE_ASF);
13247 if (tg3_flag(tp, 5750_PLUS))
13248 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13251 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13252 tg3_flag(tp, 5750_PLUS))
13253 tg3_flag_set(tp, ENABLE_APE);
13255 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13256 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13257 tg3_flag_clear(tp, WOL_CAP);
13259 if (tg3_flag(tp, WOL_CAP) &&
13260 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13261 tg3_flag_set(tp, WOL_ENABLE);
13262 device_set_wakeup_enable(&tp->pdev->dev, true);
13265 if (cfg2 & (1 << 17))
13266 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13268 /* serdes signal pre-emphasis in register 0x590 is set by the
13269 * bootcode if bit 18 is set. */
13270 if (cfg2 & (1 << 18))
13271 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13273 if ((tg3_flag(tp, 57765_PLUS) ||
13274 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13275 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13276 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13277 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13279 if (tg3_flag(tp, PCI_EXPRESS) &&
13280 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13281 !tg3_flag(tp, 57765_PLUS)) {
13284 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13285 if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13286 tg3_flag_set(tp, ASPM_WORKAROUND);
13289 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13290 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13291 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13292 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13293 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13294 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13297 if (tg3_flag(tp, WOL_CAP))
13298 device_set_wakeup_enable(&tp->pdev->dev,
13299 tg3_flag(tp, WOL_ENABLE));
13301 device_set_wakeup_capable(&tp->pdev->dev, false);
13304 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13309 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13310 tw32(OTP_CTRL, cmd);
13312 /* Wait for up to 1 ms for command to execute. */
13313 for (i = 0; i < 100; i++) {
13314 val = tr32(OTP_STATUS);
13315 if (val & OTP_STATUS_CMD_DONE)
13320 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13323 /* Read the gphy configuration from the OTP region of the chip. The gphy
13324 * configuration is a 32-bit value that straddles the alignment boundary.
13325 * We do two 32-bit reads and then shift and merge the results.
13327 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13329 u32 bhalf_otp, thalf_otp;
13331 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13333 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13336 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13338 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13341 thalf_otp = tr32(OTP_READ_DATA);
13343 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13345 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13348 bhalf_otp = tr32(OTP_READ_DATA);
13350 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
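/* Build the default link_config: always advertise autoneg, add the
 * gigabit modes unless the PHY is 10/100-only, add the 10/100 modes for
 * copper PHYs or advertise FIBRE for SerDes, and leave the current
 * speed/duplex unknown until a link is negotiated.
 */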
13353 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13355 u32 adv = ADVERTISED_Autoneg;
13357 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13358 adv |= ADVERTISED_1000baseT_Half |
13359 ADVERTISED_1000baseT_Full;
13361 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13362 adv |= ADVERTISED_100baseT_Half |
13363 ADVERTISED_100baseT_Full |
13364 ADVERTISED_10baseT_Half |
13365 ADVERTISED_10baseT_Full |
13368 adv |= ADVERTISED_FIBRE;
13370 tp->link_config.advertising = adv;
13371 tp->link_config.speed = SPEED_UNKNOWN;
13372 tp->link_config.duplex = DUPLEX_UNKNOWN;
13373 tp->link_config.autoneg = AUTONEG_ENABLE;
13374 tp->link_config.active_speed = SPEED_UNKNOWN;
13375 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13380 static int __devinit tg3_phy_probe(struct tg3 *tp)
13382 u32 hw_phy_id_1, hw_phy_id_2;
13383 u32 hw_phy_id, hw_phy_id_masked;
13386 /* flow control autonegotiation is default behavior */
13387 tg3_flag_set(tp, PAUSE_AUTONEG);
13388 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13390 if (tg3_flag(tp, USE_PHYLIB))
13391 return tg3_phy_init(tp);
13393 /* Reading the PHY ID register can conflict with ASF
13394 * firmware access to the PHY hardware.
13397 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13398 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13400 /* Now read the physical PHY_ID from the chip and verify
13401 * that it is sane. If it doesn't look good, we fall back
13402 * to either the hard-coded, table-based PHY_ID or, failing
13403 * that, the value found in the EEPROM area.
13405 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13406 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13408 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
13409 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13410 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
13412 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13415 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13416 tp->phy_id = hw_phy_id;
13417 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13418 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13420 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13422 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13423 /* Do nothing, phy ID already set up in
13424 * tg3_get_eeprom_hw_cfg().
13427 struct subsys_tbl_ent *p;
13429 /* No eeprom signature? Try the hardcoded
13430 * subsys device table.
13432 p = tg3_lookup_by_subsys(tp);
13436 tp->phy_id = p->phy_id;
13438 tp->phy_id == TG3_PHY_ID_BCM8002)
13439 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13443 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13444 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13446 (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13447 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13448 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13449 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13450 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13452 tg3_phy_init_link_config(tp);
13454 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13455 !tg3_flag(tp, ENABLE_APE) &&
13456 !tg3_flag(tp, ENABLE_ASF)) {
13459 tg3_readphy(tp, MII_BMSR, &bmsr);
13460 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13461 (bmsr & BMSR_LSTATUS))
13462 goto skip_phy_reset;
13464 err = tg3_phy_reset(tp);
13468 tg3_phy_set_wirespeed(tp);
13470 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13471 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13472 tp->link_config.flowctrl);
13474 tg3_writephy(tp, MII_BMCR,
13475 BMCR_ANENABLE | BMCR_ANRESTART);
13480 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13481 err = tg3_init_5401phy_dsp(tp);
13485 err = tg3_init_5401phy_dsp(tp);
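/* Read identification strings out of the PCI VPD area: the read-only
 * section is scanned for the part number keyword and, on Dell boards
 * identified by MFR_ID "1028", a vendor firmware string that is appended
 * to tp->fw_ver.  If no part number is present, the board name is filled
 * in from a hard-coded list keyed on ASIC and PCI device ID.
 */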
13491 static void __devinit tg3_read_vpd(struct tg3 *tp)
13494 unsigned int block_end, rosize, len;
13498 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13502 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13504 goto out_not_found;
13506 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13507 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13508 i += PCI_VPD_LRDT_TAG_SIZE;
13510 if (block_end > vpdlen)
13511 goto out_not_found;
13513 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13514 PCI_VPD_RO_KEYWORD_MFR_ID);
13516 len = pci_vpd_info_field_size(&vpd_data[j]);
13518 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13519 if (j + len > block_end || len != 4 ||
13520 memcmp(&vpd_data[j], "1028", 4))
13523 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13524 PCI_VPD_RO_KEYWORD_VENDOR0);
13528 len = pci_vpd_info_field_size(&vpd_data[j]);
13530 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13531 if (j + len > block_end)
13534 memcpy(tp->fw_ver, &vpd_data[j], len);
13535 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13539 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13540 PCI_VPD_RO_KEYWORD_PARTNO);
13542 goto out_not_found;
13544 len = pci_vpd_info_field_size(&vpd_data[i]);
13546 i += PCI_VPD_INFO_FLD_HDR_SIZE;
13547 if (len > TG3_BPN_SIZE ||
13548 (len + i) > vpdlen)
13549 goto out_not_found;
13551 memcpy(tp->board_part_number, &vpd_data[i], len);
13555 if (tp->board_part_number[0])
13559 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13560 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13561 strcpy(tp->board_part_number, "BCM5717");
13562 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13563 strcpy(tp->board_part_number, "BCM5718");
13566 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13567 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13568 strcpy(tp->board_part_number, "BCM57780");
13569 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13570 strcpy(tp->board_part_number, "BCM57760");
13571 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13572 strcpy(tp->board_part_number, "BCM57790");
13573 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13574 strcpy(tp->board_part_number, "BCM57788");
13577 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13578 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13579 strcpy(tp->board_part_number, "BCM57761");
13580 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13581 strcpy(tp->board_part_number, "BCM57765");
13582 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13583 strcpy(tp->board_part_number, "BCM57781");
13584 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13585 strcpy(tp->board_part_number, "BCM57785");
13586 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13587 strcpy(tp->board_part_number, "BCM57791");
13588 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13589 strcpy(tp->board_part_number, "BCM57795");
13592 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13593 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13594 strcpy(tp->board_part_number, "BCM57762");
13595 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13596 strcpy(tp->board_part_number, "BCM57766");
13597 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13598 strcpy(tp->board_part_number, "BCM57782");
13599 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13600 strcpy(tp->board_part_number, "BCM57786");
13603 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13604 strcpy(tp->board_part_number, "BCM95906");
13607 strcpy(tp->board_part_number, "none");
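/* The remaining helpers assemble tp->fw_ver: the bootcode version from
 * the NVRAM image header, the selfboot (sb/hwsb) version formats, the
 * optional management firmware (ASF) version from the NVRAM directory,
 * and the DASH/NCSI firmware version read from the APE registers.
 */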
13611 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13615 if (tg3_nvram_read(tp, offset, &val) ||
13616 (val & 0xfc000000) != 0x0c000000 ||
13617 tg3_nvram_read(tp, offset + 4, &val) ||
13624 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13626 u32 val, offset, start, ver_offset;
13628 bool newver = false;
13630 if (tg3_nvram_read(tp, 0xc, &offset) ||
13631 tg3_nvram_read(tp, 0x4, &start))
13634 offset = tg3_nvram_logical_addr(tp, offset);
13636 if (tg3_nvram_read(tp, offset, &val))
13639 if ((val & 0xfc000000) == 0x0c000000) {
13640 if (tg3_nvram_read(tp, offset + 4, &val))
13647 dst_off = strlen(tp->fw_ver);
13650 if (TG3_VER_SIZE - dst_off < 16 ||
13651 tg3_nvram_read(tp, offset + 8, &ver_offset))
13654 offset = offset + ver_offset - start;
13655 for (i = 0; i < 16; i += 4) {
13657 if (tg3_nvram_read_be32(tp, offset + i, &v))
13660 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13665 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13668 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13669 TG3_NVM_BCVER_MAJSFT;
13670 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13671 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13672 "v%d.%02d", major, minor);
13676 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13678 u32 val, major, minor;
13680 /* Use native endian representation */
13681 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13684 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13685 TG3_NVM_HWSB_CFG1_MAJSFT;
13686 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13687 TG3_NVM_HWSB_CFG1_MINSFT;
13689 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13692 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13694 u32 offset, major, minor, build;
13696 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13698 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13701 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13702 case TG3_EEPROM_SB_REVISION_0:
13703 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13705 case TG3_EEPROM_SB_REVISION_2:
13706 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13708 case TG3_EEPROM_SB_REVISION_3:
13709 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13711 case TG3_EEPROM_SB_REVISION_4:
13712 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13714 case TG3_EEPROM_SB_REVISION_5:
13715 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13717 case TG3_EEPROM_SB_REVISION_6:
13718 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13724 if (tg3_nvram_read(tp, offset, &val))
13727 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13728 TG3_EEPROM_SB_EDH_BLD_SHFT;
13729 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13730 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13731 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
13733 if (minor > 99 || build > 26)
13736 offset = strlen(tp->fw_ver);
13737 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13738 " v%d.%02d", major, minor);
13741 offset = strlen(tp->fw_ver);
13742 if (offset < TG3_VER_SIZE - 1)
13743 tp->fw_ver[offset] = 'a' + build - 1;
13747 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13749 u32 val, offset, start;
13752 for (offset = TG3_NVM_DIR_START;
13753 offset < TG3_NVM_DIR_END;
13754 offset += TG3_NVM_DIRENT_SIZE) {
13755 if (tg3_nvram_read(tp, offset, &val))
13758 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13762 if (offset == TG3_NVM_DIR_END)
13765 if (!tg3_flag(tp, 5705_PLUS))
13766 start = 0x08000000;
13767 else if (tg3_nvram_read(tp, offset - 4, &start))
13770 if (tg3_nvram_read(tp, offset + 4, &offset) ||
13771 !tg3_fw_img_is_valid(tp, offset) ||
13772 tg3_nvram_read(tp, offset + 8, &val))
13775 offset += val - start;
13777 vlen = strlen(tp->fw_ver);
13779 tp->fw_ver[vlen++] = ',';
13780 tp->fw_ver[vlen++] = ' ';
13782 for (i = 0; i < 4; i++) {
13784 if (tg3_nvram_read_be32(tp, offset, &v))
13787 offset += sizeof(v);
13789 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13790 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13794 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13799 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13805 if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13808 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13809 if (apedata != APE_SEG_SIG_MAGIC)
13812 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13813 if (!(apedata & APE_FW_STATUS_READY))
13816 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13818 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13819 tg3_flag_set(tp, APE_HAS_NCSI);
13825 vlen = strlen(tp->fw_ver);
13827 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13829 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13830 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13831 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13832 (apedata & APE_FW_VERSION_BLDMSK));
13835 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13838 bool vpd_vers = false;
13840 if (tp->fw_ver[0] != 0)
13843 if (tg3_flag(tp, NO_NVRAM)) {
13844 strcat(tp->fw_ver, "sb");
13848 if (tg3_nvram_read(tp, 0, &val))
13851 if (val == TG3_EEPROM_MAGIC)
13852 tg3_read_bc_ver(tp);
13853 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13854 tg3_read_sb_ver(tp, val);
13855 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13856 tg3_read_hwsb_ver(tp);
13863 if (tg3_flag(tp, ENABLE_APE)) {
13864 if (tg3_flag(tp, ENABLE_ASF))
13865 tg3_read_dash_ver(tp);
13866 } else if (tg3_flag(tp, ENABLE_ASF)) {
13867 tg3_read_mgmtfw_ver(tp);
13871 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13874 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13876 if (tg3_flag(tp, LRG_PROD_RING_CAP))
13877 return TG3_RX_RET_MAX_SIZE_5717;
13878 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13879 return TG3_RX_RET_MAX_SIZE_5700;
13881 return TG3_RX_RET_MAX_SIZE_5705;
13884 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13885 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13886 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13887 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13891 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13893 struct pci_dev *peer;
13894 unsigned int func, devnr = tp->pdev->devfn & ~7;
13896 for (func = 0; func < 8; func++) {
13897 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13898 if (peer && peer != tp->pdev)
13902 /* 5704 can be configured in single-port mode, set peer to
13903 * tp->pdev in that case.
13911 * We don't need to keep the refcount elevated; there's no way
13912 * to remove one half of this device without removing the other
13919 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13921 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13922 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13925 /* All devices that use the alternate
13926 * ASIC REV location have a CPMU.
13928 tg3_flag_set(tp, CPMU_PRESENT);
13930 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13931 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13932 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13933 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13934 reg = TG3PCI_GEN2_PRODID_ASICREV;
13935 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13936 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13937 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13938 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13939 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13940 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13941 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13942 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13943 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13944 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13945 reg = TG3PCI_GEN15_PRODID_ASICREV;
13947 reg = TG3PCI_PRODID_ASICREV;
13949 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13952 /* Wrong chip ID in 5752 A0. This code can be removed later
13953 * as A0 is not in production.
13955 if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13956 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13958 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13959 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13960 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13961 tg3_flag_set(tp, 5717_PLUS);
13963 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13964 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13965 tg3_flag_set(tp, 57765_CLASS);
13967 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13968 tg3_flag_set(tp, 57765_PLUS);
13970 /* Intentionally exclude ASIC_REV_5906 */
13971 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13972 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13973 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13974 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13975 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13976 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13977 tg3_flag(tp, 57765_PLUS))
13978 tg3_flag_set(tp, 5755_PLUS);
13980 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13981 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13982 tg3_flag_set(tp, 5780_CLASS);
13984 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13985 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13986 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13987 tg3_flag(tp, 5755_PLUS) ||
13988 tg3_flag(tp, 5780_CLASS))
13989 tg3_flag_set(tp, 5750_PLUS);
13991 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13992 tg3_flag(tp, 5750_PLUS))
13993 tg3_flag_set(tp, 5705_PLUS);
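/* As a result of the cascade above, e.g. a 57765-class device ends up
 * with 57765_CLASS, 57765_PLUS, 5755_PLUS, 5750_PLUS and 5705_PLUS
 * all set.
 */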
13996 static int __devinit tg3_get_invariants(struct tg3 *tp)
13999 u32 pci_state_reg, grc_misc_cfg;
14004 /* Force memory write invalidate off. If we leave it on,
14005 * then on 5700_BX chips we have to enable a workaround.
14006 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14007 * to match the cacheline size. The Broadcom driver has this
14008 * workaround but turns MWI off all the time and so never uses
14009 * it. This seems to suggest that the workaround is insufficient.
14011 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14012 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14013 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14015 /* Important! -- Make sure register accesses are byteswapped
14016 * correctly. Also, for those chips that require it, make
14017 * sure that indirect register accesses are enabled before
14018 * the first operation.
14020 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14022 tp->misc_host_ctrl |= (misc_ctrl_reg &
14023 MISC_HOST_CTRL_CHIPREV);
14024 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14025 tp->misc_host_ctrl);
14027 tg3_detect_asic_rev(tp, misc_ctrl_reg);
14029 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14030 * we need to disable memory and use config. cycles
14031 * only to access all registers. The 5702/03 chips
14032 * can mistakenly decode the special cycles from the
14033 * ICH chipsets as memory write cycles, causing corruption
14034 * of register and memory space. Only certain ICH bridges
14035 * will drive special cycles with non-zero data during the
14036 * address phase which can fall within the 5703's address
14037 * range. This is not an ICH bug as the PCI spec allows
14038 * non-zero address during special cycles. However, only
14039 * these ICH bridges are known to drive non-zero addresses
14040 * during special cycles.
14042 * Since special cycles do not cross PCI bridges, we only
14043 * enable this workaround if the 5703 is on the secondary
14044 * bus of these ICH bridges.
14046 if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14047 (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14048 static struct tg3_dev_id {
14052 } ich_chipsets[] = {
14053 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14055 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14057 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14059 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14063 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14064 struct pci_dev *bridge = NULL;
14066 while (pci_id->vendor != 0) {
14067 bridge = pci_get_device(pci_id->vendor, pci_id->device,
14073 if (pci_id->rev != PCI_ANY_ID) {
14074 if (bridge->revision > pci_id->rev)
14077 if (bridge->subordinate &&
14078 (bridge->subordinate->number ==
14079 tp->pdev->bus->number)) {
14080 tg3_flag_set(tp, ICH_WORKAROUND);
14081 pci_dev_put(bridge);
14087 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14088 static struct tg3_dev_id {
14091 } bridge_chipsets[] = {
14092 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14093 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14096 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14097 struct pci_dev *bridge = NULL;
14099 while (pci_id->vendor != 0) {
14100 bridge = pci_get_device(pci_id->vendor,
14107 if (bridge->subordinate &&
14108 (bridge->subordinate->number <=
14109 tp->pdev->bus->number) &&
14110 (bridge->subordinate->subordinate >=
14111 tp->pdev->bus->number)) {
14112 tg3_flag_set(tp, 5701_DMA_BUG);
14113 pci_dev_put(bridge);
14119 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14120 * DMA addresses > 40-bit. This bridge may have other additional
14121 * 57xx devices behind it in some 4-port NIC designs for example.
14122 * Any tg3 device found behind the bridge will also need the 40-bit
14125 if (tg3_flag(tp, 5780_CLASS)) {
14126 tg3_flag_set(tp, 40BIT_DMA_BUG);
14127 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14129 struct pci_dev *bridge = NULL;
14132 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14133 PCI_DEVICE_ID_SERVERWORKS_EPB,
14135 if (bridge && bridge->subordinate &&
14136 (bridge->subordinate->number <=
14137 tp->pdev->bus->number) &&
14138 (bridge->subordinate->subordinate >=
14139 tp->pdev->bus->number)) {
14140 tg3_flag_set(tp, 40BIT_DMA_BUG);
14141 pci_dev_put(bridge);
14147 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14148 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14149 tp->pdev_peer = tg3_find_peer(tp);
14151 /* Determine TSO capabilities */
14152 if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14153 ; /* Do nothing. HW bug. */
14154 else if (tg3_flag(tp, 57765_PLUS))
14155 tg3_flag_set(tp, HW_TSO_3);
14156 else if (tg3_flag(tp, 5755_PLUS) ||
14157 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14158 tg3_flag_set(tp, HW_TSO_2);
14159 else if (tg3_flag(tp, 5750_PLUS)) {
14160 tg3_flag_set(tp, HW_TSO_1);
14161 tg3_flag_set(tp, TSO_BUG);
14162 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14163 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14164 tg3_flag_clear(tp, TSO_BUG);
14165 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14166 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14167 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14168 tg3_flag_set(tp, TSO_BUG);
14169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14170 tp->fw_needed = FIRMWARE_TG3TSO5;
14172 tp->fw_needed = FIRMWARE_TG3TSO;
14175 /* Selectively allow TSO based on operating conditions */
14176 if (tg3_flag(tp, HW_TSO_1) ||
14177 tg3_flag(tp, HW_TSO_2) ||
14178 tg3_flag(tp, HW_TSO_3) ||
14180 /* For firmware TSO, assume ASF is disabled.
14181 * We'll disable TSO later if we discover ASF
14182 * is enabled in tg3_get_eeprom_hw_cfg().
14184 tg3_flag_set(tp, TSO_CAPABLE);
14186 tg3_flag_clear(tp, TSO_CAPABLE);
14187 tg3_flag_clear(tp, TSO_BUG);
14188 tp->fw_needed = NULL;
14191 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14192 tp->fw_needed = FIRMWARE_TG3;
14196 if (tg3_flag(tp, 5750_PLUS)) {
14197 tg3_flag_set(tp, SUPPORT_MSI);
14198 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14199 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14200 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14201 tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14202 tp->pdev_peer == tp->pdev))
14203 tg3_flag_clear(tp, SUPPORT_MSI);
14205 if (tg3_flag(tp, 5755_PLUS) ||
14206 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14207 tg3_flag_set(tp, 1SHOT_MSI);
14210 if (tg3_flag(tp, 57765_PLUS)) {
14211 tg3_flag_set(tp, SUPPORT_MSIX);
14212 tp->irq_max = TG3_IRQ_MAX_VECS;
14213 tg3_rss_init_dflt_indir_tbl(tp);
14217 if (tg3_flag(tp, 5755_PLUS))
14218 tg3_flag_set(tp, SHORT_DMA_BUG);
14220 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14221 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14223 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14224 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14225 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14226 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14228 if (tg3_flag(tp, 57765_PLUS) &&
14229 tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14230 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14232 if (!tg3_flag(tp, 5705_PLUS) ||
14233 tg3_flag(tp, 5780_CLASS) ||
14234 tg3_flag(tp, USE_JUMBO_BDFLAG))
14235 tg3_flag_set(tp, JUMBO_CAPABLE);
14237 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14240 if (pci_is_pcie(tp->pdev)) {
14243 tg3_flag_set(tp, PCI_EXPRESS);
14245 pci_read_config_word(tp->pdev,
14246 pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14248 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14249 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14251 tg3_flag_clear(tp, HW_TSO_2);
14252 tg3_flag_clear(tp, TSO_CAPABLE);
14254 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14255 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14256 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14257 tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14258 tg3_flag_set(tp, CLKREQ_BUG);
14259 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14260 tg3_flag_set(tp, L1PLLPD_EN);
14262 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14263 /* BCM5785 devices are effectively PCIe devices, and should
14264 * follow PCIe codepaths, but do not have a PCIe capabilities
14267 tg3_flag_set(tp, PCI_EXPRESS);
14268 } else if (!tg3_flag(tp, 5705_PLUS) ||
14269 tg3_flag(tp, 5780_CLASS)) {
14270 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14271 if (!tp->pcix_cap) {
14272 dev_err(&tp->pdev->dev,
14273 "Cannot find PCI-X capability, aborting\n");
14277 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14278 tg3_flag_set(tp, PCIX_MODE);
14281 /* If we have an AMD 762 or VIA K8T800 chipset, write
14282 * reordering to the mailbox registers done by the host
14283 * controller can cause major troubles. We read back from
14284 * every mailbox register write to force the writes to be
14285 * posted to the chip in order.
14287 if (pci_dev_present(tg3_write_reorder_chipsets) &&
14288 !tg3_flag(tp, PCI_EXPRESS))
14289 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14291 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14292 &tp->pci_cacheline_sz);
14293 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14294 &tp->pci_lat_timer);
14295 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14296 tp->pci_lat_timer < 64) {
14297 tp->pci_lat_timer = 64;
14298 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14299 tp->pci_lat_timer);
14302 /* Important! -- It is critical that the PCI-X hw workaround
14303 * situation is decided before the first MMIO register access.
14305 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14306 /* 5700 BX chips need to have their TX producer index
14307 * mailboxes written twice to workaround a bug.
14309 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14311 /* If we are in PCI-X mode, enable register write workaround.
14313 * The workaround is to use indirect register accesses
14314 * for all chip writes not to mailbox registers.
14316 if (tg3_flag(tp, PCIX_MODE)) {
14319 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14321 /* The chip can have its power management PCI config
14322 * space registers clobbered due to this bug.
14323 * So explicitly force the chip into D0 here.
14325 pci_read_config_dword(tp->pdev,
14326 tp->pm_cap + PCI_PM_CTRL,
14328 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14329 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14330 pci_write_config_dword(tp->pdev,
14331 tp->pm_cap + PCI_PM_CTRL,
14334 /* Also, force SERR#/PERR# in PCI command. */
14335 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14336 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14337 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14341 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14342 tg3_flag_set(tp, PCI_HIGH_SPEED);
14343 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14344 tg3_flag_set(tp, PCI_32BIT);
14346 /* Chip-specific fixup from Broadcom driver */
14347 if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14348 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14349 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14350 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14353 /* Default fast path register access methods */
14354 tp->read32 = tg3_read32;
14355 tp->write32 = tg3_write32;
14356 tp->read32_mbox = tg3_read32;
14357 tp->write32_mbox = tg3_write32;
14358 tp->write32_tx_mbox = tg3_write32;
14359 tp->write32_rx_mbox = tg3_write32;
14361 /* Various workaround register access methods */
14362 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14363 tp->write32 = tg3_write_indirect_reg32;
14364 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14365 (tg3_flag(tp, PCI_EXPRESS) &&
14366 tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14368 * Back to back register writes can cause problems on these
14369 * chips, the workaround is to read back all reg writes
14370 * except those to mailbox regs.
14372 * See tg3_write_indirect_reg32().
14374 tp->write32 = tg3_write_flush_reg32;
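/* A minimal sketch of what such a flush-on-write accessor looks like,
 * assuming the usual MMIO helpers (illustrative only; the real
 * tg3_write_flush_reg32() is defined earlier in this file):
 *
 *	static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
 *	{
 *		writel(val, tp->regs + off);
 *		readl(tp->regs + off);		// read back to force posting
 *	}
 */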
14377 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14378 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14379 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14380 tp->write32_rx_mbox = tg3_write_flush_reg32;
14383 if (tg3_flag(tp, ICH_WORKAROUND)) {
14384 tp->read32 = tg3_read_indirect_reg32;
14385 tp->write32 = tg3_write_indirect_reg32;
14386 tp->read32_mbox = tg3_read_indirect_mbox;
14387 tp->write32_mbox = tg3_write_indirect_mbox;
14388 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14389 tp->write32_rx_mbox = tg3_write_indirect_mbox;
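/* For reference, an indirect register write is essentially a pair of
 * config-space accesses through the register window (sketch only;
 * the real helpers are defined earlier in this file):
 *
 *	spin_lock_irqsave(&tp->indirect_lock, flags);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
 *	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
 *	spin_unlock_irqrestore(&tp->indirect_lock, flags);
 */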
14394 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14395 pci_cmd &= ~PCI_COMMAND_MEMORY;
14396 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14398 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14399 tp->read32_mbox = tg3_read32_mbox_5906;
14400 tp->write32_mbox = tg3_write32_mbox_5906;
14401 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14402 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14405 if (tp->write32 == tg3_write_indirect_reg32 ||
14406 (tg3_flag(tp, PCIX_MODE) &&
14407 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14408 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14409 tg3_flag_set(tp, SRAM_USE_CONFIG);
14411 /* The memory arbiter has to be enabled in order for SRAM accesses
14412 * to succeed. Normally on powerup the tg3 chip firmware will make
14413 * sure it is enabled, but other entities such as system netboot
14414 * code might disable it.
14416 val = tr32(MEMARB_MODE);
14417 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14419 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14420 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14421 tg3_flag(tp, 5780_CLASS)) {
14422 if (tg3_flag(tp, PCIX_MODE)) {
14423 pci_read_config_dword(tp->pdev,
14424 tp->pcix_cap + PCI_X_STATUS,
14426 tp->pci_fn = val & 0x7;
14428 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14429 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14430 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14431 NIC_SRAM_CPMUSTAT_SIG) {
14432 tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14433 tp->pci_fn = tp->pci_fn ? 1 : 0;
14435 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14436 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14437 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14438 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14439 NIC_SRAM_CPMUSTAT_SIG) {
14440 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14441 TG3_CPMU_STATUS_FSHFT_5719;
14445 /* Get eeprom hw config before calling tg3_set_power_state().
14446 * In particular, the TG3_FLAG_IS_NIC flag must be
14447 * determined before calling tg3_set_power_state() so that
14448 * we know whether or not to switch out of Vaux power.
14449 * When the flag is set, it means that GPIO1 is used for eeprom
14450 * write protect and also implies that it is a LOM where GPIOs
14451 * are not used to switch power.
14453 tg3_get_eeprom_hw_cfg(tp);
14455 if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14456 tg3_flag_clear(tp, TSO_CAPABLE);
14457 tg3_flag_clear(tp, TSO_BUG);
14458 tp->fw_needed = NULL;
14461 if (tg3_flag(tp, ENABLE_APE)) {
14462 /* Allow reads and writes to the
14463 * APE register and memory space.
14465 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14466 PCISTATE_ALLOW_APE_SHMEM_WR |
14467 PCISTATE_ALLOW_APE_PSPACE_WR;
14468 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14471 tg3_ape_lock_init(tp);
14474 /* Set up tp->grc_local_ctrl before calling
14475 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
14476 * will bring 5700's external PHY out of reset.
14477 * It is also used as eeprom write protect on LOMs.
14479 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14480 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14481 tg3_flag(tp, EEPROM_WRITE_PROT))
14482 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14483 GRC_LCLCTRL_GPIO_OUTPUT1);
14484 /* Unused GPIO3 must be driven as output on 5752 because there
14485 * are no pull-up resistors on unused GPIO pins.
14487 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14488 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14490 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14491 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14492 tg3_flag(tp, 57765_CLASS))
14493 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14495 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14496 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14497 /* Turn off the debug UART. */
14498 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14499 if (tg3_flag(tp, IS_NIC))
14500 /* Keep VMain power. */
14501 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14502 GRC_LCLCTRL_GPIO_OUTPUT0;
14505 /* Switch out of Vaux if it is a NIC */
14506 tg3_pwrsrc_switch_to_vmain(tp);
14508 /* Derive initial jumbo mode from MTU assigned in
14509 * ether_setup() via the alloc_etherdev() call
14511 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14512 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14514 /* Determine WakeOnLan speed to use. */
14515 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14516 tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14517 tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14518 tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14519 tg3_flag_clear(tp, WOL_SPEED_100MB);
14521 tg3_flag_set(tp, WOL_SPEED_100MB);
14524 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14525 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14527 /* A few boards don't want Ethernet@WireSpeed phy feature */
14528 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14529 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14530 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14531 (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14532 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14533 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14534 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14536 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14537 GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14538 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14539 if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14540 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14542 if (tg3_flag(tp, 5705_PLUS) &&
14543 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14544 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14545 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14546 !tg3_flag(tp, 57765_PLUS)) {
14547 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14548 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14549 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14550 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14551 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14552 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14553 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14554 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14555 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14557 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14560 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14561 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14562 tp->phy_otp = tg3_read_otp_phycfg(tp);
14563 if (tp->phy_otp == 0)
14564 tp->phy_otp = TG3_OTP_DEFAULT;
14567 if (tg3_flag(tp, CPMU_PRESENT))
14568 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14570 tp->mi_mode = MAC_MI_MODE_BASE;
14572 tp->coalesce_mode = 0;
14573 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14574 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14575 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14577 /* Set these bits to enable statistics workaround. */
14578 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14579 tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14580 tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14581 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14582 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14585 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14586 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14587 tg3_flag_set(tp, USE_PHYLIB);
14589 err = tg3_mdio_init(tp);
14593 /* Initialize data/descriptor byte/word swapping. */
14594 val = tr32(GRC_MODE);
14595 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14596 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14597 GRC_MODE_WORD_SWAP_B2HRX_DATA |
14598 GRC_MODE_B2HRX_ENABLE |
14599 GRC_MODE_HTX2B_ENABLE |
14600 GRC_MODE_HOST_STACKUP);
14602 val &= GRC_MODE_HOST_STACKUP;
14604 tw32(GRC_MODE, val | tp->grc_mode);
14606 tg3_switch_clocks(tp);
14608 /* Clear this out for sanity. */
14609 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14611 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14613 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14614 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14615 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14617 if (chiprevid == CHIPREV_ID_5701_A0 ||
14618 chiprevid == CHIPREV_ID_5701_B0 ||
14619 chiprevid == CHIPREV_ID_5701_B2 ||
14620 chiprevid == CHIPREV_ID_5701_B5) {
14621 void __iomem *sram_base;
14623 /* Write some dummy words into the SRAM status block
14624 * area, see if it reads back correctly. If the return
14625 * value is bad, force enable the PCIX workaround.
14627 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14629 writel(0x00000000, sram_base);
14630 writel(0x00000000, sram_base + 4);
14631 writel(0xffffffff, sram_base + 4);
14632 if (readl(sram_base) != 0x00000000)
14633 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14638 tg3_nvram_init(tp);
14640 grc_misc_cfg = tr32(GRC_MISC_CFG);
14641 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14643 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14644 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14645 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14646 tg3_flag_set(tp, IS_5788);
14648 if (!tg3_flag(tp, IS_5788) &&
14649 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14650 tg3_flag_set(tp, TAGGED_STATUS);
14651 if (tg3_flag(tp, TAGGED_STATUS)) {
14652 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14653 HOSTCC_MODE_CLRTICK_TXBD);
14655 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14656 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14657 tp->misc_host_ctrl);
14660 /* Preserve the APE MAC_MODE bits */
14661 if (tg3_flag(tp, ENABLE_APE))
14662 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14666 /* these are limited to 10/100 only */
14667 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14668 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14669 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14670 tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14671 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14672 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14673 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14674 (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14675 (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14676 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14677 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14678 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14679 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14680 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14681 (tp->phy_flags & TG3_PHYFLG_IS_FET))
14682 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14684 err = tg3_phy_probe(tp);
14686 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14687 /* ... but do not return immediately ... */
14692 tg3_read_fw_ver(tp);
14694 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14695 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14697 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14698 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14700 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14703 /* 5700 {AX,BX} chips have a broken status block link
14704 * change bit implementation, so we must use the
14705 * status register in those cases.
14707 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14708 tg3_flag_set(tp, USE_LINKCHG_REG);
14710 tg3_flag_clear(tp, USE_LINKCHG_REG);
14712 /* The led_ctrl is set during tg3_phy_probe, here we might
14713 * have to force the link status polling mechanism based
14714 * upon subsystem IDs.
14716 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14717 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14718 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14719 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14720 tg3_flag_set(tp, USE_LINKCHG_REG);
14723 /* For all SERDES we poll the MAC status register. */
14724 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14725 tg3_flag_set(tp, POLL_SERDES);
14727 tg3_flag_clear(tp, POLL_SERDES);
14729 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14730 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14731 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14732 tg3_flag(tp, PCIX_MODE)) {
14733 tp->rx_offset = NET_SKB_PAD;
14734 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14735 tp->rx_copy_thresh = ~(u16)0;
14739 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14740 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14741 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14743 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14745 /* Increment the rx prod index on the rx std ring by at most
14746 * 8 for these chips to work around hw errata.
14748 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14749 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14750 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14751 tp->rx_std_max_post = 8;
14753 if (tg3_flag(tp, ASPM_WORKAROUND))
14754 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14755 PCIE_PWR_MGMT_L1_THRESH_MSK;
14760 #ifdef CONFIG_SPARC
14761 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14763 struct net_device *dev = tp->dev;
14764 struct pci_dev *pdev = tp->pdev;
14765 struct device_node *dp = pci_device_to_OF_node(pdev);
14766 const unsigned char *addr;
14769 addr = of_get_property(dp, "local-mac-address", &len);
14770 if (addr && len == 6) {
14771 memcpy(dev->dev_addr, addr, 6);
14772 memcpy(dev->perm_addr, dev->dev_addr, 6);
14778 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14780 struct net_device *dev = tp->dev;
14782 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14783 memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14788 static int __devinit tg3_get_device_address(struct tg3 *tp)
14790 struct net_device *dev = tp->dev;
14791 u32 hi, lo, mac_offset;
14794 #ifdef CONFIG_SPARC
14795 if (!tg3_get_macaddr_sparc(tp))
14800 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14801 tg3_flag(tp, 5780_CLASS)) {
14802 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14804 if (tg3_nvram_lock(tp))
14805 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14807 tg3_nvram_unlock(tp);
14808 } else if (tg3_flag(tp, 5717_PLUS)) {
14809 if (tp->pci_fn & 1)
14811 if (tp->pci_fn > 1)
14812 mac_offset += 0x18c;
14813 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14816 /* First try to get it from MAC address mailbox. */
14817 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14818 if ((hi >> 16) == 0x484b) {
14819 dev->dev_addr[0] = (hi >> 8) & 0xff;
14820 dev->dev_addr[1] = (hi >> 0) & 0xff;
14822 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14823 dev->dev_addr[2] = (lo >> 24) & 0xff;
14824 dev->dev_addr[3] = (lo >> 16) & 0xff;
14825 dev->dev_addr[4] = (lo >> 8) & 0xff;
14826 dev->dev_addr[5] = (lo >> 0) & 0xff;
14828 /* Some old bootcode may report a 0 MAC address in SRAM */
14829 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14832 /* Next, try NVRAM. */
14833 if (!tg3_flag(tp, NO_NVRAM) &&
14834 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14835 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14836 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14837 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14839 /* Finally just fetch it out of the MAC control regs. */
14841 hi = tr32(MAC_ADDR_0_HIGH);
14842 lo = tr32(MAC_ADDR_0_LOW);
14844 dev->dev_addr[5] = lo & 0xff;
14845 dev->dev_addr[4] = (lo >> 8) & 0xff;
14846 dev->dev_addr[3] = (lo >> 16) & 0xff;
14847 dev->dev_addr[2] = (lo >> 24) & 0xff;
14848 dev->dev_addr[1] = hi & 0xff;
14849 dev->dev_addr[0] = (hi >> 8) & 0xff;
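/* For example, MAC_ADDR_0_HIGH = 0x00001a2b and MAC_ADDR_0_LOW =
 * 0x3c4d5e6f decode to the station address 1a:2b:3c:4d:5e:6f.
 */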
14853 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14854 #ifdef CONFIG_SPARC
14855 if (!tg3_get_default_macaddr_sparc(tp))
14860 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14864 #define BOUNDARY_SINGLE_CACHELINE 1
14865 #define BOUNDARY_MULTI_CACHELINE 2
14867 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14869 int cacheline_size;
14873 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14875 cacheline_size = 1024;
14877 cacheline_size = (int) byte * 4;
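/* PCI_CACHE_LINE_SIZE is reported in units of 32-bit words, so a
 * value of 0x10 corresponds to a 64-byte cache line.
 */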
14879 /* On 5703 and later chips, the boundary bits have no
14882 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14883 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14884 !tg3_flag(tp, PCI_EXPRESS))
14887 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14888 goal = BOUNDARY_MULTI_CACHELINE;
14890 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14891 goal = BOUNDARY_SINGLE_CACHELINE;
14897 if (tg3_flag(tp, 57765_PLUS)) {
14898 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14905 /* PCI controllers on most RISC systems tend to disconnect
14906 * when a device tries to burst across a cache-line boundary.
14907 * Therefore, letting tg3 do so just wastes PCI bandwidth.
14909 * Unfortunately, for PCI-E there are only limited
14910 * write-side controls for this, and thus for reads
14911 * we will still get the disconnects. We'll also waste
14912 * these PCI cycles for both read and write for chips
14913 * other than 5700 and 5701 which do not implement the
14916 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14917 switch (cacheline_size) {
14922 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14923 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14924 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14926 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14927 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14932 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14933 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14937 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14938 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14941 } else if (tg3_flag(tp, PCI_EXPRESS)) {
14942 switch (cacheline_size) {
14946 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14947 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14948 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14954 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14955 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14959 switch (cacheline_size) {
14961 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14962 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14963 DMA_RWCTRL_WRITE_BNDRY_16);
14968 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14969 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14970 DMA_RWCTRL_WRITE_BNDRY_32);
14975 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14976 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14977 DMA_RWCTRL_WRITE_BNDRY_64);
14982 if (goal == BOUNDARY_SINGLE_CACHELINE) {
14983 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14984 DMA_RWCTRL_WRITE_BNDRY_128);
14989 val |= (DMA_RWCTRL_READ_BNDRY_256 |
14990 DMA_RWCTRL_WRITE_BNDRY_256);
14993 val |= (DMA_RWCTRL_READ_BNDRY_512 |
14994 DMA_RWCTRL_WRITE_BNDRY_512);
14998 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14999 DMA_RWCTRL_WRITE_BNDRY_1024);
15008 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15010 struct tg3_internal_buffer_desc test_desc;
15011 u32 sram_dma_descs;
15014 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15016 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15017 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15018 tw32(RDMAC_STATUS, 0);
15019 tw32(WDMAC_STATUS, 0);
15021 tw32(BUFMGR_MODE, 0);
15022 tw32(FTQ_RESET, 0);
15024 test_desc.addr_hi = ((u64) buf_dma) >> 32;
15025 test_desc.addr_lo = buf_dma & 0xffffffff;
15026 test_desc.nic_mbuf = 0x00002100;
15027 test_desc.len = size;
15030 * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
15031 * the *second* time the tg3 driver was getting loaded after an
15034 * Broadcom tells me:
15035 * ...the DMA engine is connected to the GRC block and a DMA
15036 * reset may affect the GRC block in some unpredictable way...
15037 * The behavior of resets to individual blocks has not been tested.
15039 * Broadcom noted the GRC reset will also reset all sub-components.
15042 test_desc.cqid_sqid = (13 << 8) | 2;
15044 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15047 test_desc.cqid_sqid = (16 << 8) | 7;
15049 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15052 test_desc.flags = 0x00000005;
15054 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15057 val = *(((u32 *)&test_desc) + i);
15058 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15059 sram_dma_descs + (i * sizeof(u32)));
15060 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15062 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15065 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15067 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15070 for (i = 0; i < 40; i++) {
15074 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15076 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15077 if ((val & 0xffff) == sram_dma_descs) {
15088 #define TEST_BUFFER_SIZE 0x2000
15090 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15091 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15095 static int __devinit tg3_test_dma(struct tg3 *tp)
15097 dma_addr_t buf_dma;
15098 u32 *buf, saved_dma_rwctrl;
15101 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15102 &buf_dma, GFP_KERNEL);
15108 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15109 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15111 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15113 if (tg3_flag(tp, 57765_PLUS))
15116 if (tg3_flag(tp, PCI_EXPRESS)) {
15117 /* DMA read watermark not used on PCIE */
15118 tp->dma_rwctrl |= 0x00180000;
15119 } else if (!tg3_flag(tp, PCIX_MODE)) {
15120 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15121 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15122 tp->dma_rwctrl |= 0x003f0000;
15124 tp->dma_rwctrl |= 0x003f000f;
15126 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15127 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15128 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15129 u32 read_water = 0x7;
15131 /* If the 5704 is behind the EPB bridge, we can
15132 * do the less restrictive ONE_DMA workaround for
15133 * better performance.
15135 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15136 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15137 tp->dma_rwctrl |= 0x8000;
15138 else if (ccval == 0x6 || ccval == 0x7)
15139 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15141 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15143 /* Set bit 23 to enable PCIX hw bug fix */
15145 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15146 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15148 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15149 /* 5780 always in PCIX mode */
15150 tp->dma_rwctrl |= 0x00144000;
15151 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15152 /* 5714 always in PCIX mode */
15153 tp->dma_rwctrl |= 0x00148000;
15155 tp->dma_rwctrl |= 0x001b000f;
15159 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15160 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15161 tp->dma_rwctrl &= 0xfffffff0;
15163 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15164 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15165 /* Remove this if it causes problems for some boards. */
15166 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15168 /* On 5700/5701 chips, we need to set this bit.
15169 * Otherwise the chip will issue cacheline transactions
15170 * to streamable DMA memory without all of the byte
15171 * enables turned on. This is an error on several
15172 * RISC PCI controllers, in particular sparc64.
15174 * On 5703/5704 chips, this bit has been reassigned
15175 * a different meaning. In particular, it is used
15176 * on those chips to enable a PCI-X workaround.
15178 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15181 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15184 /* Unneeded, already done by tg3_get_invariants. */
15185 tg3_switch_clocks(tp);
15188 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15189 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15192 /* It is best to perform DMA test with maximum write burst size
15193 * to expose the 5700/5701 write DMA bug.
15195 saved_dma_rwctrl = tp->dma_rwctrl;
15196 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15197 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15202 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15205 /* Send the buffer to the chip. */
15206 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15208 dev_err(&tp->pdev->dev,
15209 "%s: Buffer write failed. err = %d\n",
15215 /* Validate that the data reached card RAM correctly. */
15216 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15218 tg3_read_mem(tp, 0x2100 + (i*4), &val);
15219 if (le32_to_cpu(val) != p[i]) {
15220 dev_err(&tp->pdev->dev,
15221 "%s: Buffer corrupted on device! "
15222 "(%d != %d)\n", __func__, val, i);
15223 /* ret = -ENODEV here? */
15228 /* Now read it back. */
15229 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15231 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15232 "err = %d\n", __func__, ret);
15237 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15241 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15242 DMA_RWCTRL_WRITE_BNDRY_16) {
15243 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15244 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15245 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15248 dev_err(&tp->pdev->dev,
15249 "%s: Buffer corrupted on read back! "
15250 "(%d != %d)\n", __func__, p[i], i);
15256 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15262 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15263 DMA_RWCTRL_WRITE_BNDRY_16) {
15264 /* DMA test passed without adjusting DMA boundary,
15265 * now look for chipsets that are known to expose the
15266 * DMA bug without failing the test.
15268 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15269 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15270 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15272 /* Safe to use the calculated DMA boundary. */
15273 tp->dma_rwctrl = saved_dma_rwctrl;
15276 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15280 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15285 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15287 if (tg3_flag(tp, 57765_PLUS)) {
15288 tp->bufmgr_config.mbuf_read_dma_low_water =
15289 DEFAULT_MB_RDMA_LOW_WATER_5705;
15290 tp->bufmgr_config.mbuf_mac_rx_low_water =
15291 DEFAULT_MB_MACRX_LOW_WATER_57765;
15292 tp->bufmgr_config.mbuf_high_water =
15293 DEFAULT_MB_HIGH_WATER_57765;
15295 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15296 DEFAULT_MB_RDMA_LOW_WATER_5705;
15297 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15298 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15299 tp->bufmgr_config.mbuf_high_water_jumbo =
15300 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15301 } else if (tg3_flag(tp, 5705_PLUS)) {
15302 tp->bufmgr_config.mbuf_read_dma_low_water =
15303 DEFAULT_MB_RDMA_LOW_WATER_5705;
15304 tp->bufmgr_config.mbuf_mac_rx_low_water =
15305 DEFAULT_MB_MACRX_LOW_WATER_5705;
15306 tp->bufmgr_config.mbuf_high_water =
15307 DEFAULT_MB_HIGH_WATER_5705;
15308 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15309 tp->bufmgr_config.mbuf_mac_rx_low_water =
15310 DEFAULT_MB_MACRX_LOW_WATER_5906;
15311 tp->bufmgr_config.mbuf_high_water =
15312 DEFAULT_MB_HIGH_WATER_5906;
15315 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15316 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15317 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15318 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15319 tp->bufmgr_config.mbuf_high_water_jumbo =
15320 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15322 tp->bufmgr_config.mbuf_read_dma_low_water =
15323 DEFAULT_MB_RDMA_LOW_WATER;
15324 tp->bufmgr_config.mbuf_mac_rx_low_water =
15325 DEFAULT_MB_MACRX_LOW_WATER;
15326 tp->bufmgr_config.mbuf_high_water =
15327 DEFAULT_MB_HIGH_WATER;
15329 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15330 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15331 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15332 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15333 tp->bufmgr_config.mbuf_high_water_jumbo =
15334 DEFAULT_MB_HIGH_WATER_JUMBO;
15337 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15338 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15341 static char * __devinit tg3_phy_string(struct tg3 *tp)
15343 switch (tp->phy_id & TG3_PHY_ID_MASK) {
15344 case TG3_PHY_ID_BCM5400: return "5400";
15345 case TG3_PHY_ID_BCM5401: return "5401";
15346 case TG3_PHY_ID_BCM5411: return "5411";
15347 case TG3_PHY_ID_BCM5701: return "5701";
15348 case TG3_PHY_ID_BCM5703: return "5703";
15349 case TG3_PHY_ID_BCM5704: return "5704";
15350 case TG3_PHY_ID_BCM5705: return "5705";
15351 case TG3_PHY_ID_BCM5750: return "5750";
15352 case TG3_PHY_ID_BCM5752: return "5752";
15353 case TG3_PHY_ID_BCM5714: return "5714";
15354 case TG3_PHY_ID_BCM5780: return "5780";
15355 case TG3_PHY_ID_BCM5755: return "5755";
15356 case TG3_PHY_ID_BCM5787: return "5787";
15357 case TG3_PHY_ID_BCM5784: return "5784";
15358 case TG3_PHY_ID_BCM5756: return "5722/5756";
15359 case TG3_PHY_ID_BCM5906: return "5906";
15360 case TG3_PHY_ID_BCM5761: return "5761";
15361 case TG3_PHY_ID_BCM5718C: return "5718C";
15362 case TG3_PHY_ID_BCM5718S: return "5718S";
15363 case TG3_PHY_ID_BCM57765: return "57765";
15364 case TG3_PHY_ID_BCM5719C: return "5719C";
15365 case TG3_PHY_ID_BCM5720C: return "5720C";
15366 case TG3_PHY_ID_BCM8002: return "8002/serdes";
15367 case 0: return "serdes";
15368 default: return "unknown";
15372 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15374 if (tg3_flag(tp, PCI_EXPRESS)) {
15375 strcpy(str, "PCI Express");
15377 } else if (tg3_flag(tp, PCIX_MODE)) {
15378 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15380 strcpy(str, "PCIX:");
15382 if ((clock_ctrl == 7) ||
15383 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15384 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15385 strcat(str, "133MHz");
15386 else if (clock_ctrl == 0)
15387 strcat(str, "33MHz");
15388 else if (clock_ctrl == 2)
15389 strcat(str, "50MHz");
15390 else if (clock_ctrl == 4)
15391 strcat(str, "66MHz");
15392 else if (clock_ctrl == 6)
15393 strcat(str, "100MHz");
15395 strcpy(str, "PCI:");
15396 if (tg3_flag(tp, PCI_HIGH_SPEED))
15397 strcat(str, "66MHz");
15399 strcat(str, "33MHz");
15401 if (tg3_flag(tp, PCI_32BIT))
15402 strcat(str, ":32-bit");
15404 strcat(str, ":64-bit");
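/* For example, a PCI-X board clocked at 133MHz on a 64-bit bus ends
 * up with the string "PCIX:133MHz:64-bit".
 */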
15408 static void __devinit tg3_init_coal(struct tg3 *tp)
15410 struct ethtool_coalesce *ec = &tp->coal;
15412 memset(ec, 0, sizeof(*ec));
15413 ec->cmd = ETHTOOL_GCOALESCE;
15414 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15415 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15416 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15417 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15418 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15419 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15420 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15421 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15422 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15424 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15425 HOSTCC_MODE_CLRTICK_TXBD)) {
15426 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15427 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15428 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15429 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15432 if (tg3_flag(tp, 5705_PLUS)) {
15433 ec->rx_coalesce_usecs_irq = 0;
15434 ec->tx_coalesce_usecs_irq = 0;
15435 ec->stats_block_coalesce_usecs = 0;
15439 static int __devinit tg3_init_one(struct pci_dev *pdev,
15440 const struct pci_device_id *ent)
15442 struct net_device *dev;
15444 int i, err, pm_cap;
15445 u32 sndmbx, rcvmbx, intmbx;
15447 u64 dma_mask, persist_dma_mask;
15448 netdev_features_t features = 0;
15450 printk_once(KERN_INFO "%s\n", version);
15452 err = pci_enable_device(pdev);
15454 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15458 err = pci_request_regions(pdev, DRV_MODULE_NAME);
15460 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15461 goto err_out_disable_pdev;
15464 pci_set_master(pdev);
15466 /* Find power-management capability. */
15467 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15469 dev_err(&pdev->dev,
15470 "Cannot find Power Management capability, aborting\n");
15472 goto err_out_free_res;
15475 err = pci_set_power_state(pdev, PCI_D0);
15477 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15478 goto err_out_free_res;
15481 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15484 goto err_out_power_down;
15487 SET_NETDEV_DEV(dev, &pdev->dev);
15489 tp = netdev_priv(dev);
15492 tp->pm_cap = pm_cap;
15493 tp->rx_mode = TG3_DEF_RX_MODE;
15494 tp->tx_mode = TG3_DEF_TX_MODE;
15497 tp->msg_enable = tg3_debug;
15499 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15501 /* The word/byte swap controls here control register access byte
15502 * swapping. DMA data byte swapping is controlled in the GRC_MODE
15505 tp->misc_host_ctrl =
15506 MISC_HOST_CTRL_MASK_PCI_INT |
15507 MISC_HOST_CTRL_WORD_SWAP |
15508 MISC_HOST_CTRL_INDIR_ACCESS |
15509 MISC_HOST_CTRL_PCISTATE_RW;
15511 /* The NONFRM (non-frame) byte/word swap controls take effect
15512 * on descriptor entries, anything which isn't packet data.
15514 * The StrongARM chips on the board (one for tx, one for rx)
15515 * are running in big-endian mode.
15517 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15518 GRC_MODE_WSWAP_NONFRM_DATA);
15519 #ifdef __BIG_ENDIAN
15520 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15522 spin_lock_init(&tp->lock);
15523 spin_lock_init(&tp->indirect_lock);
15524 INIT_WORK(&tp->reset_task, tg3_reset_task);
15526 tp->regs = pci_ioremap_bar(pdev, BAR_0);
15528 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15530 goto err_out_free_dev;
15533 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15534 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15535 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15536 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15537 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15538 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15539 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15540 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15541 tg3_flag_set(tp, ENABLE_APE);
15542 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15543 if (!tp->aperegs) {
15544 dev_err(&pdev->dev,
15545 "Cannot map APE registers, aborting\n");
15547 goto err_out_iounmap;
15551 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15552 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15554 dev->ethtool_ops = &tg3_ethtool_ops;
15555 dev->watchdog_timeo = TG3_TX_TIMEOUT;
15556 dev->netdev_ops = &tg3_netdev_ops;
15557 dev->irq = pdev->irq;
15559 err = tg3_get_invariants(tp);
15561 dev_err(&pdev->dev,
15562 "Problem fetching invariants of chip, aborting\n");
15563 goto err_out_apeunmap;
15566 /* The EPB bridge inside 5714, 5715, and 5780 and any
15567 * device behind the EPB cannot support DMA addresses > 40-bit.
15568 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15569 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15570 * do DMA address check in tg3_start_xmit().
15572 if (tg3_flag(tp, IS_5788))
15573 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15574 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15575 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15576 #ifdef CONFIG_HIGHMEM
15577 dma_mask = DMA_BIT_MASK(64);
15580 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15582 /* Configure DMA attributes. */
15583 if (dma_mask > DMA_BIT_MASK(32)) {
15584 err = pci_set_dma_mask(pdev, dma_mask);
15586 features |= NETIF_F_HIGHDMA;
15587 err = pci_set_consistent_dma_mask(pdev,
15590 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15591 "DMA for consistent allocations\n");
15592 goto err_out_apeunmap;
15596 if (err || dma_mask == DMA_BIT_MASK(32)) {
15597 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15599 dev_err(&pdev->dev,
15600 "No usable DMA configuration, aborting\n");
15601 goto err_out_apeunmap;
15605 tg3_init_bufmgr_config(tp);
15607 features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15609 /* 5700 B0 chips do not support checksumming correctly due
15610 * to hardware bugs.
15612 if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15613 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15615 if (tg3_flag(tp, 5755_PLUS))
15616 features |= NETIF_F_IPV6_CSUM;
15619 /* TSO is on by default on chips that support hardware TSO.
15620 * Firmware TSO on older chips gives lower performance, so it
15621 * is off by default, but can be enabled using ethtool.
15622 */
15623 if ((tg3_flag(tp, HW_TSO_1) ||
15624 tg3_flag(tp, HW_TSO_2) ||
15625 tg3_flag(tp, HW_TSO_3)) &&
15626 (features & NETIF_F_IP_CSUM))
15627 features |= NETIF_F_TSO;
15628 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15629 if (features & NETIF_F_IPV6_CSUM)
15630 features |= NETIF_F_TSO6;
15631 if (tg3_flag(tp, HW_TSO_3) ||
15632 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15633 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15634 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15635 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15636 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15637 features |= NETIF_F_TSO_ECN;
15638 }
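/* Mirror the accumulated offload set into vlan_features so that VLAN
 * devices stacked on top of this interface inherit the same offloads.
 */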
15640 dev->features |= features;
15641 dev->vlan_features |= features;
15643 /*
15644 * Add loopback capability only for a subset of devices that support
15645 * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15646 * loopback for the remaining devices.
15647 */
15648 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15649 !tg3_flag(tp, CPMU_PRESENT))
15650 /* Add the loopback capability */
15651 features |= NETIF_F_LOOPBACK;
15653 dev->hw_features |= features;
15655 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15656 !tg3_flag(tp, TSO_CAPABLE) &&
15657 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15658 tg3_flag_set(tp, MAX_RXPEND_64);
15659 tp->rx_pending = 63;
15660 }
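/* The permanent MAC address may come from NVRAM, the chip's MAC address
 * registers or, on some platforms, system firmware; tg3_get_device_address()
 * tries these sources in turn.
 */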
15662 err = tg3_get_device_address(tp);
15663 if (err) {
15664 dev_err(&pdev->dev,
15665 "Could not obtain valid ethernet address, aborting\n");
15666 goto err_out_apeunmap;
15667 }
15669 /*
15670 * Reset the chip in case a UNDI or EFI driver did not shut it down;
15671 * otherwise the DMA self test will enable WDMAC and we'll see (spurious)
15672 * pending DMA on the PCI bus at that point.
15673 */
15674 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15675 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15676 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15677 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15678 }
15680 err = tg3_test_dma(tp);
15681 if (err) {
15682 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15683 goto err_out_apeunmap;
15684 }
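/* Host mailbox layout: one interrupt mailbox, one RX-return consumer
 * index and one TX producer index per interrupt vector. The loop below
 * hands out per-vector mailbox offsets starting from vector 0.
 */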
15686 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15687 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15688 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15689 for (i = 0; i < tp->irq_max; i++) {
15690 struct tg3_napi *tnapi = &tp->napi[i];
15692 tnapi->tp = tp;
15693 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15695 tnapi->int_mbox = intmbx;
15696 if (i <= 4)
15697 intmbx += 0x8;
15698 else
15699 intmbx += 0x4;
15701 tnapi->consmbox = rcvmbx;
15702 tnapi->prodmbox = sndmbx;
15704 if (i)
15705 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15706 else
15707 tnapi->coal_now = HOSTCC_MODE_NOW;
15709 if (!tg3_flag(tp, SUPPORT_MSIX))
15710 break;
15712 /*
15713 * If we support MSIX, we'll be using RSS. If we're using
15714 * RSS, the first vector only handles link interrupts and the
15715 * remaining vectors handle rx and tx interrupts. Reuse the
15716 * mailbox values for the next iteration. The values we setup
15717 * above are still useful for the single vectored mode.
15718 */
15732 pci_set_drvdata(pdev, dev);
15734 if (tg3_flag(tp, 5717_PLUS)) {
15735 /* Resume a low-power mode */
15736 tg3_frob_aux_power(tp, false);
15737 }
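/* The periodic driver timer handles statistics updates, link state
 * polling and the ASF heartbeat once the interface is up.
 */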
15739 tg3_timer_init(tp);
15741 err = register_netdev(dev);
15742 if (err) {
15743 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15744 goto err_out_apeunmap;
15745 }
15747 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15748 tp->board_part_number,
15749 tp->pci_chip_rev_id,
15750 tg3_bus_string(tp, str),
15751 dev->dev_addr);
15753 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15754 struct phy_device *phydev;
15755 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15757 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15758 phydev->drv->name, dev_name(&phydev->dev));
15762 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15763 ethtype = "10/100Base-TX";
15764 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15765 ethtype = "1000Base-SX";
15766 else
15767 ethtype = "10/100/1000Base-T";
15769 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15770 "(WireSpeed[%d], EEE[%d])\n",
15771 tg3_phy_string(tp), ethtype,
15772 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15773 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15774 }
15776 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15777 (dev->features & NETIF_F_RXCSUM) != 0,
15778 tg3_flag(tp, USE_LINKCHG_REG) != 0,
15779 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15780 tg3_flag(tp, ENABLE_ASF) != 0,
15781 tg3_flag(tp, TSO_CAPABLE) != 0);
15782 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15783 tp->dma_rwctrl,
15784 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15785 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15787 pci_save_state(pdev);
15789 return 0;
15791 err_out_apeunmap:
15792 if (tp->aperegs) {
15793 iounmap(tp->aperegs);
15794 tp->aperegs = NULL;
15795 }
15797 err_out_iounmap:
15798 if (tp->regs) {
15799 iounmap(tp->regs);
15800 tp->regs = NULL;
15801 }
15803 err_out_free_dev:
15804 free_netdev(dev);
15806 err_out_power_down:
15807 pci_set_power_state(pdev, PCI_D3hot);
15809 err_out_free_res:
15810 pci_release_regions(pdev);
15812 err_out_disable_pdev:
15813 pci_disable_device(pdev);
15814 pci_set_drvdata(pdev, NULL);
15815 return err;
15816 }
15818 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15819 {
15820 struct net_device *dev = pci_get_drvdata(pdev);
15822 if (dev) {
15823 struct tg3 *tp = netdev_priv(dev);
15825 if (tp->fw)
15826 release_firmware(tp->fw);
15828 tg3_reset_task_cancel(tp);
15830 if (tg3_flag(tp, USE_PHYLIB)) {
15831 tg3_phy_fini(tp);
15832 tg3_mdio_fini(tp);
15833 }
15835 unregister_netdev(dev);
15836 if (tp->aperegs) {
15837 iounmap(tp->aperegs);
15838 tp->aperegs = NULL;
15839 }
15840 if (tp->regs) {
15841 iounmap(tp->regs);
15842 tp->regs = NULL;
15843 }
15844 free_netdev(dev);
15845 pci_release_regions(pdev);
15846 pci_disable_device(pdev);
15847 pci_set_drvdata(pdev, NULL);
15848 }
15849 }
15851 #ifdef CONFIG_PM_SLEEP
15852 static int tg3_suspend(struct device *device)
15853 {
15854 struct pci_dev *pdev = to_pci_dev(device);
15855 struct net_device *dev = pci_get_drvdata(pdev);
15856 struct tg3 *tp = netdev_priv(dev);
15857 int err;
15859 if (!netif_running(dev))
15860 return 0;
15862 tg3_reset_task_cancel(tp);
15863 tg3_phy_stop(tp);
15864 tg3_netif_stop(tp);
15866 tg3_timer_stop(tp);
15868 tg3_full_lock(tp, 1);
15869 tg3_disable_ints(tp);
15870 tg3_full_unlock(tp);
15872 netif_device_detach(dev);
15874 tg3_full_lock(tp, 0);
15875 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15876 tg3_flag_clear(tp, INIT_COMPLETE);
15877 tg3_full_unlock(tp);
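/* tg3_power_down_prepare() programs the chip and PHY for the target
 * low-power state, including Wake-on-LAN setup when it is enabled.
 */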
15879 err = tg3_power_down_prepare(tp);
15880 if (err) {
15881 int err2;
15883 tg3_full_lock(tp, 0);
15885 tg3_flag_set(tp, INIT_COMPLETE);
15886 err2 = tg3_restart_hw(tp, 1);
15887 if (err2)
15888 goto out;
15890 tg3_timer_start(tp);
15892 netif_device_attach(dev);
15893 tg3_netif_start(tp);
15895 out:
15896 tg3_full_unlock(tp);
15898 if (!err2)
15899 tg3_phy_start(tp);
15900 }
15902 return err;
15903 }
15905 static int tg3_resume(struct device *device)
15906 {
15907 struct pci_dev *pdev = to_pci_dev(device);
15908 struct net_device *dev = pci_get_drvdata(pdev);
15909 struct tg3 *tp = netdev_priv(dev);
15910 int err;
15912 if (!netif_running(dev))
15913 return 0;
15915 netif_device_attach(dev);
15917 tg3_full_lock(tp, 0);
15919 tg3_flag_set(tp, INIT_COMPLETE);
15920 err = tg3_restart_hw(tp, 1);
15921 if (err)
15922 goto out;
15924 tg3_timer_start(tp);
15926 tg3_netif_start(tp);
15928 out:
15929 tg3_full_unlock(tp);
15931 if (!err)
15932 tg3_phy_start(tp);
15934 return err;
15935 }
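/* SIMPLE_DEV_PM_OPS binds tg3_suspend/tg3_resume to the system sleep
 * transitions (suspend, hibernate); no runtime PM callbacks are provided.
 */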
15937 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15938 #define TG3_PM_OPS (&tg3_pm_ops)
15940 #else
15942 #define TG3_PM_OPS NULL
15944 #endif /* CONFIG_PM_SLEEP */
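/* PCI error recovery (AER) hooks: error_detected quiesces the device when
 * a bus error is reported, slot_reset re-initializes PCI state after the
 * link or bus has been reset, and resume restarts traffic.
 */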
15946 /**
15947 * tg3_io_error_detected - called when PCI error is detected
15948 * @pdev: Pointer to PCI device
15949 * @state: The current pci connection state
15950 *
15951 * This function is called after a PCI bus error affecting
15952 * this device has been detected.
15953 */
15954 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15955 pci_channel_state_t state)
15956 {
15957 struct net_device *netdev = pci_get_drvdata(pdev);
15958 struct tg3 *tp = netdev_priv(netdev);
15959 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15961 netdev_info(netdev, "PCI I/O error detected\n");
15963 rtnl_lock();
15965 if (!netif_running(netdev))
15966 goto done;
15968 tg3_phy_stop(tp);
15970 tg3_netif_stop(tp);
15972 tg3_timer_stop(tp);
15974 /* Want to make sure that the reset task doesn't run */
15975 tg3_reset_task_cancel(tp);
15977 netif_device_detach(netdev);
15979 /* Clean up software state, even if MMIO is blocked */
15980 tg3_full_lock(tp, 0);
15981 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15982 tg3_full_unlock(tp);
15984 done:
15985 if (state == pci_channel_io_perm_failure)
15986 err = PCI_ERS_RESULT_DISCONNECT;
15987 else
15988 pci_disable_device(pdev);
15990 rtnl_unlock();
15992 return err;
15993 }
15995 /**
15996 * tg3_io_slot_reset - called after the pci bus has been reset.
15997 * @pdev: Pointer to PCI device
15998 *
15999 * Restart the card from scratch, as if from a cold-boot.
16000 * At this point, the card has experienced a hard reset,
16001 * followed by fixups by BIOS, and has its config space
16002 * set up identically to what it was at cold boot.
16003 */
16004 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16005 {
16006 struct net_device *netdev = pci_get_drvdata(pdev);
16007 struct tg3 *tp = netdev_priv(netdev);
16008 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16009 int err;
16011 rtnl_lock();
16013 if (pci_enable_device(pdev)) {
16014 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16015 goto done;
16016 }
16018 pci_set_master(pdev);
16019 pci_restore_state(pdev);
16020 pci_save_state(pdev);
16022 if (!netif_running(netdev)) {
16023 rc = PCI_ERS_RESULT_RECOVERED;
16024 goto done;
16025 }
16027 err = tg3_power_up(tp);
16028 if (err)
16029 goto done;
16031 rc = PCI_ERS_RESULT_RECOVERED;
16033 done:
16034 rtnl_unlock();
16036 return rc;
16037 }
16039 /**
16040 * tg3_io_resume - called when traffic can start flowing again.
16041 * @pdev: Pointer to PCI device
16042 *
16043 * This callback is called when the error recovery driver tells
16044 * us that it's OK to resume normal operation.
16045 */
16046 static void tg3_io_resume(struct pci_dev *pdev)
16047 {
16048 struct net_device *netdev = pci_get_drvdata(pdev);
16049 struct tg3 *tp = netdev_priv(netdev);
16050 int err;
16052 rtnl_lock();
16054 if (!netif_running(netdev))
16055 goto done;
16057 tg3_full_lock(tp, 0);
16058 tg3_flag_set(tp, INIT_COMPLETE);
16059 err = tg3_restart_hw(tp, 1);
16060 tg3_full_unlock(tp);
16061 if (err) {
16062 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16063 goto done;
16064 }
16066 netif_device_attach(netdev);
16068 tg3_timer_start(tp);
16070 tg3_netif_start(tp);
16072 tg3_phy_start(tp);
16074 done:
16075 rtnl_unlock();
16076 }
16078 static struct pci_error_handlers tg3_err_handler = {
16079 .error_detected = tg3_io_error_detected,
16080 .slot_reset = tg3_io_slot_reset,
16081 .resume = tg3_io_resume
16082 };
16084 static struct pci_driver tg3_driver = {
16085 .name = DRV_MODULE_NAME,
16086 .id_table = tg3_pci_tbl,
16087 .probe = tg3_init_one,
16088 .remove = __devexit_p(tg3_remove_one),
16089 .err_handler = &tg3_err_handler,
16090 .driver.pm = TG3_PM_OPS,
16091 };
16093 static int __init tg3_init(void)
16094 {
16095 return pci_register_driver(&tg3_driver);
16096 }
16098 static void __exit tg3_cleanup(void)
16099 {
16100 pci_unregister_driver(&tg3_driver);
16101 }
16103 module_init(tg3_init);
16104 module_exit(tg3_cleanup);