2 * tg3.c: Broadcom Tigon3 ethernet driver.
4 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6 * Copyright (C) 2004 Sun Microsystems Inc.
7 * Copyright (C) 2005-2013 Broadcom Corporation.
10 * Derived from proprietary unpublished source code,
11 * Copyright (C) 2000-2003 Broadcom Corporation.
13 * Permission is hereby granted for the distribution of this firmware
14 * data in hexadecimal or equivalent format, provided this copyright
15 * notice is accompanying it.
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
28 #include <linux/init.h>
29 #include <linux/interrupt.h>
30 #include <linux/ioport.h>
31 #include <linux/pci.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/ethtool.h>
36 #include <linux/mdio.h>
37 #include <linux/mii.h>
38 #include <linux/phy.h>
39 #include <linux/brcmphy.h>
40 #include <linux/if_vlan.h>
42 #include <linux/tcp.h>
43 #include <linux/workqueue.h>
44 #include <linux/prefetch.h>
45 #include <linux/dma-mapping.h>
46 #include <linux/firmware.h>
47 #include <linux/ssb/ssb_driver_gige.h>
48 #include <linux/hwmon.h>
49 #include <linux/hwmon-sysfs.h>
51 #include <net/checksum.h>
55 #include <asm/byteorder.h>
56 #include <linux/uaccess.h>
58 #include <uapi/linux/net_tstamp.h>
59 #include <linux/ptp_clock_kernel.h>
62 #include <asm/idprom.h>
71 /* Functions & macros to verify TG3_FLAGS types */
73 static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
75 return test_bit(flag, bits);
78 static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
83 static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
85 clear_bit(flag, bits);
88 #define tg3_flag(tp, flag) \
89 _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
90 #define tg3_flag_set(tp, flag) \
91 _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
92 #define tg3_flag_clear(tp, flag) \
93 _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
95 #define DRV_MODULE_NAME "tg3"
97 #define TG3_MIN_NUM 132
98 #define DRV_MODULE_VERSION \
99 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
100 #define DRV_MODULE_RELDATE "May 21, 2013"
102 #define RESET_KIND_SHUTDOWN 0
103 #define RESET_KIND_INIT 1
104 #define RESET_KIND_SUSPEND 2
106 #define TG3_DEF_RX_MODE 0
107 #define TG3_DEF_TX_MODE 0
108 #define TG3_DEF_MSG_ENABLE \
118 #define TG3_GRC_LCLCTL_PWRSW_DELAY 100
120 /* length of time before we decide the hardware is borked,
121 * and dev->tx_timeout() should be called to fix the problem
124 #define TG3_TX_TIMEOUT (5 * HZ)
126 /* hardware minimum and maximum for a single frame's data payload */
127 #define TG3_MIN_MTU 60
128 #define TG3_MAX_MTU(tp) \
129 (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
131 /* These numbers seem to be hard coded in the NIC firmware somehow.
132 * You can't change the ring sizes, but you can change where you place
133 * them in the NIC onboard memory.
135 #define TG3_RX_STD_RING_SIZE(tp) \
136 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
137 TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
138 #define TG3_DEF_RX_RING_PENDING 200
139 #define TG3_RX_JMB_RING_SIZE(tp) \
140 (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
141 TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
142 #define TG3_DEF_RX_JUMBO_RING_PENDING 100
144 /* Do not place this n-ring entries value into the tp struct itself,
145 * we really want to expose these constants to GCC so that modulo et
146 * al. operations are done with shifts and masks instead of with
147 * hw multiply/modulo instructions. Another solution would be to
148 * replace things like '% foo' with '& (foo - 1)'.
151 #define TG3_TX_RING_SIZE 512
152 #define TG3_DEF_TX_RING_PENDING (TG3_TX_RING_SIZE - 1)
154 #define TG3_RX_STD_RING_BYTES(tp) \
155 (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
156 #define TG3_RX_JMB_RING_BYTES(tp) \
157 (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
158 #define TG3_RX_RCB_RING_BYTES(tp) \
159 (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
160 #define TG3_TX_RING_BYTES (sizeof(struct tg3_tx_buffer_desc) * \
162 #define NEXT_TX(N) (((N) + 1) & (TG3_TX_RING_SIZE - 1))
164 #define TG3_DMA_BYTE_ENAB 64
166 #define TG3_RX_STD_DMA_SZ 1536
167 #define TG3_RX_JMB_DMA_SZ 9046
169 #define TG3_RX_DMA_TO_MAP_SZ(x) ((x) + TG3_DMA_BYTE_ENAB)
171 #define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
172 #define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
174 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
175 (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
177 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
178 (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
180 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
181 * that are at least dword aligned when used in PCIX mode. The driver
182 * works around this bug by double copying the packet. This workaround
183 * is built into the normal double copy length check for efficiency.
185 * However, the double copy is only necessary on those architectures
186 * where unaligned memory accesses are inefficient. For those architectures
187 * where unaligned memory accesses incur little penalty, we can reintegrate
188 * the 5701 in the normal rx path. Doing so saves a device structure
189 * dereference by hardcoding the double copy threshold in place.
191 #define TG3_RX_COPY_THRESHOLD 256
192 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
193 #define TG3_RX_COPY_THRESH(tp) TG3_RX_COPY_THRESHOLD
195 #define TG3_RX_COPY_THRESH(tp) ((tp)->rx_copy_thresh)
198 #if (NET_IP_ALIGN != 0)
199 #define TG3_RX_OFFSET(tp) ((tp)->rx_offset)
201 #define TG3_RX_OFFSET(tp) (NET_SKB_PAD)
204 /* minimum number of free TX descriptors required to wake up TX process */
205 #define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
206 #define TG3_TX_BD_DMA_MAX_2K 2048
207 #define TG3_TX_BD_DMA_MAX_4K 4096
209 #define TG3_RAW_IP_ALIGN 2
211 #define TG3_FW_UPDATE_TIMEOUT_SEC 5
212 #define TG3_FW_UPDATE_FREQ_SEC (TG3_FW_UPDATE_TIMEOUT_SEC / 2)
214 #define FIRMWARE_TG3 "tigon/tg3.bin"
215 #define FIRMWARE_TG357766 "tigon/tg357766.bin"
216 #define FIRMWARE_TG3TSO "tigon/tg3_tso.bin"
217 #define FIRMWARE_TG3TSO5 "tigon/tg3_tso5.bin"
219 static char version[] =
220 DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";
222 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
223 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
224 MODULE_LICENSE("GPL");
225 MODULE_VERSION(DRV_MODULE_VERSION);
226 MODULE_FIRMWARE(FIRMWARE_TG3);
227 MODULE_FIRMWARE(FIRMWARE_TG3TSO);
228 MODULE_FIRMWARE(FIRMWARE_TG3TSO5);
230 static int tg3_debug = -1; /* -1 == use TG3_DEF_MSG_ENABLE as value */
231 module_param(tg3_debug, int, 0);
232 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
234 #define TG3_DRV_DATA_FLAG_10_100_ONLY 0x0001
235 #define TG3_DRV_DATA_FLAG_5705_10_100 0x0002
237 static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
238 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
239 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
240 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
247 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
248 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
249 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
250 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
251 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
252 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
253 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
254 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
255 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
256 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
257 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
258 TG3_DRV_DATA_FLAG_5705_10_100},
259 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
260 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
261 TG3_DRV_DATA_FLAG_5705_10_100},
262 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
263 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
264 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
265 TG3_DRV_DATA_FLAG_5705_10_100},
266 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
267 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
268 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
269 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
270 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
271 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
272 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
273 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
274 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
275 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
276 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
277 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
278 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
279 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
280 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
281 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
282 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
283 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
284 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
285 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
286 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
287 PCI_VENDOR_ID_LENOVO,
288 TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
289 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
290 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
291 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
292 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
293 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
294 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
295 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
296 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
297 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
298 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
299 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
300 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
301 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
302 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
303 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
304 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
305 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
306 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
307 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
308 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
309 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
310 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
311 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
312 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
313 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
314 {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
315 PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
316 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
317 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
318 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
319 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
320 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
321 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
322 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
323 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
324 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
325 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
326 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
327 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
328 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
329 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
330 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
331 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
332 .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
333 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
334 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
335 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
336 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
337 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
338 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
339 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
340 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
341 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
342 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
343 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
344 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
345 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
346 {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
347 {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
351 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
353 static const struct {
354 const char string[ETH_GSTRING_LEN];
355 } ethtool_stats_keys[] = {
358 { "rx_ucast_packets" },
359 { "rx_mcast_packets" },
360 { "rx_bcast_packets" },
362 { "rx_align_errors" },
363 { "rx_xon_pause_rcvd" },
364 { "rx_xoff_pause_rcvd" },
365 { "rx_mac_ctrl_rcvd" },
366 { "rx_xoff_entered" },
367 { "rx_frame_too_long_errors" },
369 { "rx_undersize_packets" },
370 { "rx_in_length_errors" },
371 { "rx_out_length_errors" },
372 { "rx_64_or_less_octet_packets" },
373 { "rx_65_to_127_octet_packets" },
374 { "rx_128_to_255_octet_packets" },
375 { "rx_256_to_511_octet_packets" },
376 { "rx_512_to_1023_octet_packets" },
377 { "rx_1024_to_1522_octet_packets" },
378 { "rx_1523_to_2047_octet_packets" },
379 { "rx_2048_to_4095_octet_packets" },
380 { "rx_4096_to_8191_octet_packets" },
381 { "rx_8192_to_9022_octet_packets" },
388 { "tx_flow_control" },
390 { "tx_single_collisions" },
391 { "tx_mult_collisions" },
393 { "tx_excessive_collisions" },
394 { "tx_late_collisions" },
395 { "tx_collide_2times" },
396 { "tx_collide_3times" },
397 { "tx_collide_4times" },
398 { "tx_collide_5times" },
399 { "tx_collide_6times" },
400 { "tx_collide_7times" },
401 { "tx_collide_8times" },
402 { "tx_collide_9times" },
403 { "tx_collide_10times" },
404 { "tx_collide_11times" },
405 { "tx_collide_12times" },
406 { "tx_collide_13times" },
407 { "tx_collide_14times" },
408 { "tx_collide_15times" },
409 { "tx_ucast_packets" },
410 { "tx_mcast_packets" },
411 { "tx_bcast_packets" },
412 { "tx_carrier_sense_errors" },
416 { "dma_writeq_full" },
417 { "dma_write_prioq_full" },
421 { "rx_threshold_hit" },
423 { "dma_readq_full" },
424 { "dma_read_prioq_full" },
425 { "tx_comp_queue_full" },
427 { "ring_set_send_prod_index" },
428 { "ring_status_update" },
430 { "nic_avoided_irqs" },
431 { "nic_tx_threshold_hit" },
433 { "mbuf_lwm_thresh_hit" },
436 #define TG3_NUM_STATS ARRAY_SIZE(ethtool_stats_keys)
437 #define TG3_NVRAM_TEST 0
438 #define TG3_LINK_TEST 1
439 #define TG3_REGISTER_TEST 2
440 #define TG3_MEMORY_TEST 3
441 #define TG3_MAC_LOOPB_TEST 4
442 #define TG3_PHY_LOOPB_TEST 5
443 #define TG3_EXT_LOOPB_TEST 6
444 #define TG3_INTERRUPT_TEST 7
447 static const struct {
448 const char string[ETH_GSTRING_LEN];
449 } ethtool_test_keys[] = {
450 [TG3_NVRAM_TEST] = { "nvram test (online) " },
451 [TG3_LINK_TEST] = { "link test (online) " },
452 [TG3_REGISTER_TEST] = { "register test (offline)" },
453 [TG3_MEMORY_TEST] = { "memory test (offline)" },
454 [TG3_MAC_LOOPB_TEST] = { "mac loopback test (offline)" },
455 [TG3_PHY_LOOPB_TEST] = { "phy loopback test (offline)" },
456 [TG3_EXT_LOOPB_TEST] = { "ext loopback test (offline)" },
457 [TG3_INTERRUPT_TEST] = { "interrupt test (offline)" },
460 #define TG3_NUM_TEST ARRAY_SIZE(ethtool_test_keys)
463 static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
465 writel(val, tp->regs + off);
468 static u32 tg3_read32(struct tg3 *tp, u32 off)
470 return readl(tp->regs + off);
473 static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
475 writel(val, tp->aperegs + off);
478 static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
480 return readl(tp->aperegs + off);
483 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
487 spin_lock_irqsave(&tp->indirect_lock, flags);
488 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
489 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
490 spin_unlock_irqrestore(&tp->indirect_lock, flags);
493 static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
495 writel(val, tp->regs + off);
496 readl(tp->regs + off);
499 static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
504 spin_lock_irqsave(&tp->indirect_lock, flags);
505 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
506 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
507 spin_unlock_irqrestore(&tp->indirect_lock, flags);
511 static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
515 if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
516 pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
517 TG3_64BIT_REG_LOW, val);
520 if (off == TG3_RX_STD_PROD_IDX_REG) {
521 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
522 TG3_64BIT_REG_LOW, val);
526 spin_lock_irqsave(&tp->indirect_lock, flags);
527 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
528 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
529 spin_unlock_irqrestore(&tp->indirect_lock, flags);
531 /* In indirect mode when disabling interrupts, we also need
532 * to clear the interrupt bit in the GRC local ctrl register.
534 if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
536 pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
537 tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
541 static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
546 spin_lock_irqsave(&tp->indirect_lock, flags);
547 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
548 pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
549 spin_unlock_irqrestore(&tp->indirect_lock, flags);
553 /* usec_wait specifies the wait time in usec when writing to certain registers
554 * where it is unsafe to read back the register without some delay.
555 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
556 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
558 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
560 if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
561 /* Non-posted methods */
562 tp->write32(tp, off, val);
565 tg3_write32(tp, off, val);
570 /* Wait again after the read for the posted method to guarantee that
571 * the wait time is met.
577 static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
579 tp->write32_mbox(tp, off, val);
580 if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
581 (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
582 !tg3_flag(tp, ICH_WORKAROUND)))
583 tp->read32_mbox(tp, off);
586 static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
588 void __iomem *mbox = tp->regs + off;
590 if (tg3_flag(tp, TXD_MBOX_HWBUG))
592 if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
593 tg3_flag(tp, FLUSH_POSTED_WRITES))
597 static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
599 return readl(tp->regs + off + GRCMBOX_BASE);
602 static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
604 writel(val, tp->regs + off + GRCMBOX_BASE);
607 #define tw32_mailbox(reg, val) tp->write32_mbox(tp, reg, val)
608 #define tw32_mailbox_f(reg, val) tw32_mailbox_flush(tp, (reg), (val))
609 #define tw32_rx_mbox(reg, val) tp->write32_rx_mbox(tp, reg, val)
610 #define tw32_tx_mbox(reg, val) tp->write32_tx_mbox(tp, reg, val)
611 #define tr32_mailbox(reg) tp->read32_mbox(tp, reg)
613 #define tw32(reg, val) tp->write32(tp, reg, val)
614 #define tw32_f(reg, val) _tw32_flush(tp, (reg), (val), 0)
615 #define tw32_wait_f(reg, val, us) _tw32_flush(tp, (reg), (val), (us))
616 #define tr32(reg) tp->read32(tp, reg)
618 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
622 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
623 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
626 spin_lock_irqsave(&tp->indirect_lock, flags);
627 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
628 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
629 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
631 /* Always leave this as zero. */
632 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
634 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
635 tw32_f(TG3PCI_MEM_WIN_DATA, val);
637 /* Always leave this as zero. */
638 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
640 spin_unlock_irqrestore(&tp->indirect_lock, flags);
643 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
647 if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
648 (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
653 spin_lock_irqsave(&tp->indirect_lock, flags);
654 if (tg3_flag(tp, SRAM_USE_CONFIG)) {
655 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
656 pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
658 /* Always leave this as zero. */
659 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
661 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
662 *val = tr32(TG3PCI_MEM_WIN_DATA);
664 /* Always leave this as zero. */
665 tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
667 spin_unlock_irqrestore(&tp->indirect_lock, flags);
670 static void tg3_ape_lock_init(struct tg3 *tp)
675 if (tg3_asic_rev(tp) == ASIC_REV_5761)
676 regbase = TG3_APE_LOCK_GRANT;
678 regbase = TG3_APE_PER_LOCK_GRANT;
680 /* Make sure the driver hasn't any stale locks. */
681 for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
683 case TG3_APE_LOCK_PHY0:
684 case TG3_APE_LOCK_PHY1:
685 case TG3_APE_LOCK_PHY2:
686 case TG3_APE_LOCK_PHY3:
687 bit = APE_LOCK_GRANT_DRIVER;
691 bit = APE_LOCK_GRANT_DRIVER;
693 bit = 1 << tp->pci_fn;
695 tg3_ape_write32(tp, regbase + 4 * i, bit);
700 static int tg3_ape_lock(struct tg3 *tp, int locknum)
704 u32 status, req, gnt, bit;
706 if (!tg3_flag(tp, ENABLE_APE))
710 case TG3_APE_LOCK_GPIO:
711 if (tg3_asic_rev(tp) == ASIC_REV_5761)
713 case TG3_APE_LOCK_GRC:
714 case TG3_APE_LOCK_MEM:
716 bit = APE_LOCK_REQ_DRIVER;
718 bit = 1 << tp->pci_fn;
720 case TG3_APE_LOCK_PHY0:
721 case TG3_APE_LOCK_PHY1:
722 case TG3_APE_LOCK_PHY2:
723 case TG3_APE_LOCK_PHY3:
724 bit = APE_LOCK_REQ_DRIVER;
730 if (tg3_asic_rev(tp) == ASIC_REV_5761) {
731 req = TG3_APE_LOCK_REQ;
732 gnt = TG3_APE_LOCK_GRANT;
734 req = TG3_APE_PER_LOCK_REQ;
735 gnt = TG3_APE_PER_LOCK_GRANT;
740 tg3_ape_write32(tp, req + off, bit);
742 /* Wait for up to 1 millisecond to acquire lock. */
743 for (i = 0; i < 100; i++) {
744 status = tg3_ape_read32(tp, gnt + off);
747 if (pci_channel_offline(tp->pdev))
754 /* Revoke the lock request. */
755 tg3_ape_write32(tp, gnt + off, bit);
762 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
766 if (!tg3_flag(tp, ENABLE_APE))
770 case TG3_APE_LOCK_GPIO:
771 if (tg3_asic_rev(tp) == ASIC_REV_5761)
773 case TG3_APE_LOCK_GRC:
774 case TG3_APE_LOCK_MEM:
776 bit = APE_LOCK_GRANT_DRIVER;
778 bit = 1 << tp->pci_fn;
780 case TG3_APE_LOCK_PHY0:
781 case TG3_APE_LOCK_PHY1:
782 case TG3_APE_LOCK_PHY2:
783 case TG3_APE_LOCK_PHY3:
784 bit = APE_LOCK_GRANT_DRIVER;
790 if (tg3_asic_rev(tp) == ASIC_REV_5761)
791 gnt = TG3_APE_LOCK_GRANT;
793 gnt = TG3_APE_PER_LOCK_GRANT;
795 tg3_ape_write32(tp, gnt + 4 * locknum, bit);
798 static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
803 if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
806 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
807 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
810 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
813 timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
816 return timeout_us ? 0 : -EBUSY;
819 static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
823 for (i = 0; i < timeout_us / 10; i++) {
824 apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
826 if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
832 return i == timeout_us / 10;
835 static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
839 u32 i, bufoff, msgoff, maxlen, apedata;
841 if (!tg3_flag(tp, APE_HAS_NCSI))
844 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
845 if (apedata != APE_SEG_SIG_MAGIC)
848 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
849 if (!(apedata & APE_FW_STATUS_READY))
852 bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
854 msgoff = bufoff + 2 * sizeof(u32);
855 maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);
860 /* Cap xfer sizes to scratchpad limits. */
861 length = (len > maxlen) ? maxlen : len;
864 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
865 if (!(apedata & APE_FW_STATUS_READY))
868 /* Wait for up to 1 msec for APE to service previous event. */
869 err = tg3_ape_event_lock(tp, 1000);
873 apedata = APE_EVENT_STATUS_DRIVER_EVNT |
874 APE_EVENT_STATUS_SCRTCHPD_READ |
875 APE_EVENT_STATUS_EVENT_PENDING;
876 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);
878 tg3_ape_write32(tp, bufoff, base_off);
879 tg3_ape_write32(tp, bufoff + sizeof(u32), length);
881 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
882 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
886 if (tg3_ape_wait_for_event(tp, 30000))
889 for (i = 0; length; i += 4, length -= 4) {
890 u32 val = tg3_ape_read32(tp, msgoff + i);
891 memcpy(data, &val, sizeof(u32));
899 static int tg3_ape_send_event(struct tg3 *tp, u32 event)
904 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
905 if (apedata != APE_SEG_SIG_MAGIC)
908 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
909 if (!(apedata & APE_FW_STATUS_READY))
912 /* Wait for up to 1 millisecond for APE to service previous event. */
913 err = tg3_ape_event_lock(tp, 1000);
917 tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
918 event | APE_EVENT_STATUS_EVENT_PENDING);
920 tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
921 tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
926 static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
931 if (!tg3_flag(tp, ENABLE_APE))
935 case RESET_KIND_INIT:
936 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
937 APE_HOST_SEG_SIG_MAGIC);
938 tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
939 APE_HOST_SEG_LEN_MAGIC);
940 apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
941 tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
942 tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
943 APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
944 tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
945 APE_HOST_BEHAV_NO_PHYLOCK);
946 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
947 TG3_APE_HOST_DRVR_STATE_START);
949 event = APE_EVENT_STATUS_STATE_START;
951 case RESET_KIND_SHUTDOWN:
952 /* With the interface we are currently using,
953 * APE does not track driver state. Wiping
954 * out the HOST SEGMENT SIGNATURE forces
955 * the APE to assume OS absent status.
957 tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);
959 if (device_may_wakeup(&tp->pdev->dev) &&
960 tg3_flag(tp, WOL_ENABLE)) {
961 tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
962 TG3_APE_HOST_WOL_SPEED_AUTO);
963 apedata = TG3_APE_HOST_DRVR_STATE_WOL;
965 apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;
967 tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);
969 event = APE_EVENT_STATUS_STATE_UNLOAD;
971 case RESET_KIND_SUSPEND:
972 event = APE_EVENT_STATUS_STATE_SUSPEND;
978 event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;
980 tg3_ape_send_event(tp, event);
983 static void tg3_disable_ints(struct tg3 *tp)
987 tw32(TG3PCI_MISC_HOST_CTRL,
988 (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
989 for (i = 0; i < tp->irq_max; i++)
990 tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
993 static void tg3_enable_ints(struct tg3 *tp)
1000 tw32(TG3PCI_MISC_HOST_CTRL,
1001 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
1003 tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
1004 for (i = 0; i < tp->irq_cnt; i++) {
1005 struct tg3_napi *tnapi = &tp->napi[i];
1007 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1008 if (tg3_flag(tp, 1SHOT_MSI))
1009 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
1011 tp->coal_now |= tnapi->coal_now;
1014 /* Force an initial interrupt */
1015 if (!tg3_flag(tp, TAGGED_STATUS) &&
1016 (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
1017 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
1019 tw32(HOSTCC_MODE, tp->coal_now);
1021 tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
1024 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
1026 struct tg3 *tp = tnapi->tp;
1027 struct tg3_hw_status *sblk = tnapi->hw_status;
1028 unsigned int work_exists = 0;
1030 /* check for phy events */
1031 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
1032 if (sblk->status & SD_STATUS_LINK_CHG)
1036 /* check for TX work to do */
1037 if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
1040 /* check for RX work to do */
1041 if (tnapi->rx_rcb_prod_idx &&
1042 *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
1049 * similar to tg3_enable_ints, but it accurately determines whether there
1050 * is new work pending and can return without flushing the PIO write
1051 * which reenables interrupts
1053 static void tg3_int_reenable(struct tg3_napi *tnapi)
1055 struct tg3 *tp = tnapi->tp;
1057 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
1060 /* When doing tagged status, this work check is unnecessary.
1061 * The last_tag we write above tells the chip which piece of
1062 * work we've completed.
1064 if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
1065 tw32(HOSTCC_MODE, tp->coalesce_mode |
1066 HOSTCC_MODE_ENABLE | tnapi->coal_now);
1069 static void tg3_switch_clocks(struct tg3 *tp)
1072 u32 orig_clock_ctrl;
1074 if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
1077 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
1079 orig_clock_ctrl = clock_ctrl;
1080 clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
1081 CLOCK_CTRL_CLKRUN_OENABLE |
1083 tp->pci_clock_ctrl = clock_ctrl;
1085 if (tg3_flag(tp, 5705_PLUS)) {
1086 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
1087 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1088 clock_ctrl | CLOCK_CTRL_625_CORE, 40);
1090 } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
1091 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1093 (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
1095 tw32_wait_f(TG3PCI_CLOCK_CTRL,
1096 clock_ctrl | (CLOCK_CTRL_ALTCLK),
1099 tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
1102 #define PHY_BUSY_LOOPS 5000
1104 static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
1111 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1113 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1117 tg3_ape_lock(tp, tp->phy_ape_lock);
1121 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1122 MI_COM_PHY_ADDR_MASK);
1123 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1124 MI_COM_REG_ADDR_MASK);
1125 frame_val |= (MI_COM_CMD_READ | MI_COM_START);
1127 tw32_f(MAC_MI_COM, frame_val);
1129 loops = PHY_BUSY_LOOPS;
1130 while (loops != 0) {
1132 frame_val = tr32(MAC_MI_COM);
1134 if ((frame_val & MI_COM_BUSY) == 0) {
1136 frame_val = tr32(MAC_MI_COM);
1144 *val = frame_val & MI_COM_DATA_MASK;
1148 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1149 tw32_f(MAC_MI_MODE, tp->mi_mode);
1153 tg3_ape_unlock(tp, tp->phy_ape_lock);
1158 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
1160 return __tg3_readphy(tp, tp->phy_addr, reg, val);
1163 static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
1170 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
1171 (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
1174 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1176 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
1180 tg3_ape_lock(tp, tp->phy_ape_lock);
1182 frame_val = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
1183 MI_COM_PHY_ADDR_MASK);
1184 frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
1185 MI_COM_REG_ADDR_MASK);
1186 frame_val |= (val & MI_COM_DATA_MASK);
1187 frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
1189 tw32_f(MAC_MI_COM, frame_val);
1191 loops = PHY_BUSY_LOOPS;
1192 while (loops != 0) {
1194 frame_val = tr32(MAC_MI_COM);
1195 if ((frame_val & MI_COM_BUSY) == 0) {
1197 frame_val = tr32(MAC_MI_COM);
1207 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
1208 tw32_f(MAC_MI_MODE, tp->mi_mode);
1212 tg3_ape_unlock(tp, tp->phy_ape_lock);
1217 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
1219 return __tg3_writephy(tp, tp->phy_addr, reg, val);
1222 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
1226 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1230 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1234 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1235 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1239 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
1245 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
1249 err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
1253 err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
1257 err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
1258 MII_TG3_MMD_CTRL_DATA_NOINC | devad);
1262 err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
1268 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
1272 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1274 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
1279 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
1283 err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
1285 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
1290 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
1294 err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
1295 (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
1296 MII_TG3_AUXCTL_SHDWSEL_MISC);
1298 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
1303 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
1305 if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
1306 set |= MII_TG3_AUXCTL_MISC_WREN;
1308 return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
1311 static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
1316 err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1322 val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1324 val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
1326 err = tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1327 val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1332 static int tg3_bmcr_reset(struct tg3 *tp)
1337 /* OK, reset it, and poll the BMCR_RESET bit until it
1338 * clears or we time out.
1340 phy_control = BMCR_RESET;
1341 err = tg3_writephy(tp, MII_BMCR, phy_control);
1347 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1351 if ((phy_control & BMCR_RESET) == 0) {
1363 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1365 struct tg3 *tp = bp->priv;
1368 spin_lock_bh(&tp->lock);
1370 if (tg3_readphy(tp, reg, &val))
1373 spin_unlock_bh(&tp->lock);
1378 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1380 struct tg3 *tp = bp->priv;
1383 spin_lock_bh(&tp->lock);
1385 if (tg3_writephy(tp, reg, val))
1388 spin_unlock_bh(&tp->lock);
1393 static int tg3_mdio_reset(struct mii_bus *bp)
1398 static void tg3_mdio_config_5785(struct tg3 *tp)
1401 struct phy_device *phydev;
1403 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1404 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1405 case PHY_ID_BCM50610:
1406 case PHY_ID_BCM50610M:
1407 val = MAC_PHYCFG2_50610_LED_MODES;
1409 case PHY_ID_BCMAC131:
1410 val = MAC_PHYCFG2_AC131_LED_MODES;
1412 case PHY_ID_RTL8211C:
1413 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1415 case PHY_ID_RTL8201E:
1416 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1422 if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1423 tw32(MAC_PHYCFG2, val);
1425 val = tr32(MAC_PHYCFG1);
1426 val &= ~(MAC_PHYCFG1_RGMII_INT |
1427 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1428 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1429 tw32(MAC_PHYCFG1, val);
1434 if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1435 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1436 MAC_PHYCFG2_FMODE_MASK_MASK |
1437 MAC_PHYCFG2_GMODE_MASK_MASK |
1438 MAC_PHYCFG2_ACT_MASK_MASK |
1439 MAC_PHYCFG2_QUAL_MASK_MASK |
1440 MAC_PHYCFG2_INBAND_ENABLE;
1442 tw32(MAC_PHYCFG2, val);
1444 val = tr32(MAC_PHYCFG1);
1445 val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1446 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1447 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1448 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1449 val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1450 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1451 val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1453 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1454 MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1455 tw32(MAC_PHYCFG1, val);
1457 val = tr32(MAC_EXT_RGMII_MODE);
1458 val &= ~(MAC_RGMII_MODE_RX_INT_B |
1459 MAC_RGMII_MODE_RX_QUALITY |
1460 MAC_RGMII_MODE_RX_ACTIVITY |
1461 MAC_RGMII_MODE_RX_ENG_DET |
1462 MAC_RGMII_MODE_TX_ENABLE |
1463 MAC_RGMII_MODE_TX_LOWPWR |
1464 MAC_RGMII_MODE_TX_RESET);
1465 if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1466 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1467 val |= MAC_RGMII_MODE_RX_INT_B |
1468 MAC_RGMII_MODE_RX_QUALITY |
1469 MAC_RGMII_MODE_RX_ACTIVITY |
1470 MAC_RGMII_MODE_RX_ENG_DET;
1471 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1472 val |= MAC_RGMII_MODE_TX_ENABLE |
1473 MAC_RGMII_MODE_TX_LOWPWR |
1474 MAC_RGMII_MODE_TX_RESET;
1476 tw32(MAC_EXT_RGMII_MODE, val);
1479 static void tg3_mdio_start(struct tg3 *tp)
1481 tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1482 tw32_f(MAC_MI_MODE, tp->mi_mode);
1485 if (tg3_flag(tp, MDIOBUS_INITED) &&
1486 tg3_asic_rev(tp) == ASIC_REV_5785)
1487 tg3_mdio_config_5785(tp);
1490 static int tg3_mdio_init(struct tg3 *tp)
1494 struct phy_device *phydev;
1496 if (tg3_flag(tp, 5717_PLUS)) {
1499 tp->phy_addr = tp->pci_fn + 1;
1501 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1502 is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1504 is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1505 TG3_CPMU_PHY_STRAP_IS_SERDES;
1509 tp->phy_addr = TG3_PHY_MII_ADDR;
1513 if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1516 tp->mdio_bus = mdiobus_alloc();
1517 if (tp->mdio_bus == NULL)
1520 tp->mdio_bus->name = "tg3 mdio bus";
1521 snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1522 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1523 tp->mdio_bus->priv = tp;
1524 tp->mdio_bus->parent = &tp->pdev->dev;
1525 tp->mdio_bus->read = &tg3_mdio_read;
1526 tp->mdio_bus->write = &tg3_mdio_write;
1527 tp->mdio_bus->reset = &tg3_mdio_reset;
1528 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1529 tp->mdio_bus->irq = &tp->mdio_irq[0];
1531 for (i = 0; i < PHY_MAX_ADDR; i++)
1532 tp->mdio_bus->irq[i] = PHY_POLL;
1534 /* The bus registration will look for all the PHYs on the mdio bus.
1535 * Unfortunately, it does not ensure the PHY is powered up before
1536 * accessing the PHY ID registers. A chip reset is the
1537 * quickest way to bring the device back to an operational state..
1539 if (tg3_readphy(tp, MII_BMCR, ®) || (reg & BMCR_PDOWN))
1542 i = mdiobus_register(tp->mdio_bus);
1544 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1545 mdiobus_free(tp->mdio_bus);
1549 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1551 if (!phydev || !phydev->drv) {
1552 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1553 mdiobus_unregister(tp->mdio_bus);
1554 mdiobus_free(tp->mdio_bus);
1558 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1559 case PHY_ID_BCM57780:
1560 phydev->interface = PHY_INTERFACE_MODE_GMII;
1561 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1563 case PHY_ID_BCM50610:
1564 case PHY_ID_BCM50610M:
1565 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1566 PHY_BRCM_RX_REFCLK_UNUSED |
1567 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1568 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1569 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1570 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1571 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1572 phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1573 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1574 phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1576 case PHY_ID_RTL8211C:
1577 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1579 case PHY_ID_RTL8201E:
1580 case PHY_ID_BCMAC131:
1581 phydev->interface = PHY_INTERFACE_MODE_MII;
1582 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1587 tg3_flag_set(tp, MDIOBUS_INITED);
1589 if (tg3_asic_rev(tp) == ASIC_REV_5785)
1590 tg3_mdio_config_5785(tp);
1595 static void tg3_mdio_fini(struct tg3 *tp)
1597 if (tg3_flag(tp, MDIOBUS_INITED)) {
1598 tg3_flag_clear(tp, MDIOBUS_INITED);
1599 mdiobus_unregister(tp->mdio_bus);
1600 mdiobus_free(tp->mdio_bus);
1604 /* tp->lock is held. */
1605 static inline void tg3_generate_fw_event(struct tg3 *tp)
1609 val = tr32(GRC_RX_CPU_EVENT);
1610 val |= GRC_RX_CPU_DRIVER_EVENT;
1611 tw32_f(GRC_RX_CPU_EVENT, val);
1613 tp->last_event_jiffies = jiffies;
1616 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1618 /* tp->lock is held. */
1619 static void tg3_wait_for_event_ack(struct tg3 *tp)
1622 unsigned int delay_cnt;
1625 /* If enough time has passed, no wait is necessary. */
1626 time_remain = (long)(tp->last_event_jiffies + 1 +
1627 usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1629 if (time_remain < 0)
1632 /* Check if we can shorten the wait time. */
1633 delay_cnt = jiffies_to_usecs(time_remain);
1634 if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1635 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1636 delay_cnt = (delay_cnt >> 3) + 1;
1638 for (i = 0; i < delay_cnt; i++) {
1639 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1641 if (pci_channel_offline(tp->pdev))
1648 /* tp->lock is held. */
1649 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1654 if (!tg3_readphy(tp, MII_BMCR, ®))
1656 if (!tg3_readphy(tp, MII_BMSR, ®))
1657 val |= (reg & 0xffff);
1661 if (!tg3_readphy(tp, MII_ADVERTISE, ®))
1663 if (!tg3_readphy(tp, MII_LPA, ®))
1664 val |= (reg & 0xffff);
1668 if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1669 if (!tg3_readphy(tp, MII_CTRL1000, ®))
1671 if (!tg3_readphy(tp, MII_STAT1000, ®))
1672 val |= (reg & 0xffff);
1676 if (!tg3_readphy(tp, MII_PHYADDR, ®))
1683 /* tp->lock is held. */
1684 static void tg3_ump_link_report(struct tg3 *tp)
1688 if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1691 tg3_phy_gather_ump_data(tp, data);
1693 tg3_wait_for_event_ack(tp);
1695 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1696 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1697 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1698 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1699 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1700 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1702 tg3_generate_fw_event(tp);
1705 /* tp->lock is held. */
1706 static void tg3_stop_fw(struct tg3 *tp)
1708 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1709 /* Wait for RX cpu to ACK the previous event. */
1710 tg3_wait_for_event_ack(tp);
1712 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1714 tg3_generate_fw_event(tp);
1716 /* Wait for RX cpu to ACK this event. */
1717 tg3_wait_for_event_ack(tp);
1721 /* tp->lock is held. */
1722 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1724 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1725 NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1727 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1729 case RESET_KIND_INIT:
1730 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1734 case RESET_KIND_SHUTDOWN:
1735 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1739 case RESET_KIND_SUSPEND:
1740 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1749 if (kind == RESET_KIND_INIT ||
1750 kind == RESET_KIND_SUSPEND)
1751 tg3_ape_driver_state_change(tp, kind);
1754 /* tp->lock is held. */
1755 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1757 if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1759 case RESET_KIND_INIT:
1760 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1761 DRV_STATE_START_DONE);
1764 case RESET_KIND_SHUTDOWN:
1765 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766 DRV_STATE_UNLOAD_DONE);
1774 if (kind == RESET_KIND_SHUTDOWN)
1775 tg3_ape_driver_state_change(tp, kind);
1778 /* tp->lock is held. */
1779 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1781 if (tg3_flag(tp, ENABLE_ASF)) {
1783 case RESET_KIND_INIT:
1784 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1788 case RESET_KIND_SHUTDOWN:
1789 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1793 case RESET_KIND_SUSPEND:
1794 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1804 static int tg3_poll_fw(struct tg3 *tp)
1809 if (tg3_flag(tp, NO_FWARE_REPORTED))
1812 if (tg3_flag(tp, IS_SSB_CORE)) {
1813 /* We don't use firmware. */
1817 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1818 /* Wait up to 20ms for init done. */
1819 for (i = 0; i < 200; i++) {
1820 if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1822 if (pci_channel_offline(tp->pdev))
1830 /* Wait for firmware initialization to complete. */
1831 for (i = 0; i < 100000; i++) {
1832 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1833 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1835 if (pci_channel_offline(tp->pdev)) {
1836 if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1837 tg3_flag_set(tp, NO_FWARE_REPORTED);
1838 netdev_info(tp->dev, "No firmware running\n");
1847 /* Chip might not be fitted with firmware. Some Sun onboard
1848 * parts are configured like that. So don't signal the timeout
1849 * of the above loop as an error, but do report the lack of
1850 * running firmware once.
1852 if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1853 tg3_flag_set(tp, NO_FWARE_REPORTED);
1855 netdev_info(tp->dev, "No firmware running\n");
1858 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1859 /* The 57765 A0 needs a little more
1860 * time to do some important work.
1868 static void tg3_link_report(struct tg3 *tp)
1870 if (!netif_carrier_ok(tp->dev)) {
1871 netif_info(tp, link, tp->dev, "Link is down\n");
1872 tg3_ump_link_report(tp);
1873 } else if (netif_msg_link(tp)) {
1874 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1875 (tp->link_config.active_speed == SPEED_1000 ?
1877 (tp->link_config.active_speed == SPEED_100 ?
1879 (tp->link_config.active_duplex == DUPLEX_FULL ?
1882 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1883 (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1885 (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1888 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1889 netdev_info(tp->dev, "EEE is %s\n",
1890 tp->setlpicnt ? "enabled" : "disabled");
1892 tg3_ump_link_report(tp);
1895 tp->link_up = netif_carrier_ok(tp->dev);
1898 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1902 if (adv & ADVERTISE_PAUSE_CAP) {
1903 flowctrl |= FLOW_CTRL_RX;
1904 if (!(adv & ADVERTISE_PAUSE_ASYM))
1905 flowctrl |= FLOW_CTRL_TX;
1906 } else if (adv & ADVERTISE_PAUSE_ASYM)
1907 flowctrl |= FLOW_CTRL_TX;
1912 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1916 if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1917 miireg = ADVERTISE_1000XPAUSE;
1918 else if (flow_ctrl & FLOW_CTRL_TX)
1919 miireg = ADVERTISE_1000XPSE_ASYM;
1920 else if (flow_ctrl & FLOW_CTRL_RX)
1921 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1928 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1932 if (adv & ADVERTISE_1000XPAUSE) {
1933 flowctrl |= FLOW_CTRL_RX;
1934 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1935 flowctrl |= FLOW_CTRL_TX;
1936 } else if (adv & ADVERTISE_1000XPSE_ASYM)
1937 flowctrl |= FLOW_CTRL_TX;
1942 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1946 if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1947 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1948 } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1949 if (lcladv & ADVERTISE_1000XPAUSE)
1951 if (rmtadv & ADVERTISE_1000XPAUSE)
1958 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1962 u32 old_rx_mode = tp->rx_mode;
1963 u32 old_tx_mode = tp->tx_mode;
1965 if (tg3_flag(tp, USE_PHYLIB))
1966 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1968 autoneg = tp->link_config.autoneg;
1970 if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1971 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1972 flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1974 flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1976 flowctrl = tp->link_config.flowctrl;
1978 tp->link_config.active_flowctrl = flowctrl;
1980 if (flowctrl & FLOW_CTRL_RX)
1981 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1983 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1985 if (old_rx_mode != tp->rx_mode)
1986 tw32_f(MAC_RX_MODE, tp->rx_mode);
1988 if (flowctrl & FLOW_CTRL_TX)
1989 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1991 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1993 if (old_tx_mode != tp->tx_mode)
1994 tw32_f(MAC_TX_MODE, tp->tx_mode);
1997 static void tg3_adjust_link(struct net_device *dev)
1999 u8 oldflowctrl, linkmesg = 0;
2000 u32 mac_mode, lcl_adv, rmt_adv;
2001 struct tg3 *tp = netdev_priv(dev);
2002 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2004 spin_lock_bh(&tp->lock);
2006 mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2007 MAC_MODE_HALF_DUPLEX);
2009 oldflowctrl = tp->link_config.active_flowctrl;
2015 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2016 mac_mode |= MAC_MODE_PORT_MODE_MII;
2017 else if (phydev->speed == SPEED_1000 ||
2018 tg3_asic_rev(tp) != ASIC_REV_5785)
2019 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2021 mac_mode |= MAC_MODE_PORT_MODE_MII;
2023 if (phydev->duplex == DUPLEX_HALF)
2024 mac_mode |= MAC_MODE_HALF_DUPLEX;
2026 lcl_adv = mii_advertise_flowctrl(
2027 tp->link_config.flowctrl);
2030 rmt_adv = LPA_PAUSE_CAP;
2031 if (phydev->asym_pause)
2032 rmt_adv |= LPA_PAUSE_ASYM;
2035 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2037 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2039 if (mac_mode != tp->mac_mode) {
2040 tp->mac_mode = mac_mode;
2041 tw32_f(MAC_MODE, tp->mac_mode);
2045 if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2046 if (phydev->speed == SPEED_10)
2048 MAC_MI_STAT_10MBPS_MODE |
2049 MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2051 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2054 if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2055 tw32(MAC_TX_LENGTHS,
2056 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2057 (6 << TX_LENGTHS_IPG_SHIFT) |
2058 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2060 tw32(MAC_TX_LENGTHS,
2061 ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062 (6 << TX_LENGTHS_IPG_SHIFT) |
2063 (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2065 if (phydev->link != tp->old_link ||
2066 phydev->speed != tp->link_config.active_speed ||
2067 phydev->duplex != tp->link_config.active_duplex ||
2068 oldflowctrl != tp->link_config.active_flowctrl)
2071 tp->old_link = phydev->link;
2072 tp->link_config.active_speed = phydev->speed;
2073 tp->link_config.active_duplex = phydev->duplex;
2075 spin_unlock_bh(&tp->lock);
2078 tg3_link_report(tp);
2081 static int tg3_phy_init(struct tg3 *tp)
2083 struct phy_device *phydev;
2085 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2088 /* Bring the PHY back to a known state. */
2091 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2093 /* Attach the MAC to the PHY. */
2094 phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2095 tg3_adjust_link, phydev->interface);
2096 if (IS_ERR(phydev)) {
2097 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2098 return PTR_ERR(phydev);
2101 /* Mask with MAC supported features. */
2102 switch (phydev->interface) {
2103 case PHY_INTERFACE_MODE_GMII:
2104 case PHY_INTERFACE_MODE_RGMII:
2105 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2106 phydev->supported &= (PHY_GBIT_FEATURES |
2108 SUPPORTED_Asym_Pause);
2112 case PHY_INTERFACE_MODE_MII:
2113 phydev->supported &= (PHY_BASIC_FEATURES |
2115 SUPPORTED_Asym_Pause);
2118 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2122 tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2124 phydev->advertising = phydev->supported;
2129 static void tg3_phy_start(struct tg3 *tp)
2131 struct phy_device *phydev;
2133 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2136 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140 phydev->speed = tp->link_config.speed;
2141 phydev->duplex = tp->link_config.duplex;
2142 phydev->autoneg = tp->link_config.autoneg;
2143 phydev->advertising = tp->link_config.advertising;
2148 phy_start_aneg(phydev);
2151 static void tg3_phy_stop(struct tg3 *tp)
2153 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2156 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2159 static void tg3_phy_fini(struct tg3 *tp)
2161 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2163 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2172 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2175 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176 /* Cannot do read-modify-write on 5401 */
2177 err = tg3_phy_auxctl_write(tp,
2178 MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179 MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2184 err = tg3_phy_auxctl_read(tp,
2185 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2189 val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190 err = tg3_phy_auxctl_write(tp,
2191 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2201 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2204 tg3_writephy(tp, MII_TG3_FET_TEST,
2205 phytest | MII_TG3_FET_SHADOW_EN);
2206 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2208 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2210 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211 tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2213 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2221 if (!tg3_flag(tp, 5705_PLUS) ||
2222 (tg3_flag(tp, 5717_PLUS) &&
2223 (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2226 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227 tg3_phy_fet_toggle_apd(tp, enable);
2231 reg = MII_TG3_MISC_SHDW_WREN |
2232 MII_TG3_MISC_SHDW_SCR5_SEL |
2233 MII_TG3_MISC_SHDW_SCR5_LPED |
2234 MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235 MII_TG3_MISC_SHDW_SCR5_SDTL |
2236 MII_TG3_MISC_SHDW_SCR5_C125OE;
2237 if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2240 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2243 reg = MII_TG3_MISC_SHDW_WREN |
2244 MII_TG3_MISC_SHDW_APD_SEL |
2245 MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2247 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2249 tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2252 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2256 if (!tg3_flag(tp, 5705_PLUS) ||
2257 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2260 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2263 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2264 u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2266 tg3_writephy(tp, MII_TG3_FET_TEST,
2267 ephy | MII_TG3_FET_SHADOW_EN);
2268 if (!tg3_readphy(tp, reg, &phy)) {
2270 phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2272 phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273 tg3_writephy(tp, reg, phy);
2275 tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2280 ret = tg3_phy_auxctl_read(tp,
2281 MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2284 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2286 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287 tg3_phy_auxctl_write(tp,
2288 MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2293 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2298 if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2301 ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2303 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2304 val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2307 static void tg3_phy_apply_otp(struct tg3 *tp)
2316 if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2319 phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2320 phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2321 tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2323 phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2324 ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2325 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2327 phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2328 phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2329 tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2331 phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2332 tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2334 phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2335 tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2337 phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2338 ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2339 tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2341 tg3_phy_toggle_auxctl_smdsp(tp, false);
2344 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2348 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2353 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2355 tp->link_config.active_duplex == DUPLEX_FULL &&
2356 (tp->link_config.active_speed == SPEED_100 ||
2357 tp->link_config.active_speed == SPEED_1000)) {
2360 if (tp->link_config.active_speed == SPEED_1000)
2361 eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2363 eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2365 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2367 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2368 TG3_CL45_D7_EEERES_STAT, &val);
2370 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2371 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2375 if (!tp->setlpicnt) {
2376 if (current_link_up &&
2377 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2378 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2379 tg3_phy_toggle_auxctl_smdsp(tp, false);
2382 val = tr32(TG3_CPMU_EEE_MODE);
2383 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2387 static void tg3_phy_eee_enable(struct tg3 *tp)
2391 if (tp->link_config.active_speed == SPEED_1000 &&
2392 (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2393 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2394 tg3_flag(tp, 57765_CLASS)) &&
2395 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2396 val = MII_TG3_DSP_TAP26_ALNOKO |
2397 MII_TG3_DSP_TAP26_RMRXSTO;
2398 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2399 tg3_phy_toggle_auxctl_smdsp(tp, false);
2402 val = tr32(TG3_CPMU_EEE_MODE);
2403 tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2406 static int tg3_wait_macro_done(struct tg3 *tp)
2413 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2414 if ((tmp32 & 0x1000) == 0)
2424 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2426 static const u32 test_pat[4][6] = {
2427 { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2428 { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2429 { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2430 { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2434 for (chan = 0; chan < 4; chan++) {
2437 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2438 (chan * 0x2000) | 0x0200);
2439 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2441 for (i = 0; i < 6; i++)
2442 tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2445 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2446 if (tg3_wait_macro_done(tp)) {
2451 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2452 (chan * 0x2000) | 0x0200);
2453 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2454 if (tg3_wait_macro_done(tp)) {
2459 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2460 if (tg3_wait_macro_done(tp)) {
2465 for (i = 0; i < 6; i += 2) {
2468 if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2469 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2470 tg3_wait_macro_done(tp)) {
2476 if (low != test_pat[chan][i] ||
2477 high != test_pat[chan][i+1]) {
2478 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2479 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2480 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2490 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2494 for (chan = 0; chan < 4; chan++) {
2497 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2498 (chan * 0x2000) | 0x0200);
2499 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2500 for (i = 0; i < 6; i++)
2501 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2502 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2503 if (tg3_wait_macro_done(tp))
2510 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2512 u32 reg32, phy9_orig;
2513 int retries, do_phy_reset, err;
2519 err = tg3_bmcr_reset(tp);
2525 /* Disable transmitter and interrupt. */
2526 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32))
2530 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2532 /* Set full-duplex, 1000 mbps. */
2533 tg3_writephy(tp, MII_BMCR,
2534 BMCR_FULLDPLX | BMCR_SPEED1000);
2536 /* Set to master mode. */
2537 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2540 tg3_writephy(tp, MII_CTRL1000,
2541 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2543 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2547 /* Block the PHY control access. */
2548 tg3_phydsp_write(tp, 0x8005, 0x0800);
2550 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2553 } while (--retries);
2555 err = tg3_phy_reset_chanpat(tp);
2559 tg3_phydsp_write(tp, 0x8005, 0x0000);
2561 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2562 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2564 tg3_phy_toggle_auxctl_smdsp(tp, false);
2566 tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2568 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, ®32)) {
2570 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2577 static void tg3_carrier_off(struct tg3 *tp)
2579 netif_carrier_off(tp->dev);
2580 tp->link_up = false;
2583 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2585 if (tg3_flag(tp, ENABLE_ASF))
2586 netdev_warn(tp->dev,
2587 "Management side-band traffic will be interrupted during phy settings change\n");
2590 /* This will reset the tigon3 PHY if there is no valid
2591 * link unless the FORCE argument is non-zero.
2593 static int tg3_phy_reset(struct tg3 *tp)
2598 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2599 val = tr32(GRC_MISC_CFG);
2600 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2603 err = tg3_readphy(tp, MII_BMSR, &val);
2604 err |= tg3_readphy(tp, MII_BMSR, &val);
2608 if (netif_running(tp->dev) && tp->link_up) {
2609 netif_carrier_off(tp->dev);
2610 tg3_link_report(tp);
2613 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2614 tg3_asic_rev(tp) == ASIC_REV_5704 ||
2615 tg3_asic_rev(tp) == ASIC_REV_5705) {
2616 err = tg3_phy_reset_5703_4_5(tp);
2623 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2624 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2625 cpmuctrl = tr32(TG3_CPMU_CTRL);
2626 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2628 cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2631 err = tg3_bmcr_reset(tp);
2635 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2636 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2637 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2639 tw32(TG3_CPMU_CTRL, cpmuctrl);
2642 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2643 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2644 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2645 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2646 CPMU_LSPD_1000MB_MACCLK_12_5) {
2647 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2649 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2653 if (tg3_flag(tp, 5717_PLUS) &&
2654 (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2657 tg3_phy_apply_otp(tp);
2659 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2660 tg3_phy_toggle_apd(tp, true);
2662 tg3_phy_toggle_apd(tp, false);
2665 if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2666 !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2667 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2668 tg3_phydsp_write(tp, 0x000a, 0x0323);
2669 tg3_phy_toggle_auxctl_smdsp(tp, false);
2672 if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2673 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2674 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2677 if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2678 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2679 tg3_phydsp_write(tp, 0x000a, 0x310b);
2680 tg3_phydsp_write(tp, 0x201f, 0x9506);
2681 tg3_phydsp_write(tp, 0x401f, 0x14e2);
2682 tg3_phy_toggle_auxctl_smdsp(tp, false);
2684 } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2685 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2686 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2687 if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2688 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2689 tg3_writephy(tp, MII_TG3_TEST1,
2690 MII_TG3_TEST1_TRIM_EN | 0x4);
2692 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2694 tg3_phy_toggle_auxctl_smdsp(tp, false);
2698 /* Set Extended packet length bit (bit 14) on all chips that */
2699 /* support jumbo frames */
2700 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2701 /* Cannot do read-modify-write on 5401 */
2702 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2703 } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2704 /* Set bit 14 with read-modify-write to preserve other bits */
2705 err = tg3_phy_auxctl_read(tp,
2706 MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2708 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2709 val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2712 /* Set phy register 0x10 bit 0 to high fifo elasticity to support
2713 * jumbo frames transmission.
2715 if (tg3_flag(tp, JUMBO_CAPABLE)) {
2716 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2717 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2718 val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2721 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2722 /* adjust output voltage */
2723 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2726 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2727 tg3_phydsp_write(tp, 0xffb, 0x4000);
2729 tg3_phy_toggle_automdix(tp, true);
2730 tg3_phy_set_wirespeed(tp);
2734 #define TG3_GPIO_MSG_DRVR_PRES 0x00000001
2735 #define TG3_GPIO_MSG_NEED_VAUX 0x00000002
2736 #define TG3_GPIO_MSG_MASK (TG3_GPIO_MSG_DRVR_PRES | \
2737 TG3_GPIO_MSG_NEED_VAUX)
2738 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2739 ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2740 (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2741 (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2742 (TG3_GPIO_MSG_DRVR_PRES << 12))
2744 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2745 ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2746 (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2747 (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2748 (TG3_GPIO_MSG_NEED_VAUX << 12))
2750 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2754 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2755 tg3_asic_rev(tp) == ASIC_REV_5719)
2756 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2758 status = tr32(TG3_CPMU_DRV_STATUS);
2760 shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2761 status &= ~(TG3_GPIO_MSG_MASK << shift);
2762 status |= (newstat << shift);
2764 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2765 tg3_asic_rev(tp) == ASIC_REV_5719)
2766 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2768 tw32(TG3_CPMU_DRV_STATUS, status);
2770 return status >> TG3_APE_GPIO_MSG_SHIFT;
2773 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2775 if (!tg3_flag(tp, IS_NIC))
2778 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2779 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2780 tg3_asic_rev(tp) == ASIC_REV_5720) {
2781 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2784 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2786 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2787 TG3_GRC_LCLCTL_PWRSW_DELAY);
2789 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2791 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2792 TG3_GRC_LCLCTL_PWRSW_DELAY);
2798 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2802 if (!tg3_flag(tp, IS_NIC) ||
2803 tg3_asic_rev(tp) == ASIC_REV_5700 ||
2804 tg3_asic_rev(tp) == ASIC_REV_5701)
2807 grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2809 tw32_wait_f(GRC_LOCAL_CTRL,
2810 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2811 TG3_GRC_LCLCTL_PWRSW_DELAY);
2813 tw32_wait_f(GRC_LOCAL_CTRL,
2815 TG3_GRC_LCLCTL_PWRSW_DELAY);
2817 tw32_wait_f(GRC_LOCAL_CTRL,
2818 grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2819 TG3_GRC_LCLCTL_PWRSW_DELAY);
2822 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2824 if (!tg3_flag(tp, IS_NIC))
2827 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2828 tg3_asic_rev(tp) == ASIC_REV_5701) {
2829 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2830 (GRC_LCLCTRL_GPIO_OE0 |
2831 GRC_LCLCTRL_GPIO_OE1 |
2832 GRC_LCLCTRL_GPIO_OE2 |
2833 GRC_LCLCTRL_GPIO_OUTPUT0 |
2834 GRC_LCLCTRL_GPIO_OUTPUT1),
2835 TG3_GRC_LCLCTL_PWRSW_DELAY);
2836 } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2837 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2838 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2839 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2840 GRC_LCLCTRL_GPIO_OE1 |
2841 GRC_LCLCTRL_GPIO_OE2 |
2842 GRC_LCLCTRL_GPIO_OUTPUT0 |
2843 GRC_LCLCTRL_GPIO_OUTPUT1 |
2845 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2846 TG3_GRC_LCLCTL_PWRSW_DELAY);
2848 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2849 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2850 TG3_GRC_LCLCTL_PWRSW_DELAY);
2852 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2853 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2854 TG3_GRC_LCLCTL_PWRSW_DELAY);
2857 u32 grc_local_ctrl = 0;
2859 /* Workaround to prevent overdrawing Amps. */
2860 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2861 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2862 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2864 TG3_GRC_LCLCTL_PWRSW_DELAY);
2867 /* On 5753 and variants, GPIO2 cannot be used. */
2868 no_gpio2 = tp->nic_sram_data_cfg &
2869 NIC_SRAM_DATA_CFG_NO_GPIO2;
2871 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2872 GRC_LCLCTRL_GPIO_OE1 |
2873 GRC_LCLCTRL_GPIO_OE2 |
2874 GRC_LCLCTRL_GPIO_OUTPUT1 |
2875 GRC_LCLCTRL_GPIO_OUTPUT2;
2877 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2878 GRC_LCLCTRL_GPIO_OUTPUT2);
2880 tw32_wait_f(GRC_LOCAL_CTRL,
2881 tp->grc_local_ctrl | grc_local_ctrl,
2882 TG3_GRC_LCLCTL_PWRSW_DELAY);
2884 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2886 tw32_wait_f(GRC_LOCAL_CTRL,
2887 tp->grc_local_ctrl | grc_local_ctrl,
2888 TG3_GRC_LCLCTL_PWRSW_DELAY);
2891 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2892 tw32_wait_f(GRC_LOCAL_CTRL,
2893 tp->grc_local_ctrl | grc_local_ctrl,
2894 TG3_GRC_LCLCTL_PWRSW_DELAY);
2899 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2903 /* Serialize power state transitions */
2904 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2907 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2908 msg = TG3_GPIO_MSG_NEED_VAUX;
2910 msg = tg3_set_function_status(tp, msg);
2912 if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2915 if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2916 tg3_pwrsrc_switch_to_vaux(tp);
2918 tg3_pwrsrc_die_with_vmain(tp);
2921 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2924 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2926 bool need_vaux = false;
2928 /* The GPIOs do something completely different on 57765. */
2929 if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2932 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2933 tg3_asic_rev(tp) == ASIC_REV_5719 ||
2934 tg3_asic_rev(tp) == ASIC_REV_5720) {
2935 tg3_frob_aux_power_5717(tp, include_wol ?
2936 tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2940 if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2941 struct net_device *dev_peer;
2943 dev_peer = pci_get_drvdata(tp->pdev_peer);
2945 /* remove_one() may have been run on the peer. */
2947 struct tg3 *tp_peer = netdev_priv(dev_peer);
2949 if (tg3_flag(tp_peer, INIT_COMPLETE))
2952 if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2953 tg3_flag(tp_peer, ENABLE_ASF))
2958 if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2959 tg3_flag(tp, ENABLE_ASF))
2963 tg3_pwrsrc_switch_to_vaux(tp);
2965 tg3_pwrsrc_die_with_vmain(tp);
2968 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2970 if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2972 else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2973 if (speed != SPEED_10)
2975 } else if (speed == SPEED_10)
2981 static bool tg3_phy_power_bug(struct tg3 *tp)
2983 switch (tg3_asic_rev(tp)) {
2988 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2997 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
3006 static bool tg3_phy_led_bug(struct tg3 *tp)
3008 switch (tg3_asic_rev(tp)) {
3010 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3019 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3023 if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3026 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3027 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3028 u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3029 u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3032 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3033 tw32(SG_DIG_CTRL, sg_dig_ctrl);
3034 tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3039 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3041 val = tr32(GRC_MISC_CFG);
3042 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3045 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3047 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3050 tg3_writephy(tp, MII_ADVERTISE, 0);
3051 tg3_writephy(tp, MII_BMCR,
3052 BMCR_ANENABLE | BMCR_ANRESTART);
3054 tg3_writephy(tp, MII_TG3_FET_TEST,
3055 phytest | MII_TG3_FET_SHADOW_EN);
3056 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3057 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3059 MII_TG3_FET_SHDW_AUXMODE4,
3062 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3065 } else if (do_low_power) {
3066 if (!tg3_phy_led_bug(tp))
3067 tg3_writephy(tp, MII_TG3_EXT_CTRL,
3068 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3070 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3071 MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3072 MII_TG3_AUXCTL_PCTL_VREG_11V;
3073 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3076 /* The PHY should not be powered down on some chips because
3079 if (tg3_phy_power_bug(tp))
3082 if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3083 tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3084 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3085 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3086 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3087 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3090 tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3093 /* tp->lock is held. */
3094 static int tg3_nvram_lock(struct tg3 *tp)
3096 if (tg3_flag(tp, NVRAM)) {
3099 if (tp->nvram_lock_cnt == 0) {
3100 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3101 for (i = 0; i < 8000; i++) {
3102 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3107 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3111 tp->nvram_lock_cnt++;
3116 /* tp->lock is held. */
3117 static void tg3_nvram_unlock(struct tg3 *tp)
3119 if (tg3_flag(tp, NVRAM)) {
3120 if (tp->nvram_lock_cnt > 0)
3121 tp->nvram_lock_cnt--;
3122 if (tp->nvram_lock_cnt == 0)
3123 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3127 /* tp->lock is held. */
3128 static void tg3_enable_nvram_access(struct tg3 *tp)
3130 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3131 u32 nvaccess = tr32(NVRAM_ACCESS);
3133 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3137 /* tp->lock is held. */
3138 static void tg3_disable_nvram_access(struct tg3 *tp)
3140 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3141 u32 nvaccess = tr32(NVRAM_ACCESS);
3143 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3147 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3148 u32 offset, u32 *val)
3153 if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3156 tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3157 EEPROM_ADDR_DEVID_MASK |
3159 tw32(GRC_EEPROM_ADDR,
3161 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3162 ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3163 EEPROM_ADDR_ADDR_MASK) |
3164 EEPROM_ADDR_READ | EEPROM_ADDR_START);
3166 for (i = 0; i < 1000; i++) {
3167 tmp = tr32(GRC_EEPROM_ADDR);
3169 if (tmp & EEPROM_ADDR_COMPLETE)
3173 if (!(tmp & EEPROM_ADDR_COMPLETE))
3176 tmp = tr32(GRC_EEPROM_DATA);
3179 * The data will always be opposite the native endian
3180 * format. Perform a blind byteswap to compensate.
3187 #define NVRAM_CMD_TIMEOUT 10000
3189 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3193 tw32(NVRAM_CMD, nvram_cmd);
3194 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3196 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3202 if (i == NVRAM_CMD_TIMEOUT)
3208 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3210 if (tg3_flag(tp, NVRAM) &&
3211 tg3_flag(tp, NVRAM_BUFFERED) &&
3212 tg3_flag(tp, FLASH) &&
3213 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3214 (tp->nvram_jedecnum == JEDEC_ATMEL))
3216 addr = ((addr / tp->nvram_pagesize) <<
3217 ATMEL_AT45DB0X1B_PAGE_POS) +
3218 (addr % tp->nvram_pagesize);
3223 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3225 if (tg3_flag(tp, NVRAM) &&
3226 tg3_flag(tp, NVRAM_BUFFERED) &&
3227 tg3_flag(tp, FLASH) &&
3228 !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3229 (tp->nvram_jedecnum == JEDEC_ATMEL))
3231 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3232 tp->nvram_pagesize) +
3233 (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3238 /* NOTE: Data read in from NVRAM is byteswapped according to
3239 * the byteswapping settings for all other register accesses.
3240 * tg3 devices are BE devices, so on a BE machine, the data
3241 * returned will be exactly as it is seen in NVRAM. On a LE
3242 * machine, the 32-bit value will be byteswapped.
3244 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3248 if (!tg3_flag(tp, NVRAM))
3249 return tg3_nvram_read_using_eeprom(tp, offset, val);
3251 offset = tg3_nvram_phys_addr(tp, offset);
3253 if (offset > NVRAM_ADDR_MSK)
3256 ret = tg3_nvram_lock(tp);
3260 tg3_enable_nvram_access(tp);
3262 tw32(NVRAM_ADDR, offset);
3263 ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3264 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3267 *val = tr32(NVRAM_RDDATA);
3269 tg3_disable_nvram_access(tp);
3271 tg3_nvram_unlock(tp);
3276 /* Ensures NVRAM data is in bytestream format. */
3277 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3280 int res = tg3_nvram_read(tp, offset, &v);
3282 *val = cpu_to_be32(v);
3286 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3287 u32 offset, u32 len, u8 *buf)
3292 for (i = 0; i < len; i += 4) {
3298 memcpy(&data, buf + i, 4);
3301 * The SEEPROM interface expects the data to always be opposite
3302 * the native endian format. We accomplish this by reversing
3303 * all the operations that would have been performed on the
3304 * data from a call to tg3_nvram_read_be32().
3306 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3308 val = tr32(GRC_EEPROM_ADDR);
3309 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3311 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3313 tw32(GRC_EEPROM_ADDR, val |
3314 (0 << EEPROM_ADDR_DEVID_SHIFT) |
3315 (addr & EEPROM_ADDR_ADDR_MASK) |
3319 for (j = 0; j < 1000; j++) {
3320 val = tr32(GRC_EEPROM_ADDR);
3322 if (val & EEPROM_ADDR_COMPLETE)
3326 if (!(val & EEPROM_ADDR_COMPLETE)) {
3335 /* offset and length are dword aligned */
3336 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3340 u32 pagesize = tp->nvram_pagesize;
3341 u32 pagemask = pagesize - 1;
3345 tmp = kmalloc(pagesize, GFP_KERNEL);
3351 u32 phy_addr, page_off, size;
3353 phy_addr = offset & ~pagemask;
3355 for (j = 0; j < pagesize; j += 4) {
3356 ret = tg3_nvram_read_be32(tp, phy_addr + j,
3357 (__be32 *) (tmp + j));
3364 page_off = offset & pagemask;
3371 memcpy(tmp + page_off, buf, size);
3373 offset = offset + (pagesize - page_off);
3375 tg3_enable_nvram_access(tp);
3378 * Before we can erase the flash page, we need
3379 * to issue a special "write enable" command.
3381 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3383 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3386 /* Erase the target page */
3387 tw32(NVRAM_ADDR, phy_addr);
3389 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3390 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3392 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3395 /* Issue another write enable to start the write. */
3396 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3398 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3401 for (j = 0; j < pagesize; j += 4) {
3404 data = *((__be32 *) (tmp + j));
3406 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3408 tw32(NVRAM_ADDR, phy_addr + j);
3410 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3414 nvram_cmd |= NVRAM_CMD_FIRST;
3415 else if (j == (pagesize - 4))
3416 nvram_cmd |= NVRAM_CMD_LAST;
3418 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3426 nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3427 tg3_nvram_exec_cmd(tp, nvram_cmd);
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3440 for (i = 0; i < len; i += 4, offset += 4) {
3441 u32 page_off, phy_addr, nvram_cmd;
3444 memcpy(&data, buf + i, 4);
3445 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3447 page_off = offset % tp->nvram_pagesize;
3449 phy_addr = tg3_nvram_phys_addr(tp, offset);
3451 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3453 if (page_off == 0 || i == 0)
3454 nvram_cmd |= NVRAM_CMD_FIRST;
3455 if (page_off == (tp->nvram_pagesize - 4))
3456 nvram_cmd |= NVRAM_CMD_LAST;
3459 nvram_cmd |= NVRAM_CMD_LAST;
3461 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3462 !tg3_flag(tp, FLASH) ||
3463 !tg3_flag(tp, 57765_PLUS))
3464 tw32(NVRAM_ADDR, phy_addr);
3466 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3467 !tg3_flag(tp, 5755_PLUS) &&
3468 (tp->nvram_jedecnum == JEDEC_ST) &&
3469 (nvram_cmd & NVRAM_CMD_FIRST)) {
3472 cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3473 ret = tg3_nvram_exec_cmd(tp, cmd);
3477 if (!tg3_flag(tp, FLASH)) {
3478 /* We always do complete word writes to eeprom. */
3479 nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3482 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3489 /* offset and length are dword aligned */
3490 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3494 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3495 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3496 ~GRC_LCLCTRL_GPIO_OUTPUT1);
3500 if (!tg3_flag(tp, NVRAM)) {
3501 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3505 ret = tg3_nvram_lock(tp);
3509 tg3_enable_nvram_access(tp);
3510 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3511 tw32(NVRAM_WRITE1, 0x406);
3513 grc_mode = tr32(GRC_MODE);
3514 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3516 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3517 ret = tg3_nvram_write_block_buffered(tp, offset, len,
3520 ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3524 grc_mode = tr32(GRC_MODE);
3525 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3527 tg3_disable_nvram_access(tp);
3528 tg3_nvram_unlock(tp);
3531 if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3539 #define RX_CPU_SCRATCH_BASE 0x30000
3540 #define RX_CPU_SCRATCH_SIZE 0x04000
3541 #define TX_CPU_SCRATCH_BASE 0x34000
3542 #define TX_CPU_SCRATCH_SIZE 0x04000
3544 /* tp->lock is held. */
3545 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3548 const int iters = 10000;
3550 for (i = 0; i < iters; i++) {
3551 tw32(cpu_base + CPU_STATE, 0xffffffff);
3552 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3553 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3555 if (pci_channel_offline(tp->pdev))
3559 return (i == iters) ? -EBUSY : 0;
3562 /* tp->lock is held. */
3563 static int tg3_rxcpu_pause(struct tg3 *tp)
3565 int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3567 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3568 tw32_f(RX_CPU_BASE + CPU_MODE, CPU_MODE_HALT);
3574 /* tp->lock is held. */
3575 static int tg3_txcpu_pause(struct tg3 *tp)
3577 return tg3_pause_cpu(tp, TX_CPU_BASE);
3580 /* tp->lock is held. */
3581 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3583 tw32(cpu_base + CPU_STATE, 0xffffffff);
3584 tw32_f(cpu_base + CPU_MODE, 0x00000000);
3587 /* tp->lock is held. */
3588 static void tg3_rxcpu_resume(struct tg3 *tp)
3590 tg3_resume_cpu(tp, RX_CPU_BASE);
3593 /* tp->lock is held. */
3594 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3598 BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3600 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3601 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3603 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3606 if (cpu_base == RX_CPU_BASE) {
3607 rc = tg3_rxcpu_pause(tp);
3610 * There is only an Rx CPU for the 5750 derivative in the
3613 if (tg3_flag(tp, IS_SSB_CORE))
3616 rc = tg3_txcpu_pause(tp);
3620 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3621 __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3625 /* Clear firmware's nvram arbitration. */
3626 if (tg3_flag(tp, NVRAM))
3627 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3631 static int tg3_fw_data_len(struct tg3 *tp,
3632 const struct tg3_firmware_hdr *fw_hdr)
3636 /* Non fragmented firmware have one firmware header followed by a
3637 * contiguous chunk of data to be written. The length field in that
3638 * header is not the length of data to be written but the complete
3639 * length of the bss. The data length is determined based on
3640 * tp->fw->size minus headers.
3642 * Fragmented firmware have a main header followed by multiple
3643 * fragments. Each fragment is identical to non fragmented firmware
3644 * with a firmware header followed by a contiguous chunk of data. In
3645 * the main header, the length field is unused and set to 0xffffffff.
3646 * In each fragment header the length is the entire size of that
3647 * fragment i.e. fragment data + header length. Data length is
3648 * therefore length field in the header minus TG3_FW_HDR_LEN.
3650 if (tp->fw_len == 0xffffffff)
3651 fw_len = be32_to_cpu(fw_hdr->len);
3653 fw_len = tp->fw->size;
3655 return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3658 /* tp->lock is held. */
3659 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3660 u32 cpu_scratch_base, int cpu_scratch_size,
3661 const struct tg3_firmware_hdr *fw_hdr)
3664 void (*write_op)(struct tg3 *, u32, u32);
3665 int total_len = tp->fw->size;
3667 if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3669 "%s: Trying to load TX cpu firmware which is 5705\n",
3674 if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3675 write_op = tg3_write_mem;
3677 write_op = tg3_write_indirect_reg32;
3679 if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3680 /* It is possible that bootcode is still loading at this point.
3681 * Get the nvram lock first before halting the cpu.
3683 int lock_err = tg3_nvram_lock(tp);
3684 err = tg3_halt_cpu(tp, cpu_base);
3686 tg3_nvram_unlock(tp);
3690 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3691 write_op(tp, cpu_scratch_base + i, 0);
3692 tw32(cpu_base + CPU_STATE, 0xffffffff);
3693 tw32(cpu_base + CPU_MODE,
3694 tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3696 /* Subtract additional main header for fragmented firmware and
3697 * advance to the first fragment
3699 total_len -= TG3_FW_HDR_LEN;
3704 u32 *fw_data = (u32 *)(fw_hdr + 1);
3705 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3706 write_op(tp, cpu_scratch_base +
3707 (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3709 be32_to_cpu(fw_data[i]));
3711 total_len -= be32_to_cpu(fw_hdr->len);
3713 /* Advance to next fragment */
3714 fw_hdr = (struct tg3_firmware_hdr *)
3715 ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3716 } while (total_len > 0);
3724 /* tp->lock is held. */
3725 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3728 const int iters = 5;
3730 tw32(cpu_base + CPU_STATE, 0xffffffff);
3731 tw32_f(cpu_base + CPU_PC, pc);
3733 for (i = 0; i < iters; i++) {
3734 if (tr32(cpu_base + CPU_PC) == pc)
3736 tw32(cpu_base + CPU_STATE, 0xffffffff);
3737 tw32(cpu_base + CPU_MODE, CPU_MODE_HALT);
3738 tw32_f(cpu_base + CPU_PC, pc);
3742 return (i == iters) ? -EBUSY : 0;
3745 /* tp->lock is held. */
3746 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3748 const struct tg3_firmware_hdr *fw_hdr;
3751 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3753 /* Firmware blob starts with version numbers, followed by
3754 start address and length. We are setting complete length.
3755 length = end_address_of_bss - start_address_of_text.
3756 Remainder is the blob to be loaded contiguously
3757 from start address. */
3759 err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3760 RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3765 err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3766 TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3771 /* Now startup only the RX cpu. */
3772 err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3773 be32_to_cpu(fw_hdr->base_addr));
3775 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3776 "should be %08x\n", __func__,
3777 tr32(RX_CPU_BASE + CPU_PC),
3778 be32_to_cpu(fw_hdr->base_addr));
3782 tg3_rxcpu_resume(tp);
3787 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3789 const int iters = 1000;
3793 /* Wait for boot code to complete initialization and enter service
3794 * loop. It is then safe to download service patches
3796 for (i = 0; i < iters; i++) {
3797 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3804 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3808 val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3810 netdev_warn(tp->dev,
3811 "Other patches exist. Not downloading EEE patch\n");
3818 /* tp->lock is held. */
3819 static void tg3_load_57766_firmware(struct tg3 *tp)
3821 struct tg3_firmware_hdr *fw_hdr;
3823 if (!tg3_flag(tp, NO_NVRAM))
3826 if (tg3_validate_rxcpu_state(tp))
3832 /* This firmware blob has a different format than older firmware
3833 * releases as given below. The main difference is we have fragmented
3834 * data to be written to non-contiguous locations.
3836 * In the beginning we have a firmware header identical to other
3837 * firmware which consists of version, base addr and length. The length
3838 * here is unused and set to 0xffffffff.
3840 * This is followed by a series of firmware fragments which are
3841 * individually identical to previous firmware. i.e. they have the
3842 * firmware header and followed by data for that fragment. The version
3843 * field of the individual fragment header is unused.
3846 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3847 if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3850 if (tg3_rxcpu_pause(tp))
3853 /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3854 tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3856 tg3_rxcpu_resume(tp);
3859 /* tp->lock is held. */
3860 static int tg3_load_tso_firmware(struct tg3 *tp)
3862 const struct tg3_firmware_hdr *fw_hdr;
3863 unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3866 if (!tg3_flag(tp, FW_TSO))
3869 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3871 /* Firmware blob starts with version numbers, followed by
3872 start address and length. We are setting complete length.
3873 length = end_address_of_bss - start_address_of_text.
3874 Remainder is the blob to be loaded contiguously
3875 from start address. */
3877 cpu_scratch_size = tp->fw_len;
3879 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3880 cpu_base = RX_CPU_BASE;
3881 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3883 cpu_base = TX_CPU_BASE;
3884 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3885 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3888 err = tg3_load_firmware_cpu(tp, cpu_base,
3889 cpu_scratch_base, cpu_scratch_size,
3894 /* Now startup the cpu. */
3895 err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3896 be32_to_cpu(fw_hdr->base_addr));
3899 "%s fails to set CPU PC, is %08x should be %08x\n",
3900 __func__, tr32(cpu_base + CPU_PC),
3901 be32_to_cpu(fw_hdr->base_addr));
3905 tg3_resume_cpu(tp, cpu_base);
3910 /* tp->lock is held. */
3911 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3913 u32 addr_high, addr_low;
3916 addr_high = ((tp->dev->dev_addr[0] << 8) |
3917 tp->dev->dev_addr[1]);
3918 addr_low = ((tp->dev->dev_addr[2] << 24) |
3919 (tp->dev->dev_addr[3] << 16) |
3920 (tp->dev->dev_addr[4] << 8) |
3921 (tp->dev->dev_addr[5] << 0));
3922 for (i = 0; i < 4; i++) {
3923 if (i == 1 && skip_mac_1)
3925 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3926 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3929 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3930 tg3_asic_rev(tp) == ASIC_REV_5704) {
3931 for (i = 0; i < 12; i++) {
3932 tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3933 tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3937 addr_high = (tp->dev->dev_addr[0] +
3938 tp->dev->dev_addr[1] +
3939 tp->dev->dev_addr[2] +
3940 tp->dev->dev_addr[3] +
3941 tp->dev->dev_addr[4] +
3942 tp->dev->dev_addr[5]) &
3943 TX_BACKOFF_SEED_MASK;
3944 tw32(MAC_TX_BACKOFF_SEED, addr_high);
3947 static void tg3_enable_register_access(struct tg3 *tp)
3950 * Make sure register accesses (indirect or otherwise) will function
3953 pci_write_config_dword(tp->pdev,
3954 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3957 static int tg3_power_up(struct tg3 *tp)
3961 tg3_enable_register_access(tp);
3963 err = pci_set_power_state(tp->pdev, PCI_D0);
3965 /* Switch out of Vaux if it is a NIC */
3966 tg3_pwrsrc_switch_to_vmain(tp);
3968 netdev_err(tp->dev, "Transition to D0 failed\n");
3974 static int tg3_setup_phy(struct tg3 *, bool);
3976 static int tg3_power_down_prepare(struct tg3 *tp)
3979 bool device_should_wake, do_low_power;
3981 tg3_enable_register_access(tp);
3983 /* Restore the CLKREQ setting. */
3984 if (tg3_flag(tp, CLKREQ_BUG))
3985 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3986 PCI_EXP_LNKCTL_CLKREQ_EN);
3988 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3989 tw32(TG3PCI_MISC_HOST_CTRL,
3990 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3992 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3993 tg3_flag(tp, WOL_ENABLE);
3995 if (tg3_flag(tp, USE_PHYLIB)) {
3996 do_low_power = false;
3997 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3998 !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3999 struct phy_device *phydev;
4000 u32 phyid, advertising;
4002 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4004 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4006 tp->link_config.speed = phydev->speed;
4007 tp->link_config.duplex = phydev->duplex;
4008 tp->link_config.autoneg = phydev->autoneg;
4009 tp->link_config.advertising = phydev->advertising;
4011 advertising = ADVERTISED_TP |
4013 ADVERTISED_Autoneg |
4014 ADVERTISED_10baseT_Half;
4016 if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4017 if (tg3_flag(tp, WOL_SPEED_100MB))
4019 ADVERTISED_100baseT_Half |
4020 ADVERTISED_100baseT_Full |
4021 ADVERTISED_10baseT_Full;
4023 advertising |= ADVERTISED_10baseT_Full;
4026 phydev->advertising = advertising;
4028 phy_start_aneg(phydev);
4030 phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4031 if (phyid != PHY_ID_BCMAC131) {
4032 phyid &= PHY_BCM_OUI_MASK;
4033 if (phyid == PHY_BCM_OUI_1 ||
4034 phyid == PHY_BCM_OUI_2 ||
4035 phyid == PHY_BCM_OUI_3)
4036 do_low_power = true;
4040 do_low_power = true;
4042 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4043 tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4045 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4046 tg3_setup_phy(tp, false);
4049 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4052 val = tr32(GRC_VCPU_EXT_CTRL);
4053 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4054 } else if (!tg3_flag(tp, ENABLE_ASF)) {
4058 for (i = 0; i < 200; i++) {
4059 tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4060 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4065 if (tg3_flag(tp, WOL_CAP))
4066 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4067 WOL_DRV_STATE_SHUTDOWN |
4071 if (device_should_wake) {
4074 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4076 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4077 tg3_phy_auxctl_write(tp,
4078 MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4079 MII_TG3_AUXCTL_PCTL_WOL_EN |
4080 MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4081 MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4085 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4086 mac_mode = MAC_MODE_PORT_MODE_GMII;
4087 else if (tp->phy_flags &
4088 TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4089 if (tp->link_config.active_speed == SPEED_1000)
4090 mac_mode = MAC_MODE_PORT_MODE_GMII;
4092 mac_mode = MAC_MODE_PORT_MODE_MII;
4094 mac_mode = MAC_MODE_PORT_MODE_MII;
4096 mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4097 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4098 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4099 SPEED_100 : SPEED_10;
4100 if (tg3_5700_link_polarity(tp, speed))
4101 mac_mode |= MAC_MODE_LINK_POLARITY;
4103 mac_mode &= ~MAC_MODE_LINK_POLARITY;
4106 mac_mode = MAC_MODE_PORT_MODE_TBI;
4109 if (!tg3_flag(tp, 5750_PLUS))
4110 tw32(MAC_LED_CTRL, tp->led_ctrl);
4112 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4113 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4114 (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4115 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4117 if (tg3_flag(tp, ENABLE_APE))
4118 mac_mode |= MAC_MODE_APE_TX_EN |
4119 MAC_MODE_APE_RX_EN |
4120 MAC_MODE_TDE_ENABLE;
4122 tw32_f(MAC_MODE, mac_mode);
4125 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4129 if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4130 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4131 tg3_asic_rev(tp) == ASIC_REV_5701)) {
4134 base_val = tp->pci_clock_ctrl;
4135 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4136 CLOCK_CTRL_TXCLK_DISABLE);
4138 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4139 CLOCK_CTRL_PWRDOWN_PLL133, 40);
4140 } else if (tg3_flag(tp, 5780_CLASS) ||
4141 tg3_flag(tp, CPMU_PRESENT) ||
4142 tg3_asic_rev(tp) == ASIC_REV_5906) {
4144 } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4145 u32 newbits1, newbits2;
4147 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4148 tg3_asic_rev(tp) == ASIC_REV_5701) {
4149 newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4150 CLOCK_CTRL_TXCLK_DISABLE |
4152 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4153 } else if (tg3_flag(tp, 5705_PLUS)) {
4154 newbits1 = CLOCK_CTRL_625_CORE;
4155 newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4157 newbits1 = CLOCK_CTRL_ALTCLK;
4158 newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4161 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4164 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4167 if (!tg3_flag(tp, 5705_PLUS)) {
4170 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4171 tg3_asic_rev(tp) == ASIC_REV_5701) {
4172 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4173 CLOCK_CTRL_TXCLK_DISABLE |
4174 CLOCK_CTRL_44MHZ_CORE);
4176 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4179 tw32_wait_f(TG3PCI_CLOCK_CTRL,
4180 tp->pci_clock_ctrl | newbits3, 40);
4184 if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4185 tg3_power_down_phy(tp, do_low_power);
4187 tg3_frob_aux_power(tp, true);
4189 /* Workaround for unstable PLL clock */
4190 if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4191 ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4192 (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4193 u32 val = tr32(0x7d00);
4195 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4197 if (!tg3_flag(tp, ENABLE_ASF)) {
4200 err = tg3_nvram_lock(tp);
4201 tg3_halt_cpu(tp, RX_CPU_BASE);
4203 tg3_nvram_unlock(tp);
4207 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4212 static void tg3_power_down(struct tg3 *tp)
4214 tg3_power_down_prepare(tp);
4216 pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4217 pci_set_power_state(tp->pdev, PCI_D3hot);
4220 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4222 switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4223 case MII_TG3_AUX_STAT_10HALF:
4225 *duplex = DUPLEX_HALF;
4228 case MII_TG3_AUX_STAT_10FULL:
4230 *duplex = DUPLEX_FULL;
4233 case MII_TG3_AUX_STAT_100HALF:
4235 *duplex = DUPLEX_HALF;
4238 case MII_TG3_AUX_STAT_100FULL:
4240 *duplex = DUPLEX_FULL;
4243 case MII_TG3_AUX_STAT_1000HALF:
4244 *speed = SPEED_1000;
4245 *duplex = DUPLEX_HALF;
4248 case MII_TG3_AUX_STAT_1000FULL:
4249 *speed = SPEED_1000;
4250 *duplex = DUPLEX_FULL;
4254 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4255 *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4257 *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4261 *speed = SPEED_UNKNOWN;
4262 *duplex = DUPLEX_UNKNOWN;
4267 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4272 new_adv = ADVERTISE_CSMA;
4273 new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4274 new_adv |= mii_advertise_flowctrl(flowctrl);
4276 err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4280 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4281 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4283 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4284 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4285 new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4287 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4292 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4295 tw32(TG3_CPMU_EEE_MODE,
4296 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4298 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4303 /* Advertise 100-BaseTX EEE ability */
4304 if (advertise & ADVERTISED_100baseT_Full)
4305 val |= MDIO_AN_EEE_ADV_100TX;
4306 /* Advertise 1000-BaseT EEE ability */
4307 if (advertise & ADVERTISED_1000baseT_Full)
4308 val |= MDIO_AN_EEE_ADV_1000T;
4309 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4313 switch (tg3_asic_rev(tp)) {
4315 case ASIC_REV_57765:
4316 case ASIC_REV_57766:
4318 /* If we advertised any eee advertisements above... */
4320 val = MII_TG3_DSP_TAP26_ALNOKO |
4321 MII_TG3_DSP_TAP26_RMRXSTO |
4322 MII_TG3_DSP_TAP26_OPCSINPT;
4323 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4327 if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4328 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4329 MII_TG3_DSP_CH34TP2_HIBW01);
4332 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4341 static void tg3_phy_copper_begin(struct tg3 *tp)
4343 if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4344 (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4347 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4348 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4349 adv = ADVERTISED_10baseT_Half |
4350 ADVERTISED_10baseT_Full;
4351 if (tg3_flag(tp, WOL_SPEED_100MB))
4352 adv |= ADVERTISED_100baseT_Half |
4353 ADVERTISED_100baseT_Full;
4354 if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4355 adv |= ADVERTISED_1000baseT_Half |
4356 ADVERTISED_1000baseT_Full;
4358 fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4360 adv = tp->link_config.advertising;
4361 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4362 adv &= ~(ADVERTISED_1000baseT_Half |
4363 ADVERTISED_1000baseT_Full);
4365 fc = tp->link_config.flowctrl;
4368 tg3_phy_autoneg_cfg(tp, adv, fc);
4370 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4371 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4372 /* Normally during power down we want to autonegotiate
4373 * the lowest possible speed for WOL. However, to avoid
4374 * link flap, we leave it untouched.
4379 tg3_writephy(tp, MII_BMCR,
4380 BMCR_ANENABLE | BMCR_ANRESTART);
4383 u32 bmcr, orig_bmcr;
4385 tp->link_config.active_speed = tp->link_config.speed;
4386 tp->link_config.active_duplex = tp->link_config.duplex;
4388 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4389 /* With autoneg disabled, 5715 only links up when the
4390 * advertisement register has the configured speed
4393 tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4397 switch (tp->link_config.speed) {
4403 bmcr |= BMCR_SPEED100;
4407 bmcr |= BMCR_SPEED1000;
4411 if (tp->link_config.duplex == DUPLEX_FULL)
4412 bmcr |= BMCR_FULLDPLX;
4414 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4415 (bmcr != orig_bmcr)) {
4416 tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4417 for (i = 0; i < 1500; i++) {
4421 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4422 tg3_readphy(tp, MII_BMSR, &tmp))
4424 if (!(tmp & BMSR_LSTATUS)) {
4429 tg3_writephy(tp, MII_BMCR, bmcr);
4435 static int tg3_phy_pull_config(struct tg3 *tp)
4440 err = tg3_readphy(tp, MII_BMCR, &val);
4444 if (!(val & BMCR_ANENABLE)) {
4445 tp->link_config.autoneg = AUTONEG_DISABLE;
4446 tp->link_config.advertising = 0;
4447 tg3_flag_clear(tp, PAUSE_AUTONEG);
4451 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4453 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4456 tp->link_config.speed = SPEED_10;
4459 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4462 tp->link_config.speed = SPEED_100;
4464 case BMCR_SPEED1000:
4465 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4466 tp->link_config.speed = SPEED_1000;
4474 if (val & BMCR_FULLDPLX)
4475 tp->link_config.duplex = DUPLEX_FULL;
4477 tp->link_config.duplex = DUPLEX_HALF;
4479 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4485 tp->link_config.autoneg = AUTONEG_ENABLE;
4486 tp->link_config.advertising = ADVERTISED_Autoneg;
4487 tg3_flag_set(tp, PAUSE_AUTONEG);
4489 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4492 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4496 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4497 tp->link_config.advertising |= adv | ADVERTISED_TP;
4499 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4501 tp->link_config.advertising |= ADVERTISED_FIBRE;
4504 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4507 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4508 err = tg3_readphy(tp, MII_CTRL1000, &val);
4512 adv = mii_ctrl1000_to_ethtool_adv_t(val);
4514 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4518 adv = tg3_decode_flowctrl_1000X(val);
4519 tp->link_config.flowctrl = adv;
4521 val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4522 adv = mii_adv_to_ethtool_adv_x(val);
4525 tp->link_config.advertising |= adv;
4532 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4536 /* Turn off tap power management. */
4537 /* Set Extended packet length bit */
4538 err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4540 err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4541 err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4542 err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4543 err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4544 err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4551 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4555 u32 advertising = tp->link_config.advertising;
4557 if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4560 if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4563 val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4566 if (advertising & ADVERTISED_100baseT_Full)
4567 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4568 if (advertising & ADVERTISED_1000baseT_Full)
4569 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4577 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4579 u32 advmsk, tgtadv, advertising;
4581 advertising = tp->link_config.advertising;
4582 tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4584 advmsk = ADVERTISE_ALL;
4585 if (tp->link_config.active_duplex == DUPLEX_FULL) {
4586 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4587 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4590 if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4593 if ((*lcladv & advmsk) != tgtadv)
4596 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4599 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4601 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4605 (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4606 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4607 tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4608 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4609 CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4611 tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4614 if (tg3_ctrl != tgtadv)
4621 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4625 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4628 if (tg3_readphy(tp, MII_STAT1000, &val))
4631 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4634 if (tg3_readphy(tp, MII_LPA, rmtadv))
4637 lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4638 tp->link_config.rmt_adv = lpeth;
4643 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4645 if (curr_link_up != tp->link_up) {
4647 netif_carrier_on(tp->dev);
4649 netif_carrier_off(tp->dev);
4650 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4651 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4654 tg3_link_report(tp);
4661 static void tg3_clear_mac_status(struct tg3 *tp)
4666 MAC_STATUS_SYNC_CHANGED |
4667 MAC_STATUS_CFG_CHANGED |
4668 MAC_STATUS_MI_COMPLETION |
4669 MAC_STATUS_LNKSTATE_CHANGED);
4673 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4675 bool current_link_up;
4677 u32 lcl_adv, rmt_adv;
4682 tg3_clear_mac_status(tp);
4684 if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4686 (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4690 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4692 /* Some third-party PHYs need to be reset on link going
4695 if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4696 tg3_asic_rev(tp) == ASIC_REV_5704 ||
4697 tg3_asic_rev(tp) == ASIC_REV_5705) &&
4699 tg3_readphy(tp, MII_BMSR, &bmsr);
4700 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4701 !(bmsr & BMSR_LSTATUS))
4707 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4708 tg3_readphy(tp, MII_BMSR, &bmsr);
4709 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4710 !tg3_flag(tp, INIT_COMPLETE))
4713 if (!(bmsr & BMSR_LSTATUS)) {
4714 err = tg3_init_5401phy_dsp(tp);
4718 tg3_readphy(tp, MII_BMSR, &bmsr);
4719 for (i = 0; i < 1000; i++) {
4721 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4722 (bmsr & BMSR_LSTATUS)) {
4728 if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4729 TG3_PHY_REV_BCM5401_B0 &&
4730 !(bmsr & BMSR_LSTATUS) &&
4731 tp->link_config.active_speed == SPEED_1000) {
4732 err = tg3_phy_reset(tp);
4734 err = tg3_init_5401phy_dsp(tp);
4739 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4740 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4741 /* 5701 {A0,B0} CRC bug workaround */
4742 tg3_writephy(tp, 0x15, 0x0a75);
4743 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4744 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4745 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4748 /* Clear pending interrupts... */
4749 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4750 tg3_readphy(tp, MII_TG3_ISTAT, &val);
4752 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4753 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4754 else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4755 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4757 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4758 tg3_asic_rev(tp) == ASIC_REV_5701) {
4759 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4760 tg3_writephy(tp, MII_TG3_EXT_CTRL,
4761 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4763 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4766 current_link_up = false;
4767 current_speed = SPEED_UNKNOWN;
4768 current_duplex = DUPLEX_UNKNOWN;
4769 tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4770 tp->link_config.rmt_adv = 0;
4772 if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4773 err = tg3_phy_auxctl_read(tp,
4774 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4776 if (!err && !(val & (1 << 10))) {
4777 tg3_phy_auxctl_write(tp,
4778 MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4785 for (i = 0; i < 100; i++) {
4786 tg3_readphy(tp, MII_BMSR, &bmsr);
4787 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4788 (bmsr & BMSR_LSTATUS))
4793 if (bmsr & BMSR_LSTATUS) {
4796 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4797 for (i = 0; i < 2000; i++) {
4799 if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4804 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4809 for (i = 0; i < 200; i++) {
4810 tg3_readphy(tp, MII_BMCR, &bmcr);
4811 if (tg3_readphy(tp, MII_BMCR, &bmcr))
4813 if (bmcr && bmcr != 0x7fff)
4821 tp->link_config.active_speed = current_speed;
4822 tp->link_config.active_duplex = current_duplex;
4824 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4825 bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4827 if ((bmcr & BMCR_ANENABLE) &&
4829 tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4830 tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4831 current_link_up = true;
4833 /* EEE settings changes take effect only after a phy
4834 * reset. If we have skipped a reset due to Link Flap
4835 * Avoidance being enabled, do it now.
4837 if (!eee_config_ok &&
4838 (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4842 if (!(bmcr & BMCR_ANENABLE) &&
4843 tp->link_config.speed == current_speed &&
4844 tp->link_config.duplex == current_duplex) {
4845 current_link_up = true;
4849 if (current_link_up &&
4850 tp->link_config.active_duplex == DUPLEX_FULL) {
4853 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4854 reg = MII_TG3_FET_GEN_STAT;
4855 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4857 reg = MII_TG3_EXT_STAT;
4858 bit = MII_TG3_EXT_STAT_MDIX;
4861 if (!tg3_readphy(tp, reg, &val) && (val & bit))
4862 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4864 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4869 if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4870 tg3_phy_copper_begin(tp);
4872 if (tg3_flag(tp, ROBOSWITCH)) {
4873 current_link_up = true;
4874 /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4875 current_speed = SPEED_1000;
4876 current_duplex = DUPLEX_FULL;
4877 tp->link_config.active_speed = current_speed;
4878 tp->link_config.active_duplex = current_duplex;
4881 tg3_readphy(tp, MII_BMSR, &bmsr);
4882 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4883 (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4884 current_link_up = true;
4887 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4888 if (current_link_up) {
4889 if (tp->link_config.active_speed == SPEED_100 ||
4890 tp->link_config.active_speed == SPEED_10)
4891 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4893 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4894 } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4895 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4897 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4899 /* In order for the 5750 core in BCM4785 chip to work properly
4900 * in RGMII mode, the Led Control Register must be set up.
4902 if (tg3_flag(tp, RGMII_MODE)) {
4903 u32 led_ctrl = tr32(MAC_LED_CTRL);
4904 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4906 if (tp->link_config.active_speed == SPEED_10)
4907 led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4908 else if (tp->link_config.active_speed == SPEED_100)
4909 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4910 LED_CTRL_100MBPS_ON);
4911 else if (tp->link_config.active_speed == SPEED_1000)
4912 led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4913 LED_CTRL_1000MBPS_ON);
4915 tw32(MAC_LED_CTRL, led_ctrl);
4919 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4920 if (tp->link_config.active_duplex == DUPLEX_HALF)
4921 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4923 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4924 if (current_link_up &&
4925 tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4926 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4928 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4931 /* ??? Without this setting Netgear GA302T PHY does not
4932 * ??? send/receive packets...
4934 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4935 tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4936 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4937 tw32_f(MAC_MI_MODE, tp->mi_mode);
4941 tw32_f(MAC_MODE, tp->mac_mode);
4944 tg3_phy_eee_adjust(tp, current_link_up);
4946 if (tg3_flag(tp, USE_LINKCHG_REG)) {
4947 /* Polled via timer. */
4948 tw32_f(MAC_EVENT, 0);
4950 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4954 if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4956 tp->link_config.active_speed == SPEED_1000 &&
4957 (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4960 (MAC_STATUS_SYNC_CHANGED |
4961 MAC_STATUS_CFG_CHANGED));
4964 NIC_SRAM_FIRMWARE_MBOX,
4965 NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4968 /* Prevent send BD corruption. */
4969 if (tg3_flag(tp, CLKREQ_BUG)) {
4970 if (tp->link_config.active_speed == SPEED_100 ||
4971 tp->link_config.active_speed == SPEED_10)
4972 pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4973 PCI_EXP_LNKCTL_CLKREQ_EN);
4975 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4976 PCI_EXP_LNKCTL_CLKREQ_EN);
4979 tg3_test_and_report_link_chg(tp, current_link_up);
4984 struct tg3_fiber_aneginfo {
4986 #define ANEG_STATE_UNKNOWN 0
4987 #define ANEG_STATE_AN_ENABLE 1
4988 #define ANEG_STATE_RESTART_INIT 2
4989 #define ANEG_STATE_RESTART 3
4990 #define ANEG_STATE_DISABLE_LINK_OK 4
4991 #define ANEG_STATE_ABILITY_DETECT_INIT 5
4992 #define ANEG_STATE_ABILITY_DETECT 6
4993 #define ANEG_STATE_ACK_DETECT_INIT 7
4994 #define ANEG_STATE_ACK_DETECT 8
4995 #define ANEG_STATE_COMPLETE_ACK_INIT 9
4996 #define ANEG_STATE_COMPLETE_ACK 10
4997 #define ANEG_STATE_IDLE_DETECT_INIT 11
4998 #define ANEG_STATE_IDLE_DETECT 12
4999 #define ANEG_STATE_LINK_OK 13
5000 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT 14
5001 #define ANEG_STATE_NEXT_PAGE_WAIT 15
5004 #define MR_AN_ENABLE 0x00000001
5005 #define MR_RESTART_AN 0x00000002
5006 #define MR_AN_COMPLETE 0x00000004
5007 #define MR_PAGE_RX 0x00000008
5008 #define MR_NP_LOADED 0x00000010
5009 #define MR_TOGGLE_TX 0x00000020
5010 #define MR_LP_ADV_FULL_DUPLEX 0x00000040
5011 #define MR_LP_ADV_HALF_DUPLEX 0x00000080
5012 #define MR_LP_ADV_SYM_PAUSE 0x00000100
5013 #define MR_LP_ADV_ASYM_PAUSE 0x00000200
5014 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5015 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5016 #define MR_LP_ADV_NEXT_PAGE 0x00001000
5017 #define MR_TOGGLE_RX 0x00002000
5018 #define MR_NP_RX 0x00004000
5020 #define MR_LINK_OK 0x80000000
5022 unsigned long link_time, cur_time;
5024 u32 ability_match_cfg;
5025 int ability_match_count;
5027 char ability_match, idle_match, ack_match;
5029 u32 txconfig, rxconfig;
5030 #define ANEG_CFG_NP 0x00000080
5031 #define ANEG_CFG_ACK 0x00000040
5032 #define ANEG_CFG_RF2 0x00000020
5033 #define ANEG_CFG_RF1 0x00000010
5034 #define ANEG_CFG_PS2 0x00000001
5035 #define ANEG_CFG_PS1 0x00008000
5036 #define ANEG_CFG_HD 0x00004000
5037 #define ANEG_CFG_FD 0x00002000
5038 #define ANEG_CFG_INVAL 0x00001f06
5043 #define ANEG_TIMER_ENAB 2
5044 #define ANEG_FAILED -1
5046 #define ANEG_STATE_SETTLE_TIME 10000
5048 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5049 struct tg3_fiber_aneginfo *ap)
5052 unsigned long delta;
5056 if (ap->state == ANEG_STATE_UNKNOWN) {
5060 ap->ability_match_cfg = 0;
5061 ap->ability_match_count = 0;
5062 ap->ability_match = 0;
5068 if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5069 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5071 if (rx_cfg_reg != ap->ability_match_cfg) {
5072 ap->ability_match_cfg = rx_cfg_reg;
5073 ap->ability_match = 0;
5074 ap->ability_match_count = 0;
5076 if (++ap->ability_match_count > 1) {
5077 ap->ability_match = 1;
5078 ap->ability_match_cfg = rx_cfg_reg;
5081 if (rx_cfg_reg & ANEG_CFG_ACK)
5089 ap->ability_match_cfg = 0;
5090 ap->ability_match_count = 0;
5091 ap->ability_match = 0;
5097 ap->rxconfig = rx_cfg_reg;
5100 switch (ap->state) {
5101 case ANEG_STATE_UNKNOWN:
5102 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5103 ap->state = ANEG_STATE_AN_ENABLE;
5106 case ANEG_STATE_AN_ENABLE:
5107 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5108 if (ap->flags & MR_AN_ENABLE) {
5111 ap->ability_match_cfg = 0;
5112 ap->ability_match_count = 0;
5113 ap->ability_match = 0;
5117 ap->state = ANEG_STATE_RESTART_INIT;
5119 ap->state = ANEG_STATE_DISABLE_LINK_OK;
5123 case ANEG_STATE_RESTART_INIT:
5124 ap->link_time = ap->cur_time;
5125 ap->flags &= ~(MR_NP_LOADED);
5127 tw32(MAC_TX_AUTO_NEG, 0);
5128 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5129 tw32_f(MAC_MODE, tp->mac_mode);
5132 ret = ANEG_TIMER_ENAB;
5133 ap->state = ANEG_STATE_RESTART;
5136 case ANEG_STATE_RESTART:
5137 delta = ap->cur_time - ap->link_time;
5138 if (delta > ANEG_STATE_SETTLE_TIME)
5139 ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5141 ret = ANEG_TIMER_ENAB;
5144 case ANEG_STATE_DISABLE_LINK_OK:
5148 case ANEG_STATE_ABILITY_DETECT_INIT:
5149 ap->flags &= ~(MR_TOGGLE_TX);
5150 ap->txconfig = ANEG_CFG_FD;
5151 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5152 if (flowctrl & ADVERTISE_1000XPAUSE)
5153 ap->txconfig |= ANEG_CFG_PS1;
5154 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5155 ap->txconfig |= ANEG_CFG_PS2;
5156 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5157 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5158 tw32_f(MAC_MODE, tp->mac_mode);
5161 ap->state = ANEG_STATE_ABILITY_DETECT;
5164 case ANEG_STATE_ABILITY_DETECT:
5165 if (ap->ability_match != 0 && ap->rxconfig != 0)
5166 ap->state = ANEG_STATE_ACK_DETECT_INIT;
5169 case ANEG_STATE_ACK_DETECT_INIT:
5170 ap->txconfig |= ANEG_CFG_ACK;
5171 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5172 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5173 tw32_f(MAC_MODE, tp->mac_mode);
5176 ap->state = ANEG_STATE_ACK_DETECT;
5179 case ANEG_STATE_ACK_DETECT:
5180 if (ap->ack_match != 0) {
5181 if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5182 (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5183 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5185 ap->state = ANEG_STATE_AN_ENABLE;
5187 } else if (ap->ability_match != 0 &&
5188 ap->rxconfig == 0) {
5189 ap->state = ANEG_STATE_AN_ENABLE;
5193 case ANEG_STATE_COMPLETE_ACK_INIT:
5194 if (ap->rxconfig & ANEG_CFG_INVAL) {
5198 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5199 MR_LP_ADV_HALF_DUPLEX |
5200 MR_LP_ADV_SYM_PAUSE |
5201 MR_LP_ADV_ASYM_PAUSE |
5202 MR_LP_ADV_REMOTE_FAULT1 |
5203 MR_LP_ADV_REMOTE_FAULT2 |
5204 MR_LP_ADV_NEXT_PAGE |
5207 if (ap->rxconfig & ANEG_CFG_FD)
5208 ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5209 if (ap->rxconfig & ANEG_CFG_HD)
5210 ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5211 if (ap->rxconfig & ANEG_CFG_PS1)
5212 ap->flags |= MR_LP_ADV_SYM_PAUSE;
5213 if (ap->rxconfig & ANEG_CFG_PS2)
5214 ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5215 if (ap->rxconfig & ANEG_CFG_RF1)
5216 ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5217 if (ap->rxconfig & ANEG_CFG_RF2)
5218 ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5219 if (ap->rxconfig & ANEG_CFG_NP)
5220 ap->flags |= MR_LP_ADV_NEXT_PAGE;
5222 ap->link_time = ap->cur_time;
5224 ap->flags ^= (MR_TOGGLE_TX);
5225 if (ap->rxconfig & 0x0008)
5226 ap->flags |= MR_TOGGLE_RX;
5227 if (ap->rxconfig & ANEG_CFG_NP)
5228 ap->flags |= MR_NP_RX;
5229 ap->flags |= MR_PAGE_RX;
5231 ap->state = ANEG_STATE_COMPLETE_ACK;
5232 ret = ANEG_TIMER_ENAB;
5235 case ANEG_STATE_COMPLETE_ACK:
5236 if (ap->ability_match != 0 &&
5237 ap->rxconfig == 0) {
5238 ap->state = ANEG_STATE_AN_ENABLE;
5241 delta = ap->cur_time - ap->link_time;
5242 if (delta > ANEG_STATE_SETTLE_TIME) {
5243 if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5244 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5246 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5247 !(ap->flags & MR_NP_RX)) {
5248 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5256 case ANEG_STATE_IDLE_DETECT_INIT:
5257 ap->link_time = ap->cur_time;
5258 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5259 tw32_f(MAC_MODE, tp->mac_mode);
5262 ap->state = ANEG_STATE_IDLE_DETECT;
5263 ret = ANEG_TIMER_ENAB;
5266 case ANEG_STATE_IDLE_DETECT:
5267 if (ap->ability_match != 0 &&
5268 ap->rxconfig == 0) {
5269 ap->state = ANEG_STATE_AN_ENABLE;
5272 delta = ap->cur_time - ap->link_time;
5273 if (delta > ANEG_STATE_SETTLE_TIME) {
5274 /* XXX another gem from the Broadcom driver :( */
5275 ap->state = ANEG_STATE_LINK_OK;
5279 case ANEG_STATE_LINK_OK:
5280 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5284 case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5285 /* ??? unimplemented */
5288 case ANEG_STATE_NEXT_PAGE_WAIT:
5289 /* ??? unimplemented */
5300 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5303 struct tg3_fiber_aneginfo aninfo;
5304 int status = ANEG_FAILED;
5308 tw32_f(MAC_TX_AUTO_NEG, 0);
5310 tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5311 tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5314 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5317 memset(&aninfo, 0, sizeof(aninfo));
5318 aninfo.flags |= MR_AN_ENABLE;
5319 aninfo.state = ANEG_STATE_UNKNOWN;
5320 aninfo.cur_time = 0;
5322 while (++tick < 195000) {
5323 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5324 if (status == ANEG_DONE || status == ANEG_FAILED)
5330 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5331 tw32_f(MAC_MODE, tp->mac_mode);
5334 *txflags = aninfo.txconfig;
5335 *rxflags = aninfo.flags;
5337 if (status == ANEG_DONE &&
5338 (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5339 MR_LP_ADV_FULL_DUPLEX)))
5345 static void tg3_init_bcm8002(struct tg3 *tp)
5347 u32 mac_status = tr32(MAC_STATUS);
5350 /* Reset when initting first time or we have a link. */
5351 if (tg3_flag(tp, INIT_COMPLETE) &&
5352 !(mac_status & MAC_STATUS_PCS_SYNCED))
5355 /* Set PLL lock range. */
5356 tg3_writephy(tp, 0x16, 0x8007);
5359 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5361 /* Wait for reset to complete. */
5362 /* XXX schedule_timeout() ... */
5363 for (i = 0; i < 500; i++)
5366 /* Config mode; select PMA/Ch 1 regs. */
5367 tg3_writephy(tp, 0x10, 0x8411);
5369 /* Enable auto-lock and comdet, select txclk for tx. */
5370 tg3_writephy(tp, 0x11, 0x0a10);
5372 tg3_writephy(tp, 0x18, 0x00a0);
5373 tg3_writephy(tp, 0x16, 0x41ff);
5375 /* Assert and deassert POR. */
5376 tg3_writephy(tp, 0x13, 0x0400);
5378 tg3_writephy(tp, 0x13, 0x0000);
5380 tg3_writephy(tp, 0x11, 0x0a50);
5382 tg3_writephy(tp, 0x11, 0x0a10);
5384 /* Wait for signal to stabilize */
5385 /* XXX schedule_timeout() ... */
5386 for (i = 0; i < 15000; i++)
5389 /* Deselect the channel register so we can read the PHYID
5392 tg3_writephy(tp, 0x10, 0x8011);
5395 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5398 bool current_link_up;
5399 u32 sg_dig_ctrl, sg_dig_status;
5400 u32 serdes_cfg, expected_sg_dig_ctrl;
5401 int workaround, port_a;
5404 expected_sg_dig_ctrl = 0;
5407 current_link_up = false;
5409 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5410 tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5412 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5415 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5416 /* preserve bits 20-23 for voltage regulator */
5417 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5420 sg_dig_ctrl = tr32(SG_DIG_CTRL);
5422 if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5423 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5425 u32 val = serdes_cfg;
5431 tw32_f(MAC_SERDES_CFG, val);
5434 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5436 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5437 tg3_setup_flow_control(tp, 0, 0);
5438 current_link_up = true;
5443 /* Want auto-negotiation. */
5444 expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5446 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5447 if (flowctrl & ADVERTISE_1000XPAUSE)
5448 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5449 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5450 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5452 if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5453 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5454 tp->serdes_counter &&
5455 ((mac_status & (MAC_STATUS_PCS_SYNCED |
5456 MAC_STATUS_RCVD_CFG)) ==
5457 MAC_STATUS_PCS_SYNCED)) {
5458 tp->serdes_counter--;
5459 current_link_up = true;
5464 tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5465 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5467 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5469 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5470 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5471 } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5472 MAC_STATUS_SIGNAL_DET)) {
5473 sg_dig_status = tr32(SG_DIG_STATUS);
5474 mac_status = tr32(MAC_STATUS);
5476 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5477 (mac_status & MAC_STATUS_PCS_SYNCED)) {
5478 u32 local_adv = 0, remote_adv = 0;
5480 if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5481 local_adv |= ADVERTISE_1000XPAUSE;
5482 if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5483 local_adv |= ADVERTISE_1000XPSE_ASYM;
5485 if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5486 remote_adv |= LPA_1000XPAUSE;
5487 if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5488 remote_adv |= LPA_1000XPAUSE_ASYM;
5490 tp->link_config.rmt_adv =
5491 mii_adv_to_ethtool_adv_x(remote_adv);
5493 tg3_setup_flow_control(tp, local_adv, remote_adv);
5494 current_link_up = true;
5495 tp->serdes_counter = 0;
5496 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5497 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5498 if (tp->serdes_counter)
5499 tp->serdes_counter--;
5502 u32 val = serdes_cfg;
5509 tw32_f(MAC_SERDES_CFG, val);
5512 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5515 /* Link parallel detection - link is up */
5516 /* only if we have PCS_SYNC and not */
5517 /* receiving config code words */
5518 mac_status = tr32(MAC_STATUS);
5519 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5520 !(mac_status & MAC_STATUS_RCVD_CFG)) {
5521 tg3_setup_flow_control(tp, 0, 0);
5522 current_link_up = true;
5524 TG3_PHYFLG_PARALLEL_DETECT;
5525 tp->serdes_counter =
5526 SERDES_PARALLEL_DET_TIMEOUT;
5528 goto restart_autoneg;
5532 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5533 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5537 return current_link_up;
5540 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5542 bool current_link_up = false;
5544 if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5547 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5548 u32 txflags, rxflags;
5551 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5552 u32 local_adv = 0, remote_adv = 0;
5554 if (txflags & ANEG_CFG_PS1)
5555 local_adv |= ADVERTISE_1000XPAUSE;
5556 if (txflags & ANEG_CFG_PS2)
5557 local_adv |= ADVERTISE_1000XPSE_ASYM;
5559 if (rxflags & MR_LP_ADV_SYM_PAUSE)
5560 remote_adv |= LPA_1000XPAUSE;
5561 if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5562 remote_adv |= LPA_1000XPAUSE_ASYM;
5564 tp->link_config.rmt_adv =
5565 mii_adv_to_ethtool_adv_x(remote_adv);
5567 tg3_setup_flow_control(tp, local_adv, remote_adv);
5569 current_link_up = true;
5571 for (i = 0; i < 30; i++) {
5574 (MAC_STATUS_SYNC_CHANGED |
5575 MAC_STATUS_CFG_CHANGED));
5577 if ((tr32(MAC_STATUS) &
5578 (MAC_STATUS_SYNC_CHANGED |
5579 MAC_STATUS_CFG_CHANGED)) == 0)
5583 mac_status = tr32(MAC_STATUS);
5584 if (!current_link_up &&
5585 (mac_status & MAC_STATUS_PCS_SYNCED) &&
5586 !(mac_status & MAC_STATUS_RCVD_CFG))
5587 current_link_up = true;
5589 tg3_setup_flow_control(tp, 0, 0);
5591 /* Forcing 1000FD link up. */
5592 current_link_up = true;
5594 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5597 tw32_f(MAC_MODE, tp->mac_mode);
5602 return current_link_up;
5605 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5608 u16 orig_active_speed;
5609 u8 orig_active_duplex;
5611 bool current_link_up;
5614 orig_pause_cfg = tp->link_config.active_flowctrl;
5615 orig_active_speed = tp->link_config.active_speed;
5616 orig_active_duplex = tp->link_config.active_duplex;
5618 if (!tg3_flag(tp, HW_AUTONEG) &&
5620 tg3_flag(tp, INIT_COMPLETE)) {
5621 mac_status = tr32(MAC_STATUS);
5622 mac_status &= (MAC_STATUS_PCS_SYNCED |
5623 MAC_STATUS_SIGNAL_DET |
5624 MAC_STATUS_CFG_CHANGED |
5625 MAC_STATUS_RCVD_CFG);
5626 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5627 MAC_STATUS_SIGNAL_DET)) {
5628 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5629 MAC_STATUS_CFG_CHANGED));
5634 tw32_f(MAC_TX_AUTO_NEG, 0);
5636 tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5637 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5638 tw32_f(MAC_MODE, tp->mac_mode);
5641 if (tp->phy_id == TG3_PHY_ID_BCM8002)
5642 tg3_init_bcm8002(tp);
5644 /* Enable link change event even when serdes polling. */
5645 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5648 current_link_up = false;
5649 tp->link_config.rmt_adv = 0;
5650 mac_status = tr32(MAC_STATUS);
5652 if (tg3_flag(tp, HW_AUTONEG))
5653 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5655 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5657 tp->napi[0].hw_status->status =
5658 (SD_STATUS_UPDATED |
5659 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5661 for (i = 0; i < 100; i++) {
5662 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5663 MAC_STATUS_CFG_CHANGED));
5665 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5666 MAC_STATUS_CFG_CHANGED |
5667 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5671 mac_status = tr32(MAC_STATUS);
5672 if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5673 current_link_up = false;
5674 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5675 tp->serdes_counter == 0) {
5676 tw32_f(MAC_MODE, (tp->mac_mode |
5677 MAC_MODE_SEND_CONFIGS));
5679 tw32_f(MAC_MODE, tp->mac_mode);
5683 if (current_link_up) {
5684 tp->link_config.active_speed = SPEED_1000;
5685 tp->link_config.active_duplex = DUPLEX_FULL;
5686 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5687 LED_CTRL_LNKLED_OVERRIDE |
5688 LED_CTRL_1000MBPS_ON));
5690 tp->link_config.active_speed = SPEED_UNKNOWN;
5691 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5692 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5693 LED_CTRL_LNKLED_OVERRIDE |
5694 LED_CTRL_TRAFFIC_OVERRIDE));
5697 if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5698 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5699 if (orig_pause_cfg != now_pause_cfg ||
5700 orig_active_speed != tp->link_config.active_speed ||
5701 orig_active_duplex != tp->link_config.active_duplex)
5702 tg3_link_report(tp);
5708 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5712 u16 current_speed = SPEED_UNKNOWN;
5713 u8 current_duplex = DUPLEX_UNKNOWN;
5714 bool current_link_up = false;
5715 u32 local_adv, remote_adv, sgsr;
5717 if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5718 tg3_asic_rev(tp) == ASIC_REV_5720) &&
5719 !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5720 (sgsr & SERDES_TG3_SGMII_MODE)) {
5725 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5727 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5728 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5730 current_link_up = true;
5731 if (sgsr & SERDES_TG3_SPEED_1000) {
5732 current_speed = SPEED_1000;
5733 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5734 } else if (sgsr & SERDES_TG3_SPEED_100) {
5735 current_speed = SPEED_100;
5736 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5738 current_speed = SPEED_10;
5739 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5742 if (sgsr & SERDES_TG3_FULL_DUPLEX)
5743 current_duplex = DUPLEX_FULL;
5745 current_duplex = DUPLEX_HALF;
5748 tw32_f(MAC_MODE, tp->mac_mode);
5751 tg3_clear_mac_status(tp);
5753 goto fiber_setup_done;
5756 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5757 tw32_f(MAC_MODE, tp->mac_mode);
5760 tg3_clear_mac_status(tp);
5765 tp->link_config.rmt_adv = 0;
5767 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5768 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5769 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5770 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5771 bmsr |= BMSR_LSTATUS;
5773 bmsr &= ~BMSR_LSTATUS;
5776 err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5778 if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5779 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5780 /* do nothing, just check for link up at the end */
5781 } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5784 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5785 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5786 ADVERTISE_1000XPAUSE |
5787 ADVERTISE_1000XPSE_ASYM |
5790 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5791 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5793 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5794 tg3_writephy(tp, MII_ADVERTISE, newadv);
5795 bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5796 tg3_writephy(tp, MII_BMCR, bmcr);
5798 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5799 tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5800 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5807 bmcr &= ~BMCR_SPEED1000;
5808 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5810 if (tp->link_config.duplex == DUPLEX_FULL)
5811 new_bmcr |= BMCR_FULLDPLX;
5813 if (new_bmcr != bmcr) {
5814 /* BMCR_SPEED1000 is a reserved bit that needs
5815 * to be set on write.
5817 new_bmcr |= BMCR_SPEED1000;
5819 /* Force a linkdown */
5823 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5824 adv &= ~(ADVERTISE_1000XFULL |
5825 ADVERTISE_1000XHALF |
5827 tg3_writephy(tp, MII_ADVERTISE, adv);
5828 tg3_writephy(tp, MII_BMCR, bmcr |
5832 tg3_carrier_off(tp);
5834 tg3_writephy(tp, MII_BMCR, new_bmcr);
5836 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5837 err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5838 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5839 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5840 bmsr |= BMSR_LSTATUS;
5842 bmsr &= ~BMSR_LSTATUS;
5844 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5848 if (bmsr & BMSR_LSTATUS) {
5849 current_speed = SPEED_1000;
5850 current_link_up = true;
5851 if (bmcr & BMCR_FULLDPLX)
5852 current_duplex = DUPLEX_FULL;
5854 current_duplex = DUPLEX_HALF;
5859 if (bmcr & BMCR_ANENABLE) {
5862 err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5863 err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5864 common = local_adv & remote_adv;
5865 if (common & (ADVERTISE_1000XHALF |
5866 ADVERTISE_1000XFULL)) {
5867 if (common & ADVERTISE_1000XFULL)
5868 current_duplex = DUPLEX_FULL;
5870 current_duplex = DUPLEX_HALF;
5872 tp->link_config.rmt_adv =
5873 mii_adv_to_ethtool_adv_x(remote_adv);
5874 } else if (!tg3_flag(tp, 5780_CLASS)) {
5875 /* Link is up via parallel detect */
5877 current_link_up = false;
5883 if (current_link_up && current_duplex == DUPLEX_FULL)
5884 tg3_setup_flow_control(tp, local_adv, remote_adv);
5886 tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5887 if (tp->link_config.active_duplex == DUPLEX_HALF)
5888 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5890 tw32_f(MAC_MODE, tp->mac_mode);
5893 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5895 tp->link_config.active_speed = current_speed;
5896 tp->link_config.active_duplex = current_duplex;
5898 tg3_test_and_report_link_chg(tp, current_link_up);
5902 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5904 if (tp->serdes_counter) {
5905 /* Give autoneg time to complete. */
5906 tp->serdes_counter--;
5911 (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5914 tg3_readphy(tp, MII_BMCR, &bmcr);
5915 if (bmcr & BMCR_ANENABLE) {
5918 /* Select shadow register 0x1f */
5919 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5920 tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5922 /* Select expansion interrupt status register */
5923 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5924 MII_TG3_DSP_EXP1_INT_STAT);
5925 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5926 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5928 if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5929 /* We have signal detect and not receiving
5930 * config code words, link is up by parallel
5934 bmcr &= ~BMCR_ANENABLE;
5935 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5936 tg3_writephy(tp, MII_BMCR, bmcr);
5937 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5940 } else if (tp->link_up &&
5941 (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5942 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5945 /* Select expansion interrupt status register */
5946 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5947 MII_TG3_DSP_EXP1_INT_STAT);
5948 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5952 /* Config code words received, turn on autoneg. */
5953 tg3_readphy(tp, MII_BMCR, &bmcr);
5954 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5956 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5962 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5967 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5968 err = tg3_setup_fiber_phy(tp, force_reset);
5969 else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5970 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5972 err = tg3_setup_copper_phy(tp, force_reset);
5974 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5977 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5978 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5980 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5985 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5986 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5987 tw32(GRC_MISC_CFG, val);
5990 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5991 (6 << TX_LENGTHS_IPG_SHIFT);
5992 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5993 tg3_asic_rev(tp) == ASIC_REV_5762)
5994 val |= tr32(MAC_TX_LENGTHS) &
5995 (TX_LENGTHS_JMB_FRM_LEN_MSK |
5996 TX_LENGTHS_CNT_DWN_VAL_MSK);
5998 if (tp->link_config.active_speed == SPEED_1000 &&
5999 tp->link_config.active_duplex == DUPLEX_HALF)
6000 tw32(MAC_TX_LENGTHS, val |
6001 (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6003 tw32(MAC_TX_LENGTHS, val |
6004 (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6006 if (!tg3_flag(tp, 5705_PLUS)) {
6008 tw32(HOSTCC_STAT_COAL_TICKS,
6009 tp->coal.stats_block_coalesce_usecs);
6011 tw32(HOSTCC_STAT_COAL_TICKS, 0);
6015 if (tg3_flag(tp, ASPM_WORKAROUND)) {
6016 val = tr32(PCIE_PWR_MGMT_THRESH);
6018 val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6021 val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6022 tw32(PCIE_PWR_MGMT_THRESH, val);
6028 /* tp->lock must be held */
6029 static u64 tg3_refclk_read(struct tg3 *tp)
6031 u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6032 return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6035 /* tp->lock must be held */
6036 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6038 tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6039 tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6040 tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6041 tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6044 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6045 static inline void tg3_full_unlock(struct tg3 *tp);
6046 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6048 struct tg3 *tp = netdev_priv(dev);
6050 info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6051 SOF_TIMESTAMPING_RX_SOFTWARE |
6052 SOF_TIMESTAMPING_SOFTWARE;
6054 if (tg3_flag(tp, PTP_CAPABLE)) {
6055 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6056 SOF_TIMESTAMPING_RX_HARDWARE |
6057 SOF_TIMESTAMPING_RAW_HARDWARE;
6061 info->phc_index = ptp_clock_index(tp->ptp_clock);
6063 info->phc_index = -1;
6065 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6067 info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6068 (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6069 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6070 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6074 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6076 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6077 bool neg_adj = false;
6085 /* Frequency adjustment is performed using hardware with a 24 bit
6086 * accumulator and a programmable correction value. On each clk, the
6087 * correction value gets added to the accumulator and when it
6088 * overflows, the time counter is incremented/decremented.
6090 * So conversion from ppb to correction value is
6091 * ppb * (1 << 24) / 1000000000
6093 correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6094 TG3_EAV_REF_CLK_CORRECT_MASK;
6096 tg3_full_lock(tp, 0);
6099 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6100 TG3_EAV_REF_CLK_CORRECT_EN |
6101 (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6103 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6105 tg3_full_unlock(tp);
6110 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6112 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6114 tg3_full_lock(tp, 0);
6115 tp->ptp_adjust += delta;
6116 tg3_full_unlock(tp);
6121 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6125 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6127 tg3_full_lock(tp, 0);
6128 ns = tg3_refclk_read(tp);
6129 ns += tp->ptp_adjust;
6130 tg3_full_unlock(tp);
6132 ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6133 ts->tv_nsec = remainder;
6138 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6139 const struct timespec *ts)
6142 struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6144 ns = timespec_to_ns(ts);
6146 tg3_full_lock(tp, 0);
6147 tg3_refclk_write(tp, ns);
6149 tg3_full_unlock(tp);
6154 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6155 struct ptp_clock_request *rq, int on)
6160 static const struct ptp_clock_info tg3_ptp_caps = {
6161 .owner = THIS_MODULE,
6162 .name = "tg3 clock",
6163 .max_adj = 250000000,
6168 .adjfreq = tg3_ptp_adjfreq,
6169 .adjtime = tg3_ptp_adjtime,
6170 .gettime = tg3_ptp_gettime,
6171 .settime = tg3_ptp_settime,
6172 .enable = tg3_ptp_enable,
6175 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6176 struct skb_shared_hwtstamps *timestamp)
6178 memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6179 timestamp->hwtstamp = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6183 /* tp->lock must be held */
6184 static void tg3_ptp_init(struct tg3 *tp)
6186 if (!tg3_flag(tp, PTP_CAPABLE))
6189 /* Initialize the hardware clock to the system time. */
6190 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6192 tp->ptp_info = tg3_ptp_caps;
6195 /* tp->lock must be held */
6196 static void tg3_ptp_resume(struct tg3 *tp)
6198 if (!tg3_flag(tp, PTP_CAPABLE))
6201 tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6205 static void tg3_ptp_fini(struct tg3 *tp)
6207 if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6210 ptp_clock_unregister(tp->ptp_clock);
6211 tp->ptp_clock = NULL;
6215 static inline int tg3_irq_sync(struct tg3 *tp)
6217 return tp->irq_sync;
6220 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6224 dst = (u32 *)((u8 *)dst + off);
6225 for (i = 0; i < len; i += sizeof(u32))
6226 *dst++ = tr32(off + i);
6229 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6231 tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6232 tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6233 tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6234 tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6235 tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6236 tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6237 tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6238 tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6239 tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6240 tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6241 tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6242 tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6243 tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6244 tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6245 tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6246 tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6247 tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6248 tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6249 tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6251 if (tg3_flag(tp, SUPPORT_MSIX))
6252 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6254 tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6255 tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6256 tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6257 tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6258 tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6259 tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6260 tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6261 tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6263 if (!tg3_flag(tp, 5705_PLUS)) {
6264 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6265 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6266 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6269 tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6270 tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6271 tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6272 tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6273 tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6275 if (tg3_flag(tp, NVRAM))
6276 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6279 static void tg3_dump_state(struct tg3 *tp)
6284 regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6288 if (tg3_flag(tp, PCI_EXPRESS)) {
6289 /* Read up to but not including private PCI registers */
6290 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6291 regs[i / sizeof(u32)] = tr32(i);
6293 tg3_dump_legacy_regs(tp, regs);
6295 for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6296 if (!regs[i + 0] && !regs[i + 1] &&
6297 !regs[i + 2] && !regs[i + 3])
6300 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6302 regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6307 for (i = 0; i < tp->irq_cnt; i++) {
6308 struct tg3_napi *tnapi = &tp->napi[i];
6310 /* SW status block */
6312 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6314 tnapi->hw_status->status,
6315 tnapi->hw_status->status_tag,
6316 tnapi->hw_status->rx_jumbo_consumer,
6317 tnapi->hw_status->rx_consumer,
6318 tnapi->hw_status->rx_mini_consumer,
6319 tnapi->hw_status->idx[0].rx_producer,
6320 tnapi->hw_status->idx[0].tx_consumer);
6323 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6325 tnapi->last_tag, tnapi->last_irq_tag,
6326 tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6328 tnapi->prodring.rx_std_prod_idx,
6329 tnapi->prodring.rx_std_cons_idx,
6330 tnapi->prodring.rx_jmb_prod_idx,
6331 tnapi->prodring.rx_jmb_cons_idx);
6335 /* This is called whenever we suspect that the system chipset is re-
6336 * ordering the sequence of MMIO to the tx send mailbox. The symptom
6337 * is bogus tx completions. We try to recover by setting the
6338 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6341 static void tg3_tx_recover(struct tg3 *tp)
6343 BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6344 tp->write32_tx_mbox == tg3_write_indirect_mbox);
6346 netdev_warn(tp->dev,
6347 "The system may be re-ordering memory-mapped I/O "
6348 "cycles to the network device, attempting to recover. "
6349 "Please report the problem to the driver maintainer "
6350 "and include system chipset information.\n");
6352 spin_lock(&tp->lock);
6353 tg3_flag_set(tp, TX_RECOVERY_PENDING);
6354 spin_unlock(&tp->lock);
6357 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6359 /* Tell compiler to fetch tx indices from memory. */
6361 return tnapi->tx_pending -
6362 ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6365 /* Tigon3 never reports partial packet sends. So we do not
6366 * need special logic to handle SKBs that have not had all
6367 * of their frags sent yet, like SunGEM does.
6369 static void tg3_tx(struct tg3_napi *tnapi)
6371 struct tg3 *tp = tnapi->tp;
6372 u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6373 u32 sw_idx = tnapi->tx_cons;
6374 struct netdev_queue *txq;
6375 int index = tnapi - tp->napi;
6376 unsigned int pkts_compl = 0, bytes_compl = 0;
6378 if (tg3_flag(tp, ENABLE_TSS))
6381 txq = netdev_get_tx_queue(tp->dev, index);
6383 while (sw_idx != hw_idx) {
6384 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6385 struct sk_buff *skb = ri->skb;
6388 if (unlikely(skb == NULL)) {
6393 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6394 struct skb_shared_hwtstamps timestamp;
6395 u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6396 hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6398 tg3_hwclock_to_timestamp(tp, hwclock, ×tamp);
6400 skb_tstamp_tx(skb, ×tamp);
6403 pci_unmap_single(tp->pdev,
6404 dma_unmap_addr(ri, mapping),
6410 while (ri->fragmented) {
6411 ri->fragmented = false;
6412 sw_idx = NEXT_TX(sw_idx);
6413 ri = &tnapi->tx_buffers[sw_idx];
6416 sw_idx = NEXT_TX(sw_idx);
6418 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6419 ri = &tnapi->tx_buffers[sw_idx];
6420 if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6423 pci_unmap_page(tp->pdev,
6424 dma_unmap_addr(ri, mapping),
6425 skb_frag_size(&skb_shinfo(skb)->frags[i]),
6428 while (ri->fragmented) {
6429 ri->fragmented = false;
6430 sw_idx = NEXT_TX(sw_idx);
6431 ri = &tnapi->tx_buffers[sw_idx];
6434 sw_idx = NEXT_TX(sw_idx);
6438 bytes_compl += skb->len;
6440 dev_kfree_skb_any(skb);
6442 if (unlikely(tx_bug)) {
6448 netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6450 tnapi->tx_cons = sw_idx;
6452 /* Need to make the tx_cons update visible to tg3_start_xmit()
6453 * before checking for netif_queue_stopped(). Without the
6454 * memory barrier, there is a small possibility that tg3_start_xmit()
6455 * will miss it and cause the queue to be stopped forever.
6459 if (unlikely(netif_tx_queue_stopped(txq) &&
6460 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6461 __netif_tx_lock(txq, smp_processor_id());
6462 if (netif_tx_queue_stopped(txq) &&
6463 (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6464 netif_tx_wake_queue(txq);
6465 __netif_tx_unlock(txq);
6469 static void tg3_frag_free(bool is_frag, void *data)
6472 put_page(virt_to_head_page(data));
6477 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6479 unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6480 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6485 pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6486 map_sz, PCI_DMA_FROMDEVICE);
6487 tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6492 /* Returns size of skb allocated or < 0 on error.
6494 * We only need to fill in the address because the other members
6495 * of the RX descriptor are invariant, see tg3_init_rings.
6497 * Note the purposeful assymetry of cpu vs. chip accesses. For
6498 * posting buffers we only dirty the first cache line of the RX
6499 * descriptor (containing the address). Whereas for the RX status
6500 * buffers the cpu only reads the last cacheline of the RX descriptor
6501 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6503 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6504 u32 opaque_key, u32 dest_idx_unmasked,
6505 unsigned int *frag_size)
6507 struct tg3_rx_buffer_desc *desc;
6508 struct ring_info *map;
6511 int skb_size, data_size, dest_idx;
6513 switch (opaque_key) {
6514 case RXD_OPAQUE_RING_STD:
6515 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6516 desc = &tpr->rx_std[dest_idx];
6517 map = &tpr->rx_std_buffers[dest_idx];
6518 data_size = tp->rx_pkt_map_sz;
6521 case RXD_OPAQUE_RING_JUMBO:
6522 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6523 desc = &tpr->rx_jmb[dest_idx].std;
6524 map = &tpr->rx_jmb_buffers[dest_idx];
6525 data_size = TG3_RX_JMB_MAP_SZ;
6532 /* Do not overwrite any of the map or rp information
6533 * until we are sure we can commit to a new buffer.
6535 * Callers depend upon this behavior and assume that
6536 * we leave everything unchanged if we fail.
6538 skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6539 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6540 if (skb_size <= PAGE_SIZE) {
6541 data = netdev_alloc_frag(skb_size);
6542 *frag_size = skb_size;
6544 data = kmalloc(skb_size, GFP_ATOMIC);
6550 mapping = pci_map_single(tp->pdev,
6551 data + TG3_RX_OFFSET(tp),
6553 PCI_DMA_FROMDEVICE);
6554 if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6555 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6560 dma_unmap_addr_set(map, mapping, mapping);
6562 desc->addr_hi = ((u64)mapping >> 32);
6563 desc->addr_lo = ((u64)mapping & 0xffffffff);
6568 /* We only need to move over in the address because the other
6569 * members of the RX descriptor are invariant. See notes above
6570 * tg3_alloc_rx_data for full details.
6572 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6573 struct tg3_rx_prodring_set *dpr,
6574 u32 opaque_key, int src_idx,
6575 u32 dest_idx_unmasked)
6577 struct tg3 *tp = tnapi->tp;
6578 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6579 struct ring_info *src_map, *dest_map;
6580 struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6583 switch (opaque_key) {
6584 case RXD_OPAQUE_RING_STD:
6585 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6586 dest_desc = &dpr->rx_std[dest_idx];
6587 dest_map = &dpr->rx_std_buffers[dest_idx];
6588 src_desc = &spr->rx_std[src_idx];
6589 src_map = &spr->rx_std_buffers[src_idx];
6592 case RXD_OPAQUE_RING_JUMBO:
6593 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6594 dest_desc = &dpr->rx_jmb[dest_idx].std;
6595 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6596 src_desc = &spr->rx_jmb[src_idx].std;
6597 src_map = &spr->rx_jmb_buffers[src_idx];
6604 dest_map->data = src_map->data;
6605 dma_unmap_addr_set(dest_map, mapping,
6606 dma_unmap_addr(src_map, mapping));
6607 dest_desc->addr_hi = src_desc->addr_hi;
6608 dest_desc->addr_lo = src_desc->addr_lo;
6610 /* Ensure that the update to the skb happens after the physical
6611 * addresses have been transferred to the new BD location.
6615 src_map->data = NULL;
6618 /* The RX ring scheme is composed of multiple rings which post fresh
6619 * buffers to the chip, and one special ring the chip uses to report
6620 * status back to the host.
6622 * The special ring reports the status of received packets to the
6623 * host. The chip does not write into the original descriptor the
6624 * RX buffer was obtained from. The chip simply takes the original
6625 * descriptor as provided by the host, updates the status and length
6626 * field, then writes this into the next status ring entry.
6628 * Each ring the host uses to post buffers to the chip is described
6629 * by a TG3_BDINFO entry in the chips SRAM area. When a packet arrives,
6630 * it is first placed into the on-chip ram. When the packet's length
6631 * is known, it walks down the TG3_BDINFO entries to select the ring.
6632 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6633 * which is within the range of the new packet's length is chosen.
6635 * The "separate ring for rx status" scheme may sound queer, but it makes
6636 * sense from a cache coherency perspective. If only the host writes
6637 * to the buffer post rings, and only the chip writes to the rx status
6638 * rings, then cache lines never move beyond shared-modified state.
6639 * If both the host and chip were to write into the same ring, cache line
6640 * eviction could occur since both entities want it in an exclusive state.
6642 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6644 struct tg3 *tp = tnapi->tp;
6645 u32 work_mask, rx_std_posted = 0;
6646 u32 std_prod_idx, jmb_prod_idx;
6647 u32 sw_idx = tnapi->rx_rcb_ptr;
6650 struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6652 hw_idx = *(tnapi->rx_rcb_prod_idx);
6654 * We need to order the read of hw_idx and the read of
6655 * the opaque cookie.
6660 std_prod_idx = tpr->rx_std_prod_idx;
6661 jmb_prod_idx = tpr->rx_jmb_prod_idx;
6662 while (sw_idx != hw_idx && budget > 0) {
6663 struct ring_info *ri;
6664 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6666 struct sk_buff *skb;
6667 dma_addr_t dma_addr;
6668 u32 opaque_key, desc_idx, *post_ptr;
6672 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6673 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6674 if (opaque_key == RXD_OPAQUE_RING_STD) {
6675 ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6676 dma_addr = dma_unmap_addr(ri, mapping);
6678 post_ptr = &std_prod_idx;
6680 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6681 ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6682 dma_addr = dma_unmap_addr(ri, mapping);
6684 post_ptr = &jmb_prod_idx;
6686 goto next_pkt_nopost;
6688 work_mask |= opaque_key;
6690 if (desc->err_vlan & RXD_ERR_MASK) {
6692 tg3_recycle_rx(tnapi, tpr, opaque_key,
6693 desc_idx, *post_ptr);
6695 /* Other statistics kept track of by card. */
6700 prefetch(data + TG3_RX_OFFSET(tp));
6701 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6704 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6705 RXD_FLAG_PTPSTAT_PTPV1 ||
6706 (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6707 RXD_FLAG_PTPSTAT_PTPV2) {
6708 tstamp = tr32(TG3_RX_TSTAMP_LSB);
6709 tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6712 if (len > TG3_RX_COPY_THRESH(tp)) {
6714 unsigned int frag_size;
6716 skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6717 *post_ptr, &frag_size);
6721 pci_unmap_single(tp->pdev, dma_addr, skb_size,
6722 PCI_DMA_FROMDEVICE);
6724 /* Ensure that the update to the data happens
6725 * after the usage of the old DMA mapping.
6731 skb = build_skb(data, frag_size);
6733 tg3_frag_free(frag_size != 0, data);
6734 goto drop_it_no_recycle;
6736 skb_reserve(skb, TG3_RX_OFFSET(tp));
6738 tg3_recycle_rx(tnapi, tpr, opaque_key,
6739 desc_idx, *post_ptr);
6741 skb = netdev_alloc_skb(tp->dev,
6742 len + TG3_RAW_IP_ALIGN);
6744 goto drop_it_no_recycle;
6746 skb_reserve(skb, TG3_RAW_IP_ALIGN);
6747 pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6749 data + TG3_RX_OFFSET(tp),
6751 pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6756 tg3_hwclock_to_timestamp(tp, tstamp,
6757 skb_hwtstamps(skb));
6759 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6760 (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6761 (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6762 >> RXD_TCPCSUM_SHIFT) == 0xffff))
6763 skb->ip_summed = CHECKSUM_UNNECESSARY;
6765 skb_checksum_none_assert(skb);
6767 skb->protocol = eth_type_trans(skb, tp->dev);
6769 if (len > (tp->dev->mtu + ETH_HLEN) &&
6770 skb->protocol != htons(ETH_P_8021Q) &&
6771 skb->protocol != htons(ETH_P_8021AD)) {
6772 dev_kfree_skb_any(skb);
6773 goto drop_it_no_recycle;
6776 if (desc->type_flags & RXD_FLAG_VLAN &&
6777 !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6778 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6779 desc->err_vlan & RXD_VLAN_MASK);
6781 napi_gro_receive(&tnapi->napi, skb);
6789 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6790 tpr->rx_std_prod_idx = std_prod_idx &
6791 tp->rx_std_ring_mask;
6792 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6793 tpr->rx_std_prod_idx);
6794 work_mask &= ~RXD_OPAQUE_RING_STD;
6799 sw_idx &= tp->rx_ret_ring_mask;
6801 /* Refresh hw_idx to see if there is new work */
6802 if (sw_idx == hw_idx) {
6803 hw_idx = *(tnapi->rx_rcb_prod_idx);
6808 /* ACK the status ring. */
6809 tnapi->rx_rcb_ptr = sw_idx;
6810 tw32_rx_mbox(tnapi->consmbox, sw_idx);
6812 /* Refill RX ring(s). */
6813 if (!tg3_flag(tp, ENABLE_RSS)) {
6814 /* Sync BD data before updating mailbox */
6817 if (work_mask & RXD_OPAQUE_RING_STD) {
6818 tpr->rx_std_prod_idx = std_prod_idx &
6819 tp->rx_std_ring_mask;
6820 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6821 tpr->rx_std_prod_idx);
6823 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6824 tpr->rx_jmb_prod_idx = jmb_prod_idx &
6825 tp->rx_jmb_ring_mask;
6826 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6827 tpr->rx_jmb_prod_idx);
6830 } else if (work_mask) {
6831 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6832 * updated before the producer indices can be updated.
6836 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6837 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6839 if (tnapi != &tp->napi[1]) {
6840 tp->rx_refill = true;
6841 napi_schedule(&tp->napi[1].napi);
6848 static void tg3_poll_link(struct tg3 *tp)
6850 /* handle link change and other phy events */
6851 if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6852 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6854 if (sblk->status & SD_STATUS_LINK_CHG) {
6855 sblk->status = SD_STATUS_UPDATED |
6856 (sblk->status & ~SD_STATUS_LINK_CHG);
6857 spin_lock(&tp->lock);
6858 if (tg3_flag(tp, USE_PHYLIB)) {
6860 (MAC_STATUS_SYNC_CHANGED |
6861 MAC_STATUS_CFG_CHANGED |
6862 MAC_STATUS_MI_COMPLETION |
6863 MAC_STATUS_LNKSTATE_CHANGED));
6866 tg3_setup_phy(tp, false);
6867 spin_unlock(&tp->lock);
6872 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6873 struct tg3_rx_prodring_set *dpr,
6874 struct tg3_rx_prodring_set *spr)
6876 u32 si, di, cpycnt, src_prod_idx;
6880 src_prod_idx = spr->rx_std_prod_idx;
6882 /* Make sure updates to the rx_std_buffers[] entries and the
6883 * standard producer index are seen in the correct order.
6887 if (spr->rx_std_cons_idx == src_prod_idx)
6890 if (spr->rx_std_cons_idx < src_prod_idx)
6891 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6893 cpycnt = tp->rx_std_ring_mask + 1 -
6894 spr->rx_std_cons_idx;
6896 cpycnt = min(cpycnt,
6897 tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6899 si = spr->rx_std_cons_idx;
6900 di = dpr->rx_std_prod_idx;
6902 for (i = di; i < di + cpycnt; i++) {
6903 if (dpr->rx_std_buffers[i].data) {
6913 /* Ensure that updates to the rx_std_buffers ring and the
6914 * shadowed hardware producer ring from tg3_recycle_skb() are
6915 * ordered correctly WRT the skb check above.
6919 memcpy(&dpr->rx_std_buffers[di],
6920 &spr->rx_std_buffers[si],
6921 cpycnt * sizeof(struct ring_info));
6923 for (i = 0; i < cpycnt; i++, di++, si++) {
6924 struct tg3_rx_buffer_desc *sbd, *dbd;
6925 sbd = &spr->rx_std[si];
6926 dbd = &dpr->rx_std[di];
6927 dbd->addr_hi = sbd->addr_hi;
6928 dbd->addr_lo = sbd->addr_lo;
6931 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6932 tp->rx_std_ring_mask;
6933 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6934 tp->rx_std_ring_mask;
6938 src_prod_idx = spr->rx_jmb_prod_idx;
6940 /* Make sure updates to the rx_jmb_buffers[] entries and
6941 * the jumbo producer index are seen in the correct order.
6945 if (spr->rx_jmb_cons_idx == src_prod_idx)
6948 if (spr->rx_jmb_cons_idx < src_prod_idx)
6949 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6951 cpycnt = tp->rx_jmb_ring_mask + 1 -
6952 spr->rx_jmb_cons_idx;
6954 cpycnt = min(cpycnt,
6955 tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6957 si = spr->rx_jmb_cons_idx;
6958 di = dpr->rx_jmb_prod_idx;
6960 for (i = di; i < di + cpycnt; i++) {
6961 if (dpr->rx_jmb_buffers[i].data) {
6971 /* Ensure that updates to the rx_jmb_buffers ring and the
6972 * shadowed hardware producer ring from tg3_recycle_skb() are
6973 * ordered correctly WRT the skb check above.
6977 memcpy(&dpr->rx_jmb_buffers[di],
6978 &spr->rx_jmb_buffers[si],
6979 cpycnt * sizeof(struct ring_info));
6981 for (i = 0; i < cpycnt; i++, di++, si++) {
6982 struct tg3_rx_buffer_desc *sbd, *dbd;
6983 sbd = &spr->rx_jmb[si].std;
6984 dbd = &dpr->rx_jmb[di].std;
6985 dbd->addr_hi = sbd->addr_hi;
6986 dbd->addr_lo = sbd->addr_lo;
6989 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6990 tp->rx_jmb_ring_mask;
6991 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6992 tp->rx_jmb_ring_mask;
6998 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
7000 struct tg3 *tp = tnapi->tp;
7002 /* run TX completion thread */
7003 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7005 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7009 if (!tnapi->rx_rcb_prod_idx)
7012 /* run RX thread, within the bounds set by NAPI.
7013 * All RX "locking" is done by ensuring outside
7014 * code synchronizes with tg3->napi.poll()
7016 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7017 work_done += tg3_rx(tnapi, budget - work_done);
7019 if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7020 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7022 u32 std_prod_idx = dpr->rx_std_prod_idx;
7023 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7025 tp->rx_refill = false;
7026 for (i = 1; i <= tp->rxq_cnt; i++)
7027 err |= tg3_rx_prodring_xfer(tp, dpr,
7028 &tp->napi[i].prodring);
7032 if (std_prod_idx != dpr->rx_std_prod_idx)
7033 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7034 dpr->rx_std_prod_idx);
7036 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7037 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7038 dpr->rx_jmb_prod_idx);
7043 tw32_f(HOSTCC_MODE, tp->coal_now);
7049 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7051 if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7052 schedule_work(&tp->reset_task);
7055 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7057 cancel_work_sync(&tp->reset_task);
7058 tg3_flag_clear(tp, RESET_TASK_PENDING);
7059 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7062 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7064 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7065 struct tg3 *tp = tnapi->tp;
7067 struct tg3_hw_status *sblk = tnapi->hw_status;
7070 work_done = tg3_poll_work(tnapi, work_done, budget);
7072 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7075 if (unlikely(work_done >= budget))
7078 /* tp->last_tag is used in tg3_int_reenable() below
7079 * to tell the hw how much work has been processed,
7080 * so we must read it before checking for more work.
7082 tnapi->last_tag = sblk->status_tag;
7083 tnapi->last_irq_tag = tnapi->last_tag;
7086 /* check for RX/TX work to do */
7087 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7088 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7090 /* This test here is not race free, but will reduce
7091 * the number of interrupts by looping again.
7093 if (tnapi == &tp->napi[1] && tp->rx_refill)
7096 napi_complete(napi);
7097 /* Reenable interrupts. */
7098 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7100 /* This test here is synchronized by napi_schedule()
7101 * and napi_complete() to close the race condition.
7103 if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7104 tw32(HOSTCC_MODE, tp->coalesce_mode |
7105 HOSTCC_MODE_ENABLE |
7116 /* work_done is guaranteed to be less than budget. */
7117 napi_complete(napi);
7118 tg3_reset_task_schedule(tp);
7122 static void tg3_process_error(struct tg3 *tp)
7125 bool real_error = false;
7127 if (tg3_flag(tp, ERROR_PROCESSED))
7130 /* Check Flow Attention register */
7131 val = tr32(HOSTCC_FLOW_ATTN);
7132 if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7133 netdev_err(tp->dev, "FLOW Attention error. Resetting chip.\n");
7137 if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7138 netdev_err(tp->dev, "MSI Status error. Resetting chip.\n");
7142 if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7143 netdev_err(tp->dev, "DMA Status error. Resetting chip.\n");
7152 tg3_flag_set(tp, ERROR_PROCESSED);
7153 tg3_reset_task_schedule(tp);
7156 static int tg3_poll(struct napi_struct *napi, int budget)
7158 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7159 struct tg3 *tp = tnapi->tp;
7161 struct tg3_hw_status *sblk = tnapi->hw_status;
7164 if (sblk->status & SD_STATUS_ERROR)
7165 tg3_process_error(tp);
7169 work_done = tg3_poll_work(tnapi, work_done, budget);
7171 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7174 if (unlikely(work_done >= budget))
7177 if (tg3_flag(tp, TAGGED_STATUS)) {
7178 /* tp->last_tag is used in tg3_int_reenable() below
7179 * to tell the hw how much work has been processed,
7180 * so we must read it before checking for more work.
7182 tnapi->last_tag = sblk->status_tag;
7183 tnapi->last_irq_tag = tnapi->last_tag;
7186 sblk->status &= ~SD_STATUS_UPDATED;
7188 if (likely(!tg3_has_work(tnapi))) {
7189 napi_complete(napi);
7190 tg3_int_reenable(tnapi);
7198 /* work_done is guaranteed to be less than budget. */
7199 napi_complete(napi);
7200 tg3_reset_task_schedule(tp);
7204 static void tg3_napi_disable(struct tg3 *tp)
7208 for (i = tp->irq_cnt - 1; i >= 0; i--)
7209 napi_disable(&tp->napi[i].napi);
7212 static void tg3_napi_enable(struct tg3 *tp)
7216 for (i = 0; i < tp->irq_cnt; i++)
7217 napi_enable(&tp->napi[i].napi);
7220 static void tg3_napi_init(struct tg3 *tp)
7224 netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7225 for (i = 1; i < tp->irq_cnt; i++)
7226 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7229 static void tg3_napi_fini(struct tg3 *tp)
7233 for (i = 0; i < tp->irq_cnt; i++)
7234 netif_napi_del(&tp->napi[i].napi);
7237 static inline void tg3_netif_stop(struct tg3 *tp)
7239 tp->dev->trans_start = jiffies; /* prevent tx timeout */
7240 tg3_napi_disable(tp);
7241 netif_carrier_off(tp->dev);
7242 netif_tx_disable(tp->dev);
7245 /* tp->lock must be held */
7246 static inline void tg3_netif_start(struct tg3 *tp)
7250 /* NOTE: unconditional netif_tx_wake_all_queues is only
7251 * appropriate so long as all callers are assured to
7252 * have free tx slots (such as after tg3_init_hw)
7254 netif_tx_wake_all_queues(tp->dev);
7257 netif_carrier_on(tp->dev);
7259 tg3_napi_enable(tp);
7260 tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7261 tg3_enable_ints(tp);
7264 static void tg3_irq_quiesce(struct tg3 *tp)
7268 BUG_ON(tp->irq_sync);
7273 for (i = 0; i < tp->irq_cnt; i++)
7274 synchronize_irq(tp->napi[i].irq_vec);
7277 /* Fully shutdown all tg3 driver activity elsewhere in the system.
7278 * If irq_sync is non-zero, then the IRQ handler must be synchronized
7279 * with as well. Most of the time, this is not necessary except when
7280 * shutting down the device.
7282 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7284 spin_lock_bh(&tp->lock);
7286 tg3_irq_quiesce(tp);
7289 static inline void tg3_full_unlock(struct tg3 *tp)
7291 spin_unlock_bh(&tp->lock);
7294 /* One-shot MSI handler - Chip automatically disables interrupt
7295 * after sending MSI so driver doesn't have to do it.
7297 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7299 struct tg3_napi *tnapi = dev_id;
7300 struct tg3 *tp = tnapi->tp;
7302 prefetch(tnapi->hw_status);
7304 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7306 if (likely(!tg3_irq_sync(tp)))
7307 napi_schedule(&tnapi->napi);
7312 /* MSI ISR - No need to check for interrupt sharing and no need to
7313 * flush status block and interrupt mailbox. PCI ordering rules
7314 * guarantee that MSI will arrive after the status block.
7316 static irqreturn_t tg3_msi(int irq, void *dev_id)
7318 struct tg3_napi *tnapi = dev_id;
7319 struct tg3 *tp = tnapi->tp;
7321 prefetch(tnapi->hw_status);
7323 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7325 * Writing any value to intr-mbox-0 clears PCI INTA# and
7326 * chip-internal interrupt pending events.
7327 * Writing non-zero to intr-mbox-0 additional tells the
7328 * NIC to stop sending us irqs, engaging "in-intr-handler"
7331 tw32_mailbox(tnapi->int_mbox, 0x00000001);
7332 if (likely(!tg3_irq_sync(tp)))
7333 napi_schedule(&tnapi->napi);
7335 return IRQ_RETVAL(1);
7338 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7340 struct tg3_napi *tnapi = dev_id;
7341 struct tg3 *tp = tnapi->tp;
7342 struct tg3_hw_status *sblk = tnapi->hw_status;
7343 unsigned int handled = 1;
7345 /* In INTx mode, it is possible for the interrupt to arrive at
7346 * the CPU before the status block posted prior to the interrupt.
7347 * Reading the PCI State register will confirm whether the
7348 * interrupt is ours and will flush the status block.
7350 if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7351 if (tg3_flag(tp, CHIP_RESETTING) ||
7352 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7359 * Writing any value to intr-mbox-0 clears PCI INTA# and
7360 * chip-internal interrupt pending events.
7361 * Writing non-zero to intr-mbox-0 additional tells the
7362 * NIC to stop sending us irqs, engaging "in-intr-handler"
7365 * Flush the mailbox to de-assert the IRQ immediately to prevent
7366 * spurious interrupts. The flush impacts performance but
7367 * excessive spurious interrupts can be worse in some cases.
7369 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7370 if (tg3_irq_sync(tp))
7372 sblk->status &= ~SD_STATUS_UPDATED;
7373 if (likely(tg3_has_work(tnapi))) {
7374 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7375 napi_schedule(&tnapi->napi);
7377 /* No work, shared interrupt perhaps? re-enable
7378 * interrupts, and flush that PCI write
7380 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7384 return IRQ_RETVAL(handled);
7387 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7389 struct tg3_napi *tnapi = dev_id;
7390 struct tg3 *tp = tnapi->tp;
7391 struct tg3_hw_status *sblk = tnapi->hw_status;
7392 unsigned int handled = 1;
7394 /* In INTx mode, it is possible for the interrupt to arrive at
7395 * the CPU before the status block posted prior to the interrupt.
7396 * Reading the PCI State register will confirm whether the
7397 * interrupt is ours and will flush the status block.
7399 if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7400 if (tg3_flag(tp, CHIP_RESETTING) ||
7401 (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7408 * writing any value to intr-mbox-0 clears PCI INTA# and
7409 * chip-internal interrupt pending events.
7410 * writing non-zero to intr-mbox-0 additional tells the
7411 * NIC to stop sending us irqs, engaging "in-intr-handler"
7414 * Flush the mailbox to de-assert the IRQ immediately to prevent
7415 * spurious interrupts. The flush impacts performance but
7416 * excessive spurious interrupts can be worse in some cases.
7418 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7421 * In a shared interrupt configuration, sometimes other devices'
7422 * interrupts will scream. We record the current status tag here
7423 * so that the above check can report that the screaming interrupts
7424 * are unhandled. Eventually they will be silenced.
7426 tnapi->last_irq_tag = sblk->status_tag;
7428 if (tg3_irq_sync(tp))
7431 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7433 napi_schedule(&tnapi->napi);
7436 return IRQ_RETVAL(handled);
7439 /* ISR for interrupt test */
7440 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7442 struct tg3_napi *tnapi = dev_id;
7443 struct tg3 *tp = tnapi->tp;
7444 struct tg3_hw_status *sblk = tnapi->hw_status;
7446 if ((sblk->status & SD_STATUS_UPDATED) ||
7447 !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7448 tg3_disable_ints(tp);
7449 return IRQ_RETVAL(1);
7451 return IRQ_RETVAL(0);
7454 #ifdef CONFIG_NET_POLL_CONTROLLER
7455 static void tg3_poll_controller(struct net_device *dev)
7458 struct tg3 *tp = netdev_priv(dev);
7460 if (tg3_irq_sync(tp))
7463 for (i = 0; i < tp->irq_cnt; i++)
7464 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7468 static void tg3_tx_timeout(struct net_device *dev)
7470 struct tg3 *tp = netdev_priv(dev);
7472 if (netif_msg_tx_err(tp)) {
7473 netdev_err(dev, "transmit timed out, resetting\n");
7477 tg3_reset_task_schedule(tp);
7480 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7481 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7483 u32 base = (u32) mapping & 0xffffffff;
7485 return base + len + 8 < base;
7488 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7489 * of any 4GB boundaries: 4G, 8G, etc
7491 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7494 if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7495 u32 base = (u32) mapping & 0xffffffff;
7497 return ((base + len + (mss & 0x3fff)) < base);
7502 /* Test for DMA addresses > 40-bit */
7503 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7506 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7507 if (tg3_flag(tp, 40BIT_DMA_BUG))
7508 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7515 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7516 dma_addr_t mapping, u32 len, u32 flags,
7519 txbd->addr_hi = ((u64) mapping >> 32);
7520 txbd->addr_lo = ((u64) mapping & 0xffffffff);
7521 txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7522 txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7525 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7526 dma_addr_t map, u32 len, u32 flags,
7529 struct tg3 *tp = tnapi->tp;
7532 if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7535 if (tg3_4g_overflow_test(map, len))
7538 if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7541 if (tg3_40bit_overflow_test(tp, map, len))
7544 if (tp->dma_limit) {
7545 u32 prvidx = *entry;
7546 u32 tmp_flag = flags & ~TXD_FLAG_END;
7547 while (len > tp->dma_limit && *budget) {
7548 u32 frag_len = tp->dma_limit;
7549 len -= tp->dma_limit;
7551 /* Avoid the 8byte DMA problem */
7553 len += tp->dma_limit / 2;
7554 frag_len = tp->dma_limit / 2;
7557 tnapi->tx_buffers[*entry].fragmented = true;
7559 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7560 frag_len, tmp_flag, mss, vlan);
7563 *entry = NEXT_TX(*entry);
7570 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7571 len, flags, mss, vlan);
7573 *entry = NEXT_TX(*entry);
7576 tnapi->tx_buffers[prvidx].fragmented = false;
7580 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7581 len, flags, mss, vlan);
7582 *entry = NEXT_TX(*entry);
7588 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7591 struct sk_buff *skb;
7592 struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7597 pci_unmap_single(tnapi->tp->pdev,
7598 dma_unmap_addr(txb, mapping),
7602 while (txb->fragmented) {
7603 txb->fragmented = false;
7604 entry = NEXT_TX(entry);
7605 txb = &tnapi->tx_buffers[entry];
7608 for (i = 0; i <= last; i++) {
7609 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7611 entry = NEXT_TX(entry);
7612 txb = &tnapi->tx_buffers[entry];
7614 pci_unmap_page(tnapi->tp->pdev,
7615 dma_unmap_addr(txb, mapping),
7616 skb_frag_size(frag), PCI_DMA_TODEVICE);
7618 while (txb->fragmented) {
7619 txb->fragmented = false;
7620 entry = NEXT_TX(entry);
7621 txb = &tnapi->tx_buffers[entry];
7626 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7627 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7628 struct sk_buff **pskb,
7629 u32 *entry, u32 *budget,
7630 u32 base_flags, u32 mss, u32 vlan)
7632 struct tg3 *tp = tnapi->tp;
7633 struct sk_buff *new_skb, *skb = *pskb;
7634 dma_addr_t new_addr = 0;
7637 if (tg3_asic_rev(tp) != ASIC_REV_5701)
7638 new_skb = skb_copy(skb, GFP_ATOMIC);
7640 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7642 new_skb = skb_copy_expand(skb,
7643 skb_headroom(skb) + more_headroom,
7644 skb_tailroom(skb), GFP_ATOMIC);
7650 /* New SKB is guaranteed to be linear. */
7651 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7653 /* Make sure the mapping succeeded */
7654 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7655 dev_kfree_skb_any(new_skb);
7658 u32 save_entry = *entry;
7660 base_flags |= TXD_FLAG_END;
7662 tnapi->tx_buffers[*entry].skb = new_skb;
7663 dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7666 if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7667 new_skb->len, base_flags,
7669 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7670 dev_kfree_skb_any(new_skb);
7676 dev_kfree_skb_any(skb);
7681 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7683 /* Use GSO to workaround a rare TSO bug that may be triggered when the
7684 * TSO header is greater than 80 bytes.
7686 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7688 struct sk_buff *segs, *nskb;
7689 u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7691 /* Estimate the number of fragments in the worst case */
7692 if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7693 netif_stop_queue(tp->dev);
7695 /* netif_tx_stop_queue() must be done before checking
7696 * checking tx index in tg3_tx_avail() below, because in
7697 * tg3_tx(), we update tx index before checking for
7698 * netif_tx_queue_stopped().
7701 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7702 return NETDEV_TX_BUSY;
7704 netif_wake_queue(tp->dev);
7707 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7709 goto tg3_tso_bug_end;
7715 tg3_start_xmit(nskb, tp->dev);
7719 dev_kfree_skb_any(skb);
7721 return NETDEV_TX_OK;
7724 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7725 * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7727 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7729 struct tg3 *tp = netdev_priv(dev);
7730 u32 len, entry, base_flags, mss, vlan = 0;
7732 int i = -1, would_hit_hwbug;
7734 struct tg3_napi *tnapi;
7735 struct netdev_queue *txq;
7738 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7739 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7740 if (tg3_flag(tp, ENABLE_TSS))
7743 budget = tg3_tx_avail(tnapi);
7745 /* We are running in BH disabled context with netif_tx_lock
7746 * and TX reclaim runs via tp->napi.poll inside of a software
7747 * interrupt. Furthermore, IRQ processing runs lockless so we have
7748 * no IRQ context deadlocks to worry about either. Rejoice!
7750 if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7751 if (!netif_tx_queue_stopped(txq)) {
7752 netif_tx_stop_queue(txq);
7754 /* This is a hard error, log it. */
7756 "BUG! Tx Ring full when queue awake!\n");
7758 return NETDEV_TX_BUSY;
7761 entry = tnapi->tx_prod;
7764 mss = skb_shinfo(skb)->gso_size;
7767 u32 tcp_opt_len, hdr_len;
7769 if (skb_header_cloned(skb) &&
7770 pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7774 tcp_opt_len = tcp_optlen(skb);
7776 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7778 /* HW/FW can not correctly segment packets that have been
7779 * vlan encapsulated.
7781 if (skb->protocol == htons(ETH_P_8021Q) ||
7782 skb->protocol == htons(ETH_P_8021AD))
7783 return tg3_tso_bug(tp, skb);
7785 if (!skb_is_gso_v6(skb)) {
7787 iph->tot_len = htons(mss + hdr_len);
7790 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7791 tg3_flag(tp, TSO_BUG))
7792 return tg3_tso_bug(tp, skb);
7794 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7795 TXD_FLAG_CPU_POST_DMA);
7797 if (tg3_flag(tp, HW_TSO_1) ||
7798 tg3_flag(tp, HW_TSO_2) ||
7799 tg3_flag(tp, HW_TSO_3)) {
7800 tcp_hdr(skb)->check = 0;
7801 base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7803 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7808 if (tg3_flag(tp, HW_TSO_3)) {
7809 mss |= (hdr_len & 0xc) << 12;
7811 base_flags |= 0x00000010;
7812 base_flags |= (hdr_len & 0x3e0) << 5;
7813 } else if (tg3_flag(tp, HW_TSO_2))
7814 mss |= hdr_len << 9;
7815 else if (tg3_flag(tp, HW_TSO_1) ||
7816 tg3_asic_rev(tp) == ASIC_REV_5705) {
7817 if (tcp_opt_len || iph->ihl > 5) {
7820 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7821 mss |= (tsflags << 11);
7824 if (tcp_opt_len || iph->ihl > 5) {
7827 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7828 base_flags |= tsflags << 12;
7831 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7832 /* HW/FW can not correctly checksum packets that have been
7833 * vlan encapsulated.
7835 if (skb->protocol == htons(ETH_P_8021Q) ||
7836 skb->protocol == htons(ETH_P_8021AD)) {
7837 if (skb_checksum_help(skb))
7840 base_flags |= TXD_FLAG_TCPUDP_CSUM;
7844 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7845 !mss && skb->len > VLAN_ETH_FRAME_LEN)
7846 base_flags |= TXD_FLAG_JMB_PKT;
7848 if (vlan_tx_tag_present(skb)) {
7849 base_flags |= TXD_FLAG_VLAN;
7850 vlan = vlan_tx_tag_get(skb);
7853 if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7854 tg3_flag(tp, TX_TSTAMP_EN)) {
7855 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7856 base_flags |= TXD_FLAG_HWTSTAMP;
7859 len = skb_headlen(skb);
7861 mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7862 if (pci_dma_mapping_error(tp->pdev, mapping))
7866 tnapi->tx_buffers[entry].skb = skb;
7867 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7869 would_hit_hwbug = 0;
7871 if (tg3_flag(tp, 5701_DMA_BUG))
7872 would_hit_hwbug = 1;
7874 if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7875 ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7877 would_hit_hwbug = 1;
7878 } else if (skb_shinfo(skb)->nr_frags > 0) {
7881 if (!tg3_flag(tp, HW_TSO_1) &&
7882 !tg3_flag(tp, HW_TSO_2) &&
7883 !tg3_flag(tp, HW_TSO_3))
7886 /* Now loop through additional data
7887 * fragments, and queue them.
7889 last = skb_shinfo(skb)->nr_frags - 1;
7890 for (i = 0; i <= last; i++) {
7891 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7893 len = skb_frag_size(frag);
7894 mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7895 len, DMA_TO_DEVICE);
7897 tnapi->tx_buffers[entry].skb = NULL;
7898 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7900 if (dma_mapping_error(&tp->pdev->dev, mapping))
7904 tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7906 ((i == last) ? TXD_FLAG_END : 0),
7908 would_hit_hwbug = 1;
7914 if (would_hit_hwbug) {
7915 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7917 /* If the workaround fails due to memory/mapping
7918 * failure, silently drop this packet.
7920 entry = tnapi->tx_prod;
7921 budget = tg3_tx_avail(tnapi);
7922 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7923 base_flags, mss, vlan))
7927 skb_tx_timestamp(skb);
7928 netdev_tx_sent_queue(txq, skb->len);
7930 /* Sync BD data before updating mailbox */
7933 /* Packets are ready, update Tx producer idx local and on card. */
7934 tw32_tx_mbox(tnapi->prodmbox, entry);
7936 tnapi->tx_prod = entry;
7937 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7938 netif_tx_stop_queue(txq);
7940 /* netif_tx_stop_queue() must be done before checking
7941 * checking tx index in tg3_tx_avail() below, because in
7942 * tg3_tx(), we update tx index before checking for
7943 * netif_tx_queue_stopped().
7946 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7947 netif_tx_wake_queue(txq);
7951 return NETDEV_TX_OK;
7954 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7955 tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7957 dev_kfree_skb_any(skb);
7960 return NETDEV_TX_OK;
7963 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7966 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7967 MAC_MODE_PORT_MODE_MASK);
7969 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7971 if (!tg3_flag(tp, 5705_PLUS))
7972 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7974 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7975 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7977 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7979 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7981 if (tg3_flag(tp, 5705_PLUS) ||
7982 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7983 tg3_asic_rev(tp) == ASIC_REV_5700)
7984 tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7987 tw32(MAC_MODE, tp->mac_mode);
7991 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7993 u32 val, bmcr, mac_mode, ptest = 0;
7995 tg3_phy_toggle_apd(tp, false);
7996 tg3_phy_toggle_automdix(tp, false);
7998 if (extlpbk && tg3_phy_set_extloopbk(tp))
8001 bmcr = BMCR_FULLDPLX;
8006 bmcr |= BMCR_SPEED100;
8010 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8012 bmcr |= BMCR_SPEED100;
8015 bmcr |= BMCR_SPEED1000;
8020 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8021 tg3_readphy(tp, MII_CTRL1000, &val);
8022 val |= CTL1000_AS_MASTER |
8023 CTL1000_ENABLE_MASTER;
8024 tg3_writephy(tp, MII_CTRL1000, val);
8026 ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8027 MII_TG3_FET_PTEST_TRIM_2;
8028 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8031 bmcr |= BMCR_LOOPBACK;
8033 tg3_writephy(tp, MII_BMCR, bmcr);
8035 /* The write needs to be flushed for the FETs */
8036 if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8037 tg3_readphy(tp, MII_BMCR, &bmcr);
8041 if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8042 tg3_asic_rev(tp) == ASIC_REV_5785) {
8043 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8044 MII_TG3_FET_PTEST_FRC_TX_LINK |
8045 MII_TG3_FET_PTEST_FRC_TX_LOCK);
8047 /* The write needs to be flushed for the AC131 */
8048 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8051 /* Reset to prevent losing 1st rx packet intermittently */
8052 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8053 tg3_flag(tp, 5780_CLASS)) {
8054 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8056 tw32_f(MAC_RX_MODE, tp->rx_mode);
8059 mac_mode = tp->mac_mode &
8060 ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8061 if (speed == SPEED_1000)
8062 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8064 mac_mode |= MAC_MODE_PORT_MODE_MII;
8066 if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8067 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8069 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8070 mac_mode &= ~MAC_MODE_LINK_POLARITY;
8071 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8072 mac_mode |= MAC_MODE_LINK_POLARITY;
8074 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8075 MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8078 tw32(MAC_MODE, mac_mode);
8084 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8086 struct tg3 *tp = netdev_priv(dev);
8088 if (features & NETIF_F_LOOPBACK) {
8089 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8092 spin_lock_bh(&tp->lock);
8093 tg3_mac_loopback(tp, true);
8094 netif_carrier_on(tp->dev);
8095 spin_unlock_bh(&tp->lock);
8096 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8098 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8101 spin_lock_bh(&tp->lock);
8102 tg3_mac_loopback(tp, false);
8103 /* Force link status check */
8104 tg3_setup_phy(tp, true);
8105 spin_unlock_bh(&tp->lock);
8106 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8110 static netdev_features_t tg3_fix_features(struct net_device *dev,
8111 netdev_features_t features)
8113 struct tg3 *tp = netdev_priv(dev);
8115 if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8116 features &= ~NETIF_F_ALL_TSO;
8121 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8123 netdev_features_t changed = dev->features ^ features;
8125 if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8126 tg3_set_loopback(dev, features);
8131 static void tg3_rx_prodring_free(struct tg3 *tp,
8132 struct tg3_rx_prodring_set *tpr)
8136 if (tpr != &tp->napi[0].prodring) {
8137 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8138 i = (i + 1) & tp->rx_std_ring_mask)
8139 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8142 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8143 for (i = tpr->rx_jmb_cons_idx;
8144 i != tpr->rx_jmb_prod_idx;
8145 i = (i + 1) & tp->rx_jmb_ring_mask) {
8146 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8154 for (i = 0; i <= tp->rx_std_ring_mask; i++)
8155 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8158 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8159 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8160 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8165 /* Initialize rx rings for packet processing.
8167 * The chip has been shut down and the driver detached from
8168 * the networking, so no interrupts or new tx packets will
8169 * end up in the driver. tp->{tx,}lock are held and thus
8172 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8173 struct tg3_rx_prodring_set *tpr)
8175 u32 i, rx_pkt_dma_sz;
8177 tpr->rx_std_cons_idx = 0;
8178 tpr->rx_std_prod_idx = 0;
8179 tpr->rx_jmb_cons_idx = 0;
8180 tpr->rx_jmb_prod_idx = 0;
8182 if (tpr != &tp->napi[0].prodring) {
8183 memset(&tpr->rx_std_buffers[0], 0,
8184 TG3_RX_STD_BUFF_RING_SIZE(tp));
8185 if (tpr->rx_jmb_buffers)
8186 memset(&tpr->rx_jmb_buffers[0], 0,
8187 TG3_RX_JMB_BUFF_RING_SIZE(tp));
8191 /* Zero out all descriptors. */
8192 memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8194 rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8195 if (tg3_flag(tp, 5780_CLASS) &&
8196 tp->dev->mtu > ETH_DATA_LEN)
8197 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8198 tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8200 /* Initialize invariants of the rings, we only set this
8201 * stuff once. This works because the card does not
8202 * write into the rx buffer posting rings.
8204 for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8205 struct tg3_rx_buffer_desc *rxd;
8207 rxd = &tpr->rx_std[i];
8208 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8209 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8210 rxd->opaque = (RXD_OPAQUE_RING_STD |
8211 (i << RXD_OPAQUE_INDEX_SHIFT));
8214 /* Now allocate fresh SKBs for each rx ring. */
8215 for (i = 0; i < tp->rx_pending; i++) {
8216 unsigned int frag_size;
8218 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8220 netdev_warn(tp->dev,
8221 "Using a smaller RX standard ring. Only "
8222 "%d out of %d buffers were allocated "
8223 "successfully\n", i, tp->rx_pending);
8231 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8234 memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8236 if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8239 for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8240 struct tg3_rx_buffer_desc *rxd;
8242 rxd = &tpr->rx_jmb[i].std;
8243 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8244 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8246 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8247 (i << RXD_OPAQUE_INDEX_SHIFT));
8250 for (i = 0; i < tp->rx_jumbo_pending; i++) {
8251 unsigned int frag_size;
8253 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8255 netdev_warn(tp->dev,
8256 "Using a smaller RX jumbo ring. Only %d "
8257 "out of %d buffers were allocated "
8258 "successfully\n", i, tp->rx_jumbo_pending);
8261 tp->rx_jumbo_pending = i;
8270 tg3_rx_prodring_free(tp, tpr);
8274 static void tg3_rx_prodring_fini(struct tg3 *tp,
8275 struct tg3_rx_prodring_set *tpr)
8277 kfree(tpr->rx_std_buffers);
8278 tpr->rx_std_buffers = NULL;
8279 kfree(tpr->rx_jmb_buffers);
8280 tpr->rx_jmb_buffers = NULL;
8282 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8283 tpr->rx_std, tpr->rx_std_mapping);
8287 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8288 tpr->rx_jmb, tpr->rx_jmb_mapping);
8293 static int tg3_rx_prodring_init(struct tg3 *tp,
8294 struct tg3_rx_prodring_set *tpr)
8296 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8298 if (!tpr->rx_std_buffers)
8301 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8302 TG3_RX_STD_RING_BYTES(tp),
8303 &tpr->rx_std_mapping,
8308 if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8309 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8311 if (!tpr->rx_jmb_buffers)
8314 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8315 TG3_RX_JMB_RING_BYTES(tp),
8316 &tpr->rx_jmb_mapping,
8325 tg3_rx_prodring_fini(tp, tpr);
8329 /* Free up pending packets in all rx/tx rings.
8331 * The chip has been shut down and the driver detached from
8332 * the networking, so no interrupts or new tx packets will
8333 * end up in the driver. tp->{tx,}lock is not held and we are not
8334 * in an interrupt context and thus may sleep.
8336 static void tg3_free_rings(struct tg3 *tp)
8340 for (j = 0; j < tp->irq_cnt; j++) {
8341 struct tg3_napi *tnapi = &tp->napi[j];
8343 tg3_rx_prodring_free(tp, &tnapi->prodring);
8345 if (!tnapi->tx_buffers)
8348 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8349 struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8354 tg3_tx_skb_unmap(tnapi, i,
8355 skb_shinfo(skb)->nr_frags - 1);
8357 dev_kfree_skb_any(skb);
8359 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8363 /* Initialize tx/rx rings for packet processing.
8365 * The chip has been shut down and the driver detached from
8366 * the networking, so no interrupts or new tx packets will
8367 * end up in the driver. tp->{tx,}lock are held and thus
8370 static int tg3_init_rings(struct tg3 *tp)
8374 /* Free up all the SKBs. */
8377 for (i = 0; i < tp->irq_cnt; i++) {
8378 struct tg3_napi *tnapi = &tp->napi[i];
8380 tnapi->last_tag = 0;
8381 tnapi->last_irq_tag = 0;
8382 tnapi->hw_status->status = 0;
8383 tnapi->hw_status->status_tag = 0;
8384 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8389 memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8391 tnapi->rx_rcb_ptr = 0;
8393 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8395 if (tnapi->prodring.rx_std &&
8396 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8405 static void tg3_mem_tx_release(struct tg3 *tp)
8409 for (i = 0; i < tp->irq_max; i++) {
8410 struct tg3_napi *tnapi = &tp->napi[i];
8412 if (tnapi->tx_ring) {
8413 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8414 tnapi->tx_ring, tnapi->tx_desc_mapping);
8415 tnapi->tx_ring = NULL;
8418 kfree(tnapi->tx_buffers);
8419 tnapi->tx_buffers = NULL;
8423 static int tg3_mem_tx_acquire(struct tg3 *tp)
8426 struct tg3_napi *tnapi = &tp->napi[0];
8428 /* If multivector TSS is enabled, vector 0 does not handle
8429 * tx interrupts. Don't allocate any resources for it.
8431 if (tg3_flag(tp, ENABLE_TSS))
8434 for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8435 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8436 TG3_TX_RING_SIZE, GFP_KERNEL);
8437 if (!tnapi->tx_buffers)
8440 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8442 &tnapi->tx_desc_mapping,
8444 if (!tnapi->tx_ring)
8451 tg3_mem_tx_release(tp);
8455 static void tg3_mem_rx_release(struct tg3 *tp)
8459 for (i = 0; i < tp->irq_max; i++) {
8460 struct tg3_napi *tnapi = &tp->napi[i];
8462 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8467 dma_free_coherent(&tp->pdev->dev,
8468 TG3_RX_RCB_RING_BYTES(tp),
8470 tnapi->rx_rcb_mapping);
8471 tnapi->rx_rcb = NULL;
8475 static int tg3_mem_rx_acquire(struct tg3 *tp)
8477 unsigned int i, limit;
8479 limit = tp->rxq_cnt;
8481 /* If RSS is enabled, we need a (dummy) producer ring
8482 * set on vector zero. This is the true hw prodring.
8484 if (tg3_flag(tp, ENABLE_RSS))
8487 for (i = 0; i < limit; i++) {
8488 struct tg3_napi *tnapi = &tp->napi[i];
8490 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8493 /* If multivector RSS is enabled, vector 0
8494 * does not handle rx or tx interrupts.
8495 * Don't allocate any resources for it.
8497 if (!i && tg3_flag(tp, ENABLE_RSS))
8500 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8501 TG3_RX_RCB_RING_BYTES(tp),
8502 &tnapi->rx_rcb_mapping,
8503 GFP_KERNEL | __GFP_ZERO);
8511 tg3_mem_rx_release(tp);
8516 * Must not be invoked with interrupt sources disabled and
8517 * the hardware shutdown down.
8519 static void tg3_free_consistent(struct tg3 *tp)
8523 for (i = 0; i < tp->irq_cnt; i++) {
8524 struct tg3_napi *tnapi = &tp->napi[i];
8526 if (tnapi->hw_status) {
8527 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8529 tnapi->status_mapping);
8530 tnapi->hw_status = NULL;
8534 tg3_mem_rx_release(tp);
8535 tg3_mem_tx_release(tp);
8538 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8539 tp->hw_stats, tp->stats_mapping);
8540 tp->hw_stats = NULL;
8545 * Must not be invoked with interrupt sources disabled and
8546 * the hardware shutdown down. Can sleep.
8548 static int tg3_alloc_consistent(struct tg3 *tp)
8552 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8553 sizeof(struct tg3_hw_stats),
8555 GFP_KERNEL | __GFP_ZERO);
8559 for (i = 0; i < tp->irq_cnt; i++) {
8560 struct tg3_napi *tnapi = &tp->napi[i];
8561 struct tg3_hw_status *sblk;
8563 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8565 &tnapi->status_mapping,
8566 GFP_KERNEL | __GFP_ZERO);
8567 if (!tnapi->hw_status)
8570 sblk = tnapi->hw_status;
8572 if (tg3_flag(tp, ENABLE_RSS)) {
8573 u16 *prodptr = NULL;
8576 * When RSS is enabled, the status block format changes
8577 * slightly. The "rx_jumbo_consumer", "reserved",
8578 * and "rx_mini_consumer" members get mapped to the
8579 * other three rx return ring producer indexes.
8583 prodptr = &sblk->idx[0].rx_producer;
8586 prodptr = &sblk->rx_jumbo_consumer;
8589 prodptr = &sblk->reserved;
8592 prodptr = &sblk->rx_mini_consumer;
8595 tnapi->rx_rcb_prod_idx = prodptr;
8597 tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8601 if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8607 tg3_free_consistent(tp);
8611 #define MAX_WAIT_CNT 1000
8613 /* To stop a block, clear the enable bit and poll till it
8614 * clears. tp->lock is held.
8616 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8621 if (tg3_flag(tp, 5705_PLUS)) {
8628 /* We can't enable/disable these bits of the
8629 * 5705/5750, just say success.
8642 for (i = 0; i < MAX_WAIT_CNT; i++) {
8643 if (pci_channel_offline(tp->pdev)) {
8644 dev_err(&tp->pdev->dev,
8645 "tg3_stop_block device offline, "
8646 "ofs=%lx enable_bit=%x\n",
8653 if ((val & enable_bit) == 0)
8657 if (i == MAX_WAIT_CNT && !silent) {
8658 dev_err(&tp->pdev->dev,
8659 "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8667 /* tp->lock is held. */
8668 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8672 tg3_disable_ints(tp);
8674 if (pci_channel_offline(tp->pdev)) {
8675 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8676 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8681 tp->rx_mode &= ~RX_MODE_ENABLE;
8682 tw32_f(MAC_RX_MODE, tp->rx_mode);
8685 err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8686 err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8687 err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8688 err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8689 err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8690 err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8692 err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8693 err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8694 err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8695 err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8696 err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8697 err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8698 err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8700 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8701 tw32_f(MAC_MODE, tp->mac_mode);
8704 tp->tx_mode &= ~TX_MODE_ENABLE;
8705 tw32_f(MAC_TX_MODE, tp->tx_mode);
8707 for (i = 0; i < MAX_WAIT_CNT; i++) {
8709 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8712 if (i >= MAX_WAIT_CNT) {
8713 dev_err(&tp->pdev->dev,
8714 "%s timed out, TX_MODE_ENABLE will not clear "
8715 "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8719 err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8720 err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8721 err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8723 tw32(FTQ_RESET, 0xffffffff);
8724 tw32(FTQ_RESET, 0x00000000);
8726 err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8727 err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8730 for (i = 0; i < tp->irq_cnt; i++) {
8731 struct tg3_napi *tnapi = &tp->napi[i];
8732 if (tnapi->hw_status)
8733 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8739 /* Save PCI command register before chip reset */
8740 static void tg3_save_pci_state(struct tg3 *tp)
8742 pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8745 /* Restore PCI state after chip reset */
8746 static void tg3_restore_pci_state(struct tg3 *tp)
8750 /* Re-enable indirect register accesses. */
8751 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8752 tp->misc_host_ctrl);
8754 /* Set MAX PCI retry to zero. */
8755 val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8756 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8757 tg3_flag(tp, PCIX_MODE))
8758 val |= PCISTATE_RETRY_SAME_DMA;
8759 /* Allow reads and writes to the APE register and memory space. */
8760 if (tg3_flag(tp, ENABLE_APE))
8761 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8762 PCISTATE_ALLOW_APE_SHMEM_WR |
8763 PCISTATE_ALLOW_APE_PSPACE_WR;
8764 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8766 pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8768 if (!tg3_flag(tp, PCI_EXPRESS)) {
8769 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8770 tp->pci_cacheline_sz);
8771 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8775 /* Make sure PCI-X relaxed ordering bit is clear. */
8776 if (tg3_flag(tp, PCIX_MODE)) {
8779 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8781 pcix_cmd &= ~PCI_X_CMD_ERO;
8782 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8786 if (tg3_flag(tp, 5780_CLASS)) {
8788 /* Chip reset on 5780 will reset MSI enable bit,
8789 * so need to restore it.
8791 if (tg3_flag(tp, USING_MSI)) {
8794 pci_read_config_word(tp->pdev,
8795 tp->msi_cap + PCI_MSI_FLAGS,
8797 pci_write_config_word(tp->pdev,
8798 tp->msi_cap + PCI_MSI_FLAGS,
8799 ctrl | PCI_MSI_FLAGS_ENABLE);
8800 val = tr32(MSGINT_MODE);
8801 tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8806 /* tp->lock is held. */
8807 static int tg3_chip_reset(struct tg3 *tp)
8810 void (*write_op)(struct tg3 *, u32, u32);
8815 tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8817 /* No matching tg3_nvram_unlock() after this because
8818 * chip reset below will undo the nvram lock.
8820 tp->nvram_lock_cnt = 0;
8822 /* GRC_MISC_CFG core clock reset will clear the memory
8823 * enable bit in PCI register 4 and the MSI enable bit
8824 * on some chips, so we save relevant registers here.
8826 tg3_save_pci_state(tp);
8828 if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8829 tg3_flag(tp, 5755_PLUS))
8830 tw32(GRC_FASTBOOT_PC, 0);
8833 * We must avoid the readl() that normally takes place.
8834 * It locks machines, causes machine checks, and other
8835 * fun things. So, temporarily disable the 5701
8836 * hardware workaround, while we do the reset.
8838 write_op = tp->write32;
8839 if (write_op == tg3_write_flush_reg32)
8840 tp->write32 = tg3_write32;
8842 /* Prevent the irq handler from reading or writing PCI registers
8843 * during chip reset when the memory enable bit in the PCI command
8844 * register may be cleared. The chip does not generate interrupt
8845 * at this time, but the irq handler may still be called due to irq
8846 * sharing or irqpoll.
8848 tg3_flag_set(tp, CHIP_RESETTING);
8849 for (i = 0; i < tp->irq_cnt; i++) {
8850 struct tg3_napi *tnapi = &tp->napi[i];
8851 if (tnapi->hw_status) {
8852 tnapi->hw_status->status = 0;
8853 tnapi->hw_status->status_tag = 0;
8855 tnapi->last_tag = 0;
8856 tnapi->last_irq_tag = 0;
8860 for (i = 0; i < tp->irq_cnt; i++)
8861 synchronize_irq(tp->napi[i].irq_vec);
8863 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8864 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8865 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8869 val = GRC_MISC_CFG_CORECLK_RESET;
8871 if (tg3_flag(tp, PCI_EXPRESS)) {
8872 /* Force PCIe 1.0a mode */
8873 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8874 !tg3_flag(tp, 57765_PLUS) &&
8875 tr32(TG3_PCIE_PHY_TSTCTL) ==
8876 (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8877 tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8879 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8880 tw32(GRC_MISC_CFG, (1 << 29));
8885 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8886 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8887 tw32(GRC_VCPU_EXT_CTRL,
8888 tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8891 /* Manage gphy power for all CPMU absent PCIe devices. */
8892 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8893 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8895 tw32(GRC_MISC_CFG, val);
8897 /* restore 5701 hardware bug workaround write method */
8898 tp->write32 = write_op;
8900 /* Unfortunately, we have to delay before the PCI read back.
8901 * Some 575X chips even will not respond to a PCI cfg access
8902 * when the reset command is given to the chip.
8904 * How do these hardware designers expect things to work
8905 * properly if the PCI write is posted for a long period
8906 * of time? It is always necessary to have some method by
8907 * which a register read back can occur to push the write
8908 * out which does the reset.
8910 * For most tg3 variants the trick below was working.
8915 /* Flush PCI posted writes. The normal MMIO registers
8916 * are inaccessible at this time so this is the only
8917 * way to make this reliably (actually, this is no longer
8918 * the case, see above). I tried to use indirect
8919 * register read/write but this upset some 5701 variants.
8921 pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
8925 if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8928 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8932 /* Wait for link training to complete. */
8933 for (j = 0; j < 5000; j++)
8936 pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8937 pci_write_config_dword(tp->pdev, 0xc4,
8938 cfg_val | (1 << 15));
8941 /* Clear the "no snoop" and "relaxed ordering" bits. */
8942 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8944 * Older PCIe devices only support the 128 byte
8945 * MPS setting. Enforce the restriction.
8947 if (!tg3_flag(tp, CPMU_PRESENT))
8948 val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8949 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8951 /* Clear error status */
8952 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8953 PCI_EXP_DEVSTA_CED |
8954 PCI_EXP_DEVSTA_NFED |
8955 PCI_EXP_DEVSTA_FED |
8956 PCI_EXP_DEVSTA_URD);
8959 tg3_restore_pci_state(tp);
8961 tg3_flag_clear(tp, CHIP_RESETTING);
8962 tg3_flag_clear(tp, ERROR_PROCESSED);
8965 if (tg3_flag(tp, 5780_CLASS))
8966 val = tr32(MEMARB_MODE);
8967 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8969 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8971 tw32(0x5000, 0x400);
8974 if (tg3_flag(tp, IS_SSB_CORE)) {
8976 * BCM4785: In order to avoid repercussions from using
8977 * potentially defective internal ROM, stop the Rx RISC CPU,
8978 * which is not required.
8981 tg3_halt_cpu(tp, RX_CPU_BASE);
8984 err = tg3_poll_fw(tp);
8988 tw32(GRC_MODE, tp->grc_mode);
8990 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8993 tw32(0xc4, val | (1 << 15));
8996 if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8997 tg3_asic_rev(tp) == ASIC_REV_5705) {
8998 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8999 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9000 tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9001 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9004 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9005 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9007 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9008 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9013 tw32_f(MAC_MODE, val);
9016 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9020 if (tg3_flag(tp, PCI_EXPRESS) &&
9021 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9022 tg3_asic_rev(tp) != ASIC_REV_5785 &&
9023 !tg3_flag(tp, 57765_PLUS)) {
9026 tw32(0x7c00, val | (1 << 25));
9029 if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9030 val = tr32(TG3_CPMU_CLCK_ORIDE);
9031 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9034 /* Reprobe ASF enable state. */
9035 tg3_flag_clear(tp, ENABLE_ASF);
9036 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9037 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9039 tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9040 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9041 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9044 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9045 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9046 tg3_flag_set(tp, ENABLE_ASF);
9047 tp->last_event_jiffies = jiffies;
9048 if (tg3_flag(tp, 5750_PLUS))
9049 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9051 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9052 if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9053 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9054 if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9055 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9062 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9063 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9065 /* tp->lock is held. */
9066 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9072 tg3_write_sig_pre_reset(tp, kind);
9074 tg3_abort_hw(tp, silent);
9075 err = tg3_chip_reset(tp);
9077 __tg3_set_mac_addr(tp, false);
9079 tg3_write_sig_legacy(tp, kind);
9080 tg3_write_sig_post_reset(tp, kind);
9083 /* Save the stats across chip resets... */
9084 tg3_get_nstats(tp, &tp->net_stats_prev);
9085 tg3_get_estats(tp, &tp->estats_prev);
9087 /* And make sure the next sample is new data */
9088 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9097 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9099 struct tg3 *tp = netdev_priv(dev);
9100 struct sockaddr *addr = p;
9102 bool skip_mac_1 = false;
9104 if (!is_valid_ether_addr(addr->sa_data))
9105 return -EADDRNOTAVAIL;
9107 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9109 if (!netif_running(dev))
9112 if (tg3_flag(tp, ENABLE_ASF)) {
9113 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9115 addr0_high = tr32(MAC_ADDR_0_HIGH);
9116 addr0_low = tr32(MAC_ADDR_0_LOW);
9117 addr1_high = tr32(MAC_ADDR_1_HIGH);
9118 addr1_low = tr32(MAC_ADDR_1_LOW);
9120 /* Skip MAC addr 1 if ASF is using it. */
9121 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9122 !(addr1_high == 0 && addr1_low == 0))
9125 spin_lock_bh(&tp->lock);
9126 __tg3_set_mac_addr(tp, skip_mac_1);
9127 spin_unlock_bh(&tp->lock);
9132 /* tp->lock is held. */
9133 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9134 dma_addr_t mapping, u32 maxlen_flags,
9138 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9139 ((u64) mapping >> 32));
9141 (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9142 ((u64) mapping & 0xffffffff));
9144 (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9147 if (!tg3_flag(tp, 5705_PLUS))
9149 (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9154 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9158 if (!tg3_flag(tp, ENABLE_TSS)) {
9159 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9160 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9161 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9163 tw32(HOSTCC_TXCOL_TICKS, 0);
9164 tw32(HOSTCC_TXMAX_FRAMES, 0);
9165 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9167 for (; i < tp->txq_cnt; i++) {
9170 reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9171 tw32(reg, ec->tx_coalesce_usecs);
9172 reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9173 tw32(reg, ec->tx_max_coalesced_frames);
9174 reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9175 tw32(reg, ec->tx_max_coalesced_frames_irq);
9179 for (; i < tp->irq_max - 1; i++) {
9180 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9181 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9182 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9186 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9189 u32 limit = tp->rxq_cnt;
9191 if (!tg3_flag(tp, ENABLE_RSS)) {
9192 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9193 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9194 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9197 tw32(HOSTCC_RXCOL_TICKS, 0);
9198 tw32(HOSTCC_RXMAX_FRAMES, 0);
9199 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9202 for (; i < limit; i++) {
9205 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9206 tw32(reg, ec->rx_coalesce_usecs);
9207 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9208 tw32(reg, ec->rx_max_coalesced_frames);
9209 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9210 tw32(reg, ec->rx_max_coalesced_frames_irq);
9213 for (; i < tp->irq_max - 1; i++) {
9214 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9215 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9216 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9220 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9222 tg3_coal_tx_init(tp, ec);
9223 tg3_coal_rx_init(tp, ec);
9225 if (!tg3_flag(tp, 5705_PLUS)) {
9226 u32 val = ec->stats_block_coalesce_usecs;
9228 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9229 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9234 tw32(HOSTCC_STAT_COAL_TICKS, val);
9238 /* tp->lock is held. */
9239 static void tg3_rings_reset(struct tg3 *tp)
9242 u32 stblk, txrcb, rxrcb, limit;
9243 struct tg3_napi *tnapi = &tp->napi[0];
9245 /* Disable all transmit rings but the first. */
9246 if (!tg3_flag(tp, 5705_PLUS))
9247 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9248 else if (tg3_flag(tp, 5717_PLUS))
9249 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9250 else if (tg3_flag(tp, 57765_CLASS) ||
9251 tg3_asic_rev(tp) == ASIC_REV_5762)
9252 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9254 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9256 for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9257 txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9258 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9259 BDINFO_FLAGS_DISABLED);
9262 /* Disable all receive return rings but the first. */
9263 if (tg3_flag(tp, 5717_PLUS))
9264 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9265 else if (!tg3_flag(tp, 5705_PLUS))
9266 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9267 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9268 tg3_asic_rev(tp) == ASIC_REV_5762 ||
9269 tg3_flag(tp, 57765_CLASS))
9270 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9272 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9274 for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9275 rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9276 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9277 BDINFO_FLAGS_DISABLED);
9279 /* Disable interrupts */
9280 tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9281 tp->napi[0].chk_msi_cnt = 0;
9282 tp->napi[0].last_rx_cons = 0;
9283 tp->napi[0].last_tx_cons = 0;
9285 /* Zero mailbox registers. */
9286 if (tg3_flag(tp, SUPPORT_MSIX)) {
9287 for (i = 1; i < tp->irq_max; i++) {
9288 tp->napi[i].tx_prod = 0;
9289 tp->napi[i].tx_cons = 0;
9290 if (tg3_flag(tp, ENABLE_TSS))
9291 tw32_mailbox(tp->napi[i].prodmbox, 0);
9292 tw32_rx_mbox(tp->napi[i].consmbox, 0);
9293 tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9294 tp->napi[i].chk_msi_cnt = 0;
9295 tp->napi[i].last_rx_cons = 0;
9296 tp->napi[i].last_tx_cons = 0;
9298 if (!tg3_flag(tp, ENABLE_TSS))
9299 tw32_mailbox(tp->napi[0].prodmbox, 0);
9301 tp->napi[0].tx_prod = 0;
9302 tp->napi[0].tx_cons = 0;
9303 tw32_mailbox(tp->napi[0].prodmbox, 0);
9304 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9307 /* Make sure the NIC-based send BD rings are disabled. */
9308 if (!tg3_flag(tp, 5705_PLUS)) {
9309 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9310 for (i = 0; i < 16; i++)
9311 tw32_tx_mbox(mbox + i * 8, 0);
9314 txrcb = NIC_SRAM_SEND_RCB;
9315 rxrcb = NIC_SRAM_RCV_RET_RCB;
9317 /* Clear status block in ram. */
9318 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9320 /* Set status block DMA address */
9321 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9322 ((u64) tnapi->status_mapping >> 32));
9323 tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9324 ((u64) tnapi->status_mapping & 0xffffffff));
9326 if (tnapi->tx_ring) {
9327 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9328 (TG3_TX_RING_SIZE <<
9329 BDINFO_FLAGS_MAXLEN_SHIFT),
9330 NIC_SRAM_TX_BUFFER_DESC);
9331 txrcb += TG3_BDINFO_SIZE;
9334 if (tnapi->rx_rcb) {
9335 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9336 (tp->rx_ret_ring_mask + 1) <<
9337 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9338 rxrcb += TG3_BDINFO_SIZE;
9341 stblk = HOSTCC_STATBLCK_RING1;
9343 for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9344 u64 mapping = (u64)tnapi->status_mapping;
9345 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9346 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9348 /* Clear status block in ram. */
9349 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9351 if (tnapi->tx_ring) {
9352 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9353 (TG3_TX_RING_SIZE <<
9354 BDINFO_FLAGS_MAXLEN_SHIFT),
9355 NIC_SRAM_TX_BUFFER_DESC);
9356 txrcb += TG3_BDINFO_SIZE;
9359 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9360 ((tp->rx_ret_ring_mask + 1) <<
9361 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9364 rxrcb += TG3_BDINFO_SIZE;
9368 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9370 u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9372 if (!tg3_flag(tp, 5750_PLUS) ||
9373 tg3_flag(tp, 5780_CLASS) ||
9374 tg3_asic_rev(tp) == ASIC_REV_5750 ||
9375 tg3_asic_rev(tp) == ASIC_REV_5752 ||
9376 tg3_flag(tp, 57765_PLUS))
9377 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9378 else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9379 tg3_asic_rev(tp) == ASIC_REV_5787)
9380 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9382 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9384 nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9385 host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9387 val = min(nic_rep_thresh, host_rep_thresh);
9388 tw32(RCVBDI_STD_THRESH, val);
9390 if (tg3_flag(tp, 57765_PLUS))
9391 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9393 if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9396 bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9398 host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9400 val = min(bdcache_maxcnt / 2, host_rep_thresh);
9401 tw32(RCVBDI_JUMBO_THRESH, val);
9403 if (tg3_flag(tp, 57765_PLUS))
9404 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9407 static inline u32 calc_crc(unsigned char *buf, int len)
9415 for (j = 0; j < len; j++) {
9418 for (k = 0; k < 8; k++) {
9431 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9433 /* accept or reject all multicast frames */
9434 tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9435 tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9436 tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9437 tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9440 static void __tg3_set_rx_mode(struct net_device *dev)
9442 struct tg3 *tp = netdev_priv(dev);
9445 rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9446 RX_MODE_KEEP_VLAN_TAG);
9448 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9449 /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9452 if (!tg3_flag(tp, ENABLE_ASF))
9453 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9456 if (dev->flags & IFF_PROMISC) {
9457 /* Promiscuous mode. */
9458 rx_mode |= RX_MODE_PROMISC;
9459 } else if (dev->flags & IFF_ALLMULTI) {
9460 /* Accept all multicast. */
9461 tg3_set_multi(tp, 1);
9462 } else if (netdev_mc_empty(dev)) {
9463 /* Reject all multicast. */
9464 tg3_set_multi(tp, 0);
9466 /* Accept one or more multicast(s). */
9467 struct netdev_hw_addr *ha;
9468 u32 mc_filter[4] = { 0, };
9473 netdev_for_each_mc_addr(ha, dev) {
9474 crc = calc_crc(ha->addr, ETH_ALEN);
9476 regidx = (bit & 0x60) >> 5;
9478 mc_filter[regidx] |= (1 << bit);
9481 tw32(MAC_HASH_REG_0, mc_filter[0]);
9482 tw32(MAC_HASH_REG_1, mc_filter[1]);
9483 tw32(MAC_HASH_REG_2, mc_filter[2]);
9484 tw32(MAC_HASH_REG_3, mc_filter[3]);
9487 if (rx_mode != tp->rx_mode) {
9488 tp->rx_mode = rx_mode;
9489 tw32_f(MAC_RX_MODE, rx_mode);
9494 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9498 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9499 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9502 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9506 if (!tg3_flag(tp, SUPPORT_MSIX))
9509 if (tp->rxq_cnt == 1) {
9510 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9514 /* Validate table against current IRQ count */
9515 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9516 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9520 if (i != TG3_RSS_INDIR_TBL_SIZE)
9521 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9524 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9527 u32 reg = MAC_RSS_INDIR_TBL_0;
9529 while (i < TG3_RSS_INDIR_TBL_SIZE) {
9530 u32 val = tp->rss_ind_tbl[i];
9532 for (; i % 8; i++) {
9534 val |= tp->rss_ind_tbl[i];
9541 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9543 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9544 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9546 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9549 /* tp->lock is held. */
9550 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9552 u32 val, rdmac_mode;
9554 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9556 tg3_disable_ints(tp);
9560 tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9562 if (tg3_flag(tp, INIT_COMPLETE))
9563 tg3_abort_hw(tp, 1);
9565 /* Enable MAC control of LPI */
9566 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9567 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9568 TG3_CPMU_EEE_LNKIDL_UART_IDL;
9569 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9570 val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9572 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9574 tw32_f(TG3_CPMU_EEE_CTRL,
9575 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9577 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9578 TG3_CPMU_EEEMD_LPI_IN_TX |
9579 TG3_CPMU_EEEMD_LPI_IN_RX |
9580 TG3_CPMU_EEEMD_EEE_ENABLE;
9582 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9583 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9585 if (tg3_flag(tp, ENABLE_APE))
9586 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9588 tw32_f(TG3_CPMU_EEE_MODE, val);
9590 tw32_f(TG3_CPMU_EEE_DBTMR1,
9591 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9592 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9594 tw32_f(TG3_CPMU_EEE_DBTMR2,
9595 TG3_CPMU_DBTMR2_APE_TX_2047US |
9596 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9599 if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9600 !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9601 tg3_phy_pull_config(tp);
9602 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9608 err = tg3_chip_reset(tp);
9612 tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9614 if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9615 val = tr32(TG3_CPMU_CTRL);
9616 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9617 tw32(TG3_CPMU_CTRL, val);
9619 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9620 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9621 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9622 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9624 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9625 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9626 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9627 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9629 val = tr32(TG3_CPMU_HST_ACC);
9630 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9631 val |= CPMU_HST_ACC_MACCLK_6_25;
9632 tw32(TG3_CPMU_HST_ACC, val);
9635 if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9636 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9637 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9638 PCIE_PWR_MGMT_L1_THRESH_4MS;
9639 tw32(PCIE_PWR_MGMT_THRESH, val);
9641 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9642 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9644 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9646 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9647 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9650 if (tg3_flag(tp, L1PLLPD_EN)) {
9651 u32 grc_mode = tr32(GRC_MODE);
9653 /* Access the lower 1K of PL PCIE block registers. */
9654 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9655 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9657 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9658 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9659 val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9661 tw32(GRC_MODE, grc_mode);
9664 if (tg3_flag(tp, 57765_CLASS)) {
9665 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9666 u32 grc_mode = tr32(GRC_MODE);
9668 /* Access the lower 1K of PL PCIE block registers. */
9669 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9670 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9672 val = tr32(TG3_PCIE_TLDLPL_PORT +
9673 TG3_PCIE_PL_LO_PHYCTL5);
9674 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9675 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9677 tw32(GRC_MODE, grc_mode);
9680 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9683 /* Fix transmit hangs */
9684 val = tr32(TG3_CPMU_PADRNG_CTL);
9685 val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9686 tw32(TG3_CPMU_PADRNG_CTL, val);
9688 grc_mode = tr32(GRC_MODE);
9690 /* Access the lower 1K of DL PCIE block registers. */
9691 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9692 tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9694 val = tr32(TG3_PCIE_TLDLPL_PORT +
9695 TG3_PCIE_DL_LO_FTSMAX);
9696 val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9697 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9698 val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9700 tw32(GRC_MODE, grc_mode);
9703 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9704 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9705 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9706 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9709 /* This works around an issue with Athlon chipsets on
9710 * B3 tigon3 silicon. This bit has no effect on any
9711 * other revision. But do not set this on PCI Express
9712 * chips and don't even touch the clocks if the CPMU is present.
9714 if (!tg3_flag(tp, CPMU_PRESENT)) {
9715 if (!tg3_flag(tp, PCI_EXPRESS))
9716 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9717 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9720 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9721 tg3_flag(tp, PCIX_MODE)) {
9722 val = tr32(TG3PCI_PCISTATE);
9723 val |= PCISTATE_RETRY_SAME_DMA;
9724 tw32(TG3PCI_PCISTATE, val);
9727 if (tg3_flag(tp, ENABLE_APE)) {
9728 /* Allow reads and writes to the
9729 * APE register and memory space.
9731 val = tr32(TG3PCI_PCISTATE);
9732 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9733 PCISTATE_ALLOW_APE_SHMEM_WR |
9734 PCISTATE_ALLOW_APE_PSPACE_WR;
9735 tw32(TG3PCI_PCISTATE, val);
9738 if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9739 /* Enable some hw fixes. */
9740 val = tr32(TG3PCI_MSI_DATA);
9741 val |= (1 << 26) | (1 << 28) | (1 << 29);
9742 tw32(TG3PCI_MSI_DATA, val);
9745 /* Descriptor ring init may make accesses to the
9746 * NIC SRAM area to setup the TX descriptors, so we
9747 * can only do this after the hardware has been
9748 * successfully reset.
9750 err = tg3_init_rings(tp);
9754 if (tg3_flag(tp, 57765_PLUS)) {
9755 val = tr32(TG3PCI_DMA_RW_CTRL) &
9756 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9757 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9758 val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9759 if (!tg3_flag(tp, 57765_CLASS) &&
9760 tg3_asic_rev(tp) != ASIC_REV_5717 &&
9761 tg3_asic_rev(tp) != ASIC_REV_5762)
9762 val |= DMA_RWCTRL_TAGGED_STAT_WA;
9763 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9764 } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9765 tg3_asic_rev(tp) != ASIC_REV_5761) {
9766 /* This value is determined during the probe time DMA
9767 * engine test, tg3_test_dma.
9769 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9772 tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9773 GRC_MODE_4X_NIC_SEND_RINGS |
9774 GRC_MODE_NO_TX_PHDR_CSUM |
9775 GRC_MODE_NO_RX_PHDR_CSUM);
9776 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9778 /* Pseudo-header checksum is done by hardware logic and not
9779 * the offload processers, so make the chip do the pseudo-
9780 * header checksums on receive. For transmit it is more
9781 * convenient to do the pseudo-header checksum in software
9782 * as Linux does that on transmit for us in all cases.
9784 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9786 val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9788 tw32(TG3_RX_PTP_CTL,
9789 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9791 if (tg3_flag(tp, PTP_CAPABLE))
9792 val |= GRC_MODE_TIME_SYNC_ENABLE;
9794 tw32(GRC_MODE, tp->grc_mode | val);
9796 /* Setup the timer prescalar register. Clock is always 66Mhz. */
9797 val = tr32(GRC_MISC_CFG);
9799 val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9800 tw32(GRC_MISC_CFG, val);
9802 /* Initialize MBUF/DESC pool. */
9803 if (tg3_flag(tp, 5750_PLUS)) {
9805 } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9806 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9807 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9808 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9810 tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9811 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9812 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9813 } else if (tg3_flag(tp, TSO_CAPABLE)) {
9816 fw_len = tp->fw_len;
9817 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9818 tw32(BUFMGR_MB_POOL_ADDR,
9819 NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9820 tw32(BUFMGR_MB_POOL_SIZE,
9821 NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9824 if (tp->dev->mtu <= ETH_DATA_LEN) {
9825 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9826 tp->bufmgr_config.mbuf_read_dma_low_water);
9827 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9828 tp->bufmgr_config.mbuf_mac_rx_low_water);
9829 tw32(BUFMGR_MB_HIGH_WATER,
9830 tp->bufmgr_config.mbuf_high_water);
9832 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9833 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9834 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9835 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9836 tw32(BUFMGR_MB_HIGH_WATER,
9837 tp->bufmgr_config.mbuf_high_water_jumbo);
9839 tw32(BUFMGR_DMA_LOW_WATER,
9840 tp->bufmgr_config.dma_low_water);
9841 tw32(BUFMGR_DMA_HIGH_WATER,
9842 tp->bufmgr_config.dma_high_water);
9844 val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9845 if (tg3_asic_rev(tp) == ASIC_REV_5719)
9846 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9847 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9848 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9849 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9850 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9851 tw32(BUFMGR_MODE, val);
9852 for (i = 0; i < 2000; i++) {
9853 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9858 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9862 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9863 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9865 tg3_setup_rxbd_thresholds(tp);
9867 /* Initialize TG3_BDINFO's at:
9868 * RCVDBDI_STD_BD: standard eth size rx ring
9869 * RCVDBDI_JUMBO_BD: jumbo frame rx ring
9870 * RCVDBDI_MINI_BD: small frame rx ring (??? does not work)
9873 * TG3_BDINFO_HOST_ADDR: high/low parts of DMA address of ring
9874 * TG3_BDINFO_MAXLEN_FLAGS: (rx max buffer size << 16) |
9875 * ring attribute flags
9876 * TG3_BDINFO_NIC_ADDR: location of descriptors in nic SRAM
9878 * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9879 * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9881 * The size of each ring is fixed in the firmware, but the location is
9884 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9885 ((u64) tpr->rx_std_mapping >> 32));
9886 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9887 ((u64) tpr->rx_std_mapping & 0xffffffff));
9888 if (!tg3_flag(tp, 5717_PLUS))
9889 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9890 NIC_SRAM_RX_BUFFER_DESC);
9892 /* Disable the mini ring */
9893 if (!tg3_flag(tp, 5705_PLUS))
9894 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9895 BDINFO_FLAGS_DISABLED);
9897 /* Program the jumbo buffer descriptor ring control
9898 * blocks on those devices that have them.
9900 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9901 (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9903 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9904 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9905 ((u64) tpr->rx_jmb_mapping >> 32));
9906 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9907 ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9908 val = TG3_RX_JMB_RING_SIZE(tp) <<
9909 BDINFO_FLAGS_MAXLEN_SHIFT;
9910 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9911 val | BDINFO_FLAGS_USE_EXT_RECV);
9912 if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9913 tg3_flag(tp, 57765_CLASS) ||
9914 tg3_asic_rev(tp) == ASIC_REV_5762)
9915 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9916 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9918 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9919 BDINFO_FLAGS_DISABLED);
9922 if (tg3_flag(tp, 57765_PLUS)) {
9923 val = TG3_RX_STD_RING_SIZE(tp);
9924 val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9925 val |= (TG3_RX_STD_DMA_SZ << 2);
9927 val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9929 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9931 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9933 tpr->rx_std_prod_idx = tp->rx_pending;
9934 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9936 tpr->rx_jmb_prod_idx =
9937 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9938 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9940 tg3_rings_reset(tp);
9942 /* Initialize MAC address and backoff seed. */
9943 __tg3_set_mac_addr(tp, false);
9945 /* MTU + ethernet header + FCS + optional VLAN tag */
9946 tw32(MAC_RX_MTU_SIZE,
9947 tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9949 /* The slot time is changed by tg3_setup_phy if we
9950 * run at gigabit with half duplex.
9952 val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9953 (6 << TX_LENGTHS_IPG_SHIFT) |
9954 (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9956 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9957 tg3_asic_rev(tp) == ASIC_REV_5762)
9958 val |= tr32(MAC_TX_LENGTHS) &
9959 (TX_LENGTHS_JMB_FRM_LEN_MSK |
9960 TX_LENGTHS_CNT_DWN_VAL_MSK);
9962 tw32(MAC_TX_LENGTHS, val);
9964 /* Receive rules. */
9965 tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9966 tw32(RCVLPC_CONFIG, 0x0181);
9968 /* Calculate RDMAC_MODE setting early, we need it to determine
9969 * the RCVLPC_STATE_ENABLE mask.
9971 rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9972 RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9973 RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9974 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9975 RDMAC_MODE_LNGREAD_ENAB);
9977 if (tg3_asic_rev(tp) == ASIC_REV_5717)
9978 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9980 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9981 tg3_asic_rev(tp) == ASIC_REV_5785 ||
9982 tg3_asic_rev(tp) == ASIC_REV_57780)
9983 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9984 RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9985 RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9987 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9988 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9989 if (tg3_flag(tp, TSO_CAPABLE) &&
9990 tg3_asic_rev(tp) == ASIC_REV_5705) {
9991 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9992 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9993 !tg3_flag(tp, IS_5788)) {
9994 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9998 if (tg3_flag(tp, PCI_EXPRESS))
9999 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10001 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10003 if (tp->dev->mtu <= ETH_DATA_LEN) {
10004 rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10005 tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10009 if (tg3_flag(tp, HW_TSO_1) ||
10010 tg3_flag(tp, HW_TSO_2) ||
10011 tg3_flag(tp, HW_TSO_3))
10012 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10014 if (tg3_flag(tp, 57765_PLUS) ||
10015 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10016 tg3_asic_rev(tp) == ASIC_REV_57780)
10017 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10019 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10020 tg3_asic_rev(tp) == ASIC_REV_5762)
10021 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10023 if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10024 tg3_asic_rev(tp) == ASIC_REV_5784 ||
10025 tg3_asic_rev(tp) == ASIC_REV_5785 ||
10026 tg3_asic_rev(tp) == ASIC_REV_57780 ||
10027 tg3_flag(tp, 57765_PLUS)) {
10030 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10031 tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10033 tgtreg = TG3_RDMA_RSRVCTRL_REG;
10035 val = tr32(tgtreg);
10036 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10037 tg3_asic_rev(tp) == ASIC_REV_5762) {
10038 val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10039 TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10040 TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10041 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10042 TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10043 TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10045 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10048 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10049 tg3_asic_rev(tp) == ASIC_REV_5720 ||
10050 tg3_asic_rev(tp) == ASIC_REV_5762) {
10053 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10054 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10056 tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10058 val = tr32(tgtreg);
10060 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10061 TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10064 /* Receive/send statistics. */
10065 if (tg3_flag(tp, 5750_PLUS)) {
10066 val = tr32(RCVLPC_STATS_ENABLE);
10067 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10068 tw32(RCVLPC_STATS_ENABLE, val);
10069 } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10070 tg3_flag(tp, TSO_CAPABLE)) {
10071 val = tr32(RCVLPC_STATS_ENABLE);
10072 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10073 tw32(RCVLPC_STATS_ENABLE, val);
10075 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10077 tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10078 tw32(SNDDATAI_STATSENAB, 0xffffff);
10079 tw32(SNDDATAI_STATSCTRL,
10080 (SNDDATAI_SCTRL_ENABLE |
10081 SNDDATAI_SCTRL_FASTUPD));
10083 /* Setup host coalescing engine. */
10084 tw32(HOSTCC_MODE, 0);
10085 for (i = 0; i < 2000; i++) {
10086 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10091 __tg3_set_coalesce(tp, &tp->coal);
10093 if (!tg3_flag(tp, 5705_PLUS)) {
10094 /* Status/statistics block address. See tg3_timer,
10095 * the tg3_periodic_fetch_stats call there, and
10096 * tg3_get_stats to see how this works for 5705/5750 chips.
10098 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10099 ((u64) tp->stats_mapping >> 32));
10100 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10101 ((u64) tp->stats_mapping & 0xffffffff));
10102 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10104 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10106 /* Clear statistics and status block memory areas */
10107 for (i = NIC_SRAM_STATS_BLK;
10108 i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10109 i += sizeof(u32)) {
10110 tg3_write_mem(tp, i, 0);
10115 tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10117 tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10118 tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10119 if (!tg3_flag(tp, 5705_PLUS))
10120 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10122 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10123 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10124 /* reset to prevent losing 1st rx packet intermittently */
10125 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10129 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10130 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10131 MAC_MODE_FHDE_ENABLE;
10132 if (tg3_flag(tp, ENABLE_APE))
10133 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10134 if (!tg3_flag(tp, 5705_PLUS) &&
10135 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10136 tg3_asic_rev(tp) != ASIC_REV_5700)
10137 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10138 tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10141 /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10142 * If TG3_FLAG_IS_NIC is zero, we should read the
10143 * register to preserve the GPIO settings for LOMs. The GPIOs,
10144 * whether used as inputs or outputs, are set by boot code after
10147 if (!tg3_flag(tp, IS_NIC)) {
10150 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10151 GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10152 GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10154 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10155 gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10156 GRC_LCLCTRL_GPIO_OUTPUT3;
10158 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10159 gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10161 tp->grc_local_ctrl &= ~gpio_mask;
10162 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10164 /* GPIO1 must be driven high for eeprom write protect */
10165 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10166 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10167 GRC_LCLCTRL_GPIO_OUTPUT1);
10169 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10172 if (tg3_flag(tp, USING_MSIX)) {
10173 val = tr32(MSGINT_MODE);
10174 val |= MSGINT_MODE_ENABLE;
10175 if (tp->irq_cnt > 1)
10176 val |= MSGINT_MODE_MULTIVEC_EN;
10177 if (!tg3_flag(tp, 1SHOT_MSI))
10178 val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10179 tw32(MSGINT_MODE, val);
10182 if (!tg3_flag(tp, 5705_PLUS)) {
10183 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10187 val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10188 WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10189 WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10190 WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10191 WDMAC_MODE_LNGREAD_ENAB);
10193 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10194 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10195 if (tg3_flag(tp, TSO_CAPABLE) &&
10196 (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10197 tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10199 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10200 !tg3_flag(tp, IS_5788)) {
10201 val |= WDMAC_MODE_RX_ACCEL;
10205 /* Enable host coalescing bug fix */
10206 if (tg3_flag(tp, 5755_PLUS))
10207 val |= WDMAC_MODE_STATUS_TAG_FIX;
10209 if (tg3_asic_rev(tp) == ASIC_REV_5785)
10210 val |= WDMAC_MODE_BURST_ALL_DATA;
10212 tw32_f(WDMAC_MODE, val);
10215 if (tg3_flag(tp, PCIX_MODE)) {
10218 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10220 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10221 pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10222 pcix_cmd |= PCI_X_CMD_READ_2K;
10223 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10224 pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10225 pcix_cmd |= PCI_X_CMD_READ_2K;
10227 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10231 tw32_f(RDMAC_MODE, rdmac_mode);
10234 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10235 tg3_asic_rev(tp) == ASIC_REV_5720) {
10236 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10237 if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10240 if (i < TG3_NUM_RDMA_CHANNELS) {
10241 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10242 val |= tg3_lso_rd_dma_workaround_bit(tp);
10243 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10244 tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10248 tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10249 if (!tg3_flag(tp, 5705_PLUS))
10250 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10252 if (tg3_asic_rev(tp) == ASIC_REV_5761)
10253 tw32(SNDDATAC_MODE,
10254 SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10256 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10258 tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10259 tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10260 val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10261 if (tg3_flag(tp, LRG_PROD_RING_CAP))
10262 val |= RCVDBDI_MODE_LRG_RING_SZ;
10263 tw32(RCVDBDI_MODE, val);
10264 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10265 if (tg3_flag(tp, HW_TSO_1) ||
10266 tg3_flag(tp, HW_TSO_2) ||
10267 tg3_flag(tp, HW_TSO_3))
10268 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10269 val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10270 if (tg3_flag(tp, ENABLE_TSS))
10271 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10272 tw32(SNDBDI_MODE, val);
10273 tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10275 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10276 err = tg3_load_5701_a0_firmware_fix(tp);
10281 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10282 /* Ignore any errors for the firmware download. If download
10283 * fails, the device will operate with EEE disabled
10285 tg3_load_57766_firmware(tp);
10288 if (tg3_flag(tp, TSO_CAPABLE)) {
10289 err = tg3_load_tso_firmware(tp);
10294 tp->tx_mode = TX_MODE_ENABLE;
10296 if (tg3_flag(tp, 5755_PLUS) ||
10297 tg3_asic_rev(tp) == ASIC_REV_5906)
10298 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10300 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10301 tg3_asic_rev(tp) == ASIC_REV_5762) {
10302 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10303 tp->tx_mode &= ~val;
10304 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10307 tw32_f(MAC_TX_MODE, tp->tx_mode);
10310 if (tg3_flag(tp, ENABLE_RSS)) {
10311 tg3_rss_write_indir_tbl(tp);
10313 /* Setup the "secret" hash key. */
10314 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10315 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10316 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10317 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10318 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10319 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10320 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10321 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10322 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10323 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10326 tp->rx_mode = RX_MODE_ENABLE;
10327 if (tg3_flag(tp, 5755_PLUS))
10328 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10330 if (tg3_flag(tp, ENABLE_RSS))
10331 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10332 RX_MODE_RSS_ITBL_HASH_BITS_7 |
10333 RX_MODE_RSS_IPV6_HASH_EN |
10334 RX_MODE_RSS_TCP_IPV6_HASH_EN |
10335 RX_MODE_RSS_IPV4_HASH_EN |
10336 RX_MODE_RSS_TCP_IPV4_HASH_EN;
10338 tw32_f(MAC_RX_MODE, tp->rx_mode);
10341 tw32(MAC_LED_CTRL, tp->led_ctrl);
10343 tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10344 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10345 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10348 tw32_f(MAC_RX_MODE, tp->rx_mode);
10351 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10352 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10353 !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10354 /* Set drive transmission level to 1.2V */
10355 /* only if the signal pre-emphasis bit is not set */
10356 val = tr32(MAC_SERDES_CFG);
10359 tw32(MAC_SERDES_CFG, val);
10361 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10362 tw32(MAC_SERDES_CFG, 0x616000);
10365 /* Prevent chip from dropping frames when flow control
10368 if (tg3_flag(tp, 57765_CLASS))
10372 tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10374 if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10375 (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10376 /* Use hardware link auto-negotiation */
10377 tg3_flag_set(tp, HW_AUTONEG);
10380 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10381 tg3_asic_rev(tp) == ASIC_REV_5714) {
10384 tmp = tr32(SERDES_RX_CTRL);
10385 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10386 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10387 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10388 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10391 if (!tg3_flag(tp, USE_PHYLIB)) {
10392 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10393 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10395 err = tg3_setup_phy(tp, false);
10399 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10400 !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10403 /* Clear CRC stats. */
10404 if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10405 tg3_writephy(tp, MII_TG3_TEST1,
10406 tmp | MII_TG3_TEST1_CRC_EN);
10407 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10412 __tg3_set_rx_mode(tp->dev);
10414 /* Initialize receive rules. */
10415 tw32(MAC_RCV_RULE_0, 0xc2000000 & RCV_RULE_DISABLE_MASK);
10416 tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10417 tw32(MAC_RCV_RULE_1, 0x86000004 & RCV_RULE_DISABLE_MASK);
10418 tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10420 if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10424 if (tg3_flag(tp, ENABLE_ASF))
10428 tw32(MAC_RCV_RULE_15, 0); tw32(MAC_RCV_VALUE_15, 0);
10430 tw32(MAC_RCV_RULE_14, 0); tw32(MAC_RCV_VALUE_14, 0);
10432 tw32(MAC_RCV_RULE_13, 0); tw32(MAC_RCV_VALUE_13, 0);
10434 tw32(MAC_RCV_RULE_12, 0); tw32(MAC_RCV_VALUE_12, 0);
10436 tw32(MAC_RCV_RULE_11, 0); tw32(MAC_RCV_VALUE_11, 0);
10438 tw32(MAC_RCV_RULE_10, 0); tw32(MAC_RCV_VALUE_10, 0);
10440 tw32(MAC_RCV_RULE_9, 0); tw32(MAC_RCV_VALUE_9, 0);
10442 tw32(MAC_RCV_RULE_8, 0); tw32(MAC_RCV_VALUE_8, 0);
10444 tw32(MAC_RCV_RULE_7, 0); tw32(MAC_RCV_VALUE_7, 0);
10446 tw32(MAC_RCV_RULE_6, 0); tw32(MAC_RCV_VALUE_6, 0);
10448 tw32(MAC_RCV_RULE_5, 0); tw32(MAC_RCV_VALUE_5, 0);
10450 tw32(MAC_RCV_RULE_4, 0); tw32(MAC_RCV_VALUE_4, 0);
10452 /* tw32(MAC_RCV_RULE_3, 0); tw32(MAC_RCV_VALUE_3, 0); */
10454 /* tw32(MAC_RCV_RULE_2, 0); tw32(MAC_RCV_VALUE_2, 0); */
10462 if (tg3_flag(tp, ENABLE_APE))
10463 /* Write our heartbeat update interval to APE. */
10464 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10465 APE_HOST_HEARTBEAT_INT_DISABLE);
10467 tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10472 /* Called at device open time to get the chip ready for
10473 * packet processing. Invoked with tp->lock held.
10475 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10477 /* Chip may have been just powered on. If so, the boot code may still
10478 * be running initialization. Wait for it to finish to avoid races in
10479 * accessing the hardware.
10481 tg3_enable_register_access(tp);
10484 tg3_switch_clocks(tp);
10486 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10488 return tg3_reset_hw(tp, reset_phy);
10491 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10495 for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10496 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10498 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10501 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10502 !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10503 memset(ocir, 0, TG3_OCIR_LEN);
10507 /* sysfs attributes for hwmon */
10508 static ssize_t tg3_show_temp(struct device *dev,
10509 struct device_attribute *devattr, char *buf)
10511 struct pci_dev *pdev = to_pci_dev(dev);
10512 struct net_device *netdev = pci_get_drvdata(pdev);
10513 struct tg3 *tp = netdev_priv(netdev);
10514 struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10517 spin_lock_bh(&tp->lock);
10518 tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10519 sizeof(temperature));
10520 spin_unlock_bh(&tp->lock);
10521 return sprintf(buf, "%u\n", temperature);
10525 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10526 TG3_TEMP_SENSOR_OFFSET);
10527 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10528 TG3_TEMP_CAUTION_OFFSET);
10529 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10530 TG3_TEMP_MAX_OFFSET);
10532 static struct attribute *tg3_attributes[] = {
10533 &sensor_dev_attr_temp1_input.dev_attr.attr,
10534 &sensor_dev_attr_temp1_crit.dev_attr.attr,
10535 &sensor_dev_attr_temp1_max.dev_attr.attr,
10539 static const struct attribute_group tg3_group = {
10540 .attrs = tg3_attributes,
10543 static void tg3_hwmon_close(struct tg3 *tp)
10545 if (tp->hwmon_dev) {
10546 hwmon_device_unregister(tp->hwmon_dev);
10547 tp->hwmon_dev = NULL;
10548 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10552 static void tg3_hwmon_open(struct tg3 *tp)
10556 struct pci_dev *pdev = tp->pdev;
10557 struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10559 tg3_sd_scan_scratchpad(tp, ocirs);
10561 for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10562 if (!ocirs[i].src_data_length)
10565 size += ocirs[i].src_hdr_length;
10566 size += ocirs[i].src_data_length;
10572 /* Register hwmon sysfs hooks */
10573 err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10575 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10579 tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10580 if (IS_ERR(tp->hwmon_dev)) {
10581 tp->hwmon_dev = NULL;
10582 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10583 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10588 #define TG3_STAT_ADD32(PSTAT, REG) \
10589 do { u32 __val = tr32(REG); \
10590 (PSTAT)->low += __val; \
10591 if ((PSTAT)->low < __val) \
10592 (PSTAT)->high += 1; \
10595 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10597 struct tg3_hw_stats *sp = tp->hw_stats;
10602 TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10603 TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10604 TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10605 TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10606 TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10607 TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10608 TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10609 TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10610 TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10611 TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10612 TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10613 TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10614 TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10615 if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10616 (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10617 sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10620 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10621 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10622 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10623 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10626 TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10627 TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10628 TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10629 TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10630 TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10631 TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10632 TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10633 TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10634 TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10635 TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10636 TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10637 TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10638 TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10639 TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10641 TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10642 if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10643 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10644 tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10645 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10647 u32 val = tr32(HOSTCC_FLOW_ATTN);
10648 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10650 tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10651 sp->rx_discards.low += val;
10652 if (sp->rx_discards.low < val)
10653 sp->rx_discards.high += 1;
10655 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10657 TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10660 static void tg3_chk_missed_msi(struct tg3 *tp)
10664 for (i = 0; i < tp->irq_cnt; i++) {
10665 struct tg3_napi *tnapi = &tp->napi[i];
10667 if (tg3_has_work(tnapi)) {
10668 if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10669 tnapi->last_tx_cons == tnapi->tx_cons) {
10670 if (tnapi->chk_msi_cnt < 1) {
10671 tnapi->chk_msi_cnt++;
10677 tnapi->chk_msi_cnt = 0;
10678 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10679 tnapi->last_tx_cons = tnapi->tx_cons;
10683 static void tg3_timer(unsigned long __opaque)
10685 struct tg3 *tp = (struct tg3 *) __opaque;
10687 if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10688 goto restart_timer;
10690 spin_lock(&tp->lock);
10692 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10693 tg3_flag(tp, 57765_CLASS))
10694 tg3_chk_missed_msi(tp);
10696 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10697 /* BCM4785: Flush posted writes from GbE to host memory. */
10701 if (!tg3_flag(tp, TAGGED_STATUS)) {
10702 /* All of this garbage is because when using non-tagged
10703 * IRQ status the mailbox/status_block protocol the chip
10704 * uses with the cpu is race prone.
10706 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10707 tw32(GRC_LOCAL_CTRL,
10708 tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10710 tw32(HOSTCC_MODE, tp->coalesce_mode |
10711 HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10714 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10715 spin_unlock(&tp->lock);
10716 tg3_reset_task_schedule(tp);
10717 goto restart_timer;
10721 /* This part only runs once per second. */
10722 if (!--tp->timer_counter) {
10723 if (tg3_flag(tp, 5705_PLUS))
10724 tg3_periodic_fetch_stats(tp);
10726 if (tp->setlpicnt && !--tp->setlpicnt)
10727 tg3_phy_eee_enable(tp);
10729 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10733 mac_stat = tr32(MAC_STATUS);
10736 if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10737 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10739 } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10743 tg3_setup_phy(tp, false);
10744 } else if (tg3_flag(tp, POLL_SERDES)) {
10745 u32 mac_stat = tr32(MAC_STATUS);
10746 int need_setup = 0;
10749 (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10752 if (!tp->link_up &&
10753 (mac_stat & (MAC_STATUS_PCS_SYNCED |
10754 MAC_STATUS_SIGNAL_DET))) {
10758 if (!tp->serdes_counter) {
10761 ~MAC_MODE_PORT_MODE_MASK));
10763 tw32_f(MAC_MODE, tp->mac_mode);
10766 tg3_setup_phy(tp, false);
10768 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10769 tg3_flag(tp, 5780_CLASS)) {
10770 tg3_serdes_parallel_detect(tp);
10773 tp->timer_counter = tp->timer_multiplier;
10776 /* Heartbeat is only sent once every 2 seconds.
10778 * The heartbeat is to tell the ASF firmware that the host
10779 * driver is still alive. In the event that the OS crashes,
10780 * ASF needs to reset the hardware to free up the FIFO space
10781 * that may be filled with rx packets destined for the host.
10782 * If the FIFO is full, ASF will no longer function properly.
10784 * Unintended resets have been reported on real time kernels
10785 * where the timer doesn't run on time. Netpoll will also have
10788 * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10789 * to check the ring condition when the heartbeat is expiring
10790 * before doing the reset. This will prevent most unintended
10793 if (!--tp->asf_counter) {
10794 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10795 tg3_wait_for_event_ack(tp);
10797 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10798 FWCMD_NICDRV_ALIVE3);
10799 tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10800 tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10801 TG3_FW_UPDATE_TIMEOUT_SEC);
10803 tg3_generate_fw_event(tp);
10805 tp->asf_counter = tp->asf_multiplier;
10808 spin_unlock(&tp->lock);
10811 tp->timer.expires = jiffies + tp->timer_offset;
10812 add_timer(&tp->timer);
10815 static void tg3_timer_init(struct tg3 *tp)
10817 if (tg3_flag(tp, TAGGED_STATUS) &&
10818 tg3_asic_rev(tp) != ASIC_REV_5717 &&
10819 !tg3_flag(tp, 57765_CLASS))
10820 tp->timer_offset = HZ;
10822 tp->timer_offset = HZ / 10;
10824 BUG_ON(tp->timer_offset > HZ);
10826 tp->timer_multiplier = (HZ / tp->timer_offset);
10827 tp->asf_multiplier = (HZ / tp->timer_offset) *
10828 TG3_FW_UPDATE_FREQ_SEC;
10830 init_timer(&tp->timer);
10831 tp->timer.data = (unsigned long) tp;
10832 tp->timer.function = tg3_timer;
10835 static void tg3_timer_start(struct tg3 *tp)
10837 tp->asf_counter = tp->asf_multiplier;
10838 tp->timer_counter = tp->timer_multiplier;
10840 tp->timer.expires = jiffies + tp->timer_offset;
10841 add_timer(&tp->timer);
10844 static void tg3_timer_stop(struct tg3 *tp)
10846 del_timer_sync(&tp->timer);
10849 /* Restart hardware after configuration changes, self-test, etc.
10850 * Invoked with tp->lock held.
10852 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10853 __releases(tp->lock)
10854 __acquires(tp->lock)
10858 err = tg3_init_hw(tp, reset_phy);
10860 netdev_err(tp->dev,
10861 "Failed to re-initialize device, aborting\n");
10862 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10863 tg3_full_unlock(tp);
10864 tg3_timer_stop(tp);
10866 tg3_napi_enable(tp);
10867 dev_close(tp->dev);
10868 tg3_full_lock(tp, 0);
10873 static void tg3_reset_task(struct work_struct *work)
10875 struct tg3 *tp = container_of(work, struct tg3, reset_task);
10878 tg3_full_lock(tp, 0);
10880 if (!netif_running(tp->dev)) {
10881 tg3_flag_clear(tp, RESET_TASK_PENDING);
10882 tg3_full_unlock(tp);
10886 tg3_full_unlock(tp);
10890 tg3_netif_stop(tp);
10892 tg3_full_lock(tp, 1);
10894 if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10895 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10896 tp->write32_rx_mbox = tg3_write_flush_reg32;
10897 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10898 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10901 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10902 err = tg3_init_hw(tp, true);
10906 tg3_netif_start(tp);
10909 tg3_full_unlock(tp);
10914 tg3_flag_clear(tp, RESET_TASK_PENDING);
10917 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10920 unsigned long flags;
10922 struct tg3_napi *tnapi = &tp->napi[irq_num];
10924 if (tp->irq_cnt == 1)
10925 name = tp->dev->name;
10927 name = &tnapi->irq_lbl[0];
10928 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10929 name[IFNAMSIZ-1] = 0;
10932 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10934 if (tg3_flag(tp, 1SHOT_MSI))
10935 fn = tg3_msi_1shot;
10938 fn = tg3_interrupt;
10939 if (tg3_flag(tp, TAGGED_STATUS))
10940 fn = tg3_interrupt_tagged;
10941 flags = IRQF_SHARED;
10944 return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10947 static int tg3_test_interrupt(struct tg3 *tp)
10949 struct tg3_napi *tnapi = &tp->napi[0];
10950 struct net_device *dev = tp->dev;
10951 int err, i, intr_ok = 0;
10954 if (!netif_running(dev))
10957 tg3_disable_ints(tp);
10959 free_irq(tnapi->irq_vec, tnapi);
10962 * Turn off MSI one shot mode. Otherwise this test has no
10963 * observable way to know whether the interrupt was delivered.
10965 if (tg3_flag(tp, 57765_PLUS)) {
10966 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10967 tw32(MSGINT_MODE, val);
10970 err = request_irq(tnapi->irq_vec, tg3_test_isr,
10971 IRQF_SHARED, dev->name, tnapi);
10975 tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10976 tg3_enable_ints(tp);
10978 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10981 for (i = 0; i < 5; i++) {
10982 u32 int_mbox, misc_host_ctrl;
10984 int_mbox = tr32_mailbox(tnapi->int_mbox);
10985 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10987 if ((int_mbox != 0) ||
10988 (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10993 if (tg3_flag(tp, 57765_PLUS) &&
10994 tnapi->hw_status->status_tag != tnapi->last_tag)
10995 tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
11000 tg3_disable_ints(tp);
11002 free_irq(tnapi->irq_vec, tnapi);
11004 err = tg3_request_irq(tp, 0);
11010 /* Reenable MSI one shot mode. */
11011 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11012 val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11013 tw32(MSGINT_MODE, val);
11021 /* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
11022 * successfully restored
11024 static int tg3_test_msi(struct tg3 *tp)
11029 if (!tg3_flag(tp, USING_MSI))
11032 /* Turn off SERR reporting in case MSI terminates with Master
11035 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11036 pci_write_config_word(tp->pdev, PCI_COMMAND,
11037 pci_cmd & ~PCI_COMMAND_SERR);
11039 err = tg3_test_interrupt(tp);
11041 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11046 /* other failures */
11050 /* MSI test failed, go back to INTx mode */
11051 netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11052 "to INTx mode. Please report this failure to the PCI "
11053 "maintainer and include system chipset information\n");
11055 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11057 pci_disable_msi(tp->pdev);
11059 tg3_flag_clear(tp, USING_MSI);
11060 tp->napi[0].irq_vec = tp->pdev->irq;
11062 err = tg3_request_irq(tp, 0);
11066 /* Need to reset the chip because the MSI cycle may have terminated
11067 * with Master Abort.
11069 tg3_full_lock(tp, 1);
11071 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11072 err = tg3_init_hw(tp, true);
11074 tg3_full_unlock(tp);
11077 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11082 static int tg3_request_firmware(struct tg3 *tp)
11084 const struct tg3_firmware_hdr *fw_hdr;
11086 if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11087 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11092 fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11094 /* Firmware blob starts with version numbers, followed by
11095 * start address and _full_ length including BSS sections
11096 * (which must be longer than the actual data, of course
11099 tp->fw_len = be32_to_cpu(fw_hdr->len); /* includes bss */
11100 if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11101 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11102 tp->fw_len, tp->fw_needed);
11103 release_firmware(tp->fw);
11108 /* We no longer need firmware; we have it. */
11109 tp->fw_needed = NULL;
11113 static u32 tg3_irq_count(struct tg3 *tp)
11115 u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11118 /* We want as many rx rings enabled as there are cpus.
11119 * In multiqueue MSI-X mode, the first MSI-X vector
11120 * only deals with link interrupts, etc, so we add
11121 * one to the number of vectors we are requesting.
11123 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11129 static bool tg3_enable_msix(struct tg3 *tp)
11132 struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11134 tp->txq_cnt = tp->txq_req;
11135 tp->rxq_cnt = tp->rxq_req;
11137 tp->rxq_cnt = netif_get_num_default_rss_queues();
11138 if (tp->rxq_cnt > tp->rxq_max)
11139 tp->rxq_cnt = tp->rxq_max;
11141 /* Disable multiple TX rings by default. Simple round-robin hardware
11142 * scheduling of the TX rings can cause starvation of rings with
11143 * small packets when other rings have TSO or jumbo packets.
11148 tp->irq_cnt = tg3_irq_count(tp);
11150 for (i = 0; i < tp->irq_max; i++) {
11151 msix_ent[i].entry = i;
11152 msix_ent[i].vector = 0;
11155 rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11158 } else if (rc != 0) {
11159 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11161 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11164 tp->rxq_cnt = max(rc - 1, 1);
11166 tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11169 for (i = 0; i < tp->irq_max; i++)
11170 tp->napi[i].irq_vec = msix_ent[i].vector;
11172 if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11173 pci_disable_msix(tp->pdev);
11177 if (tp->irq_cnt == 1)
11180 tg3_flag_set(tp, ENABLE_RSS);
11182 if (tp->txq_cnt > 1)
11183 tg3_flag_set(tp, ENABLE_TSS);
11185 netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11190 static void tg3_ints_init(struct tg3 *tp)
11192 if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11193 !tg3_flag(tp, TAGGED_STATUS)) {
11194 /* All MSI supporting chips should support tagged
11195 * status. Assert that this is the case.
11197 netdev_warn(tp->dev,
11198 "MSI without TAGGED_STATUS? Not using MSI\n");
11202 if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11203 tg3_flag_set(tp, USING_MSIX);
11204 else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11205 tg3_flag_set(tp, USING_MSI);
11207 if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11208 u32 msi_mode = tr32(MSGINT_MODE);
11209 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11210 msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11211 if (!tg3_flag(tp, 1SHOT_MSI))
11212 msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11213 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11216 if (!tg3_flag(tp, USING_MSIX)) {
11218 tp->napi[0].irq_vec = tp->pdev->irq;
11221 if (tp->irq_cnt == 1) {
11224 netif_set_real_num_tx_queues(tp->dev, 1);
11225 netif_set_real_num_rx_queues(tp->dev, 1);
11229 static void tg3_ints_fini(struct tg3 *tp)
11231 if (tg3_flag(tp, USING_MSIX))
11232 pci_disable_msix(tp->pdev);
11233 else if (tg3_flag(tp, USING_MSI))
11234 pci_disable_msi(tp->pdev);
11235 tg3_flag_clear(tp, USING_MSI);
11236 tg3_flag_clear(tp, USING_MSIX);
11237 tg3_flag_clear(tp, ENABLE_RSS);
11238 tg3_flag_clear(tp, ENABLE_TSS);
11241 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11244 struct net_device *dev = tp->dev;
11248 * Setup interrupts first so we know how
11249 * many NAPI resources to allocate
11253 tg3_rss_check_indir_tbl(tp);
11255 /* The placement of this call is tied
11256 * to the setup and use of Host TX descriptors.
11258 err = tg3_alloc_consistent(tp);
11264 tg3_napi_enable(tp);
11266 for (i = 0; i < tp->irq_cnt; i++) {
11267 struct tg3_napi *tnapi = &tp->napi[i];
11268 err = tg3_request_irq(tp, i);
11270 for (i--; i >= 0; i--) {
11271 tnapi = &tp->napi[i];
11272 free_irq(tnapi->irq_vec, tnapi);
11278 tg3_full_lock(tp, 0);
11280 err = tg3_init_hw(tp, reset_phy);
11282 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11283 tg3_free_rings(tp);
11286 tg3_full_unlock(tp);
11291 if (test_irq && tg3_flag(tp, USING_MSI)) {
11292 err = tg3_test_msi(tp);
11295 tg3_full_lock(tp, 0);
11296 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11297 tg3_free_rings(tp);
11298 tg3_full_unlock(tp);
11303 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11304 u32 val = tr32(PCIE_TRANSACTION_CFG);
11306 tw32(PCIE_TRANSACTION_CFG,
11307 val | PCIE_TRANS_CFG_1SHOT_MSI);
11313 tg3_hwmon_open(tp);
11315 tg3_full_lock(tp, 0);
11317 tg3_timer_start(tp);
11318 tg3_flag_set(tp, INIT_COMPLETE);
11319 tg3_enable_ints(tp);
11324 tg3_ptp_resume(tp);
11327 tg3_full_unlock(tp);
11329 netif_tx_start_all_queues(dev);
11332 * Reset loopback feature if it was turned on while the device was down
11333 * make sure that it's installed properly now.
11335 if (dev->features & NETIF_F_LOOPBACK)
11336 tg3_set_loopback(dev, dev->features);
11341 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11342 struct tg3_napi *tnapi = &tp->napi[i];
11343 free_irq(tnapi->irq_vec, tnapi);
11347 tg3_napi_disable(tp);
11349 tg3_free_consistent(tp);
11357 static void tg3_stop(struct tg3 *tp)
11361 tg3_reset_task_cancel(tp);
11362 tg3_netif_stop(tp);
11364 tg3_timer_stop(tp);
11366 tg3_hwmon_close(tp);
11370 tg3_full_lock(tp, 1);
11372 tg3_disable_ints(tp);
11374 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11375 tg3_free_rings(tp);
11376 tg3_flag_clear(tp, INIT_COMPLETE);
11378 tg3_full_unlock(tp);
11380 for (i = tp->irq_cnt - 1; i >= 0; i--) {
11381 struct tg3_napi *tnapi = &tp->napi[i];
11382 free_irq(tnapi->irq_vec, tnapi);
11389 tg3_free_consistent(tp);
11392 static int tg3_open(struct net_device *dev)
11394 struct tg3 *tp = netdev_priv(dev);
11397 if (tp->fw_needed) {
11398 err = tg3_request_firmware(tp);
11399 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11401 netdev_warn(tp->dev, "EEE capability disabled\n");
11402 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11403 } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11404 netdev_warn(tp->dev, "EEE capability restored\n");
11405 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11407 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11411 netdev_warn(tp->dev, "TSO capability disabled\n");
11412 tg3_flag_clear(tp, TSO_CAPABLE);
11413 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11414 netdev_notice(tp->dev, "TSO capability restored\n");
11415 tg3_flag_set(tp, TSO_CAPABLE);
11419 tg3_carrier_off(tp);
11421 err = tg3_power_up(tp);
11425 tg3_full_lock(tp, 0);
11427 tg3_disable_ints(tp);
11428 tg3_flag_clear(tp, INIT_COMPLETE);
11430 tg3_full_unlock(tp);
11432 err = tg3_start(tp,
11433 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11436 tg3_frob_aux_power(tp, false);
11437 pci_set_power_state(tp->pdev, PCI_D3hot);
11440 if (tg3_flag(tp, PTP_CAPABLE)) {
11441 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11443 if (IS_ERR(tp->ptp_clock))
11444 tp->ptp_clock = NULL;
11450 static int tg3_close(struct net_device *dev)
11452 struct tg3 *tp = netdev_priv(dev);
11458 /* Clear stats across close / open calls */
11459 memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11460 memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11462 tg3_power_down(tp);
11464 tg3_carrier_off(tp);
11469 static inline u64 get_stat64(tg3_stat64_t *val)
11471 return ((u64)val->high << 32) | ((u64)val->low);
11474 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11476 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11478 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11479 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11480 tg3_asic_rev(tp) == ASIC_REV_5701)) {
11483 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11484 tg3_writephy(tp, MII_TG3_TEST1,
11485 val | MII_TG3_TEST1_CRC_EN);
11486 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11490 tp->phy_crc_errors += val;
11492 return tp->phy_crc_errors;
11495 return get_stat64(&hw_stats->rx_fcs_errors);
11498 #define ESTAT_ADD(member) \
11499 estats->member = old_estats->member + \
11500 get_stat64(&hw_stats->member)
11502 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11504 struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11505 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11507 ESTAT_ADD(rx_octets);
11508 ESTAT_ADD(rx_fragments);
11509 ESTAT_ADD(rx_ucast_packets);
11510 ESTAT_ADD(rx_mcast_packets);
11511 ESTAT_ADD(rx_bcast_packets);
11512 ESTAT_ADD(rx_fcs_errors);
11513 ESTAT_ADD(rx_align_errors);
11514 ESTAT_ADD(rx_xon_pause_rcvd);
11515 ESTAT_ADD(rx_xoff_pause_rcvd);
11516 ESTAT_ADD(rx_mac_ctrl_rcvd);
11517 ESTAT_ADD(rx_xoff_entered);
11518 ESTAT_ADD(rx_frame_too_long_errors);
11519 ESTAT_ADD(rx_jabbers);
11520 ESTAT_ADD(rx_undersize_packets);
11521 ESTAT_ADD(rx_in_length_errors);
11522 ESTAT_ADD(rx_out_length_errors);
11523 ESTAT_ADD(rx_64_or_less_octet_packets);
11524 ESTAT_ADD(rx_65_to_127_octet_packets);
11525 ESTAT_ADD(rx_128_to_255_octet_packets);
11526 ESTAT_ADD(rx_256_to_511_octet_packets);
11527 ESTAT_ADD(rx_512_to_1023_octet_packets);
11528 ESTAT_ADD(rx_1024_to_1522_octet_packets);
11529 ESTAT_ADD(rx_1523_to_2047_octet_packets);
11530 ESTAT_ADD(rx_2048_to_4095_octet_packets);
11531 ESTAT_ADD(rx_4096_to_8191_octet_packets);
11532 ESTAT_ADD(rx_8192_to_9022_octet_packets);
11534 ESTAT_ADD(tx_octets);
11535 ESTAT_ADD(tx_collisions);
11536 ESTAT_ADD(tx_xon_sent);
11537 ESTAT_ADD(tx_xoff_sent);
11538 ESTAT_ADD(tx_flow_control);
11539 ESTAT_ADD(tx_mac_errors);
11540 ESTAT_ADD(tx_single_collisions);
11541 ESTAT_ADD(tx_mult_collisions);
11542 ESTAT_ADD(tx_deferred);
11543 ESTAT_ADD(tx_excessive_collisions);
11544 ESTAT_ADD(tx_late_collisions);
11545 ESTAT_ADD(tx_collide_2times);
11546 ESTAT_ADD(tx_collide_3times);
11547 ESTAT_ADD(tx_collide_4times);
11548 ESTAT_ADD(tx_collide_5times);
11549 ESTAT_ADD(tx_collide_6times);
11550 ESTAT_ADD(tx_collide_7times);
11551 ESTAT_ADD(tx_collide_8times);
11552 ESTAT_ADD(tx_collide_9times);
11553 ESTAT_ADD(tx_collide_10times);
11554 ESTAT_ADD(tx_collide_11times);
11555 ESTAT_ADD(tx_collide_12times);
11556 ESTAT_ADD(tx_collide_13times);
11557 ESTAT_ADD(tx_collide_14times);
11558 ESTAT_ADD(tx_collide_15times);
11559 ESTAT_ADD(tx_ucast_packets);
11560 ESTAT_ADD(tx_mcast_packets);
11561 ESTAT_ADD(tx_bcast_packets);
11562 ESTAT_ADD(tx_carrier_sense_errors);
11563 ESTAT_ADD(tx_discards);
11564 ESTAT_ADD(tx_errors);
11566 ESTAT_ADD(dma_writeq_full);
11567 ESTAT_ADD(dma_write_prioq_full);
11568 ESTAT_ADD(rxbds_empty);
11569 ESTAT_ADD(rx_discards);
11570 ESTAT_ADD(rx_errors);
11571 ESTAT_ADD(rx_threshold_hit);
11573 ESTAT_ADD(dma_readq_full);
11574 ESTAT_ADD(dma_read_prioq_full);
11575 ESTAT_ADD(tx_comp_queue_full);
11577 ESTAT_ADD(ring_set_send_prod_index);
11578 ESTAT_ADD(ring_status_update);
11579 ESTAT_ADD(nic_irqs);
11580 ESTAT_ADD(nic_avoided_irqs);
11581 ESTAT_ADD(nic_tx_threshold_hit);
11583 ESTAT_ADD(mbuf_lwm_thresh_hit);
11586 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11588 struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11589 struct tg3_hw_stats *hw_stats = tp->hw_stats;
11591 stats->rx_packets = old_stats->rx_packets +
11592 get_stat64(&hw_stats->rx_ucast_packets) +
11593 get_stat64(&hw_stats->rx_mcast_packets) +
11594 get_stat64(&hw_stats->rx_bcast_packets);
11596 stats->tx_packets = old_stats->tx_packets +
11597 get_stat64(&hw_stats->tx_ucast_packets) +
11598 get_stat64(&hw_stats->tx_mcast_packets) +
11599 get_stat64(&hw_stats->tx_bcast_packets);
11601 stats->rx_bytes = old_stats->rx_bytes +
11602 get_stat64(&hw_stats->rx_octets);
11603 stats->tx_bytes = old_stats->tx_bytes +
11604 get_stat64(&hw_stats->tx_octets);
11606 stats->rx_errors = old_stats->rx_errors +
11607 get_stat64(&hw_stats->rx_errors);
11608 stats->tx_errors = old_stats->tx_errors +
11609 get_stat64(&hw_stats->tx_errors) +
11610 get_stat64(&hw_stats->tx_mac_errors) +
11611 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11612 get_stat64(&hw_stats->tx_discards);
11614 stats->multicast = old_stats->multicast +
11615 get_stat64(&hw_stats->rx_mcast_packets);
11616 stats->collisions = old_stats->collisions +
11617 get_stat64(&hw_stats->tx_collisions);
11619 stats->rx_length_errors = old_stats->rx_length_errors +
11620 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11621 get_stat64(&hw_stats->rx_undersize_packets);
11623 stats->rx_over_errors = old_stats->rx_over_errors +
11624 get_stat64(&hw_stats->rxbds_empty);
11625 stats->rx_frame_errors = old_stats->rx_frame_errors +
11626 get_stat64(&hw_stats->rx_align_errors);
11627 stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11628 get_stat64(&hw_stats->tx_discards);
11629 stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11630 get_stat64(&hw_stats->tx_carrier_sense_errors);
11632 stats->rx_crc_errors = old_stats->rx_crc_errors +
11633 tg3_calc_crc_errors(tp);
11635 stats->rx_missed_errors = old_stats->rx_missed_errors +
11636 get_stat64(&hw_stats->rx_discards);
11638 stats->rx_dropped = tp->rx_dropped;
11639 stats->tx_dropped = tp->tx_dropped;
11642 static int tg3_get_regs_len(struct net_device *dev)
11644 return TG3_REG_BLK_SIZE;
11647 static void tg3_get_regs(struct net_device *dev,
11648 struct ethtool_regs *regs, void *_p)
11650 struct tg3 *tp = netdev_priv(dev);
11654 memset(_p, 0, TG3_REG_BLK_SIZE);
11656 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11659 tg3_full_lock(tp, 0);
11661 tg3_dump_legacy_regs(tp, (u32 *)_p);
11663 tg3_full_unlock(tp);
11666 static int tg3_get_eeprom_len(struct net_device *dev)
11668 struct tg3 *tp = netdev_priv(dev);
11670 return tp->nvram_size;
11673 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11675 struct tg3 *tp = netdev_priv(dev);
11678 u32 i, offset, len, b_offset, b_count;
11681 if (tg3_flag(tp, NO_NVRAM))
11684 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11687 offset = eeprom->offset;
11691 eeprom->magic = TG3_EEPROM_MAGIC;
11694 /* adjustments to start on required 4 byte boundary */
11695 b_offset = offset & 3;
11696 b_count = 4 - b_offset;
11697 if (b_count > len) {
11698 /* i.e. offset=1 len=2 */
11701 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11704 memcpy(data, ((char *)&val) + b_offset, b_count);
11707 eeprom->len += b_count;
11710 /* read bytes up to the last 4 byte boundary */
11711 pd = &data[eeprom->len];
11712 for (i = 0; i < (len - (len & 3)); i += 4) {
11713 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11718 memcpy(pd + i, &val, 4);
11723 /* read last bytes not ending on 4 byte boundary */
11724 pd = &data[eeprom->len];
11726 b_offset = offset + len - b_count;
11727 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11730 memcpy(pd, &val, b_count);
11731 eeprom->len += b_count;
11736 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11738 struct tg3 *tp = netdev_priv(dev);
11740 u32 offset, len, b_offset, odd_len;
11744 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11747 if (tg3_flag(tp, NO_NVRAM) ||
11748 eeprom->magic != TG3_EEPROM_MAGIC)
11751 offset = eeprom->offset;
11754 if ((b_offset = (offset & 3))) {
11755 /* adjustments to start on required 4 byte boundary */
11756 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11767 /* adjustments to end on required 4 byte boundary */
11769 len = (len + 3) & ~3;
11770 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11776 if (b_offset || odd_len) {
11777 buf = kmalloc(len, GFP_KERNEL);
11781 memcpy(buf, &start, 4);
11783 memcpy(buf+len-4, &end, 4);
11784 memcpy(buf + b_offset, data, eeprom->len);
11787 ret = tg3_nvram_write_block(tp, offset, len, buf);
11795 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11797 struct tg3 *tp = netdev_priv(dev);
11799 if (tg3_flag(tp, USE_PHYLIB)) {
11800 struct phy_device *phydev;
11801 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11803 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11804 return phy_ethtool_gset(phydev, cmd);
11807 cmd->supported = (SUPPORTED_Autoneg);
11809 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11810 cmd->supported |= (SUPPORTED_1000baseT_Half |
11811 SUPPORTED_1000baseT_Full);
11813 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11814 cmd->supported |= (SUPPORTED_100baseT_Half |
11815 SUPPORTED_100baseT_Full |
11816 SUPPORTED_10baseT_Half |
11817 SUPPORTED_10baseT_Full |
11819 cmd->port = PORT_TP;
11821 cmd->supported |= SUPPORTED_FIBRE;
11822 cmd->port = PORT_FIBRE;
11825 cmd->advertising = tp->link_config.advertising;
11826 if (tg3_flag(tp, PAUSE_AUTONEG)) {
11827 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11828 if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11829 cmd->advertising |= ADVERTISED_Pause;
11831 cmd->advertising |= ADVERTISED_Pause |
11832 ADVERTISED_Asym_Pause;
11834 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11835 cmd->advertising |= ADVERTISED_Asym_Pause;
11838 if (netif_running(dev) && tp->link_up) {
11839 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11840 cmd->duplex = tp->link_config.active_duplex;
11841 cmd->lp_advertising = tp->link_config.rmt_adv;
11842 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11843 if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11844 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11846 cmd->eth_tp_mdix = ETH_TP_MDI;
11849 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11850 cmd->duplex = DUPLEX_UNKNOWN;
11851 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11853 cmd->phy_address = tp->phy_addr;
11854 cmd->transceiver = XCVR_INTERNAL;
11855 cmd->autoneg = tp->link_config.autoneg;
11861 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11863 struct tg3 *tp = netdev_priv(dev);
11864 u32 speed = ethtool_cmd_speed(cmd);
11866 if (tg3_flag(tp, USE_PHYLIB)) {
11867 struct phy_device *phydev;
11868 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11870 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11871 return phy_ethtool_sset(phydev, cmd);
11874 if (cmd->autoneg != AUTONEG_ENABLE &&
11875 cmd->autoneg != AUTONEG_DISABLE)
11878 if (cmd->autoneg == AUTONEG_DISABLE &&
11879 cmd->duplex != DUPLEX_FULL &&
11880 cmd->duplex != DUPLEX_HALF)
11883 if (cmd->autoneg == AUTONEG_ENABLE) {
11884 u32 mask = ADVERTISED_Autoneg |
11886 ADVERTISED_Asym_Pause;
11888 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11889 mask |= ADVERTISED_1000baseT_Half |
11890 ADVERTISED_1000baseT_Full;
11892 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11893 mask |= ADVERTISED_100baseT_Half |
11894 ADVERTISED_100baseT_Full |
11895 ADVERTISED_10baseT_Half |
11896 ADVERTISED_10baseT_Full |
11899 mask |= ADVERTISED_FIBRE;
11901 if (cmd->advertising & ~mask)
11904 mask &= (ADVERTISED_1000baseT_Half |
11905 ADVERTISED_1000baseT_Full |
11906 ADVERTISED_100baseT_Half |
11907 ADVERTISED_100baseT_Full |
11908 ADVERTISED_10baseT_Half |
11909 ADVERTISED_10baseT_Full);
11911 cmd->advertising &= mask;
11913 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11914 if (speed != SPEED_1000)
11917 if (cmd->duplex != DUPLEX_FULL)
11920 if (speed != SPEED_100 &&
11926 tg3_full_lock(tp, 0);
11928 tp->link_config.autoneg = cmd->autoneg;
11929 if (cmd->autoneg == AUTONEG_ENABLE) {
11930 tp->link_config.advertising = (cmd->advertising |
11931 ADVERTISED_Autoneg);
11932 tp->link_config.speed = SPEED_UNKNOWN;
11933 tp->link_config.duplex = DUPLEX_UNKNOWN;
11935 tp->link_config.advertising = 0;
11936 tp->link_config.speed = speed;
11937 tp->link_config.duplex = cmd->duplex;
11940 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11942 tg3_warn_mgmt_link_flap(tp);
11944 if (netif_running(dev))
11945 tg3_setup_phy(tp, true);
11947 tg3_full_unlock(tp);
11952 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11954 struct tg3 *tp = netdev_priv(dev);
11956 strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11957 strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11958 strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11959 strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11962 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11964 struct tg3 *tp = netdev_priv(dev);
11966 if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11967 wol->supported = WAKE_MAGIC;
11969 wol->supported = 0;
11971 if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11972 wol->wolopts = WAKE_MAGIC;
11973 memset(&wol->sopass, 0, sizeof(wol->sopass));
11976 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11978 struct tg3 *tp = netdev_priv(dev);
11979 struct device *dp = &tp->pdev->dev;
11981 if (wol->wolopts & ~WAKE_MAGIC)
11983 if ((wol->wolopts & WAKE_MAGIC) &&
11984 !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11987 device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11989 spin_lock_bh(&tp->lock);
11990 if (device_may_wakeup(dp))
11991 tg3_flag_set(tp, WOL_ENABLE);
11993 tg3_flag_clear(tp, WOL_ENABLE);
11994 spin_unlock_bh(&tp->lock);
11999 static u32 tg3_get_msglevel(struct net_device *dev)
12001 struct tg3 *tp = netdev_priv(dev);
12002 return tp->msg_enable;
12005 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12007 struct tg3 *tp = netdev_priv(dev);
12008 tp->msg_enable = value;
12011 static int tg3_nway_reset(struct net_device *dev)
12013 struct tg3 *tp = netdev_priv(dev);
12016 if (!netif_running(dev))
12019 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12022 tg3_warn_mgmt_link_flap(tp);
12024 if (tg3_flag(tp, USE_PHYLIB)) {
12025 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12027 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12031 spin_lock_bh(&tp->lock);
12033 tg3_readphy(tp, MII_BMCR, &bmcr);
12034 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12035 ((bmcr & BMCR_ANENABLE) ||
12036 (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12037 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12041 spin_unlock_bh(&tp->lock);
12047 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12049 struct tg3 *tp = netdev_priv(dev);
12051 ering->rx_max_pending = tp->rx_std_ring_mask;
12052 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12053 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12055 ering->rx_jumbo_max_pending = 0;
12057 ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12059 ering->rx_pending = tp->rx_pending;
12060 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12061 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12063 ering->rx_jumbo_pending = 0;
12065 ering->tx_pending = tp->napi[0].tx_pending;
12068 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12070 struct tg3 *tp = netdev_priv(dev);
12071 int i, irq_sync = 0, err = 0;
12073 if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12074 (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12075 (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12076 (ering->tx_pending <= MAX_SKB_FRAGS) ||
12077 (tg3_flag(tp, TSO_BUG) &&
12078 (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12081 if (netif_running(dev)) {
12083 tg3_netif_stop(tp);
12087 tg3_full_lock(tp, irq_sync);
12089 tp->rx_pending = ering->rx_pending;
12091 if (tg3_flag(tp, MAX_RXPEND_64) &&
12092 tp->rx_pending > 63)
12093 tp->rx_pending = 63;
12095 if (tg3_flag(tp, JUMBO_RING_ENABLE))
12096 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12098 for (i = 0; i < tp->irq_max; i++)
12099 tp->napi[i].tx_pending = ering->tx_pending;
12101 if (netif_running(dev)) {
12102 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12103 err = tg3_restart_hw(tp, false);
12105 tg3_netif_start(tp);
12108 tg3_full_unlock(tp);
12110 if (irq_sync && !err)
12116 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12118 struct tg3 *tp = netdev_priv(dev);
12120 epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12122 if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12123 epause->rx_pause = 1;
12125 epause->rx_pause = 0;
12127 if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12128 epause->tx_pause = 1;
12130 epause->tx_pause = 0;
12133 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12135 struct tg3 *tp = netdev_priv(dev);
12138 if (tp->link_config.autoneg == AUTONEG_ENABLE)
12139 tg3_warn_mgmt_link_flap(tp);
12141 if (tg3_flag(tp, USE_PHYLIB)) {
12143 struct phy_device *phydev;
12145 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12147 if (!(phydev->supported & SUPPORTED_Pause) ||
12148 (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12149 (epause->rx_pause != epause->tx_pause)))
12152 tp->link_config.flowctrl = 0;
12153 if (epause->rx_pause) {
12154 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12156 if (epause->tx_pause) {
12157 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12158 newadv = ADVERTISED_Pause;
12160 newadv = ADVERTISED_Pause |
12161 ADVERTISED_Asym_Pause;
12162 } else if (epause->tx_pause) {
12163 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12164 newadv = ADVERTISED_Asym_Pause;
12168 if (epause->autoneg)
12169 tg3_flag_set(tp, PAUSE_AUTONEG);
12171 tg3_flag_clear(tp, PAUSE_AUTONEG);
12173 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12174 u32 oldadv = phydev->advertising &
12175 (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12176 if (oldadv != newadv) {
12177 phydev->advertising &=
12178 ~(ADVERTISED_Pause |
12179 ADVERTISED_Asym_Pause);
12180 phydev->advertising |= newadv;
12181 if (phydev->autoneg) {
12183 * Always renegotiate the link to
12184 * inform our link partner of our
12185 * flow control settings, even if the
12186 * flow control is forced. Let
12187 * tg3_adjust_link() do the final
12188 * flow control setup.
12190 return phy_start_aneg(phydev);
12194 if (!epause->autoneg)
12195 tg3_setup_flow_control(tp, 0, 0);
12197 tp->link_config.advertising &=
12198 ~(ADVERTISED_Pause |
12199 ADVERTISED_Asym_Pause);
12200 tp->link_config.advertising |= newadv;
12205 if (netif_running(dev)) {
12206 tg3_netif_stop(tp);
12210 tg3_full_lock(tp, irq_sync);
12212 if (epause->autoneg)
12213 tg3_flag_set(tp, PAUSE_AUTONEG);
12215 tg3_flag_clear(tp, PAUSE_AUTONEG);
12216 if (epause->rx_pause)
12217 tp->link_config.flowctrl |= FLOW_CTRL_RX;
12219 tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12220 if (epause->tx_pause)
12221 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12223 tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12225 if (netif_running(dev)) {
12226 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12227 err = tg3_restart_hw(tp, false);
12229 tg3_netif_start(tp);
12232 tg3_full_unlock(tp);
12235 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12240 static int tg3_get_sset_count(struct net_device *dev, int sset)
12244 return TG3_NUM_TEST;
12246 return TG3_NUM_STATS;
12248 return -EOPNOTSUPP;
12252 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12253 u32 *rules __always_unused)
12255 struct tg3 *tp = netdev_priv(dev);
12257 if (!tg3_flag(tp, SUPPORT_MSIX))
12258 return -EOPNOTSUPP;
12260 switch (info->cmd) {
12261 case ETHTOOL_GRXRINGS:
12262 if (netif_running(tp->dev))
12263 info->data = tp->rxq_cnt;
12265 info->data = num_online_cpus();
12266 if (info->data > TG3_RSS_MAX_NUM_QS)
12267 info->data = TG3_RSS_MAX_NUM_QS;
12270 /* The first interrupt vector only
12271 * handles link interrupts.
12277 return -EOPNOTSUPP;
12281 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12284 struct tg3 *tp = netdev_priv(dev);
12286 if (tg3_flag(tp, SUPPORT_MSIX))
12287 size = TG3_RSS_INDIR_TBL_SIZE;
12292 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12294 struct tg3 *tp = netdev_priv(dev);
12297 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12298 indir[i] = tp->rss_ind_tbl[i];
12303 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12305 struct tg3 *tp = netdev_priv(dev);
12308 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12309 tp->rss_ind_tbl[i] = indir[i];
12311 if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12314 /* It is legal to write the indirection
12315 * table while the device is running.
12317 tg3_full_lock(tp, 0);
12318 tg3_rss_write_indir_tbl(tp);
12319 tg3_full_unlock(tp);
12324 static void tg3_get_channels(struct net_device *dev,
12325 struct ethtool_channels *channel)
12327 struct tg3 *tp = netdev_priv(dev);
12328 u32 deflt_qs = netif_get_num_default_rss_queues();
12330 channel->max_rx = tp->rxq_max;
12331 channel->max_tx = tp->txq_max;
12333 if (netif_running(dev)) {
12334 channel->rx_count = tp->rxq_cnt;
12335 channel->tx_count = tp->txq_cnt;
12338 channel->rx_count = tp->rxq_req;
12340 channel->rx_count = min(deflt_qs, tp->rxq_max);
12343 channel->tx_count = tp->txq_req;
12345 channel->tx_count = min(deflt_qs, tp->txq_max);
12349 static int tg3_set_channels(struct net_device *dev,
12350 struct ethtool_channels *channel)
12352 struct tg3 *tp = netdev_priv(dev);
12354 if (!tg3_flag(tp, SUPPORT_MSIX))
12355 return -EOPNOTSUPP;
12357 if (channel->rx_count > tp->rxq_max ||
12358 channel->tx_count > tp->txq_max)
12361 tp->rxq_req = channel->rx_count;
12362 tp->txq_req = channel->tx_count;
12364 if (!netif_running(dev))
12369 tg3_carrier_off(tp);
12371 tg3_start(tp, true, false, false);
12376 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12378 switch (stringset) {
12380 memcpy(buf, ðtool_stats_keys, sizeof(ethtool_stats_keys));
12383 memcpy(buf, ðtool_test_keys, sizeof(ethtool_test_keys));
12386 WARN_ON(1); /* we need a WARN() */
12391 static int tg3_set_phys_id(struct net_device *dev,
12392 enum ethtool_phys_id_state state)
12394 struct tg3 *tp = netdev_priv(dev);
12396 if (!netif_running(tp->dev))
12400 case ETHTOOL_ID_ACTIVE:
12401 return 1; /* cycle on/off once per second */
12403 case ETHTOOL_ID_ON:
12404 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12405 LED_CTRL_1000MBPS_ON |
12406 LED_CTRL_100MBPS_ON |
12407 LED_CTRL_10MBPS_ON |
12408 LED_CTRL_TRAFFIC_OVERRIDE |
12409 LED_CTRL_TRAFFIC_BLINK |
12410 LED_CTRL_TRAFFIC_LED);
12413 case ETHTOOL_ID_OFF:
12414 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12415 LED_CTRL_TRAFFIC_OVERRIDE);
12418 case ETHTOOL_ID_INACTIVE:
12419 tw32(MAC_LED_CTRL, tp->led_ctrl);
12426 static void tg3_get_ethtool_stats(struct net_device *dev,
12427 struct ethtool_stats *estats, u64 *tmp_stats)
12429 struct tg3 *tp = netdev_priv(dev);
12432 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12434 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12437 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12441 u32 offset = 0, len = 0;
12444 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12447 if (magic == TG3_EEPROM_MAGIC) {
12448 for (offset = TG3_NVM_DIR_START;
12449 offset < TG3_NVM_DIR_END;
12450 offset += TG3_NVM_DIRENT_SIZE) {
12451 if (tg3_nvram_read(tp, offset, &val))
12454 if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12455 TG3_NVM_DIRTYPE_EXTVPD)
12459 if (offset != TG3_NVM_DIR_END) {
12460 len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12461 if (tg3_nvram_read(tp, offset + 4, &offset))
12464 offset = tg3_nvram_logical_addr(tp, offset);
12468 if (!offset || !len) {
12469 offset = TG3_NVM_VPD_OFF;
12470 len = TG3_NVM_VPD_LEN;
12473 buf = kmalloc(len, GFP_KERNEL);
12477 if (magic == TG3_EEPROM_MAGIC) {
12478 for (i = 0; i < len; i += 4) {
12479 /* The data is in little-endian format in NVRAM.
12480 * Use the big-endian read routines to preserve
12481 * the byte order as it exists in NVRAM.
12483 if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12489 unsigned int pos = 0;
12491 ptr = (u8 *)&buf[0];
12492 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12493 cnt = pci_read_vpd(tp->pdev, pos,
12495 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12513 #define NVRAM_TEST_SIZE 0x100
12514 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE 0x14
12515 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE 0x18
12516 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE 0x1c
12517 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE 0x20
12518 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE 0x24
12519 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE 0x50
12520 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12521 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12523 static int tg3_test_nvram(struct tg3 *tp)
12525 u32 csum, magic, len;
12527 int i, j, k, err = 0, size;
12529 if (tg3_flag(tp, NO_NVRAM))
12532 if (tg3_nvram_read(tp, 0, &magic) != 0)
12535 if (magic == TG3_EEPROM_MAGIC)
12536 size = NVRAM_TEST_SIZE;
12537 else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12538 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12539 TG3_EEPROM_SB_FORMAT_1) {
12540 switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12541 case TG3_EEPROM_SB_REVISION_0:
12542 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12544 case TG3_EEPROM_SB_REVISION_2:
12545 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12547 case TG3_EEPROM_SB_REVISION_3:
12548 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12550 case TG3_EEPROM_SB_REVISION_4:
12551 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12553 case TG3_EEPROM_SB_REVISION_5:
12554 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12556 case TG3_EEPROM_SB_REVISION_6:
12557 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12564 } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12565 size = NVRAM_SELFBOOT_HW_SIZE;
12569 buf = kmalloc(size, GFP_KERNEL);
12574 for (i = 0, j = 0; i < size; i += 4, j++) {
12575 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12582 /* Selfboot format */
12583 magic = be32_to_cpu(buf[0]);
12584 if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12585 TG3_EEPROM_MAGIC_FW) {
12586 u8 *buf8 = (u8 *) buf, csum8 = 0;
12588 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12589 TG3_EEPROM_SB_REVISION_2) {
12590 /* For rev 2, the csum doesn't include the MBA. */
12591 for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12593 for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12596 for (i = 0; i < size; i++)
12609 if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12610 TG3_EEPROM_MAGIC_HW) {
12611 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12612 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12613 u8 *buf8 = (u8 *) buf;
12615 /* Separate the parity bits and the data bytes. */
12616 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12617 if ((i == 0) || (i == 8)) {
12621 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12622 parity[k++] = buf8[i] & msk;
12624 } else if (i == 16) {
12628 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12629 parity[k++] = buf8[i] & msk;
12632 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12633 parity[k++] = buf8[i] & msk;
12636 data[j++] = buf8[i];
12640 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12641 u8 hw8 = hweight8(data[i]);
12643 if ((hw8 & 0x1) && parity[i])
12645 else if (!(hw8 & 0x1) && !parity[i])
12654 /* Bootstrap checksum at offset 0x10 */
12655 csum = calc_crc((unsigned char *) buf, 0x10);
12656 if (csum != le32_to_cpu(buf[0x10/4]))
12659 /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12660 csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12661 if (csum != le32_to_cpu(buf[0xfc/4]))
12666 buf = tg3_vpd_readblock(tp, &len);
12670 i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12672 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12676 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12679 i += PCI_VPD_LRDT_TAG_SIZE;
12680 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12681 PCI_VPD_RO_KEYWORD_CHKSUM);
12685 j += PCI_VPD_INFO_FLD_HDR_SIZE;
12687 for (i = 0; i <= j; i++)
12688 csum8 += ((u8 *)buf)[i];
12702 #define TG3_SERDES_TIMEOUT_SEC 2
12703 #define TG3_COPPER_TIMEOUT_SEC 6
12705 static int tg3_test_link(struct tg3 *tp)
12709 if (!netif_running(tp->dev))
12712 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12713 max = TG3_SERDES_TIMEOUT_SEC;
12715 max = TG3_COPPER_TIMEOUT_SEC;
12717 for (i = 0; i < max; i++) {
12721 if (msleep_interruptible(1000))
12728 /* Only test the commonly used registers */
12729 static int tg3_test_registers(struct tg3 *tp)
12731 int i, is_5705, is_5750;
12732 u32 offset, read_mask, write_mask, val, save_val, read_val;
12736 #define TG3_FL_5705 0x1
12737 #define TG3_FL_NOT_5705 0x2
12738 #define TG3_FL_NOT_5788 0x4
12739 #define TG3_FL_NOT_5750 0x8
12743 /* MAC Control Registers */
12744 { MAC_MODE, TG3_FL_NOT_5705,
12745 0x00000000, 0x00ef6f8c },
12746 { MAC_MODE, TG3_FL_5705,
12747 0x00000000, 0x01ef6b8c },
12748 { MAC_STATUS, TG3_FL_NOT_5705,
12749 0x03800107, 0x00000000 },
12750 { MAC_STATUS, TG3_FL_5705,
12751 0x03800100, 0x00000000 },
12752 { MAC_ADDR_0_HIGH, 0x0000,
12753 0x00000000, 0x0000ffff },
12754 { MAC_ADDR_0_LOW, 0x0000,
12755 0x00000000, 0xffffffff },
12756 { MAC_RX_MTU_SIZE, 0x0000,
12757 0x00000000, 0x0000ffff },
12758 { MAC_TX_MODE, 0x0000,
12759 0x00000000, 0x00000070 },
12760 { MAC_TX_LENGTHS, 0x0000,
12761 0x00000000, 0x00003fff },
12762 { MAC_RX_MODE, TG3_FL_NOT_5705,
12763 0x00000000, 0x000007fc },
12764 { MAC_RX_MODE, TG3_FL_5705,
12765 0x00000000, 0x000007dc },
12766 { MAC_HASH_REG_0, 0x0000,
12767 0x00000000, 0xffffffff },
12768 { MAC_HASH_REG_1, 0x0000,
12769 0x00000000, 0xffffffff },
12770 { MAC_HASH_REG_2, 0x0000,
12771 0x00000000, 0xffffffff },
12772 { MAC_HASH_REG_3, 0x0000,
12773 0x00000000, 0xffffffff },
12775 /* Receive Data and Receive BD Initiator Control Registers. */
12776 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12777 0x00000000, 0xffffffff },
12778 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12779 0x00000000, 0xffffffff },
12780 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12781 0x00000000, 0x00000003 },
12782 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12783 0x00000000, 0xffffffff },
12784 { RCVDBDI_STD_BD+0, 0x0000,
12785 0x00000000, 0xffffffff },
12786 { RCVDBDI_STD_BD+4, 0x0000,
12787 0x00000000, 0xffffffff },
12788 { RCVDBDI_STD_BD+8, 0x0000,
12789 0x00000000, 0xffff0002 },
12790 { RCVDBDI_STD_BD+0xc, 0x0000,
12791 0x00000000, 0xffffffff },
12793 /* Receive BD Initiator Control Registers. */
12794 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12795 0x00000000, 0xffffffff },
12796 { RCVBDI_STD_THRESH, TG3_FL_5705,
12797 0x00000000, 0x000003ff },
12798 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12799 0x00000000, 0xffffffff },
12801 /* Host Coalescing Control Registers. */
12802 { HOSTCC_MODE, TG3_FL_NOT_5705,
12803 0x00000000, 0x00000004 },
12804 { HOSTCC_MODE, TG3_FL_5705,
12805 0x00000000, 0x000000f6 },
12806 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12807 0x00000000, 0xffffffff },
12808 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12809 0x00000000, 0x000003ff },
12810 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12811 0x00000000, 0xffffffff },
12812 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12813 0x00000000, 0x000003ff },
12814 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12815 0x00000000, 0xffffffff },
12816 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12817 0x00000000, 0x000000ff },
12818 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12819 0x00000000, 0xffffffff },
12820 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12821 0x00000000, 0x000000ff },
12822 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12823 0x00000000, 0xffffffff },
12824 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12825 0x00000000, 0xffffffff },
12826 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12827 0x00000000, 0xffffffff },
12828 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12829 0x00000000, 0x000000ff },
12830 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12831 0x00000000, 0xffffffff },
12832 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12833 0x00000000, 0x000000ff },
12834 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12835 0x00000000, 0xffffffff },
12836 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12837 0x00000000, 0xffffffff },
12838 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12839 0x00000000, 0xffffffff },
12840 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12841 0x00000000, 0xffffffff },
12842 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12843 0x00000000, 0xffffffff },
12844 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12845 0xffffffff, 0x00000000 },
12846 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12847 0xffffffff, 0x00000000 },
12849 /* Buffer Manager Control Registers. */
12850 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12851 0x00000000, 0x007fff80 },
12852 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12853 0x00000000, 0x007fffff },
12854 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12855 0x00000000, 0x0000003f },
12856 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12857 0x00000000, 0x000001ff },
12858 { BUFMGR_MB_HIGH_WATER, 0x0000,
12859 0x00000000, 0x000001ff },
12860 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12861 0xffffffff, 0x00000000 },
12862 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12863 0xffffffff, 0x00000000 },
12865 /* Mailbox Registers */
12866 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12867 0x00000000, 0x000001ff },
12868 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12869 0x00000000, 0x000001ff },
12870 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12871 0x00000000, 0x000007ff },
12872 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12873 0x00000000, 0x000001ff },
12875 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12878 is_5705 = is_5750 = 0;
12879 if (tg3_flag(tp, 5705_PLUS)) {
12881 if (tg3_flag(tp, 5750_PLUS))
12885 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12886 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12889 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12892 if (tg3_flag(tp, IS_5788) &&
12893 (reg_tbl[i].flags & TG3_FL_NOT_5788))
12896 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12899 offset = (u32) reg_tbl[i].offset;
12900 read_mask = reg_tbl[i].read_mask;
12901 write_mask = reg_tbl[i].write_mask;
12903 /* Save the original register content */
12904 save_val = tr32(offset);
12906 /* Determine the read-only value. */
12907 read_val = save_val & read_mask;
12909 /* Write zero to the register, then make sure the read-only bits
12910 * are not changed and the read/write bits are all zeros.
12914 val = tr32(offset);
12916 /* Test the read-only and read/write bits. */
12917 if (((val & read_mask) != read_val) || (val & write_mask))
12920 /* Write ones to all the bits defined by RdMask and WrMask, then
12921 * make sure the read-only bits are not changed and the
12922 * read/write bits are all ones.
12924 tw32(offset, read_mask | write_mask);
12926 val = tr32(offset);
12928 /* Test the read-only bits. */
12929 if ((val & read_mask) != read_val)
12932 /* Test the read/write bits. */
12933 if ((val & write_mask) != write_mask)
12936 tw32(offset, save_val);
12942 if (netif_msg_hw(tp))
12943 netdev_err(tp->dev,
12944 "Register test failed at offset %x\n", offset);
12945 tw32(offset, save_val);
12949 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12951 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12955 for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12956 for (j = 0; j < len; j += 4) {
12959 tg3_write_mem(tp, offset + j, test_pattern[i]);
12960 tg3_read_mem(tp, offset + j, &val);
12961 if (val != test_pattern[i])
12968 static int tg3_test_memory(struct tg3 *tp)
12970 static struct mem_entry {
12973 } mem_tbl_570x[] = {
12974 { 0x00000000, 0x00b50},
12975 { 0x00002000, 0x1c000},
12976 { 0xffffffff, 0x00000}
12977 }, mem_tbl_5705[] = {
12978 { 0x00000100, 0x0000c},
12979 { 0x00000200, 0x00008},
12980 { 0x00004000, 0x00800},
12981 { 0x00006000, 0x01000},
12982 { 0x00008000, 0x02000},
12983 { 0x00010000, 0x0e000},
12984 { 0xffffffff, 0x00000}
12985 }, mem_tbl_5755[] = {
12986 { 0x00000200, 0x00008},
12987 { 0x00004000, 0x00800},
12988 { 0x00006000, 0x00800},
12989 { 0x00008000, 0x02000},
12990 { 0x00010000, 0x0c000},
12991 { 0xffffffff, 0x00000}
12992 }, mem_tbl_5906[] = {
12993 { 0x00000200, 0x00008},
12994 { 0x00004000, 0x00400},
12995 { 0x00006000, 0x00400},
12996 { 0x00008000, 0x01000},
12997 { 0x00010000, 0x01000},
12998 { 0xffffffff, 0x00000}
12999 }, mem_tbl_5717[] = {
13000 { 0x00000200, 0x00008},
13001 { 0x00010000, 0x0a000},
13002 { 0x00020000, 0x13c00},
13003 { 0xffffffff, 0x00000}
13004 }, mem_tbl_57765[] = {
13005 { 0x00000200, 0x00008},
13006 { 0x00004000, 0x00800},
13007 { 0x00006000, 0x09800},
13008 { 0x00010000, 0x0a000},
13009 { 0xffffffff, 0x00000}
13011 struct mem_entry *mem_tbl;
13015 if (tg3_flag(tp, 5717_PLUS))
13016 mem_tbl = mem_tbl_5717;
13017 else if (tg3_flag(tp, 57765_CLASS) ||
13018 tg3_asic_rev(tp) == ASIC_REV_5762)
13019 mem_tbl = mem_tbl_57765;
13020 else if (tg3_flag(tp, 5755_PLUS))
13021 mem_tbl = mem_tbl_5755;
13022 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13023 mem_tbl = mem_tbl_5906;
13024 else if (tg3_flag(tp, 5705_PLUS))
13025 mem_tbl = mem_tbl_5705;
13027 mem_tbl = mem_tbl_570x;
13029 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13030 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13038 #define TG3_TSO_MSS 500
13040 #define TG3_TSO_IP_HDR_LEN 20
13041 #define TG3_TSO_TCP_HDR_LEN 20
13042 #define TG3_TSO_TCP_OPT_LEN 12
13044 static const u8 tg3_tso_header[] = {
13046 0x45, 0x00, 0x00, 0x00,
13047 0x00, 0x00, 0x40, 0x00,
13048 0x40, 0x06, 0x00, 0x00,
13049 0x0a, 0x00, 0x00, 0x01,
13050 0x0a, 0x00, 0x00, 0x02,
13051 0x0d, 0x00, 0xe0, 0x00,
13052 0x00, 0x00, 0x01, 0x00,
13053 0x00, 0x00, 0x02, 0x00,
13054 0x80, 0x10, 0x10, 0x00,
13055 0x14, 0x09, 0x00, 0x00,
13056 0x01, 0x01, 0x08, 0x0a,
13057 0x11, 0x11, 0x11, 0x11,
13058 0x11, 0x11, 0x11, 0x11,
13061 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13063 u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13064 u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13066 struct sk_buff *skb;
13067 u8 *tx_data, *rx_data;
13069 int num_pkts, tx_len, rx_len, i, err;
13070 struct tg3_rx_buffer_desc *desc;
13071 struct tg3_napi *tnapi, *rnapi;
13072 struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13074 tnapi = &tp->napi[0];
13075 rnapi = &tp->napi[0];
13076 if (tp->irq_cnt > 1) {
13077 if (tg3_flag(tp, ENABLE_RSS))
13078 rnapi = &tp->napi[1];
13079 if (tg3_flag(tp, ENABLE_TSS))
13080 tnapi = &tp->napi[1];
13082 coal_now = tnapi->coal_now | rnapi->coal_now;
13087 skb = netdev_alloc_skb(tp->dev, tx_len);
13091 tx_data = skb_put(skb, tx_len);
13092 memcpy(tx_data, tp->dev->dev_addr, 6);
13093 memset(tx_data + 6, 0x0, 8);
13095 tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13097 if (tso_loopback) {
13098 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13100 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13101 TG3_TSO_TCP_OPT_LEN;
13103 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13104 sizeof(tg3_tso_header));
13107 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13108 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13110 /* Set the total length field in the IP header */
13111 iph->tot_len = htons((u16)(mss + hdr_len));
13113 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13114 TXD_FLAG_CPU_POST_DMA);
13116 if (tg3_flag(tp, HW_TSO_1) ||
13117 tg3_flag(tp, HW_TSO_2) ||
13118 tg3_flag(tp, HW_TSO_3)) {
13120 val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13121 th = (struct tcphdr *)&tx_data[val];
13124 base_flags |= TXD_FLAG_TCPUDP_CSUM;
13126 if (tg3_flag(tp, HW_TSO_3)) {
13127 mss |= (hdr_len & 0xc) << 12;
13128 if (hdr_len & 0x10)
13129 base_flags |= 0x00000010;
13130 base_flags |= (hdr_len & 0x3e0) << 5;
13131 } else if (tg3_flag(tp, HW_TSO_2))
13132 mss |= hdr_len << 9;
13133 else if (tg3_flag(tp, HW_TSO_1) ||
13134 tg3_asic_rev(tp) == ASIC_REV_5705) {
13135 mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13137 base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13140 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13143 data_off = ETH_HLEN;
13145 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13146 tx_len > VLAN_ETH_FRAME_LEN)
13147 base_flags |= TXD_FLAG_JMB_PKT;
13150 for (i = data_off; i < tx_len; i++)
13151 tx_data[i] = (u8) (i & 0xff);
13153 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13154 if (pci_dma_mapping_error(tp->pdev, map)) {
13155 dev_kfree_skb(skb);
13159 val = tnapi->tx_prod;
13160 tnapi->tx_buffers[val].skb = skb;
13161 dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13163 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13168 rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13170 budget = tg3_tx_avail(tnapi);
13171 if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13172 base_flags | TXD_FLAG_END, mss, 0)) {
13173 tnapi->tx_buffers[val].skb = NULL;
13174 dev_kfree_skb(skb);
13180 /* Sync BD data before updating mailbox */
13183 tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13184 tr32_mailbox(tnapi->prodmbox);
13188 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
13189 for (i = 0; i < 35; i++) {
13190 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13195 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13196 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13197 if ((tx_idx == tnapi->tx_prod) &&
13198 (rx_idx == (rx_start_idx + num_pkts)))
13202 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13203 dev_kfree_skb(skb);
13205 if (tx_idx != tnapi->tx_prod)
13208 if (rx_idx != rx_start_idx + num_pkts)
13212 while (rx_idx != rx_start_idx) {
13213 desc = &rnapi->rx_rcb[rx_start_idx++];
13214 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13215 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13217 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13218 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13221 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13224 if (!tso_loopback) {
13225 if (rx_len != tx_len)
13228 if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13229 if (opaque_key != RXD_OPAQUE_RING_STD)
13232 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13235 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13236 (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13237 >> RXD_TCPCSUM_SHIFT != 0xffff) {
13241 if (opaque_key == RXD_OPAQUE_RING_STD) {
13242 rx_data = tpr->rx_std_buffers[desc_idx].data;
13243 map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13245 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13246 rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13247 map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13252 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13253 PCI_DMA_FROMDEVICE);
13255 rx_data += TG3_RX_OFFSET(tp);
13256 for (i = data_off; i < rx_len; i++, val++) {
13257 if (*(rx_data + i) != (u8) (val & 0xff))
13264 /* tg3_free_rings will unmap and free the rx_data */
13269 #define TG3_STD_LOOPBACK_FAILED 1
13270 #define TG3_JMB_LOOPBACK_FAILED 2
13271 #define TG3_TSO_LOOPBACK_FAILED 4
13272 #define TG3_LOOPBACK_FAILED \
13273 (TG3_STD_LOOPBACK_FAILED | \
13274 TG3_JMB_LOOPBACK_FAILED | \
13275 TG3_TSO_LOOPBACK_FAILED)
13277 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13281 u32 jmb_pkt_sz = 9000;
13284 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13286 eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13287 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13289 if (!netif_running(tp->dev)) {
13290 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13291 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13293 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13297 err = tg3_reset_hw(tp, true);
13299 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13300 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13302 data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13306 if (tg3_flag(tp, ENABLE_RSS)) {
13309 /* Reroute all rx packets to the 1st queue */
13310 for (i = MAC_RSS_INDIR_TBL_0;
13311 i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13315 /* HW errata - mac loopback fails in some cases on 5780.
13316 * Normal traffic and PHY loopback are not affected by
13317 * errata. Also, the MAC loopback test is deprecated for
13318 * all newer ASIC revisions.
13320 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13321 !tg3_flag(tp, CPMU_PRESENT)) {
13322 tg3_mac_loopback(tp, true);
13324 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13325 data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13327 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13328 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13329 data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13331 tg3_mac_loopback(tp, false);
13334 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13335 !tg3_flag(tp, USE_PHYLIB)) {
13338 tg3_phy_lpbk_set(tp, 0, false);
13340 /* Wait for link */
13341 for (i = 0; i < 100; i++) {
13342 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13347 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13348 data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13349 if (tg3_flag(tp, TSO_CAPABLE) &&
13350 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13351 data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13352 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13353 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13354 data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13357 tg3_phy_lpbk_set(tp, 0, true);
13359 /* All link indications report up, but the hardware
13360 * isn't really ready for about 20 msec. Double it
13365 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13366 data[TG3_EXT_LOOPB_TEST] |=
13367 TG3_STD_LOOPBACK_FAILED;
13368 if (tg3_flag(tp, TSO_CAPABLE) &&
13369 tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13370 data[TG3_EXT_LOOPB_TEST] |=
13371 TG3_TSO_LOOPBACK_FAILED;
13372 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13373 tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13374 data[TG3_EXT_LOOPB_TEST] |=
13375 TG3_JMB_LOOPBACK_FAILED;
13378 /* Re-enable gphy autopowerdown. */
13379 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13380 tg3_phy_toggle_apd(tp, true);
13383 err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13384 data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13387 tp->phy_flags |= eee_cap;
13392 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13395 struct tg3 *tp = netdev_priv(dev);
13396 bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13398 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13399 tg3_power_up(tp)) {
13400 etest->flags |= ETH_TEST_FL_FAILED;
13401 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13405 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13407 if (tg3_test_nvram(tp) != 0) {
13408 etest->flags |= ETH_TEST_FL_FAILED;
13409 data[TG3_NVRAM_TEST] = 1;
13411 if (!doextlpbk && tg3_test_link(tp)) {
13412 etest->flags |= ETH_TEST_FL_FAILED;
13413 data[TG3_LINK_TEST] = 1;
13415 if (etest->flags & ETH_TEST_FL_OFFLINE) {
13416 int err, err2 = 0, irq_sync = 0;
13418 if (netif_running(dev)) {
13420 tg3_netif_stop(tp);
13424 tg3_full_lock(tp, irq_sync);
13425 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13426 err = tg3_nvram_lock(tp);
13427 tg3_halt_cpu(tp, RX_CPU_BASE);
13428 if (!tg3_flag(tp, 5705_PLUS))
13429 tg3_halt_cpu(tp, TX_CPU_BASE);
13431 tg3_nvram_unlock(tp);
13433 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13436 if (tg3_test_registers(tp) != 0) {
13437 etest->flags |= ETH_TEST_FL_FAILED;
13438 data[TG3_REGISTER_TEST] = 1;
13441 if (tg3_test_memory(tp) != 0) {
13442 etest->flags |= ETH_TEST_FL_FAILED;
13443 data[TG3_MEMORY_TEST] = 1;
13447 etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13449 if (tg3_test_loopback(tp, data, doextlpbk))
13450 etest->flags |= ETH_TEST_FL_FAILED;
13452 tg3_full_unlock(tp);
13454 if (tg3_test_interrupt(tp) != 0) {
13455 etest->flags |= ETH_TEST_FL_FAILED;
13456 data[TG3_INTERRUPT_TEST] = 1;
13459 tg3_full_lock(tp, 0);
13461 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13462 if (netif_running(dev)) {
13463 tg3_flag_set(tp, INIT_COMPLETE);
13464 err2 = tg3_restart_hw(tp, true);
13466 tg3_netif_start(tp);
13469 tg3_full_unlock(tp);
13471 if (irq_sync && !err2)
13474 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13475 tg3_power_down(tp);
13479 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13480 struct ifreq *ifr, int cmd)
13482 struct tg3 *tp = netdev_priv(dev);
13483 struct hwtstamp_config stmpconf;
13485 if (!tg3_flag(tp, PTP_CAPABLE))
13488 if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13491 if (stmpconf.flags)
13494 switch (stmpconf.tx_type) {
13495 case HWTSTAMP_TX_ON:
13496 tg3_flag_set(tp, TX_TSTAMP_EN);
13498 case HWTSTAMP_TX_OFF:
13499 tg3_flag_clear(tp, TX_TSTAMP_EN);
13505 switch (stmpconf.rx_filter) {
13506 case HWTSTAMP_FILTER_NONE:
13509 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13510 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13511 TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13513 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13514 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13515 TG3_RX_PTP_CTL_SYNC_EVNT;
13517 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13518 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13519 TG3_RX_PTP_CTL_DELAY_REQ;
13521 case HWTSTAMP_FILTER_PTP_V2_EVENT:
13522 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13523 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13525 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13526 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13527 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13529 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13530 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13531 TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13533 case HWTSTAMP_FILTER_PTP_V2_SYNC:
13534 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13535 TG3_RX_PTP_CTL_SYNC_EVNT;
13537 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13538 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13539 TG3_RX_PTP_CTL_SYNC_EVNT;
13541 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13542 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13543 TG3_RX_PTP_CTL_SYNC_EVNT;
13545 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13546 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13547 TG3_RX_PTP_CTL_DELAY_REQ;
13549 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13550 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13551 TG3_RX_PTP_CTL_DELAY_REQ;
13553 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13554 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13555 TG3_RX_PTP_CTL_DELAY_REQ;
13561 if (netif_running(dev) && tp->rxptpctl)
13562 tw32(TG3_RX_PTP_CTL,
13563 tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13565 return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13569 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13571 struct mii_ioctl_data *data = if_mii(ifr);
13572 struct tg3 *tp = netdev_priv(dev);
13575 if (tg3_flag(tp, USE_PHYLIB)) {
13576 struct phy_device *phydev;
13577 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13579 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13580 return phy_mii_ioctl(phydev, ifr, cmd);
13585 data->phy_id = tp->phy_addr;
13588 case SIOCGMIIREG: {
13591 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13592 break; /* We have no PHY */
13594 if (!netif_running(dev))
13597 spin_lock_bh(&tp->lock);
13598 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13599 data->reg_num & 0x1f, &mii_regval);
13600 spin_unlock_bh(&tp->lock);
13602 data->val_out = mii_regval;
13608 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13609 break; /* We have no PHY */
13611 if (!netif_running(dev))
13614 spin_lock_bh(&tp->lock);
13615 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13616 data->reg_num & 0x1f, data->val_in);
13617 spin_unlock_bh(&tp->lock);
13621 case SIOCSHWTSTAMP:
13622 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13628 return -EOPNOTSUPP;
13631 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13633 struct tg3 *tp = netdev_priv(dev);
13635 memcpy(ec, &tp->coal, sizeof(*ec));
13639 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13641 struct tg3 *tp = netdev_priv(dev);
13642 u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13643 u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13645 if (!tg3_flag(tp, 5705_PLUS)) {
13646 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13647 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13648 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13649 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13652 if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13653 (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13654 (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13655 (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13656 (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13657 (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13658 (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13659 (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13660 (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13661 (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13664 /* No rx interrupts will be generated if both are zero */
13665 if ((ec->rx_coalesce_usecs == 0) &&
13666 (ec->rx_max_coalesced_frames == 0))
13669 /* No tx interrupts will be generated if both are zero */
13670 if ((ec->tx_coalesce_usecs == 0) &&
13671 (ec->tx_max_coalesced_frames == 0))
13674 /* Only copy relevant parameters, ignore all others. */
13675 tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13676 tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13677 tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13678 tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13679 tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13680 tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13681 tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13682 tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13683 tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13685 if (netif_running(dev)) {
13686 tg3_full_lock(tp, 0);
13687 __tg3_set_coalesce(tp, &tp->coal);
13688 tg3_full_unlock(tp);
13693 static const struct ethtool_ops tg3_ethtool_ops = {
13694 .get_settings = tg3_get_settings,
13695 .set_settings = tg3_set_settings,
13696 .get_drvinfo = tg3_get_drvinfo,
13697 .get_regs_len = tg3_get_regs_len,
13698 .get_regs = tg3_get_regs,
13699 .get_wol = tg3_get_wol,
13700 .set_wol = tg3_set_wol,
13701 .get_msglevel = tg3_get_msglevel,
13702 .set_msglevel = tg3_set_msglevel,
13703 .nway_reset = tg3_nway_reset,
13704 .get_link = ethtool_op_get_link,
13705 .get_eeprom_len = tg3_get_eeprom_len,
13706 .get_eeprom = tg3_get_eeprom,
13707 .set_eeprom = tg3_set_eeprom,
13708 .get_ringparam = tg3_get_ringparam,
13709 .set_ringparam = tg3_set_ringparam,
13710 .get_pauseparam = tg3_get_pauseparam,
13711 .set_pauseparam = tg3_set_pauseparam,
13712 .self_test = tg3_self_test,
13713 .get_strings = tg3_get_strings,
13714 .set_phys_id = tg3_set_phys_id,
13715 .get_ethtool_stats = tg3_get_ethtool_stats,
13716 .get_coalesce = tg3_get_coalesce,
13717 .set_coalesce = tg3_set_coalesce,
13718 .get_sset_count = tg3_get_sset_count,
13719 .get_rxnfc = tg3_get_rxnfc,
13720 .get_rxfh_indir_size = tg3_get_rxfh_indir_size,
13721 .get_rxfh_indir = tg3_get_rxfh_indir,
13722 .set_rxfh_indir = tg3_set_rxfh_indir,
13723 .get_channels = tg3_get_channels,
13724 .set_channels = tg3_set_channels,
13725 .get_ts_info = tg3_get_ts_info,
13728 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13729 struct rtnl_link_stats64 *stats)
13731 struct tg3 *tp = netdev_priv(dev);
13733 spin_lock_bh(&tp->lock);
13734 if (!tp->hw_stats) {
13735 spin_unlock_bh(&tp->lock);
13736 return &tp->net_stats_prev;
13739 tg3_get_nstats(tp, stats);
13740 spin_unlock_bh(&tp->lock);
13745 static void tg3_set_rx_mode(struct net_device *dev)
13747 struct tg3 *tp = netdev_priv(dev);
13749 if (!netif_running(dev))
13752 tg3_full_lock(tp, 0);
13753 __tg3_set_rx_mode(dev);
13754 tg3_full_unlock(tp);
13757 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13760 dev->mtu = new_mtu;
13762 if (new_mtu > ETH_DATA_LEN) {
13763 if (tg3_flag(tp, 5780_CLASS)) {
13764 netdev_update_features(dev);
13765 tg3_flag_clear(tp, TSO_CAPABLE);
13767 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13770 if (tg3_flag(tp, 5780_CLASS)) {
13771 tg3_flag_set(tp, TSO_CAPABLE);
13772 netdev_update_features(dev);
13774 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13778 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13780 struct tg3 *tp = netdev_priv(dev);
13782 bool reset_phy = false;
13784 if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13787 if (!netif_running(dev)) {
13788 /* We'll just catch it later when the
13791 tg3_set_mtu(dev, tp, new_mtu);
13797 tg3_netif_stop(tp);
13799 tg3_set_mtu(dev, tp, new_mtu);
13801 tg3_full_lock(tp, 1);
13803 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13805 /* Reset PHY, otherwise the read DMA engine will be in a mode that
13806 * breaks all requests to 256 bytes.
13808 if (tg3_asic_rev(tp) == ASIC_REV_57766)
13811 err = tg3_restart_hw(tp, reset_phy);
13814 tg3_netif_start(tp);
13816 tg3_full_unlock(tp);
13824 static const struct net_device_ops tg3_netdev_ops = {
13825 .ndo_open = tg3_open,
13826 .ndo_stop = tg3_close,
13827 .ndo_start_xmit = tg3_start_xmit,
13828 .ndo_get_stats64 = tg3_get_stats64,
13829 .ndo_validate_addr = eth_validate_addr,
13830 .ndo_set_rx_mode = tg3_set_rx_mode,
13831 .ndo_set_mac_address = tg3_set_mac_addr,
13832 .ndo_do_ioctl = tg3_ioctl,
13833 .ndo_tx_timeout = tg3_tx_timeout,
13834 .ndo_change_mtu = tg3_change_mtu,
13835 .ndo_fix_features = tg3_fix_features,
13836 .ndo_set_features = tg3_set_features,
13837 #ifdef CONFIG_NET_POLL_CONTROLLER
13838 .ndo_poll_controller = tg3_poll_controller,
13842 static void tg3_get_eeprom_size(struct tg3 *tp)
13844 u32 cursize, val, magic;
13846 tp->nvram_size = EEPROM_CHIP_SIZE;
13848 if (tg3_nvram_read(tp, 0, &magic) != 0)
13851 if ((magic != TG3_EEPROM_MAGIC) &&
13852 ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13853 ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13857 * Size the chip by reading offsets at increasing powers of two.
13858 * When we encounter our validation signature, we know the addressing
13859 * has wrapped around, and thus have our chip size.
13863 while (cursize < tp->nvram_size) {
13864 if (tg3_nvram_read(tp, cursize, &val) != 0)
13873 tp->nvram_size = cursize;
13876 static void tg3_get_nvram_size(struct tg3 *tp)
13880 if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13883 /* Selfboot format */
13884 if (val != TG3_EEPROM_MAGIC) {
13885 tg3_get_eeprom_size(tp);
13889 if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13891 /* This is confusing. We want to operate on the
13892 * 16-bit value at offset 0xf2. The tg3_nvram_read()
13893 * call will read from NVRAM and byteswap the data
13894 * according to the byteswapping settings for all
13895 * other register accesses. This ensures the data we
13896 * want will always reside in the lower 16-bits.
13897 * However, the data in NVRAM is in LE format, which
13898 * means the data from the NVRAM read will always be
13899 * opposite the endianness of the CPU. The 16-bit
13900 * byteswap then brings the data to CPU endianness.
13902 tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13906 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13909 static void tg3_get_nvram_info(struct tg3 *tp)
13913 nvcfg1 = tr32(NVRAM_CFG1);
13914 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13915 tg3_flag_set(tp, FLASH);
13917 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13918 tw32(NVRAM_CFG1, nvcfg1);
13921 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13922 tg3_flag(tp, 5780_CLASS)) {
13923 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13924 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13925 tp->nvram_jedecnum = JEDEC_ATMEL;
13926 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13927 tg3_flag_set(tp, NVRAM_BUFFERED);
13929 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13930 tp->nvram_jedecnum = JEDEC_ATMEL;
13931 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13933 case FLASH_VENDOR_ATMEL_EEPROM:
13934 tp->nvram_jedecnum = JEDEC_ATMEL;
13935 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13936 tg3_flag_set(tp, NVRAM_BUFFERED);
13938 case FLASH_VENDOR_ST:
13939 tp->nvram_jedecnum = JEDEC_ST;
13940 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13941 tg3_flag_set(tp, NVRAM_BUFFERED);
13943 case FLASH_VENDOR_SAIFUN:
13944 tp->nvram_jedecnum = JEDEC_SAIFUN;
13945 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13947 case FLASH_VENDOR_SST_SMALL:
13948 case FLASH_VENDOR_SST_LARGE:
13949 tp->nvram_jedecnum = JEDEC_SST;
13950 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13954 tp->nvram_jedecnum = JEDEC_ATMEL;
13955 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13956 tg3_flag_set(tp, NVRAM_BUFFERED);
13960 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13962 switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13963 case FLASH_5752PAGE_SIZE_256:
13964 tp->nvram_pagesize = 256;
13966 case FLASH_5752PAGE_SIZE_512:
13967 tp->nvram_pagesize = 512;
13969 case FLASH_5752PAGE_SIZE_1K:
13970 tp->nvram_pagesize = 1024;
13972 case FLASH_5752PAGE_SIZE_2K:
13973 tp->nvram_pagesize = 2048;
13975 case FLASH_5752PAGE_SIZE_4K:
13976 tp->nvram_pagesize = 4096;
13978 case FLASH_5752PAGE_SIZE_264:
13979 tp->nvram_pagesize = 264;
13981 case FLASH_5752PAGE_SIZE_528:
13982 tp->nvram_pagesize = 528;
13987 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13991 nvcfg1 = tr32(NVRAM_CFG1);
13993 /* NVRAM protection for TPM */
13994 if (nvcfg1 & (1 << 27))
13995 tg3_flag_set(tp, PROTECTED_NVRAM);
13997 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13998 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13999 case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14000 tp->nvram_jedecnum = JEDEC_ATMEL;
14001 tg3_flag_set(tp, NVRAM_BUFFERED);
14003 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14004 tp->nvram_jedecnum = JEDEC_ATMEL;
14005 tg3_flag_set(tp, NVRAM_BUFFERED);
14006 tg3_flag_set(tp, FLASH);
14008 case FLASH_5752VENDOR_ST_M45PE10:
14009 case FLASH_5752VENDOR_ST_M45PE20:
14010 case FLASH_5752VENDOR_ST_M45PE40:
14011 tp->nvram_jedecnum = JEDEC_ST;
14012 tg3_flag_set(tp, NVRAM_BUFFERED);
14013 tg3_flag_set(tp, FLASH);
14017 if (tg3_flag(tp, FLASH)) {
14018 tg3_nvram_get_pagesize(tp, nvcfg1);
14020 /* For eeprom, set pagesize to maximum eeprom size */
14021 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14023 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14024 tw32(NVRAM_CFG1, nvcfg1);
14028 static void tg3_get_5755_nvram_info(struct tg3 *tp)
14030 u32 nvcfg1, protect = 0;
14032 nvcfg1 = tr32(NVRAM_CFG1);
14034 /* NVRAM protection for TPM */
14035 if (nvcfg1 & (1 << 27)) {
14036 tg3_flag_set(tp, PROTECTED_NVRAM);
14040 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14042 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14043 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14044 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14045 case FLASH_5755VENDOR_ATMEL_FLASH_5:
14046 tp->nvram_jedecnum = JEDEC_ATMEL;
14047 tg3_flag_set(tp, NVRAM_BUFFERED);
14048 tg3_flag_set(tp, FLASH);
14049 tp->nvram_pagesize = 264;
14050 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
14051 nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
14052 tp->nvram_size = (protect ? 0x3e200 :
14053 TG3_NVRAM_SIZE_512KB);
14054 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
14055 tp->nvram_size = (protect ? 0x1f200 :
14056 TG3_NVRAM_SIZE_256KB);
14058 tp->nvram_size = (protect ? 0x1f200 :
14059 TG3_NVRAM_SIZE_128KB);
14061 case FLASH_5752VENDOR_ST_M45PE10:
14062 case FLASH_5752VENDOR_ST_M45PE20:
14063 case FLASH_5752VENDOR_ST_M45PE40:
14064 tp->nvram_jedecnum = JEDEC_ST;
14065 tg3_flag_set(tp, NVRAM_BUFFERED);
14066 tg3_flag_set(tp, FLASH);
14067 tp->nvram_pagesize = 256;
14068 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
14069 tp->nvram_size = (protect ?
14070 TG3_NVRAM_SIZE_64KB :
14071 TG3_NVRAM_SIZE_128KB);
14072 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
14073 tp->nvram_size = (protect ?
14074 TG3_NVRAM_SIZE_64KB :
14075 TG3_NVRAM_SIZE_256KB);
14077 tp->nvram_size = (protect ?
14078 TG3_NVRAM_SIZE_128KB :
14079 TG3_NVRAM_SIZE_512KB);
14084 static void tg3_get_5787_nvram_info(struct tg3 *tp)
14088 nvcfg1 = tr32(NVRAM_CFG1);
14090 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14091 case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
14092 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14093 case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
14094 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14095 tp->nvram_jedecnum = JEDEC_ATMEL;
14096 tg3_flag_set(tp, NVRAM_BUFFERED);
14097 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14099 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14100 tw32(NVRAM_CFG1, nvcfg1);
14102 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14103 case FLASH_5755VENDOR_ATMEL_FLASH_1:
14104 case FLASH_5755VENDOR_ATMEL_FLASH_2:
14105 case FLASH_5755VENDOR_ATMEL_FLASH_3:
14106 tp->nvram_jedecnum = JEDEC_ATMEL;
14107 tg3_flag_set(tp, NVRAM_BUFFERED);
14108 tg3_flag_set(tp, FLASH);
14109 tp->nvram_pagesize = 264;
14111 case FLASH_5752VENDOR_ST_M45PE10:
14112 case FLASH_5752VENDOR_ST_M45PE20:
14113 case FLASH_5752VENDOR_ST_M45PE40:
14114 tp->nvram_jedecnum = JEDEC_ST;
14115 tg3_flag_set(tp, NVRAM_BUFFERED);
14116 tg3_flag_set(tp, FLASH);
14117 tp->nvram_pagesize = 256;
14122 static void tg3_get_5761_nvram_info(struct tg3 *tp)
14124 u32 nvcfg1, protect = 0;
14126 nvcfg1 = tr32(NVRAM_CFG1);
14128 /* NVRAM protection for TPM */
14129 if (nvcfg1 & (1 << 27)) {
14130 tg3_flag_set(tp, PROTECTED_NVRAM);
14134 nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
14136 case FLASH_5761VENDOR_ATMEL_ADB021D:
14137 case FLASH_5761VENDOR_ATMEL_ADB041D:
14138 case FLASH_5761VENDOR_ATMEL_ADB081D:
14139 case FLASH_5761VENDOR_ATMEL_ADB161D:
14140 case FLASH_5761VENDOR_ATMEL_MDB021D:
14141 case FLASH_5761VENDOR_ATMEL_MDB041D:
14142 case FLASH_5761VENDOR_ATMEL_MDB081D:
14143 case FLASH_5761VENDOR_ATMEL_MDB161D:
14144 tp->nvram_jedecnum = JEDEC_ATMEL;
14145 tg3_flag_set(tp, NVRAM_BUFFERED);
14146 tg3_flag_set(tp, FLASH);
14147 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14148 tp->nvram_pagesize = 256;
14150 case FLASH_5761VENDOR_ST_A_M45PE20:
14151 case FLASH_5761VENDOR_ST_A_M45PE40:
14152 case FLASH_5761VENDOR_ST_A_M45PE80:
14153 case FLASH_5761VENDOR_ST_A_M45PE16:
14154 case FLASH_5761VENDOR_ST_M_M45PE20:
14155 case FLASH_5761VENDOR_ST_M_M45PE40:
14156 case FLASH_5761VENDOR_ST_M_M45PE80:
14157 case FLASH_5761VENDOR_ST_M_M45PE16:
14158 tp->nvram_jedecnum = JEDEC_ST;
14159 tg3_flag_set(tp, NVRAM_BUFFERED);
14160 tg3_flag_set(tp, FLASH);
14161 tp->nvram_pagesize = 256;
14166 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
14169 case FLASH_5761VENDOR_ATMEL_ADB161D:
14170 case FLASH_5761VENDOR_ATMEL_MDB161D:
14171 case FLASH_5761VENDOR_ST_A_M45PE16:
14172 case FLASH_5761VENDOR_ST_M_M45PE16:
14173 tp->nvram_size = TG3_NVRAM_SIZE_2MB;
14175 case FLASH_5761VENDOR_ATMEL_ADB081D:
14176 case FLASH_5761VENDOR_ATMEL_MDB081D:
14177 case FLASH_5761VENDOR_ST_A_M45PE80:
14178 case FLASH_5761VENDOR_ST_M_M45PE80:
14179 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14181 case FLASH_5761VENDOR_ATMEL_ADB041D:
14182 case FLASH_5761VENDOR_ATMEL_MDB041D:
14183 case FLASH_5761VENDOR_ST_A_M45PE40:
14184 case FLASH_5761VENDOR_ST_M_M45PE40:
14185 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14187 case FLASH_5761VENDOR_ATMEL_ADB021D:
14188 case FLASH_5761VENDOR_ATMEL_MDB021D:
14189 case FLASH_5761VENDOR_ST_A_M45PE20:
14190 case FLASH_5761VENDOR_ST_M_M45PE20:
14191 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14197 static void tg3_get_5906_nvram_info(struct tg3 *tp)
14199 tp->nvram_jedecnum = JEDEC_ATMEL;
14200 tg3_flag_set(tp, NVRAM_BUFFERED);
14201 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14204 static void tg3_get_57780_nvram_info(struct tg3 *tp)
14208 nvcfg1 = tr32(NVRAM_CFG1);
14210 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14211 case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
14212 case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
14213 tp->nvram_jedecnum = JEDEC_ATMEL;
14214 tg3_flag_set(tp, NVRAM_BUFFERED);
14215 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14217 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14218 tw32(NVRAM_CFG1, nvcfg1);
14220 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14221 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14222 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14223 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14224 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14225 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14226 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14227 tp->nvram_jedecnum = JEDEC_ATMEL;
14228 tg3_flag_set(tp, NVRAM_BUFFERED);
14229 tg3_flag_set(tp, FLASH);
14231 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14232 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14233 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
14234 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
14235 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14237 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
14238 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
14239 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14241 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
14242 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
14243 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14247 case FLASH_5752VENDOR_ST_M45PE10:
14248 case FLASH_5752VENDOR_ST_M45PE20:
14249 case FLASH_5752VENDOR_ST_M45PE40:
14250 tp->nvram_jedecnum = JEDEC_ST;
14251 tg3_flag_set(tp, NVRAM_BUFFERED);
14252 tg3_flag_set(tp, FLASH);
14254 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14255 case FLASH_5752VENDOR_ST_M45PE10:
14256 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14258 case FLASH_5752VENDOR_ST_M45PE20:
14259 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14261 case FLASH_5752VENDOR_ST_M45PE40:
14262 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14267 tg3_flag_set(tp, NO_NVRAM);
14271 tg3_nvram_get_pagesize(tp, nvcfg1);
14272 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14273 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14277 static void tg3_get_5717_nvram_info(struct tg3 *tp)
14281 nvcfg1 = tr32(NVRAM_CFG1);
14283 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14284 case FLASH_5717VENDOR_ATMEL_EEPROM:
14285 case FLASH_5717VENDOR_MICRO_EEPROM:
14286 tp->nvram_jedecnum = JEDEC_ATMEL;
14287 tg3_flag_set(tp, NVRAM_BUFFERED);
14288 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14290 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14291 tw32(NVRAM_CFG1, nvcfg1);
14293 case FLASH_5717VENDOR_ATMEL_MDB011D:
14294 case FLASH_5717VENDOR_ATMEL_ADB011B:
14295 case FLASH_5717VENDOR_ATMEL_ADB011D:
14296 case FLASH_5717VENDOR_ATMEL_MDB021D:
14297 case FLASH_5717VENDOR_ATMEL_ADB021B:
14298 case FLASH_5717VENDOR_ATMEL_ADB021D:
14299 case FLASH_5717VENDOR_ATMEL_45USPT:
14300 tp->nvram_jedecnum = JEDEC_ATMEL;
14301 tg3_flag_set(tp, NVRAM_BUFFERED);
14302 tg3_flag_set(tp, FLASH);
14304 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14305 case FLASH_5717VENDOR_ATMEL_MDB021D:
14306 /* Detect size with tg3_nvram_get_size() */
14308 case FLASH_5717VENDOR_ATMEL_ADB021B:
14309 case FLASH_5717VENDOR_ATMEL_ADB021D:
14310 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14313 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14317 case FLASH_5717VENDOR_ST_M_M25PE10:
14318 case FLASH_5717VENDOR_ST_A_M25PE10:
14319 case FLASH_5717VENDOR_ST_M_M45PE10:
14320 case FLASH_5717VENDOR_ST_A_M45PE10:
14321 case FLASH_5717VENDOR_ST_M_M25PE20:
14322 case FLASH_5717VENDOR_ST_A_M25PE20:
14323 case FLASH_5717VENDOR_ST_M_M45PE20:
14324 case FLASH_5717VENDOR_ST_A_M45PE20:
14325 case FLASH_5717VENDOR_ST_25USPT:
14326 case FLASH_5717VENDOR_ST_45USPT:
14327 tp->nvram_jedecnum = JEDEC_ST;
14328 tg3_flag_set(tp, NVRAM_BUFFERED);
14329 tg3_flag_set(tp, FLASH);
14331 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
14332 case FLASH_5717VENDOR_ST_M_M25PE20:
14333 case FLASH_5717VENDOR_ST_M_M45PE20:
14334 /* Detect size with tg3_nvram_get_size() */
14336 case FLASH_5717VENDOR_ST_A_M25PE20:
14337 case FLASH_5717VENDOR_ST_A_M45PE20:
14338 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14341 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14346 tg3_flag_set(tp, NO_NVRAM);
14350 tg3_nvram_get_pagesize(tp, nvcfg1);
14351 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14352 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14355 static void tg3_get_5720_nvram_info(struct tg3 *tp)
14357 u32 nvcfg1, nvmpinstrp;
14359 nvcfg1 = tr32(NVRAM_CFG1);
14360 nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
14362 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14363 if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
14364 tg3_flag_set(tp, NO_NVRAM);
14368 switch (nvmpinstrp) {
14369 case FLASH_5762_EEPROM_HD:
14370 nvmpinstrp = FLASH_5720_EEPROM_HD;
14372 case FLASH_5762_EEPROM_LD:
14373 nvmpinstrp = FLASH_5720_EEPROM_LD;
14375 case FLASH_5720VENDOR_M_ST_M45PE20:
14376 /* This pinstrap supports multiple sizes, so force it
14377 * to read the actual size from location 0xf0.
14379 nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
14384 switch (nvmpinstrp) {
14385 case FLASH_5720_EEPROM_HD:
14386 case FLASH_5720_EEPROM_LD:
14387 tp->nvram_jedecnum = JEDEC_ATMEL;
14388 tg3_flag_set(tp, NVRAM_BUFFERED);
14390 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14391 tw32(NVRAM_CFG1, nvcfg1);
14392 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
14393 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14395 tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
14397 case FLASH_5720VENDOR_M_ATMEL_DB011D:
14398 case FLASH_5720VENDOR_A_ATMEL_DB011B:
14399 case FLASH_5720VENDOR_A_ATMEL_DB011D:
14400 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14401 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14402 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14403 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14404 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14405 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14406 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14407 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14408 case FLASH_5720VENDOR_ATMEL_45USPT:
14409 tp->nvram_jedecnum = JEDEC_ATMEL;
14410 tg3_flag_set(tp, NVRAM_BUFFERED);
14411 tg3_flag_set(tp, FLASH);
14413 switch (nvmpinstrp) {
14414 case FLASH_5720VENDOR_M_ATMEL_DB021D:
14415 case FLASH_5720VENDOR_A_ATMEL_DB021B:
14416 case FLASH_5720VENDOR_A_ATMEL_DB021D:
14417 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14419 case FLASH_5720VENDOR_M_ATMEL_DB041D:
14420 case FLASH_5720VENDOR_A_ATMEL_DB041B:
14421 case FLASH_5720VENDOR_A_ATMEL_DB041D:
14422 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14424 case FLASH_5720VENDOR_M_ATMEL_DB081D:
14425 case FLASH_5720VENDOR_A_ATMEL_DB081D:
14426 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14429 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14430 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14434 case FLASH_5720VENDOR_M_ST_M25PE10:
14435 case FLASH_5720VENDOR_M_ST_M45PE10:
14436 case FLASH_5720VENDOR_A_ST_M25PE10:
14437 case FLASH_5720VENDOR_A_ST_M45PE10:
14438 case FLASH_5720VENDOR_M_ST_M25PE20:
14439 case FLASH_5720VENDOR_M_ST_M45PE20:
14440 case FLASH_5720VENDOR_A_ST_M25PE20:
14441 case FLASH_5720VENDOR_A_ST_M45PE20:
14442 case FLASH_5720VENDOR_M_ST_M25PE40:
14443 case FLASH_5720VENDOR_M_ST_M45PE40:
14444 case FLASH_5720VENDOR_A_ST_M25PE40:
14445 case FLASH_5720VENDOR_A_ST_M45PE40:
14446 case FLASH_5720VENDOR_M_ST_M25PE80:
14447 case FLASH_5720VENDOR_M_ST_M45PE80:
14448 case FLASH_5720VENDOR_A_ST_M25PE80:
14449 case FLASH_5720VENDOR_A_ST_M45PE80:
14450 case FLASH_5720VENDOR_ST_25USPT:
14451 case FLASH_5720VENDOR_ST_45USPT:
14452 tp->nvram_jedecnum = JEDEC_ST;
14453 tg3_flag_set(tp, NVRAM_BUFFERED);
14454 tg3_flag_set(tp, FLASH);
14456 switch (nvmpinstrp) {
14457 case FLASH_5720VENDOR_M_ST_M25PE20:
14458 case FLASH_5720VENDOR_M_ST_M45PE20:
14459 case FLASH_5720VENDOR_A_ST_M25PE20:
14460 case FLASH_5720VENDOR_A_ST_M45PE20:
14461 tp->nvram_size = TG3_NVRAM_SIZE_256KB;
14463 case FLASH_5720VENDOR_M_ST_M25PE40:
14464 case FLASH_5720VENDOR_M_ST_M45PE40:
14465 case FLASH_5720VENDOR_A_ST_M25PE40:
14466 case FLASH_5720VENDOR_A_ST_M45PE40:
14467 tp->nvram_size = TG3_NVRAM_SIZE_512KB;
14469 case FLASH_5720VENDOR_M_ST_M25PE80:
14470 case FLASH_5720VENDOR_M_ST_M45PE80:
14471 case FLASH_5720VENDOR_A_ST_M25PE80:
14472 case FLASH_5720VENDOR_A_ST_M45PE80:
14473 tp->nvram_size = TG3_NVRAM_SIZE_1MB;
14476 if (tg3_asic_rev(tp) != ASIC_REV_5762)
14477 tp->nvram_size = TG3_NVRAM_SIZE_128KB;
14482 tg3_flag_set(tp, NO_NVRAM);
14486 tg3_nvram_get_pagesize(tp, nvcfg1);
14487 if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
14488 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
14490 if (tg3_asic_rev(tp) == ASIC_REV_5762) {
14493 if (tg3_nvram_read(tp, 0, &val))
14496 if (val != TG3_EEPROM_MAGIC &&
14497 (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
14498 tg3_flag_set(tp, NO_NVRAM);
14502 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
14503 static void tg3_nvram_init(struct tg3 *tp)
14505 if (tg3_flag(tp, IS_SSB_CORE)) {
14506 /* No NVRAM and EEPROM on the SSB Broadcom GigE core. */
14507 tg3_flag_clear(tp, NVRAM);
14508 tg3_flag_clear(tp, NVRAM_BUFFERED);
14509 tg3_flag_set(tp, NO_NVRAM);
14513 tw32_f(GRC_EEPROM_ADDR,
14514 (EEPROM_ADDR_FSM_RESET |
14515 (EEPROM_DEFAULT_CLOCK_PERIOD <<
14516 EEPROM_ADDR_CLKPERD_SHIFT)));
14520 /* Enable seeprom accesses. */
14521 tw32_f(GRC_LOCAL_CTRL,
14522 tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
14525 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14526 tg3_asic_rev(tp) != ASIC_REV_5701) {
14527 tg3_flag_set(tp, NVRAM);
14529 if (tg3_nvram_lock(tp)) {
14530 netdev_warn(tp->dev,
14531 "Cannot get nvram lock, %s failed\n",
14535 tg3_enable_nvram_access(tp);
14537 tp->nvram_size = 0;
14539 if (tg3_asic_rev(tp) == ASIC_REV_5752)
14540 tg3_get_5752_nvram_info(tp);
14541 else if (tg3_asic_rev(tp) == ASIC_REV_5755)
14542 tg3_get_5755_nvram_info(tp);
14543 else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
14544 tg3_asic_rev(tp) == ASIC_REV_5784 ||
14545 tg3_asic_rev(tp) == ASIC_REV_5785)
14546 tg3_get_5787_nvram_info(tp);
14547 else if (tg3_asic_rev(tp) == ASIC_REV_5761)
14548 tg3_get_5761_nvram_info(tp);
14549 else if (tg3_asic_rev(tp) == ASIC_REV_5906)
14550 tg3_get_5906_nvram_info(tp);
14551 else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
14552 tg3_flag(tp, 57765_CLASS))
14553 tg3_get_57780_nvram_info(tp);
14554 else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
14555 tg3_asic_rev(tp) == ASIC_REV_5719)
14556 tg3_get_5717_nvram_info(tp);
14557 else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
14558 tg3_asic_rev(tp) == ASIC_REV_5762)
14559 tg3_get_5720_nvram_info(tp);
14561 tg3_get_nvram_info(tp);
14563 if (tp->nvram_size == 0)
14564 tg3_get_nvram_size(tp);
14566 tg3_disable_nvram_access(tp);
14567 tg3_nvram_unlock(tp);
14570 tg3_flag_clear(tp, NVRAM);
14571 tg3_flag_clear(tp, NVRAM_BUFFERED);
14573 tg3_get_eeprom_size(tp);
14577 struct subsys_tbl_ent {
14578 u16 subsys_vendor, subsys_devid;
14582 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
14583 /* Broadcom boards. */
14584 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14585 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
14586 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14587 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
14588 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14589 TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
14590 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14591 TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
14592 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14593 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
14594 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14595 TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
14596 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14597 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
14598 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14599 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
14600 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14601 TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
14602 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14603 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
14604 { TG3PCI_SUBVENDOR_ID_BROADCOM,
14605 TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
14608 { TG3PCI_SUBVENDOR_ID_3COM,
14609 TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
14610 { TG3PCI_SUBVENDOR_ID_3COM,
14611 TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
14612 { TG3PCI_SUBVENDOR_ID_3COM,
14613 TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
14614 { TG3PCI_SUBVENDOR_ID_3COM,
14615 TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
14616 { TG3PCI_SUBVENDOR_ID_3COM,
14617 TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
14620 { TG3PCI_SUBVENDOR_ID_DELL,
14621 TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
14622 { TG3PCI_SUBVENDOR_ID_DELL,
14623 TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
14624 { TG3PCI_SUBVENDOR_ID_DELL,
14625 TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
14626 { TG3PCI_SUBVENDOR_ID_DELL,
14627 TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
14629 /* Compaq boards. */
14630 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14631 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
14632 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14633 TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
14634 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14635 TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
14636 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14637 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
14638 { TG3PCI_SUBVENDOR_ID_COMPAQ,
14639 TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
14642 { TG3PCI_SUBVENDOR_ID_IBM,
14643 TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
14646 static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
14650 for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
14651 if ((subsys_id_to_phy_id[i].subsys_vendor ==
14652 tp->pdev->subsystem_vendor) &&
14653 (subsys_id_to_phy_id[i].subsys_devid ==
14654 tp->pdev->subsystem_device))
14655 return &subsys_id_to_phy_id[i];
14660 static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
14664 tp->phy_id = TG3_PHY_ID_INVALID;
14665 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14667 /* Assume an onboard device and WOL capable by default. */
14668 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14669 tg3_flag_set(tp, WOL_CAP);
14671 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
14672 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
14673 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14674 tg3_flag_set(tp, IS_NIC);
14676 val = tr32(VCPU_CFGSHDW);
14677 if (val & VCPU_CFGSHDW_ASPM_DBNC)
14678 tg3_flag_set(tp, ASPM_WORKAROUND);
14679 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
14680 (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
14681 tg3_flag_set(tp, WOL_ENABLE);
14682 device_set_wakeup_enable(&tp->pdev->dev, true);
14687 tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
14688 if (val == NIC_SRAM_DATA_SIG_MAGIC) {
14689 u32 nic_cfg, led_cfg;
14690 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
14691 int eeprom_phy_serdes = 0;
14693 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
14694 tp->nic_sram_data_cfg = nic_cfg;
14696 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
14697 ver >>= NIC_SRAM_DATA_VER_SHIFT;
14698 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
14699 tg3_asic_rev(tp) != ASIC_REV_5701 &&
14700 tg3_asic_rev(tp) != ASIC_REV_5703 &&
14701 (ver > 0) && (ver < 0x100))
14702 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
14704 if (tg3_asic_rev(tp) == ASIC_REV_5785)
14705 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
14707 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
14708 NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
14709 eeprom_phy_serdes = 1;
14711 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
14712 if (nic_phy_id != 0) {
14713 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
14714 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
14716 eeprom_phy_id = (id1 >> 16) << 10;
14717 eeprom_phy_id |= (id2 & 0xfc00) << 16;
14718 eeprom_phy_id |= (id2 & 0x03ff) << 0;
14722 tp->phy_id = eeprom_phy_id;
14723 if (eeprom_phy_serdes) {
14724 if (!tg3_flag(tp, 5705_PLUS))
14725 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
14727 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
14730 if (tg3_flag(tp, 5750_PLUS))
14731 led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
14732 SHASTA_EXT_LED_MODE_MASK);
14734 led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
14738 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
14739 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14742 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
14743 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14746 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
14747 tp->led_ctrl = LED_CTRL_MODE_MAC;
14749 /* Default to PHY_1_MODE if 0 (MAC_MODE) is
14750 * read on some older 5700/5701 bootcode.
14752 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
14753 tg3_asic_rev(tp) == ASIC_REV_5701)
14754 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14758 case SHASTA_EXT_LED_SHARED:
14759 tp->led_ctrl = LED_CTRL_MODE_SHARED;
14760 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
14761 tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
14762 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14763 LED_CTRL_MODE_PHY_2);
14766 case SHASTA_EXT_LED_MAC:
14767 tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
14770 case SHASTA_EXT_LED_COMBO:
14771 tp->led_ctrl = LED_CTRL_MODE_COMBO;
14772 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
14773 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
14774 LED_CTRL_MODE_PHY_2);
14779 if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
14780 tg3_asic_rev(tp) == ASIC_REV_5701) &&
14781 tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
14782 tp->led_ctrl = LED_CTRL_MODE_PHY_2;
14784 if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
14785 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
14787 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
14788 tg3_flag_set(tp, EEPROM_WRITE_PROT);
14789 if ((tp->pdev->subsystem_vendor ==
14790 PCI_VENDOR_ID_ARIMA) &&
14791 (tp->pdev->subsystem_device == 0x205a ||
14792 tp->pdev->subsystem_device == 0x2063))
14793 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14795 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
14796 tg3_flag_set(tp, IS_NIC);
14799 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
14800 tg3_flag_set(tp, ENABLE_ASF);
14801 if (tg3_flag(tp, 5750_PLUS))
14802 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
14805 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
14806 tg3_flag(tp, 5750_PLUS))
14807 tg3_flag_set(tp, ENABLE_APE);
14809 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
14810 !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
14811 tg3_flag_clear(tp, WOL_CAP);
14813 if (tg3_flag(tp, WOL_CAP) &&
14814 (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
14815 tg3_flag_set(tp, WOL_ENABLE);
14816 device_set_wakeup_enable(&tp->pdev->dev, true);
14819 if (cfg2 & (1 << 17))
14820 tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
14822 /* serdes signal pre-emphasis in register 0x590 set by */
14823 /* bootcode if bit 18 is set */
14824 if (cfg2 & (1 << 18))
14825 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
14827 if ((tg3_flag(tp, 57765_PLUS) ||
14828 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
14829 tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
14830 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
14831 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
14833 if (tg3_flag(tp, PCI_EXPRESS)) {
14836 tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
14837 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
14838 !tg3_flag(tp, 57765_PLUS) &&
14839 (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
14840 tg3_flag_set(tp, ASPM_WORKAROUND);
14841 if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
14842 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
14843 if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
14844 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
14847 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
14848 tg3_flag_set(tp, RGMII_INBAND_DISABLE);
14849 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
14850 tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
14851 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
14852 tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
14855 if (tg3_flag(tp, WOL_CAP))
14856 device_set_wakeup_enable(&tp->pdev->dev,
14857 tg3_flag(tp, WOL_ENABLE));
14859 device_set_wakeup_capable(&tp->pdev->dev, false);
14862 static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
14865 u32 val2, off = offset * 8;
14867 err = tg3_nvram_lock(tp);
14871 tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
14872 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
14873 APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
14874 tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
14877 for (i = 0; i < 100; i++) {
14878 val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
14879 if (val2 & APE_OTP_STATUS_CMD_DONE) {
14880 *val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
14886 tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);
14888 tg3_nvram_unlock(tp);
14889 if (val2 & APE_OTP_STATUS_CMD_DONE)
14895 static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
14900 tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
14901 tw32(OTP_CTRL, cmd);
14903 /* Wait for up to 1 ms for command to execute. */
14904 for (i = 0; i < 100; i++) {
14905 val = tr32(OTP_STATUS);
14906 if (val & OTP_STATUS_CMD_DONE)
14911 return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
14914 /* Read the gphy configuration from the OTP region of the chip. The gphy
14915 * configuration is a 32-bit value that straddles the alignment boundary.
14916 * We do two 32-bit reads and then shift and merge the results.
14918 static u32 tg3_read_otp_phycfg(struct tg3 *tp)
14920 u32 bhalf_otp, thalf_otp;
14922 tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
14924 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
14927 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
14929 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14932 thalf_otp = tr32(OTP_READ_DATA);
14934 tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
14936 if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
14939 bhalf_otp = tr32(OTP_READ_DATA);
14941 return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
14944 static void tg3_phy_init_link_config(struct tg3 *tp)
14946 u32 adv = ADVERTISED_Autoneg;
14948 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
14949 adv |= ADVERTISED_1000baseT_Half |
14950 ADVERTISED_1000baseT_Full;
14952 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14953 adv |= ADVERTISED_100baseT_Half |
14954 ADVERTISED_100baseT_Full |
14955 ADVERTISED_10baseT_Half |
14956 ADVERTISED_10baseT_Full |
14959 adv |= ADVERTISED_FIBRE;
14961 tp->link_config.advertising = adv;
14962 tp->link_config.speed = SPEED_UNKNOWN;
14963 tp->link_config.duplex = DUPLEX_UNKNOWN;
14964 tp->link_config.autoneg = AUTONEG_ENABLE;
14965 tp->link_config.active_speed = SPEED_UNKNOWN;
14966 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
14971 static int tg3_phy_probe(struct tg3 *tp)
14973 u32 hw_phy_id_1, hw_phy_id_2;
14974 u32 hw_phy_id, hw_phy_id_masked;
14977 /* flow control autonegotiation is default behavior */
14978 tg3_flag_set(tp, PAUSE_AUTONEG);
14979 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14981 if (tg3_flag(tp, ENABLE_APE)) {
14982 switch (tp->pci_fn) {
14984 tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
14987 tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
14990 tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
14993 tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
14998 if (!tg3_flag(tp, ENABLE_ASF) &&
14999 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15000 !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
15001 tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
15002 TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
15004 if (tg3_flag(tp, USE_PHYLIB))
15005 return tg3_phy_init(tp);
15007 /* Reading the PHY ID register can conflict with ASF
15008 * firmware access to the PHY hardware.
15011 if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
15012 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
15014 /* Now read the physical PHY_ID from the chip and verify
15015 * that it is sane. If it doesn't look good, we fall back
15016 * to either the hard-coded table based PHY_ID and failing
15017 * that the value found in the eeprom area.
15019 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
15020 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
15022 hw_phy_id = (hw_phy_id_1 & 0xffff) << 10;
15023 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
15024 hw_phy_id |= (hw_phy_id_2 & 0x03ff) << 0;
15026 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
15029 if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
15030 tp->phy_id = hw_phy_id;
15031 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
15032 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15034 tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
15036 if (tp->phy_id != TG3_PHY_ID_INVALID) {
15037 /* Do nothing, phy ID already set up in
15038 * tg3_get_eeprom_hw_cfg().
15041 struct subsys_tbl_ent *p;
15043 /* No eeprom signature? Try the hardcoded
15044 * subsys device table.
15046 p = tg3_lookup_by_subsys(tp);
15048 tp->phy_id = p->phy_id;
15049 } else if (!tg3_flag(tp, IS_SSB_CORE)) {
15050 /* For now we saw the IDs 0xbc050cd0,
15051 * 0xbc050f80 and 0xbc050c30 on devices
15052 * connected to an BCM4785 and there are
15053 * probably more. Just assume that the phy is
15054 * supported when it is connected to a SSB core
15061 tp->phy_id == TG3_PHY_ID_BCM8002)
15062 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
15066 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15067 (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15068 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15069 tg3_asic_rev(tp) == ASIC_REV_57766 ||
15070 tg3_asic_rev(tp) == ASIC_REV_5762 ||
15071 (tg3_asic_rev(tp) == ASIC_REV_5717 &&
15072 tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
15073 (tg3_asic_rev(tp) == ASIC_REV_57765 &&
15074 tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
15075 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
15077 tg3_phy_init_link_config(tp);
15079 if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
15080 !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
15081 !tg3_flag(tp, ENABLE_APE) &&
15082 !tg3_flag(tp, ENABLE_ASF)) {
15085 tg3_readphy(tp, MII_BMSR, &bmsr);
15086 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
15087 (bmsr & BMSR_LSTATUS))
15088 goto skip_phy_reset;
15090 err = tg3_phy_reset(tp);
15094 tg3_phy_set_wirespeed(tp);
15096 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
15097 tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
15098 tp->link_config.flowctrl);
15100 tg3_writephy(tp, MII_BMCR,
15101 BMCR_ANENABLE | BMCR_ANRESTART);
15106 if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
15107 err = tg3_init_5401phy_dsp(tp);
15111 err = tg3_init_5401phy_dsp(tp);
15117 static void tg3_read_vpd(struct tg3 *tp)
15120 unsigned int block_end, rosize, len;
15124 vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
15128 i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
15130 goto out_not_found;
15132 rosize = pci_vpd_lrdt_size(&vpd_data[i]);
15133 block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
15134 i += PCI_VPD_LRDT_TAG_SIZE;
15136 if (block_end > vpdlen)
15137 goto out_not_found;
15139 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15140 PCI_VPD_RO_KEYWORD_MFR_ID);
15142 len = pci_vpd_info_field_size(&vpd_data[j]);
15144 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15145 if (j + len > block_end || len != 4 ||
15146 memcmp(&vpd_data[j], "1028", 4))
15149 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15150 PCI_VPD_RO_KEYWORD_VENDOR0);
15154 len = pci_vpd_info_field_size(&vpd_data[j]);
15156 j += PCI_VPD_INFO_FLD_HDR_SIZE;
15157 if (j + len > block_end)
15160 if (len >= sizeof(tp->fw_ver))
15161 len = sizeof(tp->fw_ver) - 1;
15162 memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
15163 snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
15168 i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
15169 PCI_VPD_RO_KEYWORD_PARTNO);
15171 goto out_not_found;
15173 len = pci_vpd_info_field_size(&vpd_data[i]);
15175 i += PCI_VPD_INFO_FLD_HDR_SIZE;
15176 if (len > TG3_BPN_SIZE ||
15177 (len + i) > vpdlen)
15178 goto out_not_found;
15180 memcpy(tp->board_part_number, &vpd_data[i], len);
15184 if (tp->board_part_number[0])
15188 if (tg3_asic_rev(tp) == ASIC_REV_5717) {
15189 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15190 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
15191 strcpy(tp->board_part_number, "BCM5717");
15192 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
15193 strcpy(tp->board_part_number, "BCM5718");
15196 } else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
15197 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
15198 strcpy(tp->board_part_number, "BCM57780");
15199 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
15200 strcpy(tp->board_part_number, "BCM57760");
15201 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
15202 strcpy(tp->board_part_number, "BCM57790");
15203 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
15204 strcpy(tp->board_part_number, "BCM57788");
15207 } else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
15208 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
15209 strcpy(tp->board_part_number, "BCM57761");
15210 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
15211 strcpy(tp->board_part_number, "BCM57765");
15212 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
15213 strcpy(tp->board_part_number, "BCM57781");
15214 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
15215 strcpy(tp->board_part_number, "BCM57785");
15216 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
15217 strcpy(tp->board_part_number, "BCM57791");
15218 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
15219 strcpy(tp->board_part_number, "BCM57795");
15222 } else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
15223 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
15224 strcpy(tp->board_part_number, "BCM57762");
15225 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
15226 strcpy(tp->board_part_number, "BCM57766");
15227 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
15228 strcpy(tp->board_part_number, "BCM57782");
15229 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15230 strcpy(tp->board_part_number, "BCM57786");
15233 } else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15234 strcpy(tp->board_part_number, "BCM95906");
15237 strcpy(tp->board_part_number, "none");
15241 static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
15245 if (tg3_nvram_read(tp, offset, &val) ||
15246 (val & 0xfc000000) != 0x0c000000 ||
15247 tg3_nvram_read(tp, offset + 4, &val) ||
15254 static void tg3_read_bc_ver(struct tg3 *tp)
15256 u32 val, offset, start, ver_offset;
15258 bool newver = false;
15260 if (tg3_nvram_read(tp, 0xc, &offset) ||
15261 tg3_nvram_read(tp, 0x4, &start))
15264 offset = tg3_nvram_logical_addr(tp, offset);
15266 if (tg3_nvram_read(tp, offset, &val))
15269 if ((val & 0xfc000000) == 0x0c000000) {
15270 if (tg3_nvram_read(tp, offset + 4, &val))
15277 dst_off = strlen(tp->fw_ver);
15280 if (TG3_VER_SIZE - dst_off < 16 ||
15281 tg3_nvram_read(tp, offset + 8, &ver_offset))
15284 offset = offset + ver_offset - start;
15285 for (i = 0; i < 16; i += 4) {
15287 if (tg3_nvram_read_be32(tp, offset + i, &v))
15290 memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
15295 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
15298 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
15299 TG3_NVM_BCVER_MAJSFT;
15300 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
15301 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
15302 "v%d.%02d", major, minor);
15306 static void tg3_read_hwsb_ver(struct tg3 *tp)
15308 u32 val, major, minor;
15310 /* Use native endian representation */
15311 if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
15314 major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
15315 TG3_NVM_HWSB_CFG1_MAJSFT;
15316 minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
15317 TG3_NVM_HWSB_CFG1_MINSFT;
15319 snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
15322 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15324 u32 offset, major, minor, build;
15326 strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15328 if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15331 switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15332 case TG3_EEPROM_SB_REVISION_0:
15333 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15335 case TG3_EEPROM_SB_REVISION_2:
15336 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15338 case TG3_EEPROM_SB_REVISION_3:
15339 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15341 case TG3_EEPROM_SB_REVISION_4:
15342 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15344 case TG3_EEPROM_SB_REVISION_5:
15345 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15347 case TG3_EEPROM_SB_REVISION_6:
15348 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15354 if (tg3_nvram_read(tp, offset, &val))
15357 build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15358 TG3_EEPROM_SB_EDH_BLD_SHFT;
15359 major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15360 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15361 minor = val & TG3_EEPROM_SB_EDH_MIN_MASK;
15363 if (minor > 99 || build > 26)
15366 offset = strlen(tp->fw_ver);
15367 snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15368 " v%d.%02d", major, minor);
15371 offset = strlen(tp->fw_ver);
15372 if (offset < TG3_VER_SIZE - 1)
15373 tp->fw_ver[offset] = 'a' + build - 1;
15377 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15379 u32 val, offset, start;
15382 for (offset = TG3_NVM_DIR_START;
15383 offset < TG3_NVM_DIR_END;
15384 offset += TG3_NVM_DIRENT_SIZE) {
15385 if (tg3_nvram_read(tp, offset, &val))
15388 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15392 if (offset == TG3_NVM_DIR_END)
15395 if (!tg3_flag(tp, 5705_PLUS))
15396 start = 0x08000000;
15397 else if (tg3_nvram_read(tp, offset - 4, &start))
15400 if (tg3_nvram_read(tp, offset + 4, &offset) ||
15401 !tg3_fw_img_is_valid(tp, offset) ||
15402 tg3_nvram_read(tp, offset + 8, &val))
15405 offset += val - start;
15407 vlen = strlen(tp->fw_ver);
15409 tp->fw_ver[vlen++] = ',';
15410 tp->fw_ver[vlen++] = ' ';
15412 for (i = 0; i < 4; i++) {
15414 if (tg3_nvram_read_be32(tp, offset, &v))
15417 offset += sizeof(v);
15419 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15420 memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15424 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15429 static void tg3_probe_ncsi(struct tg3 *tp)
15433 apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15434 if (apedata != APE_SEG_SIG_MAGIC)
15437 apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15438 if (!(apedata & APE_FW_STATUS_READY))
15441 if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15442 tg3_flag_set(tp, APE_HAS_NCSI);
15445 static void tg3_read_dash_ver(struct tg3 *tp)
15451 apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15453 if (tg3_flag(tp, APE_HAS_NCSI))
15455 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15460 vlen = strlen(tp->fw_ver);
15462 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15464 (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15465 (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15466 (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15467 (apedata & APE_FW_VERSION_BLDMSK));
15470 static void tg3_read_otp_ver(struct tg3 *tp)
15474 if (tg3_asic_rev(tp) != ASIC_REV_5762)
15477 if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15478 !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15479 TG3_OTP_MAGIC0_VALID(val)) {
15480 u64 val64 = (u64) val << 32 | val2;
15484 for (i = 0; i < 7; i++) {
15485 if ((val64 & 0xff) == 0)
15487 ver = val64 & 0xff;
15490 vlen = strlen(tp->fw_ver);
15491 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15495 static void tg3_read_fw_ver(struct tg3 *tp)
15498 bool vpd_vers = false;
15500 if (tp->fw_ver[0] != 0)
15503 if (tg3_flag(tp, NO_NVRAM)) {
15504 strcat(tp->fw_ver, "sb");
15505 tg3_read_otp_ver(tp);
15509 if (tg3_nvram_read(tp, 0, &val))
15512 if (val == TG3_EEPROM_MAGIC)
15513 tg3_read_bc_ver(tp);
15514 else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15515 tg3_read_sb_ver(tp, val);
15516 else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15517 tg3_read_hwsb_ver(tp);
15519 if (tg3_flag(tp, ENABLE_ASF)) {
15520 if (tg3_flag(tp, ENABLE_APE)) {
15521 tg3_probe_ncsi(tp);
15523 tg3_read_dash_ver(tp);
15524 } else if (!vpd_vers) {
15525 tg3_read_mgmtfw_ver(tp);
15529 tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15532 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15534 if (tg3_flag(tp, LRG_PROD_RING_CAP))
15535 return TG3_RX_RET_MAX_SIZE_5717;
15536 else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15537 return TG3_RX_RET_MAX_SIZE_5700;
15539 return TG3_RX_RET_MAX_SIZE_5705;
15542 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
15543 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15544 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15545 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15549 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15551 struct pci_dev *peer;
15552 unsigned int func, devnr = tp->pdev->devfn & ~7;
15554 for (func = 0; func < 8; func++) {
15555 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15556 if (peer && peer != tp->pdev)
15560 /* 5704 can be configured in single-port mode, set peer to
15561 * tp->pdev in that case.
15569 * We don't need to keep the refcount elevated; there's no way
15570 * to remove one half of this device without removing the other
15577 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15579 tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15580 if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15583 /* All devices that use the alternate
15584 * ASIC REV location have a CPMU.
15586 tg3_flag_set(tp, CPMU_PRESENT);
15588 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15589 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15590 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15591 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15592 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15593 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15594 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15595 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15596 reg = TG3PCI_GEN2_PRODID_ASICREV;
15597 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15598 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15599 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15600 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15601 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15602 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15603 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15604 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15605 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15606 tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15607 reg = TG3PCI_GEN15_PRODID_ASICREV;
15609 reg = TG3PCI_PRODID_ASICREV;
15611 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15614 /* Wrong chip ID in 5752 A0. This code can be removed later
15615 * as A0 is not in production.
15617 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15618 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15620 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15621 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15623 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15624 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15625 tg3_asic_rev(tp) == ASIC_REV_5720)
15626 tg3_flag_set(tp, 5717_PLUS);
15628 if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15629 tg3_asic_rev(tp) == ASIC_REV_57766)
15630 tg3_flag_set(tp, 57765_CLASS);
15632 if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15633 tg3_asic_rev(tp) == ASIC_REV_5762)
15634 tg3_flag_set(tp, 57765_PLUS);
15636 /* Intentionally exclude ASIC_REV_5906 */
15637 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15638 tg3_asic_rev(tp) == ASIC_REV_5787 ||
15639 tg3_asic_rev(tp) == ASIC_REV_5784 ||
15640 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15641 tg3_asic_rev(tp) == ASIC_REV_5785 ||
15642 tg3_asic_rev(tp) == ASIC_REV_57780 ||
15643 tg3_flag(tp, 57765_PLUS))
15644 tg3_flag_set(tp, 5755_PLUS);
15646 if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15647 tg3_asic_rev(tp) == ASIC_REV_5714)
15648 tg3_flag_set(tp, 5780_CLASS);
15650 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15651 tg3_asic_rev(tp) == ASIC_REV_5752 ||
15652 tg3_asic_rev(tp) == ASIC_REV_5906 ||
15653 tg3_flag(tp, 5755_PLUS) ||
15654 tg3_flag(tp, 5780_CLASS))
15655 tg3_flag_set(tp, 5750_PLUS);
15657 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15658 tg3_flag(tp, 5750_PLUS))
15659 tg3_flag_set(tp, 5705_PLUS);
15662 static bool tg3_10_100_only_device(struct tg3 *tp,
15663 const struct pci_device_id *ent)
15665 u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15667 if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15668 (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15669 (tp->phy_flags & TG3_PHYFLG_IS_FET))
15672 if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15673 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15674 if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15684 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15687 u32 pci_state_reg, grc_misc_cfg;
15692 /* Force memory write invalidate off. If we leave it on,
15693 * then on 5700_BX chips we have to enable a workaround.
15694 * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15695 * to match the cacheline size. The Broadcom driver have this
15696 * workaround but turns MWI off all the times so never uses
15697 * it. This seems to suggest that the workaround is insufficient.
15699 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15700 pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15701 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15703 /* Important! -- Make sure register accesses are byteswapped
15704 * correctly. Also, for those chips that require it, make
15705 * sure that indirect register accesses are enabled before
15706 * the first operation.
15708 pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15710 tp->misc_host_ctrl |= (misc_ctrl_reg &
15711 MISC_HOST_CTRL_CHIPREV);
15712 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15713 tp->misc_host_ctrl);
15715 tg3_detect_asic_rev(tp, misc_ctrl_reg);
15717 /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15718 * we need to disable memory and use config. cycles
15719 * only to access all registers. The 5702/03 chips
15720 * can mistakenly decode the special cycles from the
15721 * ICH chipsets as memory write cycles, causing corruption
15722 * of register and memory space. Only certain ICH bridges
15723 * will drive special cycles with non-zero data during the
15724 * address phase which can fall within the 5703's address
15725 * range. This is not an ICH bug as the PCI spec allows
15726 * non-zero address during special cycles. However, only
15727 * these ICH bridges are known to drive non-zero addresses
15728 * during special cycles.
15730 * Since special cycles do not cross PCI bridges, we only
15731 * enable this workaround if the 5703 is on the secondary
15732 * bus of these ICH bridges.
15734 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15735 (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15736 static struct tg3_dev_id {
15740 } ich_chipsets[] = {
15741 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15743 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15745 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15747 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15751 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15752 struct pci_dev *bridge = NULL;
15754 while (pci_id->vendor != 0) {
15755 bridge = pci_get_device(pci_id->vendor, pci_id->device,
15761 if (pci_id->rev != PCI_ANY_ID) {
15762 if (bridge->revision > pci_id->rev)
15765 if (bridge->subordinate &&
15766 (bridge->subordinate->number ==
15767 tp->pdev->bus->number)) {
15768 tg3_flag_set(tp, ICH_WORKAROUND);
15769 pci_dev_put(bridge);
15775 if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15776 static struct tg3_dev_id {
15779 } bridge_chipsets[] = {
15780 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15781 { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15784 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15785 struct pci_dev *bridge = NULL;
15787 while (pci_id->vendor != 0) {
15788 bridge = pci_get_device(pci_id->vendor,
15795 if (bridge->subordinate &&
15796 (bridge->subordinate->number <=
15797 tp->pdev->bus->number) &&
15798 (bridge->subordinate->busn_res.end >=
15799 tp->pdev->bus->number)) {
15800 tg3_flag_set(tp, 5701_DMA_BUG);
15801 pci_dev_put(bridge);
15807 /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15808 * DMA addresses > 40-bit. This bridge may have other additional
15809 * 57xx devices behind it in some 4-port NIC designs for example.
15810 * Any tg3 device found behind the bridge will also need the 40-bit
15813 if (tg3_flag(tp, 5780_CLASS)) {
15814 tg3_flag_set(tp, 40BIT_DMA_BUG);
15815 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15817 struct pci_dev *bridge = NULL;
15820 bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15821 PCI_DEVICE_ID_SERVERWORKS_EPB,
15823 if (bridge && bridge->subordinate &&
15824 (bridge->subordinate->number <=
15825 tp->pdev->bus->number) &&
15826 (bridge->subordinate->busn_res.end >=
15827 tp->pdev->bus->number)) {
15828 tg3_flag_set(tp, 40BIT_DMA_BUG);
15829 pci_dev_put(bridge);
15835 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15836 tg3_asic_rev(tp) == ASIC_REV_5714)
15837 tp->pdev_peer = tg3_find_peer(tp);
15839 /* Determine TSO capabilities */
15840 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15841 ; /* Do nothing. HW bug. */
15842 else if (tg3_flag(tp, 57765_PLUS))
15843 tg3_flag_set(tp, HW_TSO_3);
15844 else if (tg3_flag(tp, 5755_PLUS) ||
15845 tg3_asic_rev(tp) == ASIC_REV_5906)
15846 tg3_flag_set(tp, HW_TSO_2);
15847 else if (tg3_flag(tp, 5750_PLUS)) {
15848 tg3_flag_set(tp, HW_TSO_1);
15849 tg3_flag_set(tp, TSO_BUG);
15850 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15851 tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15852 tg3_flag_clear(tp, TSO_BUG);
15853 } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15854 tg3_asic_rev(tp) != ASIC_REV_5701 &&
15855 tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15856 tg3_flag_set(tp, FW_TSO);
15857 tg3_flag_set(tp, TSO_BUG);
15858 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15859 tp->fw_needed = FIRMWARE_TG3TSO5;
15861 tp->fw_needed = FIRMWARE_TG3TSO;
15864 /* Selectively allow TSO based on operating conditions */
15865 if (tg3_flag(tp, HW_TSO_1) ||
15866 tg3_flag(tp, HW_TSO_2) ||
15867 tg3_flag(tp, HW_TSO_3) ||
15868 tg3_flag(tp, FW_TSO)) {
15869 /* For firmware TSO, assume ASF is disabled.
15870 * We'll disable TSO later if we discover ASF
15871 * is enabled in tg3_get_eeprom_hw_cfg().
15873 tg3_flag_set(tp, TSO_CAPABLE);
15875 tg3_flag_clear(tp, TSO_CAPABLE);
15876 tg3_flag_clear(tp, TSO_BUG);
15877 tp->fw_needed = NULL;
15880 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15881 tp->fw_needed = FIRMWARE_TG3;
15883 if (tg3_asic_rev(tp) == ASIC_REV_57766)
15884 tp->fw_needed = FIRMWARE_TG357766;
15888 if (tg3_flag(tp, 5750_PLUS)) {
15889 tg3_flag_set(tp, SUPPORT_MSI);
15890 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15891 tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15892 (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15893 tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15894 tp->pdev_peer == tp->pdev))
15895 tg3_flag_clear(tp, SUPPORT_MSI);
15897 if (tg3_flag(tp, 5755_PLUS) ||
15898 tg3_asic_rev(tp) == ASIC_REV_5906) {
15899 tg3_flag_set(tp, 1SHOT_MSI);
15902 if (tg3_flag(tp, 57765_PLUS)) {
15903 tg3_flag_set(tp, SUPPORT_MSIX);
15904 tp->irq_max = TG3_IRQ_MAX_VECS;
15910 if (tp->irq_max > 1) {
15911 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15912 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15914 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15915 tg3_asic_rev(tp) == ASIC_REV_5720)
15916 tp->txq_max = tp->irq_max - 1;
15919 if (tg3_flag(tp, 5755_PLUS) ||
15920 tg3_asic_rev(tp) == ASIC_REV_5906)
15921 tg3_flag_set(tp, SHORT_DMA_BUG);
15923 if (tg3_asic_rev(tp) == ASIC_REV_5719)
15924 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15926 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15927 tg3_asic_rev(tp) == ASIC_REV_5719 ||
15928 tg3_asic_rev(tp) == ASIC_REV_5720 ||
15929 tg3_asic_rev(tp) == ASIC_REV_5762)
15930 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15932 if (tg3_flag(tp, 57765_PLUS) &&
15933 tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15934 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15936 if (!tg3_flag(tp, 5705_PLUS) ||
15937 tg3_flag(tp, 5780_CLASS) ||
15938 tg3_flag(tp, USE_JUMBO_BDFLAG))
15939 tg3_flag_set(tp, JUMBO_CAPABLE);
15941 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15944 if (pci_is_pcie(tp->pdev)) {
15947 tg3_flag_set(tp, PCI_EXPRESS);
15949 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15950 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15951 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15952 tg3_flag_clear(tp, HW_TSO_2);
15953 tg3_flag_clear(tp, TSO_CAPABLE);
15955 if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15956 tg3_asic_rev(tp) == ASIC_REV_5761 ||
15957 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15958 tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15959 tg3_flag_set(tp, CLKREQ_BUG);
15960 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15961 tg3_flag_set(tp, L1PLLPD_EN);
15963 } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15964 /* BCM5785 devices are effectively PCIe devices, and should
15965 * follow PCIe codepaths, but do not have a PCIe capabilities
15968 tg3_flag_set(tp, PCI_EXPRESS);
15969 } else if (!tg3_flag(tp, 5705_PLUS) ||
15970 tg3_flag(tp, 5780_CLASS)) {
15971 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15972 if (!tp->pcix_cap) {
15973 dev_err(&tp->pdev->dev,
15974 "Cannot find PCI-X capability, aborting\n");
15978 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15979 tg3_flag_set(tp, PCIX_MODE);
15982 /* If we have an AMD 762 or VIA K8T800 chipset, write
15983 * reordering to the mailbox registers done by the host
15984 * controller can cause major troubles. We read back from
15985 * every mailbox register write to force the writes to be
15986 * posted to the chip in order.
15988 if (pci_dev_present(tg3_write_reorder_chipsets) &&
15989 !tg3_flag(tp, PCI_EXPRESS))
15990 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15992 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15993 &tp->pci_cacheline_sz);
15994 pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15995 &tp->pci_lat_timer);
15996 if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15997 tp->pci_lat_timer < 64) {
15998 tp->pci_lat_timer = 64;
15999 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16000 tp->pci_lat_timer);
16003 /* Important! -- It is critical that the PCI-X hw workaround
16004 * situation is decided before the first MMIO register access.
16006 if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16007 /* 5700 BX chips need to have their TX producer index
16008 * mailboxes written twice to workaround a bug.
16010 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16012 /* If we are in PCI-X mode, enable register write workaround.
16014 * The workaround is to use indirect register accesses
16015 * for all chip writes not to mailbox registers.
16017 if (tg3_flag(tp, PCIX_MODE)) {
16020 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16022 /* The chip can have it's power management PCI config
16023 * space registers clobbered due to this bug.
16024 * So explicitly force the chip into D0 here.
16026 pci_read_config_dword(tp->pdev,
16027 tp->pm_cap + PCI_PM_CTRL,
16029 pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16030 pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16031 pci_write_config_dword(tp->pdev,
16032 tp->pm_cap + PCI_PM_CTRL,
16035 /* Also, force SERR#/PERR# in PCI command. */
16036 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16037 pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16038 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16042 if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16043 tg3_flag_set(tp, PCI_HIGH_SPEED);
16044 if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16045 tg3_flag_set(tp, PCI_32BIT);
16047 /* Chip-specific fixup from Broadcom driver */
16048 if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16049 (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16050 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16051 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16054 /* Default fast path register access methods */
16055 tp->read32 = tg3_read32;
16056 tp->write32 = tg3_write32;
16057 tp->read32_mbox = tg3_read32;
16058 tp->write32_mbox = tg3_write32;
16059 tp->write32_tx_mbox = tg3_write32;
16060 tp->write32_rx_mbox = tg3_write32;
16062 /* Various workaround register access methods */
16063 if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16064 tp->write32 = tg3_write_indirect_reg32;
16065 else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16066 (tg3_flag(tp, PCI_EXPRESS) &&
16067 tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16069 * Back to back register writes can cause problems on these
16070 * chips, the workaround is to read back all reg writes
16071 * except those to mailbox regs.
16073 * See tg3_write_indirect_reg32().
16075 tp->write32 = tg3_write_flush_reg32;
16078 if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16079 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16080 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16081 tp->write32_rx_mbox = tg3_write_flush_reg32;
16084 if (tg3_flag(tp, ICH_WORKAROUND)) {
16085 tp->read32 = tg3_read_indirect_reg32;
16086 tp->write32 = tg3_write_indirect_reg32;
16087 tp->read32_mbox = tg3_read_indirect_mbox;
16088 tp->write32_mbox = tg3_write_indirect_mbox;
16089 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16090 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16095 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16096 pci_cmd &= ~PCI_COMMAND_MEMORY;
16097 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16099 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16100 tp->read32_mbox = tg3_read32_mbox_5906;
16101 tp->write32_mbox = tg3_write32_mbox_5906;
16102 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16103 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16106 if (tp->write32 == tg3_write_indirect_reg32 ||
16107 (tg3_flag(tp, PCIX_MODE) &&
16108 (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16109 tg3_asic_rev(tp) == ASIC_REV_5701)))
16110 tg3_flag_set(tp, SRAM_USE_CONFIG);
16112 /* The memory arbiter has to be enabled in order for SRAM accesses
16113 * to succeed. Normally on powerup the tg3 chip firmware will make
16114 * sure it is enabled, but other entities such as system netboot
16115 * code might disable it.
16117 val = tr32(MEMARB_MODE);
16118 tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16120 tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16121 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16122 tg3_flag(tp, 5780_CLASS)) {
16123 if (tg3_flag(tp, PCIX_MODE)) {
16124 pci_read_config_dword(tp->pdev,
16125 tp->pcix_cap + PCI_X_STATUS,
16127 tp->pci_fn = val & 0x7;
16129 } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16130 tg3_asic_rev(tp) == ASIC_REV_5719 ||
16131 tg3_asic_rev(tp) == ASIC_REV_5720) {
16132 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16133 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16134 val = tr32(TG3_CPMU_STATUS);
16136 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16137 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16139 tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16140 TG3_CPMU_STATUS_FSHFT_5719;
16143 if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16144 tp->write32_tx_mbox = tg3_write_flush_reg32;
16145 tp->write32_rx_mbox = tg3_write_flush_reg32;
16148 /* Get eeprom hw config before calling tg3_set_power_state().
16149 * In particular, the TG3_FLAG_IS_NIC flag must be
16150 * determined before calling tg3_set_power_state() so that
16151 * we know whether or not to switch out of Vaux power.
16152 * When the flag is set, it means that GPIO1 is used for eeprom
16153 * write protect and also implies that it is a LOM where GPIOs
16154 * are not used to switch power.
16156 tg3_get_eeprom_hw_cfg(tp);
16158 if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16159 tg3_flag_clear(tp, TSO_CAPABLE);
16160 tg3_flag_clear(tp, TSO_BUG);
16161 tp->fw_needed = NULL;
16164 if (tg3_flag(tp, ENABLE_APE)) {
16165 /* Allow reads and writes to the
16166 * APE register and memory space.
16168 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16169 PCISTATE_ALLOW_APE_SHMEM_WR |
16170 PCISTATE_ALLOW_APE_PSPACE_WR;
16171 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16174 tg3_ape_lock_init(tp);
16177 /* Set up tp->grc_local_ctrl before calling
16178 * tg3_pwrsrc_switch_to_vmain(). GPIO1 driven high
16179 * will bring 5700's external PHY out of reset.
16180 * It is also used as eeprom write protect on LOMs.
16182 tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16183 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16184 tg3_flag(tp, EEPROM_WRITE_PROT))
16185 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16186 GRC_LCLCTRL_GPIO_OUTPUT1);
16187 /* Unused GPIO3 must be driven as output on 5752 because there
16188 * are no pull-up resistors on unused GPIO pins.
16190 else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16191 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16193 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16194 tg3_asic_rev(tp) == ASIC_REV_57780 ||
16195 tg3_flag(tp, 57765_CLASS))
16196 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16198 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16199 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16200 /* Turn off the debug UART. */
16201 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16202 if (tg3_flag(tp, IS_NIC))
16203 /* Keep VMain power. */
16204 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16205 GRC_LCLCTRL_GPIO_OUTPUT0;
16208 if (tg3_asic_rev(tp) == ASIC_REV_5762)
16209 tp->grc_local_ctrl |=
16210 tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16212 /* Switch out of Vaux if it is a NIC */
16213 tg3_pwrsrc_switch_to_vmain(tp);
16215 /* Derive initial jumbo mode from MTU assigned in
16216 * ether_setup() via the alloc_etherdev() call
16218 if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16219 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16221 /* Determine WakeOnLan speed to use. */
16222 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16223 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16224 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16225 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16226 tg3_flag_clear(tp, WOL_SPEED_100MB);
16228 tg3_flag_set(tp, WOL_SPEED_100MB);
16231 if (tg3_asic_rev(tp) == ASIC_REV_5906)
16232 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16234 /* A few boards don't want Ethernet@WireSpeed phy feature */
16235 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16236 (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16237 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16238 (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16239 (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16240 (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16241 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16243 if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16244 tg3_chip_rev(tp) == CHIPREV_5704_AX)
16245 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16246 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16247 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16249 if (tg3_flag(tp, 5705_PLUS) &&
16250 !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16251 tg3_asic_rev(tp) != ASIC_REV_5785 &&
16252 tg3_asic_rev(tp) != ASIC_REV_57780 &&
16253 !tg3_flag(tp, 57765_PLUS)) {
16254 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16255 tg3_asic_rev(tp) == ASIC_REV_5787 ||
16256 tg3_asic_rev(tp) == ASIC_REV_5784 ||
16257 tg3_asic_rev(tp) == ASIC_REV_5761) {
16258 if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16259 tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16260 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16261 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16262 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16264 tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16267 if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16268 tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16269 tp->phy_otp = tg3_read_otp_phycfg(tp);
16270 if (tp->phy_otp == 0)
16271 tp->phy_otp = TG3_OTP_DEFAULT;
16274 if (tg3_flag(tp, CPMU_PRESENT))
16275 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16277 tp->mi_mode = MAC_MI_MODE_BASE;
16279 tp->coalesce_mode = 0;
16280 if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16281 tg3_chip_rev(tp) != CHIPREV_5700_BX)
16282 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16284 /* Set these bits to enable statistics workaround. */
16285 if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16286 tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16287 tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16288 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16289 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16292 if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16293 tg3_asic_rev(tp) == ASIC_REV_57780)
16294 tg3_flag_set(tp, USE_PHYLIB);
16296 err = tg3_mdio_init(tp);
16300 /* Initialize data/descriptor byte/word swapping. */
16301 val = tr32(GRC_MODE);
16302 if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16303 tg3_asic_rev(tp) == ASIC_REV_5762)
16304 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16305 GRC_MODE_WORD_SWAP_B2HRX_DATA |
16306 GRC_MODE_B2HRX_ENABLE |
16307 GRC_MODE_HTX2B_ENABLE |
16308 GRC_MODE_HOST_STACKUP);
16310 val &= GRC_MODE_HOST_STACKUP;
16312 tw32(GRC_MODE, val | tp->grc_mode);
16314 tg3_switch_clocks(tp);
16316 /* Clear this out for sanity. */
16317 tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16319 /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16320 tw32(TG3PCI_REG_BASE_ADDR, 0);
16322 pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16324 if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16325 !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16326 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16327 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16328 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16329 tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16330 void __iomem *sram_base;
16332 /* Write some dummy words into the SRAM status block
16333 * area, see if it reads back correctly. If the return
16334 * value is bad, force enable the PCIX workaround.
16336 sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16338 writel(0x00000000, sram_base);
16339 writel(0x00000000, sram_base + 4);
16340 writel(0xffffffff, sram_base + 4);
16341 if (readl(sram_base) != 0x00000000)
16342 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16347 tg3_nvram_init(tp);
16349 /* If the device has an NVRAM, no need to load patch firmware */
16350 if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16351 !tg3_flag(tp, NO_NVRAM))
16352 tp->fw_needed = NULL;
16354 grc_misc_cfg = tr32(GRC_MISC_CFG);
16355 grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16357 if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16358 (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16359 grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16360 tg3_flag_set(tp, IS_5788);
16362 if (!tg3_flag(tp, IS_5788) &&
16363 tg3_asic_rev(tp) != ASIC_REV_5700)
16364 tg3_flag_set(tp, TAGGED_STATUS);
16365 if (tg3_flag(tp, TAGGED_STATUS)) {
16366 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16367 HOSTCC_MODE_CLRTICK_TXBD);
16369 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16370 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16371 tp->misc_host_ctrl);
16374 /* Preserve the APE MAC_MODE bits */
16375 if (tg3_flag(tp, ENABLE_APE))
16376 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16380 if (tg3_10_100_only_device(tp, ent))
16381 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16383 err = tg3_phy_probe(tp);
16385 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16386 /* ... but do not return immediately ... */
16391 tg3_read_fw_ver(tp);
16393 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16394 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16396 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16397 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16399 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16402 /* 5700 {AX,BX} chips have a broken status block link
16403 * change bit implementation, so we must use the
16404 * status register in those cases.
16406 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16407 tg3_flag_set(tp, USE_LINKCHG_REG);
16409 tg3_flag_clear(tp, USE_LINKCHG_REG);
16411 /* The led_ctrl is set during tg3_phy_probe, here we might
16412 * have to force the link status polling mechanism based
16413 * upon subsystem IDs.
16415 if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16416 tg3_asic_rev(tp) == ASIC_REV_5701 &&
16417 !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16418 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16419 tg3_flag_set(tp, USE_LINKCHG_REG);
16422 /* For all SERDES we poll the MAC status register. */
16423 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16424 tg3_flag_set(tp, POLL_SERDES);
16426 tg3_flag_clear(tp, POLL_SERDES);
16428 tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16429 tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16430 if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16431 tg3_flag(tp, PCIX_MODE)) {
16432 tp->rx_offset = NET_SKB_PAD;
16433 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16434 tp->rx_copy_thresh = ~(u16)0;
16438 tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16439 tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16440 tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16442 tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16444 /* Increment the rx prod index on the rx std ring by at most
16445 * 8 for these chips to workaround hw errata.
16447 if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16448 tg3_asic_rev(tp) == ASIC_REV_5752 ||
16449 tg3_asic_rev(tp) == ASIC_REV_5755)
16450 tp->rx_std_max_post = 8;
16452 if (tg3_flag(tp, ASPM_WORKAROUND))
16453 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16454 PCIE_PWR_MGMT_L1_THRESH_MSK;
16459 #ifdef CONFIG_SPARC
16460 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16462 struct net_device *dev = tp->dev;
16463 struct pci_dev *pdev = tp->pdev;
16464 struct device_node *dp = pci_device_to_OF_node(pdev);
16465 const unsigned char *addr;
16468 addr = of_get_property(dp, "local-mac-address", &len);
16469 if (addr && len == 6) {
16470 memcpy(dev->dev_addr, addr, 6);
16476 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16478 struct net_device *dev = tp->dev;
16480 memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16485 static int tg3_get_device_address(struct tg3 *tp)
16487 struct net_device *dev = tp->dev;
16488 u32 hi, lo, mac_offset;
16492 #ifdef CONFIG_SPARC
16493 if (!tg3_get_macaddr_sparc(tp))
16497 if (tg3_flag(tp, IS_SSB_CORE)) {
16498 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16499 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16504 if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16505 tg3_flag(tp, 5780_CLASS)) {
16506 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16508 if (tg3_nvram_lock(tp))
16509 tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16511 tg3_nvram_unlock(tp);
16512 } else if (tg3_flag(tp, 5717_PLUS)) {
16513 if (tp->pci_fn & 1)
16515 if (tp->pci_fn > 1)
16516 mac_offset += 0x18c;
16517 } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16520 /* First try to get it from MAC address mailbox. */
16521 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
16522 if ((hi >> 16) == 0x484b) {
16523 dev->dev_addr[0] = (hi >> 8) & 0xff;
16524 dev->dev_addr[1] = (hi >> 0) & 0xff;
16526 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16527 dev->dev_addr[2] = (lo >> 24) & 0xff;
16528 dev->dev_addr[3] = (lo >> 16) & 0xff;
16529 dev->dev_addr[4] = (lo >> 8) & 0xff;
16530 dev->dev_addr[5] = (lo >> 0) & 0xff;
16532 /* Some old bootcode may report a 0 MAC address in SRAM */
16533 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16536 /* Next, try NVRAM. */
16537 if (!tg3_flag(tp, NO_NVRAM) &&
16538 !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16539 !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16540 memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16541 memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16543 /* Finally just fetch it out of the MAC control regs. */
16545 hi = tr32(MAC_ADDR_0_HIGH);
16546 lo = tr32(MAC_ADDR_0_LOW);
16548 dev->dev_addr[5] = lo & 0xff;
16549 dev->dev_addr[4] = (lo >> 8) & 0xff;
16550 dev->dev_addr[3] = (lo >> 16) & 0xff;
16551 dev->dev_addr[2] = (lo >> 24) & 0xff;
16552 dev->dev_addr[1] = hi & 0xff;
16553 dev->dev_addr[0] = (hi >> 8) & 0xff;
16557 if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16558 #ifdef CONFIG_SPARC
16559 if (!tg3_get_default_macaddr_sparc(tp))
16567 #define BOUNDARY_SINGLE_CACHELINE 1
16568 #define BOUNDARY_MULTI_CACHELINE 2
16570 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16572 int cacheline_size;
16576 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16578 cacheline_size = 1024;
16580 cacheline_size = (int) byte * 4;
16582 /* On 5703 and later chips, the boundary bits have no
16585 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16586 tg3_asic_rev(tp) != ASIC_REV_5701 &&
16587 !tg3_flag(tp, PCI_EXPRESS))
16590 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16591 goal = BOUNDARY_MULTI_CACHELINE;
16593 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16594 goal = BOUNDARY_SINGLE_CACHELINE;
16600 if (tg3_flag(tp, 57765_PLUS)) {
16601 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16608 /* PCI controllers on most RISC systems tend to disconnect
16609 * when a device tries to burst across a cache-line boundary.
16610 * Therefore, letting tg3 do so just wastes PCI bandwidth.
16612 * Unfortunately, for PCI-E there are only limited
16613 * write-side controls for this, and thus for reads
16614 * we will still get the disconnects. We'll also waste
16615 * these PCI cycles for both read and write for chips
16616 * other than 5700 and 5701 which do not implement the
16619 if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16620 switch (cacheline_size) {
16625 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16626 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16627 DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16629 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16630 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16635 val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16636 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16640 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16641 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16644 } else if (tg3_flag(tp, PCI_EXPRESS)) {
16645 switch (cacheline_size) {
16649 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16650 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16651 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16657 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16658 val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16662 switch (cacheline_size) {
16664 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16665 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16666 DMA_RWCTRL_WRITE_BNDRY_16);
16671 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16672 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16673 DMA_RWCTRL_WRITE_BNDRY_32);
16678 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16679 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16680 DMA_RWCTRL_WRITE_BNDRY_64);
16685 if (goal == BOUNDARY_SINGLE_CACHELINE) {
16686 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16687 DMA_RWCTRL_WRITE_BNDRY_128);
16692 val |= (DMA_RWCTRL_READ_BNDRY_256 |
16693 DMA_RWCTRL_WRITE_BNDRY_256);
16696 val |= (DMA_RWCTRL_READ_BNDRY_512 |
16697 DMA_RWCTRL_WRITE_BNDRY_512);
16701 val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16702 DMA_RWCTRL_WRITE_BNDRY_1024);
16711 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16712 int size, bool to_device)
16714 struct tg3_internal_buffer_desc test_desc;
16715 u32 sram_dma_descs;
16718 sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16720 tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16721 tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16722 tw32(RDMAC_STATUS, 0);
16723 tw32(WDMAC_STATUS, 0);
16725 tw32(BUFMGR_MODE, 0);
16726 tw32(FTQ_RESET, 0);
16728 test_desc.addr_hi = ((u64) buf_dma) >> 32;
16729 test_desc.addr_lo = buf_dma & 0xffffffff;
16730 test_desc.nic_mbuf = 0x00002100;
16731 test_desc.len = size;
16734 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
16735 * the *second* time the tg3 driver was getting loaded after an
16738 * Broadcom tells me:
16739 * ...the DMA engine is connected to the GRC block and a DMA
16740 * reset may affect the GRC block in some unpredictable way...
16741 * The behavior of resets to individual blocks has not been tested.
16743 * Broadcom noted the GRC reset will also reset all sub-components.
16746 test_desc.cqid_sqid = (13 << 8) | 2;
16748 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16751 test_desc.cqid_sqid = (16 << 8) | 7;
16753 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16756 test_desc.flags = 0x00000005;
16758 for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16761 val = *(((u32 *)&test_desc) + i);
16762 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16763 sram_dma_descs + (i * sizeof(u32)));
16764 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16766 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16769 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16771 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16774 for (i = 0; i < 40; i++) {
16778 val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16780 val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16781 if ((val & 0xffff) == sram_dma_descs) {
16792 #define TEST_BUFFER_SIZE 0x2000
16794 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16795 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16799 static int tg3_test_dma(struct tg3 *tp)
16801 dma_addr_t buf_dma;
16802 u32 *buf, saved_dma_rwctrl;
16805 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16806 &buf_dma, GFP_KERNEL);
16812 tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16813 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16815 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16817 if (tg3_flag(tp, 57765_PLUS))
16820 if (tg3_flag(tp, PCI_EXPRESS)) {
16821 /* DMA read watermark not used on PCIE */
16822 tp->dma_rwctrl |= 0x00180000;
16823 } else if (!tg3_flag(tp, PCIX_MODE)) {
16824 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16825 tg3_asic_rev(tp) == ASIC_REV_5750)
16826 tp->dma_rwctrl |= 0x003f0000;
16828 tp->dma_rwctrl |= 0x003f000f;
16830 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16831 tg3_asic_rev(tp) == ASIC_REV_5704) {
16832 u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16833 u32 read_water = 0x7;
16835 /* If the 5704 is behind the EPB bridge, we can
16836 * do the less restrictive ONE_DMA workaround for
16837 * better performance.
16839 if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16840 tg3_asic_rev(tp) == ASIC_REV_5704)
16841 tp->dma_rwctrl |= 0x8000;
16842 else if (ccval == 0x6 || ccval == 0x7)
16843 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16845 if (tg3_asic_rev(tp) == ASIC_REV_5703)
16847 /* Set bit 23 to enable PCIX hw bug fix */
16849 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16850 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16852 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16853 /* 5780 always in PCIX mode */
16854 tp->dma_rwctrl |= 0x00144000;
16855 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16856 /* 5714 always in PCIX mode */
16857 tp->dma_rwctrl |= 0x00148000;
16859 tp->dma_rwctrl |= 0x001b000f;
16862 if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16863 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16865 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16866 tg3_asic_rev(tp) == ASIC_REV_5704)
16867 tp->dma_rwctrl &= 0xfffffff0;
16869 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16870 tg3_asic_rev(tp) == ASIC_REV_5701) {
16871 /* Remove this if it causes problems for some boards. */
16872 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16874 /* On 5700/5701 chips, we need to set this bit.
16875 * Otherwise the chip will issue cacheline transactions
16876 * to streamable DMA memory with not all the byte
16877 * enables turned on. This is an error on several
16878 * RISC PCI controllers, in particular sparc64.
16880 * On 5703/5704 chips, this bit has been reassigned
16881 * a different meaning. In particular, it is used
16882 * on those chips to enable a PCI-X workaround.
16884 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16887 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16890 /* Unneeded, already done by tg3_get_invariants. */
16891 tg3_switch_clocks(tp);
16894 if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16895 tg3_asic_rev(tp) != ASIC_REV_5701)
16898 /* It is best to perform DMA test with maximum write burst size
16899 * to expose the 5700/5701 write DMA bug.
16901 saved_dma_rwctrl = tp->dma_rwctrl;
16902 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16903 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16908 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16911 /* Send the buffer to the chip. */
16912 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16914 dev_err(&tp->pdev->dev,
16915 "%s: Buffer write failed. err = %d\n",
16921 /* validate data reached card RAM correctly. */
16922 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16924 tg3_read_mem(tp, 0x2100 + (i*4), &val);
16925 if (le32_to_cpu(val) != p[i]) {
16926 dev_err(&tp->pdev->dev,
16927 "%s: Buffer corrupted on device! "
16928 "(%d != %d)\n", __func__, val, i);
16929 /* ret = -ENODEV here? */
16934 /* Now read it back. */
16935 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16937 dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16938 "err = %d\n", __func__, ret);
16943 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16947 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16948 DMA_RWCTRL_WRITE_BNDRY_16) {
16949 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16950 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16951 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16954 dev_err(&tp->pdev->dev,
16955 "%s: Buffer corrupted on read back! "
16956 "(%d != %d)\n", __func__, p[i], i);
16962 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16968 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16969 DMA_RWCTRL_WRITE_BNDRY_16) {
16970 /* DMA test passed without adjusting DMA boundary,
16971 * now look for chipsets that are known to expose the
16972 * DMA bug without failing the test.
16974 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16975 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16976 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16978 /* Safe to use the calculated DMA boundary. */
16979 tp->dma_rwctrl = saved_dma_rwctrl;
16982 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16986 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16991 static void tg3_init_bufmgr_config(struct tg3 *tp)
16993 if (tg3_flag(tp, 57765_PLUS)) {
16994 tp->bufmgr_config.mbuf_read_dma_low_water =
16995 DEFAULT_MB_RDMA_LOW_WATER_5705;
16996 tp->bufmgr_config.mbuf_mac_rx_low_water =
16997 DEFAULT_MB_MACRX_LOW_WATER_57765;
16998 tp->bufmgr_config.mbuf_high_water =
16999 DEFAULT_MB_HIGH_WATER_57765;
17001 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17002 DEFAULT_MB_RDMA_LOW_WATER_5705;
17003 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17004 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17005 tp->bufmgr_config.mbuf_high_water_jumbo =
17006 DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17007 } else if (tg3_flag(tp, 5705_PLUS)) {
17008 tp->bufmgr_config.mbuf_read_dma_low_water =
17009 DEFAULT_MB_RDMA_LOW_WATER_5705;
17010 tp->bufmgr_config.mbuf_mac_rx_low_water =
17011 DEFAULT_MB_MACRX_LOW_WATER_5705;
17012 tp->bufmgr_config.mbuf_high_water =
17013 DEFAULT_MB_HIGH_WATER_5705;
17014 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17015 tp->bufmgr_config.mbuf_mac_rx_low_water =
17016 DEFAULT_MB_MACRX_LOW_WATER_5906;
17017 tp->bufmgr_config.mbuf_high_water =
17018 DEFAULT_MB_HIGH_WATER_5906;
17021 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17022 DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17023 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17024 DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17025 tp->bufmgr_config.mbuf_high_water_jumbo =
17026 DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17028 tp->bufmgr_config.mbuf_read_dma_low_water =
17029 DEFAULT_MB_RDMA_LOW_WATER;
17030 tp->bufmgr_config.mbuf_mac_rx_low_water =
17031 DEFAULT_MB_MACRX_LOW_WATER;
17032 tp->bufmgr_config.mbuf_high_water =
17033 DEFAULT_MB_HIGH_WATER;
17035 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17036 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17037 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17038 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17039 tp->bufmgr_config.mbuf_high_water_jumbo =
17040 DEFAULT_MB_HIGH_WATER_JUMBO;
17043 tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17044 tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17047 static char *tg3_phy_string(struct tg3 *tp)
17049 switch (tp->phy_id & TG3_PHY_ID_MASK) {
17050 case TG3_PHY_ID_BCM5400: return "5400";
17051 case TG3_PHY_ID_BCM5401: return "5401";
17052 case TG3_PHY_ID_BCM5411: return "5411";
17053 case TG3_PHY_ID_BCM5701: return "5701";
17054 case TG3_PHY_ID_BCM5703: return "5703";
17055 case TG3_PHY_ID_BCM5704: return "5704";
17056 case TG3_PHY_ID_BCM5705: return "5705";
17057 case TG3_PHY_ID_BCM5750: return "5750";
17058 case TG3_PHY_ID_BCM5752: return "5752";
17059 case TG3_PHY_ID_BCM5714: return "5714";
17060 case TG3_PHY_ID_BCM5780: return "5780";
17061 case TG3_PHY_ID_BCM5755: return "5755";
17062 case TG3_PHY_ID_BCM5787: return "5787";
17063 case TG3_PHY_ID_BCM5784: return "5784";
17064 case TG3_PHY_ID_BCM5756: return "5722/5756";
17065 case TG3_PHY_ID_BCM5906: return "5906";
17066 case TG3_PHY_ID_BCM5761: return "5761";
17067 case TG3_PHY_ID_BCM5718C: return "5718C";
17068 case TG3_PHY_ID_BCM5718S: return "5718S";
17069 case TG3_PHY_ID_BCM57765: return "57765";
17070 case TG3_PHY_ID_BCM5719C: return "5719C";
17071 case TG3_PHY_ID_BCM5720C: return "5720C";
17072 case TG3_PHY_ID_BCM5762: return "5762C";
17073 case TG3_PHY_ID_BCM8002: return "8002/serdes";
17074 case 0: return "serdes";
17075 default: return "unknown";
17079 static char *tg3_bus_string(struct tg3 *tp, char *str)
17081 if (tg3_flag(tp, PCI_EXPRESS)) {
17082 strcpy(str, "PCI Express");
17084 } else if (tg3_flag(tp, PCIX_MODE)) {
17085 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17087 strcpy(str, "PCIX:");
17089 if ((clock_ctrl == 7) ||
17090 ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17091 GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17092 strcat(str, "133MHz");
17093 else if (clock_ctrl == 0)
17094 strcat(str, "33MHz");
17095 else if (clock_ctrl == 2)
17096 strcat(str, "50MHz");
17097 else if (clock_ctrl == 4)
17098 strcat(str, "66MHz");
17099 else if (clock_ctrl == 6)
17100 strcat(str, "100MHz");
17102 strcpy(str, "PCI:");
17103 if (tg3_flag(tp, PCI_HIGH_SPEED))
17104 strcat(str, "66MHz");
17106 strcat(str, "33MHz");
17108 if (tg3_flag(tp, PCI_32BIT))
17109 strcat(str, ":32-bit");
17111 strcat(str, ":64-bit");
17115 static void tg3_init_coal(struct tg3 *tp)
17117 struct ethtool_coalesce *ec = &tp->coal;
17119 memset(ec, 0, sizeof(*ec));
17120 ec->cmd = ETHTOOL_GCOALESCE;
17121 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17122 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17123 ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17124 ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17125 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17126 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17127 ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17128 ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17129 ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17131 if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17132 HOSTCC_MODE_CLRTICK_TXBD)) {
17133 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17134 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17135 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17136 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17139 if (tg3_flag(tp, 5705_PLUS)) {
17140 ec->rx_coalesce_usecs_irq = 0;
17141 ec->tx_coalesce_usecs_irq = 0;
17142 ec->stats_block_coalesce_usecs = 0;
17146 static int tg3_init_one(struct pci_dev *pdev,
17147 const struct pci_device_id *ent)
17149 struct net_device *dev;
17151 int i, err, pm_cap;
17152 u32 sndmbx, rcvmbx, intmbx;
17154 u64 dma_mask, persist_dma_mask;
17155 netdev_features_t features = 0;
17157 printk_once(KERN_INFO "%s\n", version);
17159 err = pci_enable_device(pdev);
17161 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17165 err = pci_request_regions(pdev, DRV_MODULE_NAME);
17167 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17168 goto err_out_disable_pdev;
17171 pci_set_master(pdev);
17173 /* Find power-management capability. */
17174 pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17176 dev_err(&pdev->dev,
17177 "Cannot find Power Management capability, aborting\n");
17179 goto err_out_free_res;
17182 err = pci_set_power_state(pdev, PCI_D0);
17184 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17185 goto err_out_free_res;
17188 dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17191 goto err_out_power_down;
17194 SET_NETDEV_DEV(dev, &pdev->dev);
17196 tp = netdev_priv(dev);
17199 tp->pm_cap = pm_cap;
17200 tp->rx_mode = TG3_DEF_RX_MODE;
17201 tp->tx_mode = TG3_DEF_TX_MODE;
17205 tp->msg_enable = tg3_debug;
17207 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17209 if (pdev_is_ssb_gige_core(pdev)) {
17210 tg3_flag_set(tp, IS_SSB_CORE);
17211 if (ssb_gige_must_flush_posted_writes(pdev))
17212 tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17213 if (ssb_gige_one_dma_at_once(pdev))
17214 tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17215 if (ssb_gige_have_roboswitch(pdev))
17216 tg3_flag_set(tp, ROBOSWITCH);
17217 if (ssb_gige_is_rgmii(pdev))
17218 tg3_flag_set(tp, RGMII_MODE);
17221 /* The word/byte swap controls here control register access byte
17222 * swapping. DMA data byte swapping is controlled in the GRC_MODE
17225 tp->misc_host_ctrl =
17226 MISC_HOST_CTRL_MASK_PCI_INT |
17227 MISC_HOST_CTRL_WORD_SWAP |
17228 MISC_HOST_CTRL_INDIR_ACCESS |
17229 MISC_HOST_CTRL_PCISTATE_RW;
17231 /* The NONFRM (non-frame) byte/word swap controls take effect
17232 * on descriptor entries, anything which isn't packet data.
17234 * The StrongARM chips on the board (one for tx, one for rx)
17235 * are running in big-endian mode.
17237 tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17238 GRC_MODE_WSWAP_NONFRM_DATA);
17239 #ifdef __BIG_ENDIAN
17240 tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17242 spin_lock_init(&tp->lock);
17243 spin_lock_init(&tp->indirect_lock);
17244 INIT_WORK(&tp->reset_task, tg3_reset_task);
17246 tp->regs = pci_ioremap_bar(pdev, BAR_0);
17248 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17250 goto err_out_free_dev;
17253 if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17254 tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17255 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17256 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17257 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17258 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17259 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17260 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17261 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17262 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17263 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17264 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17265 tg3_flag_set(tp, ENABLE_APE);
17266 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17267 if (!tp->aperegs) {
17268 dev_err(&pdev->dev,
17269 "Cannot map APE registers, aborting\n");
17271 goto err_out_iounmap;
17275 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17276 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17278 dev->ethtool_ops = &tg3_ethtool_ops;
17279 dev->watchdog_timeo = TG3_TX_TIMEOUT;
17280 dev->netdev_ops = &tg3_netdev_ops;
17281 dev->irq = pdev->irq;
17283 err = tg3_get_invariants(tp, ent);
17285 dev_err(&pdev->dev,
17286 "Problem fetching invariants of chip, aborting\n");
17287 goto err_out_apeunmap;
17290 /* The EPB bridge inside 5714, 5715, and 5780 and any
17291 * device behind the EPB cannot support DMA addresses > 40-bit.
17292 * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17293 * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17294 * do DMA address check in tg3_start_xmit().
17296 if (tg3_flag(tp, IS_5788))
17297 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17298 else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17299 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17300 #ifdef CONFIG_HIGHMEM
17301 dma_mask = DMA_BIT_MASK(64);
17304 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17306 /* Configure DMA attributes. */
17307 if (dma_mask > DMA_BIT_MASK(32)) {
17308 err = pci_set_dma_mask(pdev, dma_mask);
17310 features |= NETIF_F_HIGHDMA;
17311 err = pci_set_consistent_dma_mask(pdev,
17314 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17315 "DMA for consistent allocations\n");
17316 goto err_out_apeunmap;
17320 if (err || dma_mask == DMA_BIT_MASK(32)) {
17321 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17323 dev_err(&pdev->dev,
17324 "No usable DMA configuration, aborting\n");
17325 goto err_out_apeunmap;
17329 tg3_init_bufmgr_config(tp);
17331 /* 5700 B0 chips do not support checksumming correctly due
17332 * to hardware bugs.
17334 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17335 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17337 if (tg3_flag(tp, 5755_PLUS))
17338 features |= NETIF_F_IPV6_CSUM;
17341 /* TSO is on by default on chips that support hardware TSO.
17342 * Firmware TSO on older chips gives lower performance, so it
17343 * is off by default, but can be enabled using ethtool.
17345 if ((tg3_flag(tp, HW_TSO_1) ||
17346 tg3_flag(tp, HW_TSO_2) ||
17347 tg3_flag(tp, HW_TSO_3)) &&
17348 (features & NETIF_F_IP_CSUM))
17349 features |= NETIF_F_TSO;
17350 if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17351 if (features & NETIF_F_IPV6_CSUM)
17352 features |= NETIF_F_TSO6;
17353 if (tg3_flag(tp, HW_TSO_3) ||
17354 tg3_asic_rev(tp) == ASIC_REV_5761 ||
17355 (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17356 tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17357 tg3_asic_rev(tp) == ASIC_REV_5785 ||
17358 tg3_asic_rev(tp) == ASIC_REV_57780)
17359 features |= NETIF_F_TSO_ECN;
17362 dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17363 NETIF_F_HW_VLAN_CTAG_RX;
17364 dev->vlan_features |= features;
17367 * Add loopback capability only for a subset of devices that support
17368 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
17369 * loopback for the remaining devices.
17371 if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17372 !tg3_flag(tp, CPMU_PRESENT))
17373 /* Add the loopback capability */
17374 features |= NETIF_F_LOOPBACK;
17376 dev->hw_features |= features;
17378 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17379 !tg3_flag(tp, TSO_CAPABLE) &&
17380 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17381 tg3_flag_set(tp, MAX_RXPEND_64);
17382 tp->rx_pending = 63;
17385 err = tg3_get_device_address(tp);
17387 dev_err(&pdev->dev,
17388 "Could not obtain valid ethernet address, aborting\n");
17389 goto err_out_apeunmap;
17392 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17393 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17394 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
17395 for (i = 0; i < tp->irq_max; i++) {
17396 struct tg3_napi *tnapi = &tp->napi[i];
17399 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17401 tnapi->int_mbox = intmbx;
17407 tnapi->consmbox = rcvmbx;
17408 tnapi->prodmbox = sndmbx;
17411 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17413 tnapi->coal_now = HOSTCC_MODE_NOW;
17415 if (!tg3_flag(tp, SUPPORT_MSIX))
17419 * If we support MSIX, we'll be using RSS. If we're using
17420 * RSS, the first vector only handles link interrupts and the
17421 * remaining vectors handle rx and tx interrupts. Reuse the
17422 * mailbox values for the next iteration. The values we setup
17423 * above are still useful for the single vectored mode.
17437 * Reset chip in case UNDI or EFI driver did not shutdown
17438 * DMA self test will enable WDMAC and we'll see (spurious)
17439 * pending DMA on the PCI bus at that point.
17441 if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17442 (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17443 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17444 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17447 err = tg3_test_dma(tp);
17449 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17450 goto err_out_apeunmap;
17455 pci_set_drvdata(pdev, dev);
17457 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17458 tg3_asic_rev(tp) == ASIC_REV_5720 ||
17459 tg3_asic_rev(tp) == ASIC_REV_5762)
17460 tg3_flag_set(tp, PTP_CAPABLE);
17462 if (tg3_flag(tp, 5717_PLUS)) {
17463 /* Resume a low-power mode */
17464 tg3_frob_aux_power(tp, false);
17467 tg3_timer_init(tp);
17469 tg3_carrier_off(tp);
17471 err = register_netdev(dev);
17473 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17474 goto err_out_apeunmap;
17477 netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17478 tp->board_part_number,
17479 tg3_chip_rev_id(tp),
17480 tg3_bus_string(tp, str),
17483 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17484 struct phy_device *phydev;
17485 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17487 "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17488 phydev->drv->name, dev_name(&phydev->dev));
17492 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17493 ethtype = "10/100Base-TX";
17494 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17495 ethtype = "1000Base-SX";
17497 ethtype = "10/100/1000Base-T";
17499 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17500 "(WireSpeed[%d], EEE[%d])\n",
17501 tg3_phy_string(tp), ethtype,
17502 (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17503 (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17506 netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17507 (dev->features & NETIF_F_RXCSUM) != 0,
17508 tg3_flag(tp, USE_LINKCHG_REG) != 0,
17509 (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17510 tg3_flag(tp, ENABLE_ASF) != 0,
17511 tg3_flag(tp, TSO_CAPABLE) != 0);
17512 netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17514 pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17515 ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17517 pci_save_state(pdev);
17523 iounmap(tp->aperegs);
17524 tp->aperegs = NULL;
17536 err_out_power_down:
17537 pci_set_power_state(pdev, PCI_D3hot);
17540 pci_release_regions(pdev);
17542 err_out_disable_pdev:
17543 pci_disable_device(pdev);
17544 pci_set_drvdata(pdev, NULL);
17548 static void tg3_remove_one(struct pci_dev *pdev)
17550 struct net_device *dev = pci_get_drvdata(pdev);
17553 struct tg3 *tp = netdev_priv(dev);
17555 release_firmware(tp->fw);
17557 tg3_reset_task_cancel(tp);
17559 if (tg3_flag(tp, USE_PHYLIB)) {
17564 unregister_netdev(dev);
17566 iounmap(tp->aperegs);
17567 tp->aperegs = NULL;
17574 pci_release_regions(pdev);
17575 pci_disable_device(pdev);
17576 pci_set_drvdata(pdev, NULL);
17580 #ifdef CONFIG_PM_SLEEP
17581 static int tg3_suspend(struct device *device)
17583 struct pci_dev *pdev = to_pci_dev(device);
17584 struct net_device *dev = pci_get_drvdata(pdev);
17585 struct tg3 *tp = netdev_priv(dev);
17588 if (!netif_running(dev))
17591 tg3_reset_task_cancel(tp);
17593 tg3_netif_stop(tp);
17595 tg3_timer_stop(tp);
17597 tg3_full_lock(tp, 1);
17598 tg3_disable_ints(tp);
17599 tg3_full_unlock(tp);
17601 netif_device_detach(dev);
17603 tg3_full_lock(tp, 0);
17604 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17605 tg3_flag_clear(tp, INIT_COMPLETE);
17606 tg3_full_unlock(tp);
17608 err = tg3_power_down_prepare(tp);
17612 tg3_full_lock(tp, 0);
17614 tg3_flag_set(tp, INIT_COMPLETE);
17615 err2 = tg3_restart_hw(tp, true);
17619 tg3_timer_start(tp);
17621 netif_device_attach(dev);
17622 tg3_netif_start(tp);
17625 tg3_full_unlock(tp);
17634 static int tg3_resume(struct device *device)
17636 struct pci_dev *pdev = to_pci_dev(device);
17637 struct net_device *dev = pci_get_drvdata(pdev);
17638 struct tg3 *tp = netdev_priv(dev);
17641 if (!netif_running(dev))
17644 netif_device_attach(dev);
17646 tg3_full_lock(tp, 0);
17648 tg3_flag_set(tp, INIT_COMPLETE);
17649 err = tg3_restart_hw(tp,
17650 !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17654 tg3_timer_start(tp);
17656 tg3_netif_start(tp);
17659 tg3_full_unlock(tp);
17666 #endif /* CONFIG_PM_SLEEP */
17668 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17671 * tg3_io_error_detected - called when PCI error is detected
17672 * @pdev: Pointer to PCI device
17673 * @state: The current pci connection state
17675 * This function is called after a PCI bus error affecting
17676 * this device has been detected.
17678 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17679 pci_channel_state_t state)
17681 struct net_device *netdev = pci_get_drvdata(pdev);
17682 struct tg3 *tp = netdev_priv(netdev);
17683 pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17685 netdev_info(netdev, "PCI I/O error detected\n");
17689 if (!netif_running(netdev))
17694 tg3_netif_stop(tp);
17696 tg3_timer_stop(tp);
17698 /* Want to make sure that the reset task doesn't run */
17699 tg3_reset_task_cancel(tp);
17701 netif_device_detach(netdev);
17703 /* Clean up software state, even if MMIO is blocked */
17704 tg3_full_lock(tp, 0);
17705 tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17706 tg3_full_unlock(tp);
17709 if (state == pci_channel_io_perm_failure)
17710 err = PCI_ERS_RESULT_DISCONNECT;
17712 pci_disable_device(pdev);
17720 * tg3_io_slot_reset - called after the pci bus has been reset.
17721 * @pdev: Pointer to PCI device
17723 * Restart the card from scratch, as if from a cold-boot.
17724 * At this point, the card has exprienced a hard reset,
17725 * followed by fixups by BIOS, and has its config space
17726 * set up identically to what it was at cold boot.
17728 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17730 struct net_device *netdev = pci_get_drvdata(pdev);
17731 struct tg3 *tp = netdev_priv(netdev);
17732 pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17737 if (pci_enable_device(pdev)) {
17738 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17742 pci_set_master(pdev);
17743 pci_restore_state(pdev);
17744 pci_save_state(pdev);
17746 if (!netif_running(netdev)) {
17747 rc = PCI_ERS_RESULT_RECOVERED;
17751 err = tg3_power_up(tp);
17755 rc = PCI_ERS_RESULT_RECOVERED;
17764 * tg3_io_resume - called when traffic can start flowing again.
17765 * @pdev: Pointer to PCI device
17767 * This callback is called when the error recovery driver tells
17768 * us that its OK to resume normal operation.
17770 static void tg3_io_resume(struct pci_dev *pdev)
17772 struct net_device *netdev = pci_get_drvdata(pdev);
17773 struct tg3 *tp = netdev_priv(netdev);
17778 if (!netif_running(netdev))
17781 tg3_full_lock(tp, 0);
17782 tg3_flag_set(tp, INIT_COMPLETE);
17783 err = tg3_restart_hw(tp, true);
17785 tg3_full_unlock(tp);
17786 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17790 netif_device_attach(netdev);
17792 tg3_timer_start(tp);
17794 tg3_netif_start(tp);
17796 tg3_full_unlock(tp);
17804 static const struct pci_error_handlers tg3_err_handler = {
17805 .error_detected = tg3_io_error_detected,
17806 .slot_reset = tg3_io_slot_reset,
17807 .resume = tg3_io_resume
17810 static struct pci_driver tg3_driver = {
17811 .name = DRV_MODULE_NAME,
17812 .id_table = tg3_pci_tbl,
17813 .probe = tg3_init_one,
17814 .remove = tg3_remove_one,
17815 .err_handler = &tg3_err_handler,
17816 .driver.pm = &tg3_pm_ops,
17819 static int __init tg3_init(void)
17821 return pci_register_driver(&tg3_driver);
17824 static void __exit tg3_cleanup(void)
17826 pci_unregister_driver(&tg3_driver);
17829 module_init(tg3_init);
17830 module_exit(tg3_cleanup);