/* drivers/net/tg3.c (firefly-linux-kernel-4.4.55) */
1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005-2011 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Derived from proprietary unpublished source code,
11  *      Copyright (C) 2000-2003 Broadcom Corporation.
12  *
13  *      Permission is hereby granted for the distribution of this firmware
14  *      data in hexadecimal or equivalent format, provided this copyright
15  *      notice is accompanying it.
16  */
17
18
19 #include <linux/module.h>
20 #include <linux/moduleparam.h>
21 #include <linux/stringify.h>
22 #include <linux/kernel.h>
23 #include <linux/types.h>
24 #include <linux/compiler.h>
25 #include <linux/slab.h>
26 #include <linux/delay.h>
27 #include <linux/in.h>
28 #include <linux/init.h>
29 #include <linux/ioport.h>
30 #include <linux/pci.h>
31 #include <linux/netdevice.h>
32 #include <linux/etherdevice.h>
33 #include <linux/skbuff.h>
34 #include <linux/ethtool.h>
35 #include <linux/mdio.h>
36 #include <linux/mii.h>
37 #include <linux/phy.h>
38 #include <linux/brcmphy.h>
39 #include <linux/if_vlan.h>
40 #include <linux/ip.h>
41 #include <linux/tcp.h>
42 #include <linux/workqueue.h>
43 #include <linux/prefetch.h>
44 #include <linux/dma-mapping.h>
45 #include <linux/firmware.h>
46
47 #include <net/checksum.h>
48 #include <net/ip.h>
49
50 #include <asm/system.h>
51 #include <linux/io.h>
52 #include <asm/byteorder.h>
53 #include <linux/uaccess.h>
54
55 #ifdef CONFIG_SPARC
56 #include <asm/idprom.h>
57 #include <asm/prom.h>
58 #endif
59
60 #define BAR_0   0
61 #define BAR_2   2
62
63 #include "tg3.h"
64
65 /* Functions & macros to verify TG3_FLAGS types */
66
/* Return nonzero if @flag is set in the driver-state bitmap @bits. */
static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
	return test_bit(flag, bits);
}
71
/* Set @flag in the driver-state bitmap @bits. */
static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
	set_bit(flag, bits);
}
76
/* Clear @flag in the driver-state bitmap @bits. */
static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
	clear_bit(flag, bits);
}
81
82 #define tg3_flag(tp, flag)                              \
83         _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
84 #define tg3_flag_set(tp, flag)                          \
85         _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
86 #define tg3_flag_clear(tp, flag)                        \
87         _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)
88
89 #define DRV_MODULE_NAME         "tg3"
90 #define TG3_MAJ_NUM                     3
91 #define TG3_MIN_NUM                     119
92 #define DRV_MODULE_VERSION      \
93         __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
94 #define DRV_MODULE_RELDATE      "May 18, 2011"
95
96 #define TG3_DEF_MAC_MODE        0
97 #define TG3_DEF_RX_MODE         0
98 #define TG3_DEF_TX_MODE         0
99 #define TG3_DEF_MSG_ENABLE        \
100         (NETIF_MSG_DRV          | \
101          NETIF_MSG_PROBE        | \
102          NETIF_MSG_LINK         | \
103          NETIF_MSG_TIMER        | \
104          NETIF_MSG_IFDOWN       | \
105          NETIF_MSG_IFUP         | \
106          NETIF_MSG_RX_ERR       | \
107          NETIF_MSG_TX_ERR)
108
109 /* length of time before we decide the hardware is borked,
110  * and dev->tx_timeout() should be called to fix the problem
111  */
112
113 #define TG3_TX_TIMEOUT                  (5 * HZ)
114
115 /* hardware minimum and maximum for a single frame's data payload */
116 #define TG3_MIN_MTU                     60
117 #define TG3_MAX_MTU(tp) \
118         (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)
119
120 /* These numbers seem to be hard coded in the NIC firmware somehow.
121  * You can't change the ring sizes, but you can change where you place
122  * them in the NIC onboard memory.
123  */
124 #define TG3_RX_STD_RING_SIZE(tp) \
125         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
126          TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
127 #define TG3_DEF_RX_RING_PENDING         200
128 #define TG3_RX_JMB_RING_SIZE(tp) \
129         (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
130          TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
131 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
132 #define TG3_RSS_INDIR_TBL_SIZE          128
133
134 /* Do not place this n-ring entries value into the tp struct itself,
135  * we really want to expose these constants to GCC so that modulo et
136  * al.  operations are done with shifts and masks instead of with
137  * hw multiply/modulo instructions.  Another solution would be to
138  * replace things like '% foo' with '& (foo - 1)'.
139  */
140
141 #define TG3_TX_RING_SIZE                512
142 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
143
144 #define TG3_RX_STD_RING_BYTES(tp) \
145         (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
146 #define TG3_RX_JMB_RING_BYTES(tp) \
147         (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
148 #define TG3_RX_RCB_RING_BYTES(tp) \
149         (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
150 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
151                                  TG3_TX_RING_SIZE)
152 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
153
154 #define TG3_DMA_BYTE_ENAB               64
155
156 #define TG3_RX_STD_DMA_SZ               1536
157 #define TG3_RX_JMB_DMA_SZ               9046
158
159 #define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)
160
161 #define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
162 #define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
163
164 #define TG3_RX_STD_BUFF_RING_SIZE(tp) \
165         (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))
166
167 #define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
168         (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))
169
170 /* Due to a hardware bug, the 5701 can only DMA to memory addresses
171  * that are at least dword aligned when used in PCIX mode.  The driver
172  * works around this bug by double copying the packet.  This workaround
173  * is built into the normal double copy length check for efficiency.
174  *
175  * However, the double copy is only necessary on those architectures
176  * where unaligned memory accesses are inefficient.  For those architectures
177  * where unaligned memory accesses incur little penalty, we can reintegrate
178  * the 5701 in the normal rx path.  Doing so saves a device structure
179  * dereference by hardcoding the double copy threshold in place.
180  */
181 #define TG3_RX_COPY_THRESHOLD           256
182 #if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
183         #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
184 #else
185         #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
186 #endif
187
188 /* minimum number of free TX descriptors required to wake up TX process */
189 #define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
190
191 #define TG3_RAW_IP_ALIGN 2
192
193 #define TG3_FW_UPDATE_TIMEOUT_SEC       5
194
195 #define FIRMWARE_TG3            "tigon/tg3.bin"
196 #define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
197 #define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"
198
/* Human-readable driver identification string built from the macros above. */
static char version[] __devinitdata =
	DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Firmware blobs this driver may request at runtime. */
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;	/* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
213
/* PCI vendor/device IDs this driver claims; the all-zero entry
 * terminates the table.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
	{PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
	{PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
	{PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
	{}
};
298
299 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
300
/* Statistic name strings exported to userspace (ethtool -S).
 * NOTE(review): the order presumably must match the order in which the
 * stat values are written into the ethtool buffer — confirm against the
 * get_ethtool_stats implementation before reordering.
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "rx_octets" },
	{ "rx_fragments" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "rx_fcs_errors" },
	{ "rx_align_errors" },
	{ "rx_xon_pause_rcvd" },
	{ "rx_xoff_pause_rcvd" },
	{ "rx_mac_ctrl_rcvd" },
	{ "rx_xoff_entered" },
	{ "rx_frame_too_long_errors" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_in_length_errors" },
	{ "rx_out_length_errors" },
	{ "rx_64_or_less_octet_packets" },
	{ "rx_65_to_127_octet_packets" },
	{ "rx_128_to_255_octet_packets" },
	{ "rx_256_to_511_octet_packets" },
	{ "rx_512_to_1023_octet_packets" },
	{ "rx_1024_to_1522_octet_packets" },
	{ "rx_1523_to_2047_octet_packets" },
	{ "rx_2048_to_4095_octet_packets" },
	{ "rx_4096_to_8191_octet_packets" },
	{ "rx_8192_to_9022_octet_packets" },

	{ "tx_octets" },
	{ "tx_collisions" },

	{ "tx_xon_sent" },
	{ "tx_xoff_sent" },
	{ "tx_flow_control" },
	{ "tx_mac_errors" },
	{ "tx_single_collisions" },
	{ "tx_mult_collisions" },
	{ "tx_deferred" },
	{ "tx_excessive_collisions" },
	{ "tx_late_collisions" },
	{ "tx_collide_2times" },
	{ "tx_collide_3times" },
	{ "tx_collide_4times" },
	{ "tx_collide_5times" },
	{ "tx_collide_6times" },
	{ "tx_collide_7times" },
	{ "tx_collide_8times" },
	{ "tx_collide_9times" },
	{ "tx_collide_10times" },
	{ "tx_collide_11times" },
	{ "tx_collide_12times" },
	{ "tx_collide_13times" },
	{ "tx_collide_14times" },
	{ "tx_collide_15times" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_carrier_sense_errors" },
	{ "tx_discards" },
	{ "tx_errors" },

	{ "dma_writeq_full" },
	{ "dma_write_prioq_full" },
	{ "rxbds_empty" },
	{ "rx_discards" },
	{ "rx_errors" },
	{ "rx_threshold_hit" },

	{ "dma_readq_full" },
	{ "dma_read_prioq_full" },
	{ "tx_comp_queue_full" },

	{ "ring_set_send_prod_index" },
	{ "ring_status_update" },
	{ "nic_irqs" },
	{ "nic_avoided_irqs" },
	{ "nic_tx_threshold_hit" },

	{ "mbuf_lwm_thresh_hit" },
};
383
384 #define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
385
386
/* Self-test name strings reported to userspace via ethtool; each entry
 * is tagged with whether the test can run while the interface is up
 * (online) or requires it to be down (offline).
 */
static const struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
	{ "nvram test     (online) " },
	{ "link test      (online) " },
	{ "register test  (offline)" },
	{ "memory test    (offline)" },
	{ "loopback test  (offline)" },
	{ "interrupt test (offline)" },
};
397
398 #define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)
399
400
/* Plain 32-bit MMIO write to register offset @off in the BAR 0 window. */
static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
}
405
/* Plain 32-bit MMIO read from register offset @off in the BAR 0 window. */
static u32 tg3_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off);
}
410
/* 32-bit MMIO write into the APE register window (tp->aperegs). */
static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->aperegs + off);
}
415
/* 32-bit MMIO read from the APE register window (tp->aperegs). */
static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
	return readl(tp->aperegs + off);
}
420
/* Write a chip register indirectly through the PCI config-space window:
 * the target offset goes into TG3PCI_REG_BASE_ADDR, the value into
 * TG3PCI_REG_DATA.  indirect_lock serializes the two-step sequence
 * against other indirect accesses.
 */
static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
430
/* MMIO write followed by a read-back of the same register, which flushes
 * the posted write to the device before returning.
 */
static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off);
	readl(tp->regs + off);
}
436
/* Read a chip register indirectly through the PCI config-space window
 * (TG3PCI_REG_BASE_ADDR / TG3PCI_REG_DATA) under indirect_lock.
 */
static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
448
/* Write a mailbox register when direct MMIO access is unavailable.
 * Two mailboxes have dedicated shadow registers in PCI config space and
 * are written there directly; everything else goes through the indirect
 * register window at offset +0x5600.
 */
static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
		pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}
	if (off == TG3_RX_STD_PROD_IDX_REG) {
		pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
				       TG3_64BIT_REG_LOW, val);
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);

	/* In indirect mode when disabling interrupts, we also need
	 * to clear the interrupt bit in the GRC local ctrl register.
	 */
	if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
	    (val == 0x1)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
				       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
	}
}
478
/* Read a mailbox register through the indirect register window; mailbox
 * registers sit at offset +0x5600 in the indirect address space.
 */
static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
	pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
	return val;
}
490
491 /* usec_wait specifies the wait time in usec when writing to certain registers
492  * where it is unsafe to read back the register without some delay.
493  * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
494  * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
495  */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
	if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
		/* Non-posted methods */
		tp->write32(tp, off, val);
	else {
		/* Posted method */
		tg3_write32(tp, off, val);
		if (usec_wait)
			udelay(usec_wait);
		/* Read back to flush the posted write to the device. */
		tp->read32(tp, off);
	}
	/* Wait again after the read for the posted method to guarantee that
	 * the wait time is met.
	 */
	if (usec_wait)
		udelay(usec_wait);
}
514
/* Mailbox write followed by a read-back flush, unless the chip needs the
 * MBOX_WRITE_REORDER or ICH workarounds, in which case the read is skipped.
 */
static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
	tp->write32_mbox(tp, off, val);
	if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
		tp->read32_mbox(tp, off);
}
521
/* TX mailbox write with chip-bug workarounds: the value is written twice
 * on chips with the TXD_MBOX_HWBUG erratum, and read back on chips that
 * require MBOX_WRITE_REORDER to flush the posted write.
 */
static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
	void __iomem *mbox = tp->regs + off;
	writel(val, mbox);
	if (tg3_flag(tp, TXD_MBOX_HWBUG))
		writel(val, mbox);
	if (tg3_flag(tp, MBOX_WRITE_REORDER))
		readl(mbox);
}
531
/* 5906 variant: mailbox registers are accessed relative to GRCMBOX_BASE. */
static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
	return readl(tp->regs + off + GRCMBOX_BASE);
}
536
/* 5906 variant: mailbox registers are written relative to GRCMBOX_BASE. */
static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
	writel(val, tp->regs + off + GRCMBOX_BASE);
}
541
542 #define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
543 #define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
544 #define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
545 #define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
546 #define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)
547
548 #define tw32(reg, val)                  tp->write32(tp, reg, val)
549 #define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
550 #define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
551 #define tr32(reg)                       tp->read32(tp, reg)
552
/* Write a word into NIC on-chip SRAM at @off through the memory window
 * (base address register + data register), either via PCI config space
 * (SRAM_USE_CONFIG) or via flushed MMIO.  The window base is restored to
 * zero afterwards in both paths.
 */
static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
	unsigned long flags;

	/* 5906 quirk: the statistics-block SRAM range is not writable. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
		return;

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		tw32_f(TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
577
/* Read a word from NIC on-chip SRAM at @off through the memory window,
 * mirroring tg3_write_mem().  On 5906 the statistics-block range is not
 * accessible and reads back as zero.
 */
static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
	unsigned long flags;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
	    (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
		*val = 0;
		return;
	}

	spin_lock_irqsave(&tp->indirect_lock, flags);
	if (tg3_flag(tp, SRAM_USE_CONFIG)) {
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
		pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

		/* Always leave this as zero. */
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
	} else {
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
		*val = tr32(TG3PCI_MEM_WIN_DATA);

		/* Always leave this as zero. */
		tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
	}
	spin_unlock_irqrestore(&tp->indirect_lock, flags);
}
604
605 static void tg3_ape_lock_init(struct tg3 *tp)
606 {
607         int i;
608         u32 regbase;
609
610         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
611                 regbase = TG3_APE_LOCK_GRANT;
612         else
613                 regbase = TG3_APE_PER_LOCK_GRANT;
614
615         /* Make sure the driver hasn't any stale locks. */
616         for (i = 0; i < 8; i++)
617                 tg3_ape_write32(tp, regbase + 4 * i, APE_LOCK_GRANT_DRIVER);
618 }
619
/* Acquire an APE hardware mutex (@locknum: GRC or MEM).
 *
 * The request is posted to the lock-request register, then the grant
 * register is polled for up to ~1 ms.  On timeout the request is revoked
 * and -EBUSY is returned.  Returns 0 on success, or if the APE is not
 * enabled (no locking needed); -EINVAL for an unsupported lock number.
 */
static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
	int i, off;
	int ret = 0;
	u32 status, req, gnt;

	if (!tg3_flag(tp, ENABLE_APE))
		return 0;

	switch (locknum) {
	case TG3_APE_LOCK_GRC:
	case TG3_APE_LOCK_MEM:
		break;
	default:
		return -EINVAL;
	}

	/* 5761 uses the legacy lock register layout. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
		req = TG3_APE_LOCK_REQ;
		gnt = TG3_APE_LOCK_GRANT;
	} else {
		req = TG3_APE_PER_LOCK_REQ;
		gnt = TG3_APE_PER_LOCK_GRANT;
	}

	off = 4 * locknum;

	tg3_ape_write32(tp, req + off, APE_LOCK_REQ_DRIVER);

	/* Wait for up to 1 millisecond to acquire lock. */
	for (i = 0; i < 100; i++) {
		status = tg3_ape_read32(tp, gnt + off);
		if (status == APE_LOCK_GRANT_DRIVER)
			break;
		udelay(10);
	}

	if (status != APE_LOCK_GRANT_DRIVER) {
		/* Revoke the lock request. */
		tg3_ape_write32(tp, gnt + off,
				APE_LOCK_GRANT_DRIVER);

		ret = -EBUSY;
	}

	return ret;
}
667
668 static void tg3_ape_unlock(struct tg3 *tp, int locknum)
669 {
670         u32 gnt;
671
672         if (!tg3_flag(tp, ENABLE_APE))
673                 return;
674
675         switch (locknum) {
676         case TG3_APE_LOCK_GRC:
677         case TG3_APE_LOCK_MEM:
678                 break;
679         default:
680                 return;
681         }
682
683         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
684                 gnt = TG3_APE_LOCK_GRANT;
685         else
686                 gnt = TG3_APE_PER_LOCK_GRANT;
687
688         tg3_ape_write32(tp, gnt + 4 * locknum, APE_LOCK_GRANT_DRIVER);
689 }
690
/* Disable chip interrupts: mask the PCI interrupt line in the misc host
 * control register, then write 1 to every vector's interrupt mailbox
 * (the value used by the disable-interrupts path; see the CLEARINT
 * handling in tg3_write_indirect_mbox()).
 */
static void tg3_disable_ints(struct tg3 *tp)
{
	int i;

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
	for (i = 0; i < tp->irq_max; i++)
		tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
700
/* Re-enable chip interrupts on all vectors and kick the coalescing
 * engine so any work that arrived while interrupts were off is noticed.
 */
static void tg3_enable_ints(struct tg3 *tp)
{
	int i;

	/* Clear irq_sync before unmasking; wmb() orders this store ahead
	 * of the register writes below.
	 */
	tp->irq_sync = 0;
	wmb();

	tw32(TG3PCI_MISC_HOST_CTRL,
	     (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

	tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
		/* NOTE(review): the mailbox is written a second time in
		 * 1-shot MSI mode — presumably to re-arm the vector;
		 * confirm against the chip errata before changing.
		 */
		if (tg3_flag(tp, 1SHOT_MSI))
			tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

		tp->coal_now |= tnapi->coal_now;
	}

	/* Force an initial interrupt */
	if (!tg3_flag(tp, TAGGED_STATUS) &&
	    (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
		tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
	else
		tw32(HOSTCC_MODE, tp->coal_now);

	tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}
731
732 static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
733 {
734         struct tg3 *tp = tnapi->tp;
735         struct tg3_hw_status *sblk = tnapi->hw_status;
736         unsigned int work_exists = 0;
737
738         /* check for phy events */
739         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
740                 if (sblk->status & SD_STATUS_LINK_CHG)
741                         work_exists = 1;
742         }
743
744         /* check for TX work to do */
745         if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
746                 work_exists = 1;
747
748         /* check for RX work to do */
749         if (tnapi->rx_rcb_prod_idx &&
750             *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
751                 work_exists = 1;
752
753         return work_exists;
754 }
755
756 /* tg3_int_reenable
757  *  similar to tg3_enable_ints, but it accurately determines whether there
758  *  is new work pending and can return without flushing the PIO write
759  *  which reenables interrupts
760  */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;

	/* Acknowledge completed work by writing the last status tag;
	 * mmiowb() orders the mailbox write before any later MMIO.
	 */
	tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
	mmiowb();

	/* When doing tagged status, this work check is unnecessary.
	 * The last_tag we write above tells the chip which piece of
	 * work we've completed.
	 */
	if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
		tw32(HOSTCC_MODE, tp->coalesce_mode |
		     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}
776
/* Step the chip's core clock down via TG3PCI_CLOCK_CTRL.  No-op on
 * CPMU-equipped and 5780-class chips.  Each write uses a 40 usec wait
 * because the clock frequency is being changed (see _tw32_flush()).
 */
static void tg3_switch_clocks(struct tg3 *tp)
{
	u32 clock_ctrl;
	u32 orig_clock_ctrl;

	if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
		return;

	clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

	orig_clock_ctrl = clock_ctrl;
	/* Keep only the CLKRUN bits and the low divider field. */
	clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
		       CLOCK_CTRL_CLKRUN_OENABLE |
		       0x1f);
	tp->pci_clock_ctrl = clock_ctrl;

	if (tg3_flag(tp, 5705_PLUS)) {
		if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
		}
	} else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
		/* Drop the 44 MHz core clock in two steps through ALTCLK. */
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl |
			    (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
			    40);
		tw32_wait_f(TG3PCI_CLOCK_CTRL,
			    clock_ctrl | (CLOCK_CTRL_ALTCLK),
			    40);
	}
	tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}
809
810 #define PHY_BUSY_LOOPS  5000
811
/* Read PHY register @reg through the MAC's MII management interface.
 *
 * If MI auto-polling is enabled it is suspended for the duration of the
 * transaction so the manual MI_COM frame does not collide with hardware
 * polling, and restored afterwards.
 *
 * Returns 0 with *val holding the 16-bit register data on success, or
 * -EBUSY if the MI interface never went idle (*val is left at 0).
 */
static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	*val = 0x0;

	/* Build the clause-22 read frame: PHY address, register, command. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (MI_COM_CMD_READ | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion (up to PHY_BUSY_LOOPS * ~10 usec). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);

		if ((frame_val & MI_COM_BUSY) == 0) {
			/* Re-read after a short settle to latch the data. */
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0) {
		*val = frame_val & MI_COM_DATA_MASK;
		ret = 0;
	}

	/* Restore auto-polling if it was active on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
860
/* Write @val to PHY register @reg through the MAC's MII management
 * interface.  Mirrors tg3_readphy(): MI auto-polling is suspended around
 * the manual MI_COM transaction and restored afterwards.
 *
 * On FET-style PHYs, writes to MII_TG3_CTRL and MII_TG3_AUX_CTRL are
 * silently skipped (returns 0) — those registers are not used there.
 *
 * Returns 0 on success, -EBUSY if the MI interface never went idle.
 */
static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
	u32 frame_val;
	unsigned int loops;
	int ret;

	if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
	    (reg == MII_TG3_CTRL || reg == MII_TG3_AUX_CTRL))
		return 0;

	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE,
		     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
		udelay(80);
	}

	/* Build the clause-22 write frame: address, register, data, cmd. */
	frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
		      MI_COM_PHY_ADDR_MASK);
	frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
		      MI_COM_REG_ADDR_MASK);
	frame_val |= (val & MI_COM_DATA_MASK);
	frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

	tw32_f(MAC_MI_COM, frame_val);

	/* Poll for completion (up to PHY_BUSY_LOOPS * ~10 usec). */
	loops = PHY_BUSY_LOOPS;
	while (loops != 0) {
		udelay(10);
		frame_val = tr32(MAC_MI_COM);
		if ((frame_val & MI_COM_BUSY) == 0) {
			udelay(5);
			frame_val = tr32(MAC_MI_COM);
			break;
		}
		loops -= 1;
	}

	ret = -EBUSY;
	if (loops != 0)
		ret = 0;

	/* Restore auto-polling if it was active on entry. */
	if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
		tw32_f(MAC_MI_MODE, tp->mi_mode);
		udelay(80);
	}

	return ret;
}
909
910 static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
911 {
912         int err;
913
914         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
915         if (err)
916                 goto done;
917
918         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
919         if (err)
920                 goto done;
921
922         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
923                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
924         if (err)
925                 goto done;
926
927         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);
928
929 done:
930         return err;
931 }
932
933 static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
934 {
935         int err;
936
937         err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
938         if (err)
939                 goto done;
940
941         err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
942         if (err)
943                 goto done;
944
945         err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
946                            MII_TG3_MMD_CTRL_DATA_NOINC | devad);
947         if (err)
948                 goto done;
949
950         err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);
951
952 done:
953         return err;
954 }
955
956 static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
957 {
958         int err;
959
960         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
961         if (!err)
962                 err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);
963
964         return err;
965 }
966
967 static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
968 {
969         int err;
970
971         err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
972         if (!err)
973                 err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);
974
975         return err;
976 }
977
978 static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
979 {
980         int err;
981
982         err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
983                            (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
984                            MII_TG3_AUXCTL_SHDWSEL_MISC);
985         if (!err)
986                 err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);
987
988         return err;
989 }
990
991 static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
992 {
993         if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
994                 set |= MII_TG3_AUXCTL_MISC_WREN;
995
996         return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
997 }
998
/* Enable/disable shadow-DSP access via the AUXCTL shadow register.
 * Both expand to a tg3_phy_auxctl_write() call returning int.
 * NOTE: no trailing semicolon inside the macro bodies — the previous
 * DISABLE definition ended in ';', which expanded to a double statement
 * and would break use as an expression or in unbraced if/else bodies.
 */
#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
	tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
			     MII_TG3_AUXCTL_ACTL_TX_6DB)
1007
1008 static int tg3_bmcr_reset(struct tg3 *tp)
1009 {
1010         u32 phy_control;
1011         int limit, err;
1012
1013         /* OK, reset it, and poll the BMCR_RESET bit until it
1014          * clears or we time out.
1015          */
1016         phy_control = BMCR_RESET;
1017         err = tg3_writephy(tp, MII_BMCR, phy_control);
1018         if (err != 0)
1019                 return -EBUSY;
1020
1021         limit = 5000;
1022         while (limit--) {
1023                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1024                 if (err != 0)
1025                         return -EBUSY;
1026
1027                 if ((phy_control & BMCR_RESET) == 0) {
1028                         udelay(40);
1029                         break;
1030                 }
1031                 udelay(10);
1032         }
1033         if (limit < 0)
1034                 return -EBUSY;
1035
1036         return 0;
1037 }
1038
1039 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1040 {
1041         struct tg3 *tp = bp->priv;
1042         u32 val;
1043
1044         spin_lock_bh(&tp->lock);
1045
1046         if (tg3_readphy(tp, reg, &val))
1047                 val = -EIO;
1048
1049         spin_unlock_bh(&tp->lock);
1050
1051         return val;
1052 }
1053
1054 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1055 {
1056         struct tg3 *tp = bp->priv;
1057         u32 ret = 0;
1058
1059         spin_lock_bh(&tp->lock);
1060
1061         if (tg3_writephy(tp, reg, val))
1062                 ret = -EIO;
1063
1064         spin_unlock_bh(&tp->lock);
1065
1066         return ret;
1067 }
1068
/* mii_bus .reset callback — intentionally a no-op; always succeeds. */
static int tg3_mdio_reset(struct mii_bus *bp)
{
	return 0;
}
1073
/* Program the 5785 MAC's PHY-interface registers (MAC_PHYCFG1/2 and
 * MAC_EXT_RGMII_MODE) to match the attached PHY: per-model LED modes,
 * and for RGMII attachments the in-band status signalling and external
 * RX-decode / TX-status options selected by the RGMII_* flags.
 */
static void tg3_mdio_config_5785(struct tg3 *tp)
{
	u32 val;
	struct phy_device *phydev;

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
	/* Pick LED modes by PHY model; unknown PHYs get no reconfig. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		val = MAC_PHYCFG2_50610_LED_MODES;
		break;
	case PHY_ID_BCMAC131:
		val = MAC_PHYCFG2_AC131_LED_MODES;
		break;
	case PHY_ID_RTL8211C:
		val = MAC_PHYCFG2_RTL8211C_LED_MODES;
		break;
	case PHY_ID_RTL8201E:
		val = MAC_PHYCFG2_RTL8201E_LED_MODES;
		break;
	default:
		return;
	}

	/* Non-RGMII PHYs only need LED modes and RX/TX clock timeouts. */
	if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
		tw32(MAC_PHYCFG2, val);

		val = tr32(MAC_PHYCFG1);
		val &= ~(MAC_PHYCFG1_RGMII_INT |
			 MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
		val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
		tw32(MAC_PHYCFG1, val);

		return;
	}

	/* RGMII: enable in-band status unless explicitly disabled. */
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
		val |= MAC_PHYCFG2_EMODE_MASK_MASK |
		       MAC_PHYCFG2_FMODE_MASK_MASK |
		       MAC_PHYCFG2_GMODE_MASK_MASK |
		       MAC_PHYCFG2_ACT_MASK_MASK   |
		       MAC_PHYCFG2_QUAL_MASK_MASK |
		       MAC_PHYCFG2_INBAND_ENABLE;

	tw32(MAC_PHYCFG2, val);

	/* PHYCFG1: clock timeouts, RGMII interrupt routing, and the
	 * external in-band RX-decode / TX-status options.
	 */
	val = tr32(MAC_PHYCFG1);
	val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
		 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
	}
	val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
	       MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
	tw32(MAC_PHYCFG1, val);

	/* Mirror the same in-band selections into the external RGMII
	 * mode register.
	 */
	val = tr32(MAC_EXT_RGMII_MODE);
	val &= ~(MAC_RGMII_MODE_RX_INT_B |
		 MAC_RGMII_MODE_RX_QUALITY |
		 MAC_RGMII_MODE_RX_ACTIVITY |
		 MAC_RGMII_MODE_RX_ENG_DET |
		 MAC_RGMII_MODE_TX_ENABLE |
		 MAC_RGMII_MODE_TX_LOWPWR |
		 MAC_RGMII_MODE_TX_RESET);
	if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			val |= MAC_RGMII_MODE_RX_INT_B |
			       MAC_RGMII_MODE_RX_QUALITY |
			       MAC_RGMII_MODE_RX_ACTIVITY |
			       MAC_RGMII_MODE_RX_ENG_DET;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			val |= MAC_RGMII_MODE_TX_ENABLE |
			       MAC_RGMII_MODE_TX_LOWPWR |
			       MAC_RGMII_MODE_TX_RESET;
	}
	tw32(MAC_EXT_RGMII_MODE, val);
}
1154
/* Disable MI auto-polling so PHY accesses are driver-driven, and
 * reapply the 5785 PHY-interface configuration if the mdio bus has
 * already been initialized.
 */
static void tg3_mdio_start(struct tg3 *tp)
{
	tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
	tw32_f(MAC_MI_MODE, tp->mi_mode);
	udelay(80);

	if (tg3_flag(tp, MDIOBUS_INITED) &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);
}
1165
/* Set up MDIO access to the PHY.
 *
 * Chooses the PHY address (PCI-function based on 5717-class parts, with
 * a +7 offset when the port straps as serdes), starts the MDIO hardware,
 * and — when phylib is in use and the bus is not yet registered —
 * allocates and registers an mdio bus restricted to that single address,
 * then applies per-PHY interface modes and dev_flags.
 *
 * Returns 0 on success or a negative errno.
 */
static int tg3_mdio_init(struct tg3 *tp)
{
	int i;
	u32 reg;
	struct phy_device *phydev;

	if (tg3_flag(tp, 5717_PLUS)) {
		u32 is_serdes;

		tp->phy_addr = PCI_FUNC(tp->pdev->devfn) + 1;

		/* The serdes strap lives in a different register on
		 * 5717 A0 silicon.
		 */
		if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
			is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
		else
			is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
				    TG3_CPMU_PHY_STRAP_IS_SERDES;
		if (is_serdes)
			tp->phy_addr += 7;
	} else
		tp->phy_addr = TG3_PHY_MII_ADDR;

	tg3_mdio_start(tp);

	if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
		return 0;

	tp->mdio_bus = mdiobus_alloc();
	if (tp->mdio_bus == NULL)
		return -ENOMEM;

	/* Bus id is derived from the PCI bus/devfn; only the single
	 * TG3_PHY_MII_ADDR address is scanned (phy_mask excludes the
	 * rest), and all PHYs are polled rather than interrupt driven.
	 */
	tp->mdio_bus->name     = "tg3 mdio bus";
	snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
		 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
	tp->mdio_bus->priv     = tp;
	tp->mdio_bus->parent   = &tp->pdev->dev;
	tp->mdio_bus->read     = &tg3_mdio_read;
	tp->mdio_bus->write    = &tg3_mdio_write;
	tp->mdio_bus->reset    = &tg3_mdio_reset;
	tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
	tp->mdio_bus->irq      = &tp->mdio_irq[0];

	for (i = 0; i < PHY_MAX_ADDR; i++)
		tp->mdio_bus->irq[i] = PHY_POLL;

	/* The bus registration will look for all the PHYs on the mdio bus.
	 * Unfortunately, it does not ensure the PHY is powered up before
	 * accessing the PHY ID registers.  A chip reset is the
	 * quickest way to bring the device back to an operational state..
	 */
	if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
		tg3_bmcr_reset(tp);

	i = mdiobus_register(tp->mdio_bus);
	if (i) {
		dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
		mdiobus_free(tp->mdio_bus);
		return i;
	}

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	if (!phydev || !phydev->drv) {
		dev_warn(&tp->pdev->dev, "No PHY devices\n");
		mdiobus_unregister(tp->mdio_bus);
		mdiobus_free(tp->mdio_bus);
		return -ENODEV;
	}

	/* Per-PHY interface mode and phylib dev_flags. */
	switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
	case PHY_ID_BCM57780:
		phydev->interface = PHY_INTERFACE_MODE_GMII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		break;
	case PHY_ID_BCM50610:
	case PHY_ID_BCM50610M:
		phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
				     PHY_BRCM_RX_REFCLK_UNUSED |
				     PHY_BRCM_DIS_TXCRXC_NOENRGY |
				     PHY_BRCM_AUTO_PWRDWN_ENABLE;
		if (tg3_flag(tp, RGMII_INBAND_DISABLE))
			phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
		if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
			phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
		/* fallthru */
	case PHY_ID_RTL8211C:
		phydev->interface = PHY_INTERFACE_MODE_RGMII;
		break;
	case PHY_ID_RTL8201E:
	case PHY_ID_BCMAC131:
		phydev->interface = PHY_INTERFACE_MODE_MII;
		phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
		tp->phy_flags |= TG3_PHYFLG_IS_FET;
		break;
	}

	tg3_flag_set(tp, MDIOBUS_INITED);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
		tg3_mdio_config_5785(tp);

	return 0;
}
1270
1271 static void tg3_mdio_fini(struct tg3 *tp)
1272 {
1273         if (tg3_flag(tp, MDIOBUS_INITED)) {
1274                 tg3_flag_clear(tp, MDIOBUS_INITED);
1275                 mdiobus_unregister(tp->mdio_bus);
1276                 mdiobus_free(tp->mdio_bus);
1277         }
1278 }
1279
/* tp->lock is held. */
/* Ring the RX CPU "driver event" doorbell so the firmware notices a new
 * command in the mailbox, and record the time for
 * tg3_wait_for_event_ack() to bound its wait against.
 */
static inline void tg3_generate_fw_event(struct tg3 *tp)
{
	u32 val;

	val = tr32(GRC_RX_CPU_EVENT);
	val |= GRC_RX_CPU_DRIVER_EVENT;
	tw32_f(GRC_RX_CPU_EVENT, val);

	tp->last_event_jiffies = jiffies;
}
1291
1292 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1293
/* tp->lock is held. */
/* Wait (bounded by TG3_FW_EVENT_TIMEOUT_USEC, measured from the last
 * tg3_generate_fw_event()) for the firmware to clear the driver-event
 * bit, i.e. to acknowledge the previous command.
 */
static void tg3_wait_for_event_ack(struct tg3 *tp)
{
	int i;
	unsigned int delay_cnt;
	long time_remain;

	/* If enough time has passed, no wait is necessary. */
	time_remain = (long)(tp->last_event_jiffies + 1 +
		      usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
		      (long)jiffies;
	if (time_remain < 0)
		return;

	/* Check if we can shorten the wait time. */
	delay_cnt = jiffies_to_usecs(time_remain);
	if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
		delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
	/* Poll in ~8 usec steps (matches the udelay below). */
	delay_cnt = (delay_cnt >> 3) + 1;

	for (i = 0; i < delay_cnt; i++) {
		if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
			break;
		udelay(8);
	}
}
1320
/* tp->lock is held. */
/* Report the current link state to the management firmware on
 * 5780-class, ASF-enabled devices: waits for the previous event to be
 * acked, then writes a 14-byte snapshot of the PHY registers
 * (BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000, PHYADDR) into the
 * firmware data mailbox and rings the event doorbell.  Failed PHY reads
 * leave the corresponding half-word as 0.
 */
static void tg3_ump_link_report(struct tg3 *tp)
{
	u32 reg;
	u32 val;

	if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
		return;

	tg3_wait_for_event_ack(tp);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);

	tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);

	val = 0;
	if (!tg3_readphy(tp, MII_BMCR, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_BMSR, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, val);

	val = 0;
	if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
		val = reg << 16;
	if (!tg3_readphy(tp, MII_LPA, &reg))
		val |= (reg & 0xffff);
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 4, val);

	val = 0;
	/* Gigabit control/status registers do not exist on MII serdes. */
	if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
		if (!tg3_readphy(tp, MII_CTRL1000, &reg))
			val = reg << 16;
		if (!tg3_readphy(tp, MII_STAT1000, &reg))
			val |= (reg & 0xffff);
	}
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 8, val);

	if (!tg3_readphy(tp, MII_PHYADDR, &reg))
		val = reg << 16;
	else
		val = 0;
	tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 12, val);

	tg3_generate_fw_event(tp);
}
1367
1368 static void tg3_link_report(struct tg3 *tp)
1369 {
1370         if (!netif_carrier_ok(tp->dev)) {
1371                 netif_info(tp, link, tp->dev, "Link is down\n");
1372                 tg3_ump_link_report(tp);
1373         } else if (netif_msg_link(tp)) {
1374                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1375                             (tp->link_config.active_speed == SPEED_1000 ?
1376                              1000 :
1377                              (tp->link_config.active_speed == SPEED_100 ?
1378                               100 : 10)),
1379                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1380                              "full" : "half"));
1381
1382                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1383                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1384                             "on" : "off",
1385                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1386                             "on" : "off");
1387
1388                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1389                         netdev_info(tp->dev, "EEE is %s\n",
1390                                     tp->setlpicnt ? "enabled" : "disabled");
1391
1392                 tg3_ump_link_report(tp);
1393         }
1394 }
1395
1396 static u16 tg3_advert_flowctrl_1000T(u8 flow_ctrl)
1397 {
1398         u16 miireg;
1399
1400         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1401                 miireg = ADVERTISE_PAUSE_CAP;
1402         else if (flow_ctrl & FLOW_CTRL_TX)
1403                 miireg = ADVERTISE_PAUSE_ASYM;
1404         else if (flow_ctrl & FLOW_CTRL_RX)
1405                 miireg = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1406         else
1407                 miireg = 0;
1408
1409         return miireg;
1410 }
1411
1412 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1413 {
1414         u16 miireg;
1415
1416         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1417                 miireg = ADVERTISE_1000XPAUSE;
1418         else if (flow_ctrl & FLOW_CTRL_TX)
1419                 miireg = ADVERTISE_1000XPSE_ASYM;
1420         else if (flow_ctrl & FLOW_CTRL_RX)
1421                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1422         else
1423                 miireg = 0;
1424
1425         return miireg;
1426 }
1427
1428 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1429 {
1430         u8 cap = 0;
1431
1432         if (lcladv & ADVERTISE_1000XPAUSE) {
1433                 if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1434                         if (rmtadv & LPA_1000XPAUSE)
1435                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1436                         else if (rmtadv & LPA_1000XPAUSE_ASYM)
1437                                 cap = FLOW_CTRL_RX;
1438                 } else {
1439                         if (rmtadv & LPA_1000XPAUSE)
1440                                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1441                 }
1442         } else if (lcladv & ADVERTISE_1000XPSE_ASYM) {
1443                 if ((rmtadv & LPA_1000XPAUSE) && (rmtadv & LPA_1000XPAUSE_ASYM))
1444                         cap = FLOW_CTRL_TX;
1445         }
1446
1447         return cap;
1448 }
1449
1450 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1451 {
1452         u8 autoneg;
1453         u8 flowctrl = 0;
1454         u32 old_rx_mode = tp->rx_mode;
1455         u32 old_tx_mode = tp->tx_mode;
1456
1457         if (tg3_flag(tp, USE_PHYLIB))
1458                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1459         else
1460                 autoneg = tp->link_config.autoneg;
1461
1462         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1463                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1464                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1465                 else
1466                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1467         } else
1468                 flowctrl = tp->link_config.flowctrl;
1469
1470         tp->link_config.active_flowctrl = flowctrl;
1471
1472         if (flowctrl & FLOW_CTRL_RX)
1473                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1474         else
1475                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1476
1477         if (old_rx_mode != tp->rx_mode)
1478                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1479
1480         if (flowctrl & FLOW_CTRL_TX)
1481                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1482         else
1483                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1484
1485         if (old_tx_mode != tp->tx_mode)
1486                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1487 }
1488
/* phylib link-change callback (registered via phy_connect() in
 * tg3_phy_init()).  Mirrors the PHY's current speed/duplex/pause state
 * into the MAC under tp->lock: port mode and half-duplex bit in
 * MAC_MODE, link-status attention in MAC_MI_STAT (5785 only), slot time
 * in MAC_TX_LENGTHS, and the resolved flow-control settings.  Emits a
 * link message if anything user-visible changed.
 */
static void tg3_adjust_link(struct net_device *dev)
{
	u8 oldflowctrl, linkmesg = 0;
	u32 mac_mode, lcl_adv, rmt_adv;
	struct tg3 *tp = netdev_priv(dev);
	struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	spin_lock_bh(&tp->lock);

	mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
				    MAC_MODE_HALF_DUPLEX);

	oldflowctrl = tp->link_config.active_flowctrl;

	if (phydev->link) {
		lcl_adv = 0;
		rmt_adv = 0;

		/* Select the MAC port mode for the negotiated speed. */
		if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else if (phydev->speed == SPEED_1000 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_MII;

		/* Flow control only applies in full duplex. */
		if (phydev->duplex == DUPLEX_HALF)
			mac_mode |= MAC_MODE_HALF_DUPLEX;
		else {
			lcl_adv = tg3_advert_flowctrl_1000T(
				  tp->link_config.flowctrl);

			if (phydev->pause)
				rmt_adv = LPA_PAUSE_CAP;
			if (phydev->asym_pause)
				rmt_adv |= LPA_PAUSE_ASYM;
		}

		tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
	} else
		mac_mode |= MAC_MODE_PORT_MODE_GMII;

	if (mac_mode != tp->mac_mode) {
		tp->mac_mode = mac_mode;
		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
		if (phydev->speed == SPEED_10)
			tw32(MAC_MI_STAT,
			     MAC_MI_STAT_10MBPS_MODE |
			     MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
		else
			tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
	}

	/* 1000 Mbps half duplex uses an extended slot time. */
	if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
	else
		tw32(MAC_TX_LENGTHS,
		     ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
		      (6 << TX_LENGTHS_IPG_SHIFT) |
		      (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));

	/* Report only when something user-visible actually changed. */
	if ((phydev->link && tp->link_config.active_speed == SPEED_INVALID) ||
	    (!phydev->link && tp->link_config.active_speed != SPEED_INVALID) ||
	    phydev->speed != tp->link_config.active_speed ||
	    phydev->duplex != tp->link_config.active_duplex ||
	    oldflowctrl != tp->link_config.active_flowctrl)
		linkmesg = 1;

	tp->link_config.active_speed = phydev->speed;
	tp->link_config.active_duplex = phydev->duplex;

	spin_unlock_bh(&tp->lock);

	if (linkmesg)
		tg3_link_report(tp);
}
1572
/* Connect the PHY through phylib.  Resets the PHY to a known state,
 * attaches tg3_adjust_link() as the link-change handler, and masks the
 * PHY's supported/advertised features down to what the MAC supports for
 * the PHY's interface mode.  Idempotent: returns immediately if already
 * connected.  Returns 0 on success or a negative errno.
 */
static int tg3_phy_init(struct tg3 *tp)
{
	struct phy_device *phydev;

	if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
		return 0;

	/* Bring the PHY back to a known state. */
	tg3_bmcr_reset(tp);

	phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

	/* Attach the MAC to the PHY. */
	phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
			     phydev->dev_flags, phydev->interface);
	if (IS_ERR(phydev)) {
		dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}

	/* Mask with MAC supported features. */
	switch (phydev->interface) {
	case PHY_INTERFACE_MODE_GMII:
	case PHY_INTERFACE_MODE_RGMII:
		if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
			phydev->supported &= (PHY_GBIT_FEATURES |
					      SUPPORTED_Pause |
					      SUPPORTED_Asym_Pause);
			break;
		}
		/* fallthru */
	case PHY_INTERFACE_MODE_MII:
		phydev->supported &= (PHY_BASIC_FEATURES |
				      SUPPORTED_Pause |
				      SUPPORTED_Asym_Pause);
		break;
	default:
		/* Unsupported interface mode: undo the connect. */
		phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
		return -EINVAL;
	}

	tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;

	phydev->advertising = phydev->supported;

	return 0;
}
1620
1621 static void tg3_phy_start(struct tg3 *tp)
1622 {
1623         struct phy_device *phydev;
1624
1625         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1626                 return;
1627
1628         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1629
1630         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1631                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1632                 phydev->speed = tp->link_config.orig_speed;
1633                 phydev->duplex = tp->link_config.orig_duplex;
1634                 phydev->autoneg = tp->link_config.orig_autoneg;
1635                 phydev->advertising = tp->link_config.orig_advertising;
1636         }
1637
1638         phy_start(phydev);
1639
1640         phy_start_aneg(phydev);
1641 }
1642
1643 static void tg3_phy_stop(struct tg3 *tp)
1644 {
1645         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1646                 return;
1647
1648         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1649 }
1650
1651 static void tg3_phy_fini(struct tg3 *tp)
1652 {
1653         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1654                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1655                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1656         }
1657 }
1658
1659 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1660 {
1661         u32 phytest;
1662
1663         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1664                 u32 phy;
1665
1666                 tg3_writephy(tp, MII_TG3_FET_TEST,
1667                              phytest | MII_TG3_FET_SHADOW_EN);
1668                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1669                         if (enable)
1670                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1671                         else
1672                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1673                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1674                 }
1675                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1676         }
1677 }
1678
/* Enable or disable the PHY's auto power-down (APD) feature via the
 * misc shadow register.  FET-style PHYs are handled by a separate
 * shadow-register sequence.
 */
static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
{
	u32 reg;

	/* Only 5705+ devices; skip 5717+ parts using the MII serdes. */
	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tg3_flag(tp, 5717_PLUS) &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		tg3_phy_fet_toggle_apd(tp, enable);
		return;
	}

	/* Program scratch register 5 through the misc shadow register. */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_SCR5_SEL |
	      MII_TG3_MISC_SHDW_SCR5_LPED |
	      MII_TG3_MISC_SHDW_SCR5_DLPTLM |
	      MII_TG3_MISC_SHDW_SCR5_SDTL |
	      MII_TG3_MISC_SHDW_SCR5_C125OE;
	/* DLL APD bit is set except when enabling APD on 5784 parts. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
		reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);


	/* Select the APD shadow register, set an 84ms wake timer, and
	 * switch APD on or off as requested.
	 */
	reg = MII_TG3_MISC_SHDW_WREN |
	      MII_TG3_MISC_SHDW_APD_SEL |
	      MII_TG3_MISC_SHDW_APD_WKTM_84MS;
	if (enable)
		reg |= MII_TG3_MISC_SHDW_APD_ENABLE;

	tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
}
1713
/* Force (or stop forcing) automatic MDI/MDI-X crossover detection.
 * FET-style PHYs carry the bit in a shadow register; other PHYs use
 * the aux control MISC shadow selector.  Serdes interfaces have no
 * copper pairs, so the setting does not apply to them.
 */
static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
{
	u32 phy;

	if (!tg3_flag(tp, 5705_PLUS) ||
	    (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		return;

	if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 ephy;

		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
			u32 reg = MII_TG3_FET_SHDW_MISCCTRL;

			/* Open the shadow bank, flip the MDIX bit in
			 * misc control, then restore the test register.
			 */
			tg3_writephy(tp, MII_TG3_FET_TEST,
				     ephy | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, reg, &phy)) {
				if (enable)
					phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				else
					phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
				tg3_writephy(tp, reg, phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
		}
	} else {
		int ret;

		/* Read-modify-write the force-auto-MDIX bit. */
		ret = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
		if (!ret) {
			if (enable)
				phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			else
				phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
			tg3_phy_auxctl_write(tp,
					     MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
		}
	}
}
1754
1755 static void tg3_phy_set_wirespeed(struct tg3 *tp)
1756 {
1757         int ret;
1758         u32 val;
1759
1760         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
1761                 return;
1762
1763         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
1764         if (!ret)
1765                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
1766                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
1767 }
1768
/* Distribute the one-time-programmable (OTP) calibration word cached
 * in tp->phy_otp into the PHY DSP registers.  No-op when no OTP value
 * is present.
 */
static void tg3_phy_apply_otp(struct tg3 *tp)
{
	u32 otp, phy;

	if (!tp->phy_otp)
		return;

	otp = tp->phy_otp;

	/* DSP writes need SMDSP access; bail out if we cannot get it. */
	if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
		return;

	/* AGC target */
	phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
	phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
	tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);

	/* High-pass filter settings */
	phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
	      ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);

	/* Low-pass filter disable plus ADC clock adjust */
	phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
	phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
	tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);

	/* VDAC trim */
	phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);

	/* 10BASE-T amplitude */
	phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);

	/* Resistor offsets */
	phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
	      ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
	tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
}
1805
/* Update Energy Efficient Ethernet state after a link change.  LPI
 * (low-power idle) is only left enabled when autoneg produced a
 * full-duplex 100/1000 link and the clause 45 resolution status shows
 * the link partner agreed to EEE; otherwise LPI is switched off.
 */
static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
{
	u32 val;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		return;

	tp->setlpicnt = 0;

	if (tp->link_config.autoneg == AUTONEG_ENABLE &&
	    current_link_up == 1 &&
	    tp->link_config.active_duplex == DUPLEX_FULL &&
	    (tp->link_config.active_speed == SPEED_100 ||
	     tp->link_config.active_speed == SPEED_1000)) {
		u32 eeectl;

		/* Pick the LPI exit timer appropriate to the speed. */
		if (tp->link_config.active_speed == SPEED_1000)
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
		else
			eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;

		tw32(TG3_CPMU_EEE_CTRL, eeectl);

		/* Did the link partner resolve to an EEE mode? */
		tg3_phy_cl45_read(tp, MDIO_MMD_AN,
				  TG3_CL45_D7_EEERES_STAT, &val);

		if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
		    val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
			tp->setlpicnt = 2;
	}

	/* No EEE agreement: make sure LPI is disabled. */
	if (!tp->setlpicnt) {
		val = tr32(TG3_CPMU_EEE_MODE);
		tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
	}
}
1842
/* Enable low-power idle (LPI) for EEE.  5717/5719/57765 parts need a
 * DSP register tweak first when the link is running at 1000 Mbps.
 */
static void tg3_phy_eee_enable(struct tg3 *tp)
{
	u32 val;

	if (tp->link_config.active_speed == SPEED_1000 &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0003);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	val = tr32(TG3_CPMU_EEE_MODE);
	tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
}
1859
1860 static int tg3_wait_macro_done(struct tg3 *tp)
1861 {
1862         int limit = 100;
1863
1864         while (limit--) {
1865                 u32 tmp32;
1866
1867                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
1868                         if ((tmp32 & 0x1000) == 0)
1869                                 break;
1870                 }
1871         }
1872         if (limit < 0)
1873                 return -EBUSY;
1874
1875         return 0;
1876 }
1877
/* Write a known test pattern into each of the four DSP channels and
 * read it back to verify the PHY is behaving.  On a macro timeout,
 * *resetp is set so the caller retries after another PHY reset.
 * Returns 0 on success, -EBUSY on any failure.
 */
static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
{
	static const u32 test_pat[4][6] = {
	{ 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
	{ 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
	{ 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
	{ 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
	};
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select this channel's block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);

		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
				     test_pat[chan][i]);

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Switch to a read macro on the same channel. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
		if (tg3_wait_macro_done(tp)) {
			*resetp = 1;
			return -EBUSY;
		}

		/* Read the pattern back as low/high word pairs and
		 * compare against what was written.
		 */
		for (i = 0; i < 6; i += 2) {
			u32 low, high;

			if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
			    tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
			    tg3_wait_macro_done(tp)) {
				*resetp = 1;
				return -EBUSY;
			}
			low &= 0x7fff;
			high &= 0x000f;
			if (low != test_pat[chan][i] ||
			    high != test_pat[chan][i+1]) {
				/* Mismatch: leave the DSP in a sane
				 * state before reporting failure.
				 */
				tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);

				return -EBUSY;
			}
		}
	}

	return 0;
}
1943
/* Clear the test pattern in all four DSP channels, waiting for the
 * write macro to complete after each channel.  Returns 0 on success
 * or -EBUSY if a macro times out.
 */
static int tg3_phy_reset_chanpat(struct tg3 *tp)
{
	int chan;

	for (chan = 0; chan < 4; chan++) {
		int i;

		/* Select the channel block and start a write macro. */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
			     (chan * 0x2000) | 0x0200);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
		for (i = 0; i < 6; i++)
			tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
		tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
		if (tg3_wait_macro_done(tp))
			return -EBUSY;
	}

	return 0;
}
1963
/* PHY reset workaround for 5703/5704/5705 parts: reset the PHY, force
 * a full-duplex 1000 Mbps master configuration, and repeatedly write
 * and verify DSP test patterns (up to 10 tries) before restoring the
 * original register state.
 *
 * NOTE(review): if every loop iteration bails out via continue (i.e.
 * the MII_TG3_CTRL read always fails), phy9_orig is written back
 * below while still uninitialized -- confirm whether that path can
 * occur in practice.
 */
static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
{
	u32 reg32, phy9_orig;
	int retries, do_phy_reset, err;

	retries = 10;
	do_phy_reset = 1;
	do {
		if (do_phy_reset) {
			err = tg3_bmcr_reset(tp);
			if (err)
				return err;
			do_phy_reset = 0;
		}

		/* Disable transmitter and interrupt.  */
		if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
			continue;

		reg32 |= 0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);

		/* Set full-duplex, 1000 mbps.  */
		tg3_writephy(tp, MII_BMCR,
			     BMCR_FULLDPLX | TG3_BMCR_SPEED1000);

		/* Set to master mode.  */
		if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
			continue;

		tg3_writephy(tp, MII_TG3_CTRL,
			     (MII_TG3_CTRL_AS_MASTER |
			      MII_TG3_CTRL_ENABLE_AS_MASTER));

		err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
		if (err)
			return err;

		/* Block the PHY control access.  */
		tg3_phydsp_write(tp, 0x8005, 0x0800);

		err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
		if (!err)
			break;
	} while (--retries);

	/* Clear the test patterns and restore normal DSP access. */
	err = tg3_phy_reset_chanpat(tp);
	if (err)
		return err;

	tg3_phydsp_write(tp, 0x8005, 0x0000);

	tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
	tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);

	TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);

	/* Restore the original master/slave configuration. */
	tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);

	/* Re-enable transmitter and interrupt. */
	if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
		reg32 &= ~0x3000;
		tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
	} else if (!err)
		err = -EBUSY;

	return err;
}
2031
/* Unconditionally reset the tigon3 PHY and reapply every chip- and
 * PHY-specific workaround that a reset wipes out.  Returns 0 on
 * success or a negative errno.  (The previous comment referring to a
 * FORCE argument was stale; this function takes none.)
 */
static int tg3_phy_reset(struct tg3 *tp)
{
	u32 val, cpmuctrl;
	int err;

	/* 5906: take the internal ePHY out of IDDQ (power-down). */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
	}
	/* Read BMSR twice: it is latched, so the first read clears
	 * stale status.  Two failures mean the PHY is unreachable.
	 */
	err  = tg3_readphy(tp, MII_BMSR, &val);
	err |= tg3_readphy(tp, MII_BMSR, &val);
	if (err != 0)
		return -EBUSY;

	/* The reset drops the link; report it now. */
	if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
		netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	}

	/* 5703/5704/5705 need the test-pattern reset workaround. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
		err = tg3_phy_reset_5703_4_5(tp);
		if (err)
			return err;
		goto out;
	}

	/* 5784 (non-AX): temporarily clear GPHY_10MB_RXONLY around
	 * the reset, restoring it afterwards.
	 */
	cpmuctrl = 0;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
	    GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
			tw32(TG3_CPMU_CTRL,
			     cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
	}

	err = tg3_bmcr_reset(tp);
	if (err)
		return err;

	if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
		val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
		tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);

		tw32(TG3_CPMU_CTRL, cpmuctrl);
	}

	/* 5784-AX / 5761-AX: back off the 12.5MHz MAC clock. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
		    CPMU_LSPD_1000MB_MACCLK_12_5) {
			val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
			udelay(40);
			tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
		}
	}

	/* No further fixups needed for 5717+ parts on MII serdes. */
	if (tg3_flag(tp, 5717_PLUS) &&
	    (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
		return 0;

	tg3_phy_apply_otp(tp);

	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);
	else
		tg3_phy_toggle_apd(tp, false);

out:
	/* DSP workarounds for known PHY erratum flags. */
	if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
	    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
		tg3_phydsp_write(tp, 0x201f, 0x2aaa);
		tg3_phydsp_write(tp, 0x000a, 0x0323);
		TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
	}

	if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
		tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
	}

	if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_phydsp_write(tp, 0x000a, 0x310b);
			tg3_phydsp_write(tp, 0x201f, 0x9506);
			tg3_phydsp_write(tp, 0x401f, 0x14e2);
			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	} else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
		if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
			if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
				tg3_writephy(tp, MII_TG3_TEST1,
					     MII_TG3_TEST1_TRIM_EN | 0x4);
			} else
				tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);

			TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		}
	}

	/* Set Extended packet length bit (bit 14) on all chips that */
	/* support jumbo frames */
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		/* Cannot do read-modify-write on 5401 */
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
	} else if (tg3_flag(tp, JUMBO_CAPABLE)) {
		/* Set bit 14 with read-modify-write to preserve other bits */
		err = tg3_phy_auxctl_read(tp,
					  MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
		if (!err)
			tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
					   val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
	}

	/* Set phy register 0x10 bit 0 to high fifo elasticity to support
	 * jumbo frames transmission.
	 */
	if (tg3_flag(tp, JUMBO_CAPABLE)) {
		if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* adjust output voltage */
		tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
	}

	tg3_phy_toggle_automdix(tp, 1);
	tg3_phy_set_wirespeed(tp);
	return 0;
}
2172
/* Configure the GPIO-controlled auxiliary (Vaux) power source.  Vaux
 * is kept up when this NIC -- or its on-card peer device -- needs
 * standby power for WOL or ASF management firmware; otherwise the
 * GPIOs are sequenced to release it.  Each GPIO write uses
 * tw32_wait_f() so the hardware settles between steps.
 */
static void tg3_frob_aux_power(struct tg3 *tp)
{
	bool need_vaux = false;

	/* The GPIOs do something completely different on 57765. */
	if (!tg3_flag(tp, IS_NIC) ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		return;

	/* Dual-port cards share Vaux; check the peer's needs too. */
	if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) &&
	    tp->pdev_peer != tp->pdev) {
		struct net_device *dev_peer;

		dev_peer = pci_get_drvdata(tp->pdev_peer);

		/* remove_one() may have been run on the peer. */
		if (dev_peer) {
			struct tg3 *tp_peer = netdev_priv(dev_peer);

			/* Peer is fully up: leave the GPIOs alone. */
			if (tg3_flag(tp_peer, INIT_COMPLETE))
				return;

			if (tg3_flag(tp_peer, WOL_ENABLE) ||
			    tg3_flag(tp_peer, ENABLE_ASF))
				need_vaux = true;
		}
	}

	if (tg3_flag(tp, WOL_ENABLE) || tg3_flag(tp, ENABLE_ASF))
		need_vaux = true;

	if (need_vaux) {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE0 |
				     GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OE2 |
				     GRC_LCLCTRL_GPIO_OUTPUT0 |
				     GRC_LCLCTRL_GPIO_OUTPUT1),
				    100);
		} else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
			   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
			/* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
			u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
					     GRC_LCLCTRL_GPIO_OE1 |
					     GRC_LCLCTRL_GPIO_OE2 |
					     GRC_LCLCTRL_GPIO_OUTPUT0 |
					     GRC_LCLCTRL_GPIO_OUTPUT1 |
					     tp->grc_local_ctrl;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);

			grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
			tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl, 100);
		} else {
			u32 no_gpio2;
			u32 grc_local_ctrl = 0;

			/* Workaround to prevent overdrawing Amps. */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}

			/* On 5753 and variants, GPIO2 cannot be used. */
			no_gpio2 = tp->nic_sram_data_cfg &
				    NIC_SRAM_DATA_CFG_NO_GPIO2;

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
					 GRC_LCLCTRL_GPIO_OE1 |
					 GRC_LCLCTRL_GPIO_OE2 |
					 GRC_LCLCTRL_GPIO_OUTPUT1 |
					 GRC_LCLCTRL_GPIO_OUTPUT2;
			if (no_gpio2) {
				grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
						    GRC_LCLCTRL_GPIO_OUTPUT2);
			}
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
						    grc_local_ctrl, 100);

			if (!no_gpio2) {
				grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
				tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
					    grc_local_ctrl, 100);
			}
		}
	} else {
		/* Vaux not needed: drive GPIO1 through a release pulse. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    GRC_LCLCTRL_GPIO_OE1, 100);

			tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
				    (GRC_LCLCTRL_GPIO_OE1 |
				     GRC_LCLCTRL_GPIO_OUTPUT1), 100);
		}
	}
}
2289
2290 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2291 {
2292         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2293                 return 1;
2294         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2295                 if (speed != SPEED_10)
2296                         return 1;
2297         } else if (speed == SPEED_10)
2298                 return 1;
2299
2300         return 0;
2301 }
2302
2303 static int tg3_setup_phy(struct tg3 *, int);
2304
2305 #define RESET_KIND_SHUTDOWN     0
2306 #define RESET_KIND_INIT         1
2307 #define RESET_KIND_SUSPEND      2
2308
2309 static void tg3_write_sig_post_reset(struct tg3 *, int);
2310 static int tg3_halt_cpu(struct tg3 *, u32);
2311
/* Power down the PHY (or serdes) for suspend/low-power.  The exact
 * sequence depends on the PHY type and ASIC revision; several chips
 * must not have BMCR_PDOWN written at all due to hardware bugs.
 */
static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
{
	u32 val;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
		/* 5704 serdes: hand link control to hardware autoneg
		 * and hold the serdes block in soft reset.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
			u32 serdes_cfg = tr32(MAC_SERDES_CFG);

			sg_dig_ctrl |=
				SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
			tw32(SG_DIG_CTRL, sg_dig_ctrl);
			tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
		}
		return;
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* 5906: reset the ePHY then put it into IDDQ. */
		tg3_bmcr_reset(tp);
		val = tr32(GRC_MISC_CFG);
		tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
		udelay(40);
		return;
	} else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
		u32 phytest;
		if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
			u32 phy;

			/* Stop advertising, restart autoneg, and set
			 * the standby-power-down bit via the shadow
			 * register bank.
			 */
			tg3_writephy(tp, MII_ADVERTISE, 0);
			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);

			tg3_writephy(tp, MII_TG3_FET_TEST,
				     phytest | MII_TG3_FET_SHADOW_EN);
			if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
				phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
				tg3_writephy(tp,
					     MII_TG3_FET_SHDW_AUXMODE4,
					     phy);
			}
			tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
		}
		return;
	} else if (do_low_power) {
		/* Full low-power: LEDs off, transceiver isolated. */
		tg3_writephy(tp, MII_TG3_EXT_CTRL,
			     MII_TG3_EXT_CTRL_FORCE_LED_OFF);

		val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
		      MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
		      MII_TG3_AUXCTL_PCTL_VREG_11V;
		tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
	}

	/* The PHY should not be powered down on some chips because
	 * of bugs.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
	     (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
		return;

	/* 5784-AX / 5761-AX: force the 12.5MHz MAC clock first. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
	    GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
		val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
		val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
		val |= CPMU_LSPD_1000MB_MACCLK_12_5;
		tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
	}

	tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
}
2384
2385 /* tp->lock is held. */
2386 static int tg3_nvram_lock(struct tg3 *tp)
2387 {
2388         if (tg3_flag(tp, NVRAM)) {
2389                 int i;
2390
2391                 if (tp->nvram_lock_cnt == 0) {
2392                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2393                         for (i = 0; i < 8000; i++) {
2394                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2395                                         break;
2396                                 udelay(20);
2397                         }
2398                         if (i == 8000) {
2399                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2400                                 return -ENODEV;
2401                         }
2402                 }
2403                 tp->nvram_lock_cnt++;
2404         }
2405         return 0;
2406 }
2407
2408 /* tp->lock is held. */
2409 static void tg3_nvram_unlock(struct tg3 *tp)
2410 {
2411         if (tg3_flag(tp, NVRAM)) {
2412                 if (tp->nvram_lock_cnt > 0)
2413                         tp->nvram_lock_cnt--;
2414                 if (tp->nvram_lock_cnt == 0)
2415                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2416         }
2417 }
2418
2419 /* tp->lock is held. */
2420 static void tg3_enable_nvram_access(struct tg3 *tp)
2421 {
2422         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2423                 u32 nvaccess = tr32(NVRAM_ACCESS);
2424
2425                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2426         }
2427 }
2428
2429 /* tp->lock is held. */
2430 static void tg3_disable_nvram_access(struct tg3 *tp)
2431 {
2432         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2433                 u32 nvaccess = tr32(NVRAM_ACCESS);
2434
2435                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2436         }
2437 }
2438
/* Read one 32-bit word from a legacy EEPROM via the GRC EEPROM engine.
 * Used when the device has no NVRAM interface (see tg3_nvram_read()).
 * @offset must be word-aligned and within the EEPROM address range.
 * Returns 0 on success, -EINVAL on a bad offset, -EBUSY on timeout.
 */
static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
					u32 offset, u32 *val)
{
	u32 tmp;
	int i;

	if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
		return -EINVAL;

	/* Preserve the other GRC_EEPROM_ADDR bits while clearing the
	 * address, device-id and read-direction fields we set below.
	 */
	tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
					EEPROM_ADDR_DEVID_MASK |
					EEPROM_ADDR_READ);
	/* Program address + READ and kick off the transaction (START). */
	tw32(GRC_EEPROM_ADDR,
	     tmp |
	     (0 << EEPROM_ADDR_DEVID_SHIFT) |
	     ((offset << EEPROM_ADDR_ADDR_SHIFT) &
	      EEPROM_ADDR_ADDR_MASK) |
	     EEPROM_ADDR_READ | EEPROM_ADDR_START);

	/* Poll for completion, up to ~1s in 1ms steps. */
	for (i = 0; i < 1000; i++) {
		tmp = tr32(GRC_EEPROM_ADDR);

		if (tmp & EEPROM_ADDR_COMPLETE)
			break;
		msleep(1);
	}
	if (!(tmp & EEPROM_ADDR_COMPLETE))
		return -EBUSY;

	tmp = tr32(GRC_EEPROM_DATA);

	/*
	 * The data will always be opposite the native endian
	 * format.  Perform a blind byteswap to compensate.
	 */
	*val = swab32(tmp);

	return 0;
}
2478
2479 #define NVRAM_CMD_TIMEOUT 10000
2480
2481 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2482 {
2483         int i;
2484
2485         tw32(NVRAM_CMD, nvram_cmd);
2486         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2487                 udelay(10);
2488                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2489                         udelay(10);
2490                         break;
2491                 }
2492         }
2493
2494         if (i == NVRAM_CMD_TIMEOUT)
2495                 return -EBUSY;
2496
2497         return 0;
2498 }
2499
2500 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2501 {
2502         if (tg3_flag(tp, NVRAM) &&
2503             tg3_flag(tp, NVRAM_BUFFERED) &&
2504             tg3_flag(tp, FLASH) &&
2505             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2506             (tp->nvram_jedecnum == JEDEC_ATMEL))
2507
2508                 addr = ((addr / tp->nvram_pagesize) <<
2509                         ATMEL_AT45DB0X1B_PAGE_POS) +
2510                        (addr % tp->nvram_pagesize);
2511
2512         return addr;
2513 }
2514
2515 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2516 {
2517         if (tg3_flag(tp, NVRAM) &&
2518             tg3_flag(tp, NVRAM_BUFFERED) &&
2519             tg3_flag(tp, FLASH) &&
2520             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2521             (tp->nvram_jedecnum == JEDEC_ATMEL))
2522
2523                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2524                         tp->nvram_pagesize) +
2525                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2526
2527         return addr;
2528 }
2529
/* NOTE: Data read in from NVRAM is byteswapped according to
 * the byteswapping settings for all other register accesses.
 * tg3 devices are BE devices, so on a BE machine, the data
 * returned will be exactly as it is seen in NVRAM.  On a LE
 * machine, the 32-bit value will be byteswapped.
 */
static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int ret;

	/* No NVRAM interface -- fall back to the GRC EEPROM engine. */
	if (!tg3_flag(tp, NVRAM))
		return tg3_nvram_read_using_eeprom(tp, offset, val);

	/* Translate the linear offset to the device's addressing. */
	offset = tg3_nvram_phys_addr(tp, offset);

	if (offset > NVRAM_ADDR_MSK)
		return -EINVAL;

	/* Hold the software arbitration grant for the whole access. */
	ret = tg3_nvram_lock(tp);
	if (ret)
		return ret;

	tg3_enable_nvram_access(tp);

	tw32(NVRAM_ADDR, offset);
	ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
		NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);

	if (ret == 0)
		*val = tr32(NVRAM_RDDATA);

	/* Always undo access-enable and drop the lock, even on error. */
	tg3_disable_nvram_access(tp);

	tg3_nvram_unlock(tp);

	return ret;
}
2567
2568 /* Ensures NVRAM data is in bytestream format. */
2569 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2570 {
2571         u32 v;
2572         int res = tg3_nvram_read(tp, offset, &v);
2573         if (!res)
2574                 *val = cpu_to_be32(v);
2575         return res;
2576 }
2577
/* tp->lock is held.
 *
 * Program the device MAC address registers from tp->dev->dev_addr.
 * The address is written to all four MAC_ADDR slots (slot 1 may be
 * skipped via @skip_mac_1), to the twelve extended slots on
 * 5703/5704 parts, and its byte sum seeds the TX backoff generator.
 */
static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
{
	u32 addr_high, addr_low;
	int i;

	/* Split the 6-byte address: bytes 0-1 go in the HIGH register,
	 * bytes 2-5 in the LOW register.
	 */
	addr_high = ((tp->dev->dev_addr[0] << 8) |
		     tp->dev->dev_addr[1]);
	addr_low = ((tp->dev->dev_addr[2] << 24) |
		    (tp->dev->dev_addr[3] << 16) |
		    (tp->dev->dev_addr[4] <<  8) |
		    (tp->dev->dev_addr[5] <<  0));
	for (i = 0; i < 4; i++) {
		/* Slot 1 may be owned by management firmware. */
		if (i == 1 && skip_mac_1)
			continue;
		tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
		tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
		/* These parts have twelve extra perfect-match slots. */
		for (i = 0; i < 12; i++) {
			tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
			tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
		}
	}

	/* Seed the TX backoff algorithm with the address byte sum so
	 * different stations pick different backoff slots.
	 */
	addr_high = (tp->dev->dev_addr[0] +
		     tp->dev->dev_addr[1] +
		     tp->dev->dev_addr[2] +
		     tp->dev->dev_addr[3] +
		     tp->dev->dev_addr[4] +
		     tp->dev->dev_addr[5]) &
		TX_BACKOFF_SEED_MASK;
	tw32(MAC_TX_BACKOFF_SEED, addr_high);
}
2614
/* Re-write the cached MISC_HOST_CTRL value into PCI config space so
 * that register accesses (indirect or otherwise) work after a power
 * transition may have reset the device's config registers.
 */
static void tg3_enable_register_access(struct tg3 *tp)
{
	/*
	 * Make sure register accesses (indirect or otherwise) will function
	 * correctly.
	 */
	pci_write_config_dword(tp->pdev,
			       TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
}
2624
/* Bring the device back to full power (D0).  The steps are order
 * dependent: config-space access must be restored first, then the PCI
 * power state raised, then the power source switched.  Always
 * returns 0.
 */
static int tg3_power_up(struct tg3 *tp)
{
	tg3_enable_register_access(tp);

	pci_set_power_state(tp->pdev, PCI_D0);

	/* Switch out of Vaux if it is a NIC */
	if (tg3_flag(tp, IS_NIC))
		tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);

	return 0;
}
2637
/* Prepare the chip for a low-power state: record current link
 * settings, reconfigure the PHY for Wake-on-LAN if wakeup is enabled,
 * slow or disable core clocks per chip family, and signal shutdown to
 * the firmware.  The sequence is strictly order dependent; callers
 * such as tg3_power_down() then drop the PCI power state.  Always
 * returns 0.
 */
static int tg3_power_down_prepare(struct tg3 *tp)
{
	u32 misc_host_ctrl;
	bool device_should_wake, do_low_power;

	tg3_enable_register_access(tp);

	/* Restore the CLKREQ setting. */
	if (tg3_flag(tp, CLKREQ_BUG)) {
		u16 lnkctl;

		pci_read_config_word(tp->pdev,
				     tp->pcie_cap + PCI_EXP_LNKCTL,
				     &lnkctl);
		lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
		pci_write_config_word(tp->pdev,
				      tp->pcie_cap + PCI_EXP_LNKCTL,
				      lnkctl);
	}

	/* Mask PCI interrupts while the device is being powered down. */
	misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
	tw32(TG3PCI_MISC_HOST_CTRL,
	     misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);

	device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
			     tg3_flag(tp, WOL_ENABLE);

	if (tg3_flag(tp, USE_PHYLIB)) {
		/* phylib-managed PHY: save current link parameters and
		 * restrict the advertisement to what WoL needs.
		 */
		do_low_power = false;
		if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
		    !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			struct phy_device *phydev;
			u32 phyid, advertising;

			phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;

			/* Save settings so tg3_power_up can restore them. */
			tp->link_config.orig_speed = phydev->speed;
			tp->link_config.orig_duplex = phydev->duplex;
			tp->link_config.orig_autoneg = phydev->autoneg;
			tp->link_config.orig_advertising = phydev->advertising;

			advertising = ADVERTISED_TP |
				      ADVERTISED_Pause |
				      ADVERTISED_Autoneg |
				      ADVERTISED_10baseT_Half;

			if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
				if (tg3_flag(tp, WOL_SPEED_100MB))
					advertising |=
						ADVERTISED_100baseT_Half |
						ADVERTISED_100baseT_Full |
						ADVERTISED_10baseT_Full;
				else
					advertising |= ADVERTISED_10baseT_Full;
			}

			phydev->advertising = advertising;

			phy_start_aneg(phydev);

			/* Only certain Broadcom PHY families need the
			 * driver-side low-power programming below.
			 */
			phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
			if (phyid != PHY_ID_BCMAC131) {
				phyid &= PHY_BCM_OUI_MASK;
				if (phyid == PHY_BCM_OUI_1 ||
				    phyid == PHY_BCM_OUI_2 ||
				    phyid == PHY_BCM_OUI_3)
					do_low_power = true;
			}
		}
	} else {
		do_low_power = true;

		if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
			tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
			tp->link_config.orig_speed = tp->link_config.speed;
			tp->link_config.orig_duplex = tp->link_config.duplex;
			tp->link_config.orig_autoneg = tp->link_config.autoneg;
		}

		/* Copper PHY: drop to 10/half for minimal power draw. */
		if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
			tp->link_config.speed = SPEED_10;
			tp->link_config.duplex = DUPLEX_HALF;
			tp->link_config.autoneg = AUTONEG_ENABLE;
			tg3_setup_phy(tp, 0);
		}
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		u32 val;

		val = tr32(GRC_VCPU_EXT_CTRL);
		tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
	} else if (!tg3_flag(tp, ENABLE_ASF)) {
		int i;
		u32 val;

		/* Wait (up to ~200ms) for the firmware mailbox to
		 * acknowledge before continuing the shutdown.
		 */
		for (i = 0; i < 200; i++) {
			tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
			if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
				break;
			msleep(1);
		}
	}
	if (tg3_flag(tp, WOL_CAP))
		tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
						     WOL_DRV_STATE_SHUTDOWN |
						     WOL_DRV_WOL |
						     WOL_SET_MAGIC_PKT);

	if (device_should_wake) {
		u32 mac_mode;

		if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
			if (do_low_power &&
			    !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
				/* Enable WoL power saving in the PHY. */
				tg3_phy_auxctl_write(tp,
					       MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
					       MII_TG3_AUXCTL_PCTL_WOL_EN |
					       MII_TG3_AUXCTL_PCTL_100TX_LPWR |
					       MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
				udelay(40);
			}

			if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
				mac_mode = MAC_MODE_PORT_MODE_GMII;
			else
				mac_mode = MAC_MODE_PORT_MODE_MII;

			mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700) {
				u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
					     SPEED_100 : SPEED_10;
				if (tg3_5700_link_polarity(tp, speed))
					mac_mode |= MAC_MODE_LINK_POLARITY;
				else
					mac_mode &= ~MAC_MODE_LINK_POLARITY;
			}
		} else {
			mac_mode = MAC_MODE_PORT_MODE_TBI;
		}

		if (!tg3_flag(tp, 5750_PLUS))
			tw32(MAC_LED_CTRL, tp->led_ctrl);

		mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
		if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
		    (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
			mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;

		if (tg3_flag(tp, ENABLE_APE))
			mac_mode |= MAC_MODE_APE_TX_EN |
				    MAC_MODE_APE_RX_EN |
				    MAC_MODE_TDE_ENABLE;

		tw32_f(MAC_MODE, mac_mode);
		udelay(100);

		/* Keep the receiver running so wake packets arrive. */
		tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
		udelay(10);
	}

	/* Clock management differs per chip family from here on. */
	if (!tg3_flag(tp, WOL_SPEED_100MB) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
		u32 base_val;

		base_val = tp->pci_clock_ctrl;
		base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
			     CLOCK_CTRL_TXCLK_DISABLE);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
			    CLOCK_CTRL_PWRDOWN_PLL133, 40);
	} else if (tg3_flag(tp, 5780_CLASS) ||
		   tg3_flag(tp, CPMU_PRESENT) ||
		   GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* do nothing */
	} else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
		u32 newbits1, newbits2;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
			newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
				    CLOCK_CTRL_TXCLK_DISABLE |
				    CLOCK_CTRL_ALTCLK);
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		} else if (tg3_flag(tp, 5705_PLUS)) {
			newbits1 = CLOCK_CTRL_625_CORE;
			newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
		} else {
			newbits1 = CLOCK_CTRL_ALTCLK;
			newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
		}

		/* The two writes are applied in sequence; each waits
		 * 40us for the clock switch to settle.
		 */
		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
			    40);

		tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
			    40);

		if (!tg3_flag(tp, 5705_PLUS)) {
			u32 newbits3;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
				newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
					    CLOCK_CTRL_TXCLK_DISABLE |
					    CLOCK_CTRL_44MHZ_CORE);
			} else {
				newbits3 = CLOCK_CTRL_44MHZ_CORE;
			}

			tw32_wait_f(TG3PCI_CLOCK_CTRL,
				    tp->pci_clock_ctrl | newbits3, 40);
		}
	}

	/* Only fully power the PHY off if nothing needs it awake. */
	if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
		tg3_power_down_phy(tp, do_low_power);

	tg3_frob_aux_power(tp);

	/* Workaround for unstable PLL clock */
	if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
	    (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
		u32 val = tr32(0x7d00);

		val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
		tw32(0x7d00, val);
		if (!tg3_flag(tp, ENABLE_ASF)) {
			int err;

			/* Halt the RX CPU under the NVRAM lock; skip the
			 * unlock if the lock could not be taken.
			 */
			err = tg3_nvram_lock(tp);
			tg3_halt_cpu(tp, RX_CPU_BASE);
			if (!err)
				tg3_nvram_unlock(tp);
		}
	}

	tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);

	return 0;
}
2883
/* Power the device down: run the chip-side preparation sequence,
 * arm PCI wakeup if WoL is enabled, then drop to D3hot.  The order
 * is fixed -- registers become inaccessible after the state change.
 */
static void tg3_power_down(struct tg3 *tp)
{
	tg3_power_down_prepare(tp);

	pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
	pci_set_power_state(tp->pdev, PCI_D3hot);
}
2891
2892 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
2893 {
2894         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
2895         case MII_TG3_AUX_STAT_10HALF:
2896                 *speed = SPEED_10;
2897                 *duplex = DUPLEX_HALF;
2898                 break;
2899
2900         case MII_TG3_AUX_STAT_10FULL:
2901                 *speed = SPEED_10;
2902                 *duplex = DUPLEX_FULL;
2903                 break;
2904
2905         case MII_TG3_AUX_STAT_100HALF:
2906                 *speed = SPEED_100;
2907                 *duplex = DUPLEX_HALF;
2908                 break;
2909
2910         case MII_TG3_AUX_STAT_100FULL:
2911                 *speed = SPEED_100;
2912                 *duplex = DUPLEX_FULL;
2913                 break;
2914
2915         case MII_TG3_AUX_STAT_1000HALF:
2916                 *speed = SPEED_1000;
2917                 *duplex = DUPLEX_HALF;
2918                 break;
2919
2920         case MII_TG3_AUX_STAT_1000FULL:
2921                 *speed = SPEED_1000;
2922                 *duplex = DUPLEX_FULL;
2923                 break;
2924
2925         default:
2926                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2927                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
2928                                  SPEED_10;
2929                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
2930                                   DUPLEX_HALF;
2931                         break;
2932                 }
2933                 *speed = SPEED_INVALID;
2934                 *duplex = DUPLEX_INVALID;
2935                 break;
2936         }
2937 }
2938
/* Program the PHY autonegotiation advertisement registers.
 * @advertise holds ethtool ADVERTISED_* bits; @flowctrl holds
 * FLOW_CTRL_* bits folded into the pause advertisement.  Writes
 * MII_ADVERTISE (10/100 + pause), then MII_TG3_CTRL (1000BASE-T)
 * unless the PHY is 10/100-only, then the EEE advertisement when
 * the PHY supports it.  Returns 0 or the first PHY write error.
 */
static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
{
	int err = 0;
	u32 val, new_adv;

	/* Build the 10/100 advertisement word. */
	new_adv = ADVERTISE_CSMA;
	if (advertise & ADVERTISED_10baseT_Half)
		new_adv |= ADVERTISE_10HALF;
	if (advertise & ADVERTISED_10baseT_Full)
		new_adv |= ADVERTISE_10FULL;
	if (advertise & ADVERTISED_100baseT_Half)
		new_adv |= ADVERTISE_100HALF;
	if (advertise & ADVERTISED_100baseT_Full)
		new_adv |= ADVERTISE_100FULL;

	new_adv |= tg3_advert_flowctrl_1000T(flowctrl);

	err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
	if (err)
		goto done;

	if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
		goto done;

	/* Build and write the 1000BASE-T control word. */
	new_adv = 0;
	if (advertise & ADVERTISED_1000baseT_Half)
		new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
	if (advertise & ADVERTISED_1000baseT_Full)
		new_adv |= MII_TG3_CTRL_ADV_1000_FULL;

	/* Early 5701 revisions must negotiate as master. */
	if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
	    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
		new_adv |= (MII_TG3_CTRL_AS_MASTER |
			    MII_TG3_CTRL_ENABLE_AS_MASTER);

	err = tg3_writephy(tp, MII_TG3_CTRL, new_adv);
	if (err)
		goto done;

	if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
		goto done;

	/* Disable LPI while reprogramming the EEE advertisement. */
	tw32(TG3_CPMU_EEE_MODE,
	     tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);

	err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
	if (!err) {
		u32 err2;

		/* Chip-specific DSP fixups needed for EEE. */
		switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
		case ASIC_REV_5717:
		case ASIC_REV_57765:
			if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
				tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
						 MII_TG3_DSP_CH34TP2_HIBW01);
			/* Fall through */
		case ASIC_REV_5719:
			val = MII_TG3_DSP_TAP26_ALNOKO |
			      MII_TG3_DSP_TAP26_RMRXSTO |
			      MII_TG3_DSP_TAP26_OPCSINPT;
			tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
		}

		val = 0;
		/* Advertise 100-BaseTX EEE ability */
		if (advertise & ADVERTISED_100baseT_Full)
			val |= MDIO_AN_EEE_ADV_100TX;
		/* Advertise 1000-BaseT EEE ability */
		if (advertise & ADVERTISED_1000baseT_Full)
			val |= MDIO_AN_EEE_ADV_1000T;
		err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);

		/* Always disable SMDSP access; report its error only
		 * if the EEE write itself succeeded.
		 */
		err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
		if (!err)
			err = err2;
	}

done:
	return err;
}
3019
/* Start copper-PHY link bring-up.  Programs the advertisement
 * according to the current link_config (restricted in low-power
 * mode), then either forces a specific speed/duplex or restarts
 * autonegotiation.
 */
static void tg3_phy_copper_begin(struct tg3 *tp)
{
	u32 new_adv;
	int i;

	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
		/* Low-power (WoL) mode: advertise only 10Mb modes,
		 * plus 100Mb when the WOL_SPEED_100MB flag allows it.
		 */
		new_adv = ADVERTISED_10baseT_Half |
			  ADVERTISED_10baseT_Full;
		if (tg3_flag(tp, WOL_SPEED_100MB))
			new_adv |= ADVERTISED_100baseT_Half |
				   ADVERTISED_100baseT_Full;

		tg3_phy_autoneg_cfg(tp, new_adv,
				    FLOW_CTRL_TX | FLOW_CTRL_RX);
	} else if (tp->link_config.speed == SPEED_INVALID) {
		/* No specific speed requested: advertise everything
		 * configured, minus gigabit on 10/100-only PHYs.
		 */
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			tp->link_config.advertising &=
				~(ADVERTISED_1000baseT_Half |
				  ADVERTISED_1000baseT_Full);

		tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
				    tp->link_config.flowctrl);
	} else {
		/* Asking for a specific link mode. */
		if (tp->link_config.speed == SPEED_1000) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_1000baseT_Full;
			else
				new_adv = ADVERTISED_1000baseT_Half;
		} else if (tp->link_config.speed == SPEED_100) {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_100baseT_Full;
			else
				new_adv = ADVERTISED_100baseT_Half;
		} else {
			if (tp->link_config.duplex == DUPLEX_FULL)
				new_adv = ADVERTISED_10baseT_Full;
			else
				new_adv = ADVERTISED_10baseT_Half;
		}

		tg3_phy_autoneg_cfg(tp, new_adv,
				    tp->link_config.flowctrl);
	}

	if (tp->link_config.autoneg == AUTONEG_DISABLE &&
	    tp->link_config.speed != SPEED_INVALID) {
		u32 bmcr, orig_bmcr;

		/* Forced mode: build the BMCR value by hand. */
		tp->link_config.active_speed = tp->link_config.speed;
		tp->link_config.active_duplex = tp->link_config.duplex;

		bmcr = 0;
		switch (tp->link_config.speed) {
		default:
		case SPEED_10:
			break;

		case SPEED_100:
			bmcr |= BMCR_SPEED100;
			break;

		case SPEED_1000:
			bmcr |= TG3_BMCR_SPEED1000;
			break;
		}

		if (tp->link_config.duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;

		if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
		    (bmcr != orig_bmcr)) {
			/* Drop the link via loopback and wait (up to
			 * 1500 * 10us) for link-down before writing
			 * the new forced mode.  BMSR is read twice
			 * because the latched link bit needs a second
			 * read to reflect the current state.
			 */
			tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
			for (i = 0; i < 1500; i++) {
				u32 tmp;

				udelay(10);
				if (tg3_readphy(tp, MII_BMSR, &tmp) ||
				    tg3_readphy(tp, MII_BMSR, &tmp))
					continue;
				if (!(tmp & BMSR_LSTATUS)) {
					udelay(40);
					break;
				}
			}
			tg3_writephy(tp, MII_BMCR, bmcr);
			udelay(40);
		}
	} else {
		tg3_writephy(tp, MII_BMCR,
			     BMCR_ANENABLE | BMCR_ANRESTART);
	}
}
3113
3114 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3115 {
3116         int err;
3117
3118         /* Turn off tap power management. */
3119         /* Set Extended packet length bit */
3120         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3121
3122         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3123         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3124         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3125         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3126         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3127
3128         udelay(40);
3129
3130         return err;
3131 }
3132
3133 static int tg3_copper_is_advertising_all(struct tg3 *tp, u32 mask)
3134 {
3135         u32 adv_reg, all_mask = 0;
3136
3137         if (mask & ADVERTISED_10baseT_Half)
3138                 all_mask |= ADVERTISE_10HALF;
3139         if (mask & ADVERTISED_10baseT_Full)
3140                 all_mask |= ADVERTISE_10FULL;
3141         if (mask & ADVERTISED_100baseT_Half)
3142                 all_mask |= ADVERTISE_100HALF;
3143         if (mask & ADVERTISED_100baseT_Full)
3144                 all_mask |= ADVERTISE_100FULL;
3145
3146         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
3147                 return 0;
3148
3149         if ((adv_reg & all_mask) != all_mask)
3150                 return 0;
3151         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3152                 u32 tg3_ctrl;
3153
3154                 all_mask = 0;
3155                 if (mask & ADVERTISED_1000baseT_Half)
3156                         all_mask |= ADVERTISE_1000HALF;
3157                 if (mask & ADVERTISED_1000baseT_Full)
3158                         all_mask |= ADVERTISE_1000FULL;
3159
3160                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
3161                         return 0;
3162
3163                 if ((tg3_ctrl & all_mask) != all_mask)
3164                         return 0;
3165         }
3166         return 1;
3167 }
3168
3169 static int tg3_adv_1000T_flowctrl_ok(struct tg3 *tp, u32 *lcladv, u32 *rmtadv)
3170 {
3171         u32 curadv, reqadv;
3172
3173         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
3174                 return 1;
3175
3176         curadv = *lcladv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3177         reqadv = tg3_advert_flowctrl_1000T(tp->link_config.flowctrl);
3178
3179         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3180                 if (curadv != reqadv)
3181                         return 0;
3182
3183                 if (tg3_flag(tp, PAUSE_AUTONEG))
3184                         tg3_readphy(tp, MII_LPA, rmtadv);
3185         } else {
3186                 /* Reprogram the advertisement register, even if it
3187                  * does not affect the current link.  If the link
3188                  * gets renegotiated in the future, we can save an
3189                  * additional renegotiation cycle by advertising
3190                  * it correctly in the first place.
3191                  */
3192                 if (curadv != reqadv) {
3193                         *lcladv &= ~(ADVERTISE_PAUSE_CAP |
3194                                      ADVERTISE_PAUSE_ASYM);
3195                         tg3_writephy(tp, MII_ADVERTISE, *lcladv | reqadv);
3196                 }
3197         }
3198
3199         return 1;
3200 }
3201
/* Bring up / re-evaluate the link on a copper (twisted-pair) PHY and
 * program the MAC to match: port mode (MII/GMII), duplex, 5700 link
 * polarity, flow control and carrier state.
 *
 * @force_reset: when non-zero, unconditionally reset the PHY first.
 *
 * Returns 0 on success, or a negative error from the BCM5401 DSP init
 * path.  Register access order and the udelay()s below are part of the
 * hardware contract — do not reorder.
 */
static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
{
        int current_link_up;
        u32 bmsr, val;
        u32 lcl_adv, rmt_adv;
        u16 current_speed;
        u8 current_duplex;
        int i, err;

        tw32(MAC_EVENT, 0);

        /* Acknowledge any latched link/config-change bits in the MAC
         * status register before probing the PHY.
         */
        tw32_f(MAC_STATUS,
             (MAC_STATUS_SYNC_CHANGED |
              MAC_STATUS_CFG_CHANGED |
              MAC_STATUS_MI_COMPLETION |
              MAC_STATUS_LNKSTATE_CHANGED));
        udelay(40);

        /* MI auto-polling must be off while we issue MDIO accesses
         * directly below.
         */
        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);

        /* Some third-party PHYs need to be reset on link going
         * down.
         */
        if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
            netif_carrier_ok(tp->dev)) {
                /* BMSR link status is latched-low; read twice so the
                 * second read reflects the current state.
                 */
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    !(bmsr & BMSR_LSTATUS))
                        force_reset = 1;
        }
        if (force_reset)
                tg3_phy_reset(tp);

        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
                    !tg3_flag(tp, INIT_COMPLETE))
                        bmsr = 0;

                /* BCM5401 with no link: (re)load the DSP patch, then
                 * wait up to ~10ms for link to return.
                 */
                if (!(bmsr & BMSR_LSTATUS)) {
                        err = tg3_init_5401phy_dsp(tp);
                        if (err)
                                return err;

                        tg3_readphy(tp, MII_BMSR, &bmsr);
                        for (i = 0; i < 1000; i++) {
                                udelay(10);
                                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                                    (bmsr & BMSR_LSTATUS)) {
                                        udelay(40);
                                        break;
                                }
                        }

                        /* 5401 rev B0 that lost a gigabit link needs a
                         * full PHY reset plus DSP re-init.
                         */
                        if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
                            TG3_PHY_REV_BCM5401_B0 &&
                            !(bmsr & BMSR_LSTATUS) &&
                            tp->link_config.active_speed == SPEED_1000) {
                                err = tg3_phy_reset(tp);
                                if (!err)
                                        err = tg3_init_5401phy_dsp(tp);
                                if (err)
                                        return err;
                        }
                }
        } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
                   tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
                /* 5701 {A0,B0} CRC bug workaround */
                tg3_writephy(tp, 0x15, 0x0a75);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
                tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
        }

        /* Clear pending interrupts... (read twice to flush latched
         * interrupt status)
         */
        tg3_readphy(tp, MII_TG3_ISTAT, &val);
        tg3_readphy(tp, MII_TG3_ISTAT, &val);

        /* Unmask only the link-change interrupt when using MI
         * interrupts; otherwise mask everything (non-FET PHYs).
         */
        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
                tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
        else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
                tg3_writephy(tp, MII_TG3_IMASK, ~0);

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
                if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
                        tg3_writephy(tp, MII_TG3_EXT_CTRL,
                                     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
                else
                        tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
        }

        current_link_up = 0;
        current_speed = SPEED_INVALID;
        current_duplex = DUPLEX_INVALID;

        if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
                err = tg3_phy_auxctl_read(tp,
                                          MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                          &val);
                /* If bit 10 of the misc-test shadow register is clear,
                 * set it and force a renegotiation.
                 */
                if (!err && !(val & (1 << 10))) {
                        tg3_phy_auxctl_write(tp,
                                             MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
                                             val | (1 << 10));
                        goto relink;
                }
        }

        /* Poll link status for up to ~4ms (latched BMSR: double read). */
        bmsr = 0;
        for (i = 0; i < 100; i++) {
                tg3_readphy(tp, MII_BMSR, &bmsr);
                if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
                    (bmsr & BMSR_LSTATUS))
                        break;
                udelay(40);
        }

        if (bmsr & BMSR_LSTATUS) {
                u32 aux_stat, bmcr;

                /* Wait up to ~20ms for a non-zero aux status word, then
                 * decode it into speed/duplex.
                 */
                tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
                for (i = 0; i < 2000; i++) {
                        udelay(10);
                        if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
                            aux_stat)
                                break;
                }

                tg3_aux_stat_to_speed_duplex(tp, aux_stat,
                                             &current_speed,
                                             &current_duplex);

                /* Retry BMCR reads until a plausible value appears
                 * (0 and 0x7fff presumably indicate bogus reads).
                 */
                bmcr = 0;
                for (i = 0; i < 200; i++) {
                        tg3_readphy(tp, MII_BMCR, &bmcr);
                        if (tg3_readphy(tp, MII_BMCR, &bmcr))
                                continue;
                        if (bmcr && bmcr != 0x7fff)
                                break;
                        udelay(10);
                }

                lcl_adv = 0;
                rmt_adv = 0;

                tp->link_config.active_speed = current_speed;
                tp->link_config.active_duplex = current_duplex;

                if (tp->link_config.autoneg == AUTONEG_ENABLE) {
                        /* Autoneg link counts as up only if we are
                         * advertising everything requested, including
                         * the desired flow-control bits.
                         */
                        if ((bmcr & BMCR_ANENABLE) &&
                            tg3_copper_is_advertising_all(tp,
                                                tp->link_config.advertising)) {
                                if (tg3_adv_1000T_flowctrl_ok(tp, &lcl_adv,
                                                                  &rmt_adv))
                                        current_link_up = 1;
                        }
                } else {
                        /* Forced mode: link must match the requested
                         * speed, duplex and flow control exactly.
                         */
                        if (!(bmcr & BMCR_ANENABLE) &&
                            tp->link_config.speed == current_speed &&
                            tp->link_config.duplex == current_duplex &&
                            tp->link_config.flowctrl ==
                            tp->link_config.active_flowctrl) {
                                current_link_up = 1;
                        }
                }

                if (current_link_up == 1 &&
                    tp->link_config.active_duplex == DUPLEX_FULL)
                        tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
        }

relink:
        /* No usable link (or low-power): restart PHY programming. */
        if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
                tg3_phy_copper_begin(tp);

                tg3_readphy(tp, MII_BMSR, &bmsr);
                if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
                    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        current_link_up = 1;
        }

        /* Program the MAC port mode to match the negotiated speed. */
        tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
        if (current_link_up == 1) {
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
                else
                        tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
        } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
                tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
        else
                tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;

        tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
        if (tp->link_config.active_duplex == DUPLEX_HALF)
                tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
                if (current_link_up == 1 &&
                    tg3_5700_link_polarity(tp, tp->link_config.active_speed))
                        tp->mac_mode |= MAC_MODE_LINK_POLARITY;
                else
                        tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
        }

        /* ??? Without this setting Netgear GA302T PHY does not
         * ??? send/receive packets...
         */
        if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
            tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
                tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tw32_f(MAC_MODE, tp->mac_mode);
        udelay(40);

        tg3_phy_eee_adjust(tp, current_link_up);

        if (tg3_flag(tp, USE_LINKCHG_REG)) {
                /* Polled via timer. */
                tw32_f(MAC_EVENT, 0);
        } else {
                tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
        }
        udelay(40);

        /* 5700 at gigabit on PCI-X / high-speed PCI: re-ack status and
         * write the firmware mailbox magic value.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
            current_link_up == 1 &&
            tp->link_config.active_speed == SPEED_1000 &&
            (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
                udelay(120);
                tw32_f(MAC_STATUS,
                     (MAC_STATUS_SYNC_CHANGED |
                      MAC_STATUS_CFG_CHANGED));
                udelay(40);
                tg3_write_mem(tp,
                              NIC_SRAM_FIRMWARE_MBOX,
                              NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
        }

        /* Prevent send BD corruption. */
        if (tg3_flag(tp, CLKREQ_BUG)) {
                u16 oldlnkctl, newlnkctl;

                /* CLKREQ is disabled at 10/100 and enabled at gigabit;
                 * only touch PCI config space when the bit changes.
                 */
                pci_read_config_word(tp->pdev,
                                     tp->pcie_cap + PCI_EXP_LNKCTL,
                                     &oldlnkctl);
                if (tp->link_config.active_speed == SPEED_100 ||
                    tp->link_config.active_speed == SPEED_10)
                        newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
                else
                        newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
                if (newlnkctl != oldlnkctl)
                        pci_write_config_word(tp->pdev,
                                              tp->pcie_cap + PCI_EXP_LNKCTL,
                                              newlnkctl);
        }

        /* Propagate link state to the network stack and log changes. */
        if (current_link_up != netif_carrier_ok(tp->dev)) {
                if (current_link_up)
                        netif_carrier_on(tp->dev);
                else
                        netif_carrier_off(tp->dev);
                tg3_link_report(tp);
        }

        return 0;
}
3480
/* Software state for the fiber (1000BASE-X) autonegotiation state
 * machine implemented by tg3_fiber_aneg_smachine().  The MR_* flag
 * names appear to follow the 802.3 clause 37 management-register
 * nomenclature.
 */
struct tg3_fiber_aneginfo {
        int state;                      /* one of ANEG_STATE_* below */
#define ANEG_STATE_UNKNOWN              0
#define ANEG_STATE_AN_ENABLE            1
#define ANEG_STATE_RESTART_INIT         2
#define ANEG_STATE_RESTART              3
#define ANEG_STATE_DISABLE_LINK_OK      4
#define ANEG_STATE_ABILITY_DETECT_INIT  5
#define ANEG_STATE_ABILITY_DETECT       6
#define ANEG_STATE_ACK_DETECT_INIT      7
#define ANEG_STATE_ACK_DETECT           8
#define ANEG_STATE_COMPLETE_ACK_INIT    9
#define ANEG_STATE_COMPLETE_ACK         10
#define ANEG_STATE_IDLE_DETECT_INIT     11
#define ANEG_STATE_IDLE_DETECT          12
#define ANEG_STATE_LINK_OK              13
#define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
#define ANEG_STATE_NEXT_PAGE_WAIT       15

        u32 flags;                      /* MR_* control/status bits */
#define MR_AN_ENABLE            0x00000001
#define MR_RESTART_AN           0x00000002
#define MR_AN_COMPLETE          0x00000004
#define MR_PAGE_RX              0x00000008
#define MR_NP_LOADED            0x00000010
#define MR_TOGGLE_TX            0x00000020
#define MR_LP_ADV_FULL_DUPLEX   0x00000040
#define MR_LP_ADV_HALF_DUPLEX   0x00000080
#define MR_LP_ADV_SYM_PAUSE     0x00000100
#define MR_LP_ADV_ASYM_PAUSE    0x00000200
#define MR_LP_ADV_REMOTE_FAULT1 0x00000400
#define MR_LP_ADV_REMOTE_FAULT2 0x00000800
#define MR_LP_ADV_NEXT_PAGE     0x00001000
#define MR_TOGGLE_RX            0x00002000
#define MR_NP_RX                0x00004000

#define MR_LINK_OK              0x80000000

        /* Tick counters; cur_time advances once per state-machine call,
         * link_time records when the current phase began.
         */
        unsigned long link_time, cur_time;

        u32 ability_match_cfg;          /* last rx config word tracked */
        int ability_match_count;        /* consecutive identical words seen */

        char ability_match, idle_match, ack_match;

        u32 txconfig, rxconfig;         /* config words sent / received */
#define ANEG_CFG_NP             0x00000080
#define ANEG_CFG_ACK            0x00000040
#define ANEG_CFG_RF2            0x00000020
#define ANEG_CFG_RF1            0x00000010
#define ANEG_CFG_PS2            0x00000001
#define ANEG_CFG_PS1            0x00008000
#define ANEG_CFG_HD             0x00004000
#define ANEG_CFG_FD             0x00002000
#define ANEG_CFG_INVAL          0x00001f06

};
/* Return codes from tg3_fiber_aneg_smachine(). */
#define ANEG_OK         0
#define ANEG_DONE       1
#define ANEG_TIMER_ENAB 2
#define ANEG_FAILED     -1

/* Settle timeout in state-machine ticks; fiber_autoneg() ticks the
 * machine roughly once per microsecond.
 */
#define ANEG_STATE_SETTLE_TIME  10000
3544
/* Run one step of the software fiber autonegotiation state machine.
 * Called repeatedly by fiber_autoneg(); each call is one "tick".
 *
 * Returns ANEG_OK to keep stepping, ANEG_TIMER_ENAB while a settle
 * timer is running, ANEG_DONE on completion, or ANEG_FAILED.
 */
static int tg3_fiber_aneg_smachine(struct tg3 *tp,
                                   struct tg3_fiber_aneginfo *ap)
{
        u16 flowctrl;
        unsigned long delta;
        u32 rx_cfg_reg;
        int ret;

        /* First call: clear all per-negotiation tracking state. */
        if (ap->state == ANEG_STATE_UNKNOWN) {
                ap->rxconfig = 0;
                ap->link_time = 0;
                ap->cur_time = 0;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->idle_match = 0;
                ap->ack_match = 0;
        }
        ap->cur_time++;

        /* Sample the received config word and update the match
         * trackers.  ability_match is set once the same word has been
         * received more than once in a row; no config word received
         * means idle.
         */
        if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
                rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);

                if (rx_cfg_reg != ap->ability_match_cfg) {
                        ap->ability_match_cfg = rx_cfg_reg;
                        ap->ability_match = 0;
                        ap->ability_match_count = 0;
                } else {
                        if (++ap->ability_match_count > 1) {
                                ap->ability_match = 1;
                                ap->ability_match_cfg = rx_cfg_reg;
                        }
                }
                if (rx_cfg_reg & ANEG_CFG_ACK)
                        ap->ack_match = 1;
                else
                        ap->ack_match = 0;

                ap->idle_match = 0;
        } else {
                ap->idle_match = 1;
                ap->ability_match_cfg = 0;
                ap->ability_match_count = 0;
                ap->ability_match = 0;
                ap->ack_match = 0;

                rx_cfg_reg = 0;
        }

        ap->rxconfig = rx_cfg_reg;
        ret = ANEG_OK;

        switch (ap->state) {
        case ANEG_STATE_UNKNOWN:
                if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
                        ap->state = ANEG_STATE_AN_ENABLE;

                /* fallthru */
        case ANEG_STATE_AN_ENABLE:
                /* Reset tracking state and either restart negotiation
                 * or report link-ok-with-autoneg-disabled.
                 */
                ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
                if (ap->flags & MR_AN_ENABLE) {
                        ap->link_time = 0;
                        ap->cur_time = 0;
                        ap->ability_match_cfg = 0;
                        ap->ability_match_count = 0;
                        ap->ability_match = 0;
                        ap->idle_match = 0;
                        ap->ack_match = 0;

                        ap->state = ANEG_STATE_RESTART_INIT;
                } else {
                        ap->state = ANEG_STATE_DISABLE_LINK_OK;
                }
                break;

        case ANEG_STATE_RESTART_INIT:
                /* Start sending an all-zero config word (restart). */
                ap->link_time = ap->cur_time;
                ap->flags &= ~(MR_NP_LOADED);
                ap->txconfig = 0;
                tw32(MAC_TX_AUTO_NEG, 0);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ret = ANEG_TIMER_ENAB;
                ap->state = ANEG_STATE_RESTART;

                /* fallthru */
        case ANEG_STATE_RESTART:
                /* Hold the restart state for the settle time. */
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME)
                        ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
                else
                        ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_DISABLE_LINK_OK:
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_ABILITY_DETECT_INIT:
                /* Advertise full duplex plus our pause capabilities. */
                ap->flags &= ~(MR_TOGGLE_TX);
                ap->txconfig = ANEG_CFG_FD;
                flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
                if (flowctrl & ADVERTISE_1000XPAUSE)
                        ap->txconfig |= ANEG_CFG_PS1;
                if (flowctrl & ADVERTISE_1000XPSE_ASYM)
                        ap->txconfig |= ANEG_CFG_PS2;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ABILITY_DETECT;
                break;

        case ANEG_STATE_ABILITY_DETECT:
                /* Wait for a stable non-zero config word from the
                 * link partner.
                 */
                if (ap->ability_match != 0 && ap->rxconfig != 0)
                        ap->state = ANEG_STATE_ACK_DETECT_INIT;
                break;

        case ANEG_STATE_ACK_DETECT_INIT:
                /* Add the ACK bit to what we transmit. */
                ap->txconfig |= ANEG_CFG_ACK;
                tw32(MAC_TX_AUTO_NEG, ap->txconfig);
                tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_ACK_DETECT;

                /* fallthru */
        case ANEG_STATE_ACK_DETECT:
                if (ap->ack_match != 0) {
                        /* Partner acked; proceed only if the acked word
                         * matches what we tracked, else restart.
                         */
                        if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
                            (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
                                ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
                        } else {
                                ap->state = ANEG_STATE_AN_ENABLE;
                        }
                } else if (ap->ability_match != 0 &&
                           ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                }
                break;

        case ANEG_STATE_COMPLETE_ACK_INIT:
                if (ap->rxconfig & ANEG_CFG_INVAL) {
                        ret = ANEG_FAILED;
                        break;
                }
                /* Latch the link partner ability bits from the
                 * received config word into ap->flags.
                 */
                ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
                               MR_LP_ADV_HALF_DUPLEX |
                               MR_LP_ADV_SYM_PAUSE |
                               MR_LP_ADV_ASYM_PAUSE |
                               MR_LP_ADV_REMOTE_FAULT1 |
                               MR_LP_ADV_REMOTE_FAULT2 |
                               MR_LP_ADV_NEXT_PAGE |
                               MR_TOGGLE_RX |
                               MR_NP_RX);
                if (ap->rxconfig & ANEG_CFG_FD)
                        ap->flags |= MR_LP_ADV_FULL_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_HD)
                        ap->flags |= MR_LP_ADV_HALF_DUPLEX;
                if (ap->rxconfig & ANEG_CFG_PS1)
                        ap->flags |= MR_LP_ADV_SYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_PS2)
                        ap->flags |= MR_LP_ADV_ASYM_PAUSE;
                if (ap->rxconfig & ANEG_CFG_RF1)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
                if (ap->rxconfig & ANEG_CFG_RF2)
                        ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_LP_ADV_NEXT_PAGE;

                ap->link_time = ap->cur_time;

                /* 0x0008 is presumably the partner's toggle bit (no
                 * ANEG_CFG_* name is defined for it).
                 */
                ap->flags ^= (MR_TOGGLE_TX);
                if (ap->rxconfig & 0x0008)
                        ap->flags |= MR_TOGGLE_RX;
                if (ap->rxconfig & ANEG_CFG_NP)
                        ap->flags |= MR_NP_RX;
                ap->flags |= MR_PAGE_RX;

                ap->state = ANEG_STATE_COMPLETE_ACK;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_COMPLETE_ACK:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
                                ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                        } else {
                                /* Next-page exchange is unimplemented;
                                 * fail unless neither side wants it.
                                 */
                                if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
                                    !(ap->flags & MR_NP_RX)) {
                                        ap->state = ANEG_STATE_IDLE_DETECT_INIT;
                                } else {
                                        ret = ANEG_FAILED;
                                }
                        }
                }
                break;

        case ANEG_STATE_IDLE_DETECT_INIT:
                /* Stop sending config words and wait for idle. */
                ap->link_time = ap->cur_time;
                tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
                tw32_f(MAC_MODE, tp->mac_mode);
                udelay(40);

                ap->state = ANEG_STATE_IDLE_DETECT;
                ret = ANEG_TIMER_ENAB;
                break;

        case ANEG_STATE_IDLE_DETECT:
                if (ap->ability_match != 0 &&
                    ap->rxconfig == 0) {
                        ap->state = ANEG_STATE_AN_ENABLE;
                        break;
                }
                delta = ap->cur_time - ap->link_time;
                if (delta > ANEG_STATE_SETTLE_TIME) {
                        /* XXX another gem from the Broadcom driver :( */
                        ap->state = ANEG_STATE_LINK_OK;
                }
                break;

        case ANEG_STATE_LINK_OK:
                ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
                ret = ANEG_DONE;
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
                /* ??? unimplemented */
                break;

        case ANEG_STATE_NEXT_PAGE_WAIT:
                /* ??? unimplemented */
                break;

        default:
                ret = ANEG_FAILED;
                break;
        }

        return ret;
}
3796
3797 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
3798 {
3799         int res = 0;
3800         struct tg3_fiber_aneginfo aninfo;
3801         int status = ANEG_FAILED;
3802         unsigned int tick;
3803         u32 tmp;
3804
3805         tw32_f(MAC_TX_AUTO_NEG, 0);
3806
3807         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
3808         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
3809         udelay(40);
3810
3811         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
3812         udelay(40);
3813
3814         memset(&aninfo, 0, sizeof(aninfo));
3815         aninfo.flags |= MR_AN_ENABLE;
3816         aninfo.state = ANEG_STATE_UNKNOWN;
3817         aninfo.cur_time = 0;
3818         tick = 0;
3819         while (++tick < 195000) {
3820                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
3821                 if (status == ANEG_DONE || status == ANEG_FAILED)
3822                         break;
3823
3824                 udelay(1);
3825         }
3826
3827         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
3828         tw32_f(MAC_MODE, tp->mac_mode);
3829         udelay(40);
3830
3831         *txflags = aninfo.txconfig;
3832         *rxflags = aninfo.flags;
3833
3834         if (status == ANEG_DONE &&
3835             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
3836                              MR_LP_ADV_FULL_DUPLEX)))
3837                 res = 1;
3838
3839         return res;
3840 }
3841
/* Initialization sequence for the BCM8002 SerDes PHY.  The raw
 * register numbers and values are presumably vendor-specified (see the
 * firmware notice at the top of the file); keep the write order and
 * delays exactly as-is.
 */
static void tg3_init_bcm8002(struct tg3 *tp)
{
        u32 mac_status = tr32(MAC_STATUS);
        int i;

        /* Reset when initting first time or we have a link. */
        if (tg3_flag(tp, INIT_COMPLETE) &&
            !(mac_status & MAC_STATUS_PCS_SYNCED))
                return;

        /* Set PLL lock range. */
        tg3_writephy(tp, 0x16, 0x8007);

        /* SW reset */
        tg3_writephy(tp, MII_BMCR, BMCR_RESET);

        /* Wait for reset to complete. */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 500; i++)
                udelay(10);

        /* Config mode; select PMA/Ch 1 regs. */
        tg3_writephy(tp, 0x10, 0x8411);

        /* Enable auto-lock and comdet, select txclk for tx. */
        tg3_writephy(tp, 0x11, 0x0a10);

        tg3_writephy(tp, 0x18, 0x00a0);
        tg3_writephy(tp, 0x16, 0x41ff);

        /* Assert and deassert POR. */
        tg3_writephy(tp, 0x13, 0x0400);
        udelay(40);
        tg3_writephy(tp, 0x13, 0x0000);

        tg3_writephy(tp, 0x11, 0x0a50);
        udelay(40);
        tg3_writephy(tp, 0x11, 0x0a10);

        /* Wait for signal to stabilize */
        /* XXX schedule_timeout() ... */
        for (i = 0; i < 15000; i++)
                udelay(10);

        /* Deselect the channel register so we can read the PHYID
         * later.
         */
        tg3_writephy(tp, 0x10, 0x8011);
}
3891
3892 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
3893 {
3894         u16 flowctrl;
3895         u32 sg_dig_ctrl, sg_dig_status;
3896         u32 serdes_cfg, expected_sg_dig_ctrl;
3897         int workaround, port_a;
3898         int current_link_up;
3899
3900         serdes_cfg = 0;
3901         expected_sg_dig_ctrl = 0;
3902         workaround = 0;
3903         port_a = 1;
3904         current_link_up = 0;
3905
3906         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
3907             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
3908                 workaround = 1;
3909                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
3910                         port_a = 0;
3911
3912                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
3913                 /* preserve bits 20-23 for voltage regulator */
3914                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
3915         }
3916
3917         sg_dig_ctrl = tr32(SG_DIG_CTRL);
3918
3919         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
3920                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
3921                         if (workaround) {
3922                                 u32 val = serdes_cfg;
3923
3924                                 if (port_a)
3925                                         val |= 0xc010000;
3926                                 else
3927                                         val |= 0x4010000;
3928                                 tw32_f(MAC_SERDES_CFG, val);
3929                         }
3930
3931                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
3932                 }
3933                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
3934                         tg3_setup_flow_control(tp, 0, 0);
3935                         current_link_up = 1;
3936                 }
3937                 goto out;
3938         }
3939
3940         /* Want auto-negotiation.  */
3941         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
3942
3943         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
3944         if (flowctrl & ADVERTISE_1000XPAUSE)
3945                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
3946         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
3947                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
3948
3949         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
3950                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
3951                     tp->serdes_counter &&
3952                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
3953                                     MAC_STATUS_RCVD_CFG)) ==
3954                      MAC_STATUS_PCS_SYNCED)) {
3955                         tp->serdes_counter--;
3956                         current_link_up = 1;
3957                         goto out;
3958                 }
3959 restart_autoneg:
3960                 if (workaround)
3961                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
3962                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
3963                 udelay(5);
3964                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
3965
3966                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
3967                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3968         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
3969                                  MAC_STATUS_SIGNAL_DET)) {
3970                 sg_dig_status = tr32(SG_DIG_STATUS);
3971                 mac_status = tr32(MAC_STATUS);
3972
3973                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
3974                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
3975                         u32 local_adv = 0, remote_adv = 0;
3976
3977                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
3978                                 local_adv |= ADVERTISE_1000XPAUSE;
3979                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
3980                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
3981
3982                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
3983                                 remote_adv |= LPA_1000XPAUSE;
3984                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
3985                                 remote_adv |= LPA_1000XPAUSE_ASYM;
3986
3987                         tg3_setup_flow_control(tp, local_adv, remote_adv);
3988                         current_link_up = 1;
3989                         tp->serdes_counter = 0;
3990                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
3991                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
3992                         if (tp->serdes_counter)
3993                                 tp->serdes_counter--;
3994                         else {
3995                                 if (workaround) {
3996                                         u32 val = serdes_cfg;
3997
3998                                         if (port_a)
3999                                                 val |= 0xc010000;
4000                                         else
4001                                                 val |= 0x4010000;
4002
4003                                         tw32_f(MAC_SERDES_CFG, val);
4004                                 }
4005
4006                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4007                                 udelay(40);
4008
4009                                 /* Link parallel detection - link is up */
4010                                 /* only if we have PCS_SYNC and not */
4011                                 /* receiving config code words */
4012                                 mac_status = tr32(MAC_STATUS);
4013                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4014                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4015                                         tg3_setup_flow_control(tp, 0, 0);
4016                                         current_link_up = 1;
4017                                         tp->phy_flags |=
4018                                                 TG3_PHYFLG_PARALLEL_DETECT;
4019                                         tp->serdes_counter =
4020                                                 SERDES_PARALLEL_DET_TIMEOUT;
4021                                 } else
4022                                         goto restart_autoneg;
4023                         }
4024                 }
4025         } else {
4026                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4027                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4028         }
4029
4030 out:
4031         return current_link_up;
4032 }
4033
/* Bring up a fiber link on parts without hardware autonegotiation.
 *
 * @mac_status: current MAC_STATUS register value.
 * Returns 1 when the link is considered up, 0 otherwise.
 */
static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
{
	int current_link_up = 0;

	/* No PCS sync means no usable signal; report link down. */
	if (!(mac_status & MAC_STATUS_PCS_SYNCED))
		goto out;

	if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 txflags, rxflags;
		int i;

		/* Run the software autoneg state machine; on success,
		 * translate the negotiated pause bits into the MII-style
		 * advertisement format tg3_setup_flow_control() expects.
		 */
		if (fiber_autoneg(tp, &txflags, &rxflags)) {
			u32 local_adv = 0, remote_adv = 0;

			if (txflags & ANEG_CFG_PS1)
				local_adv |= ADVERTISE_1000XPAUSE;
			if (txflags & ANEG_CFG_PS2)
				local_adv |= ADVERTISE_1000XPSE_ASYM;

			if (rxflags & MR_LP_ADV_SYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE;
			if (rxflags & MR_LP_ADV_ASYM_PAUSE)
				remote_adv |= LPA_1000XPAUSE_ASYM;

			tg3_setup_flow_control(tp, local_adv, remote_adv);

			current_link_up = 1;
		}
		/* Ack sync/config-change events until the status bits
		 * stay clear (bounded at 30 iterations).
		 */
		for (i = 0; i < 30; i++) {
			udelay(20);
			tw32_f(MAC_STATUS,
			       (MAC_STATUS_SYNC_CHANGED |
				MAC_STATUS_CFG_CHANGED));
			udelay(40);
			if ((tr32(MAC_STATUS) &
			     (MAC_STATUS_SYNC_CHANGED |
			      MAC_STATUS_CFG_CHANGED)) == 0)
				break;
		}

		mac_status = tr32(MAC_STATUS);
		/* Autoneg failed, but we still have PCS sync and are not
		 * receiving config code words: treat the link as up
		 * (parallel-detection style).
		 */
		if (current_link_up == 0 &&
		    (mac_status & MAC_STATUS_PCS_SYNCED) &&
		    !(mac_status & MAC_STATUS_RCVD_CFG))
			current_link_up = 1;
	} else {
		/* Autoneg disabled: no pause negotiation possible. */
		tg3_setup_flow_control(tp, 0, 0);

		/* Forcing 1000FD link up. */
		current_link_up = 1;

		/* Briefly send config words so the partner sees us, then
		 * restore the normal MAC mode.
		 */
		tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
		udelay(40);

		tw32_f(MAC_MODE, tp->mac_mode);
		udelay(40);
	}

out:
	return current_link_up;
}
4095
/* Link management for TBI fiber devices.
 *
 * Re-evaluates the SerDes link, programs the MAC for TBI mode, runs
 * hardware or by-hand autonegotiation, then updates carrier state and
 * the link LED.  Always returns 0.
 */
static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
{
	u32 orig_pause_cfg;
	u16 orig_active_speed;
	u8 orig_active_duplex;
	u32 mac_status;
	int current_link_up;
	int i;

	/* Snapshot current link parameters so only real changes are
	 * reported at the end.
	 */
	orig_pause_cfg = tp->link_config.active_flowctrl;
	orig_active_speed = tp->link_config.active_speed;
	orig_active_duplex = tp->link_config.active_duplex;

	/* Fast path: no HW autoneg, carrier up, init complete.  If the
	 * MAC shows a clean synced link (sync + signal detect, no config
	 * words), just ack the change bits and keep the current state.
	 */
	if (!tg3_flag(tp, HW_AUTONEG) &&
	    netif_carrier_ok(tp->dev) &&
	    tg3_flag(tp, INIT_COMPLETE)) {
		mac_status = tr32(MAC_STATUS);
		mac_status &= (MAC_STATUS_PCS_SYNCED |
			       MAC_STATUS_SIGNAL_DET |
			       MAC_STATUS_CFG_CHANGED |
			       MAC_STATUS_RCVD_CFG);
		if (mac_status == (MAC_STATUS_PCS_SYNCED |
				   MAC_STATUS_SIGNAL_DET)) {
			tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
					    MAC_STATUS_CFG_CHANGED));
			return 0;
		}
	}

	tw32_f(MAC_TX_AUTO_NEG, 0);

	/* Switch the MAC port into TBI mode. */
	tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
	tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	if (tp->phy_id == TG3_PHY_ID_BCM8002)
		tg3_init_bcm8002(tp);

	/* Enable link change event even when serdes polling.  */
	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
	udelay(40);

	current_link_up = 0;
	mac_status = tr32(MAC_STATUS);

	if (tg3_flag(tp, HW_AUTONEG))
		current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
	else
		current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);

	/* Clear any pending link-change indication in the status block. */
	tp->napi[0].hw_status->status =
		(SD_STATUS_UPDATED |
		 (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));

	/* Ack sync/config/link-state change bits until they stay clear
	 * (bounded at 100 iterations).
	 */
	for (i = 0; i < 100; i++) {
		tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
				    MAC_STATUS_CFG_CHANGED));
		udelay(5);
		if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
					 MAC_STATUS_CFG_CHANGED |
					 MAC_STATUS_LNKSTATE_CHANGED)) == 0)
			break;
	}

	mac_status = tr32(MAC_STATUS);
	if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
		current_link_up = 0;
		/* Autoneg ran out (serdes_counter hit 0) with no sync:
		 * pulse SEND_CONFIGS to provoke the link partner.
		 */
		if (tp->link_config.autoneg == AUTONEG_ENABLE &&
		    tp->serdes_counter == 0) {
			tw32_f(MAC_MODE, (tp->mac_mode |
					  MAC_MODE_SEND_CONFIGS));
			udelay(1);
			tw32_f(MAC_MODE, tp->mac_mode);
		}
	}

	/* Fiber runs 1000FD only; drive the link LED accordingly. */
	if (current_link_up == 1) {
		tp->link_config.active_speed = SPEED_1000;
		tp->link_config.active_duplex = DUPLEX_FULL;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_1000MBPS_ON));
	} else {
		tp->link_config.active_speed = SPEED_INVALID;
		tp->link_config.active_duplex = DUPLEX_INVALID;
		tw32(MAC_LED_CTRL, (tp->led_ctrl |
				    LED_CTRL_LNKLED_OVERRIDE |
				    LED_CTRL_TRAFFIC_OVERRIDE));
	}

	/* Report carrier toggles, or parameter changes with carrier
	 * state unchanged.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else
			netif_carrier_off(tp->dev);
		tg3_link_report(tp);
	} else {
		u32 now_pause_cfg = tp->link_config.active_flowctrl;
		if (orig_pause_cfg != now_pause_cfg ||
		    orig_active_speed != tp->link_config.active_speed ||
		    orig_active_duplex != tp->link_config.active_duplex)
			tg3_link_report(tp);
	}

	return 0;
}
4203
/* Link management for serdes devices reached through an MII register
 * interface (5714/5780-class parts).  Follows the copper flow but uses
 * only the 1000BASE-X advertisement/LPA bits.  Returns the OR of all
 * tg3_readphy() error codes encountered.
 */
static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
{
	int current_link_up, err = 0;
	u32 bmsr, bmcr;
	u16 current_speed;
	u8 current_duplex;
	u32 local_adv, remote_adv;

	/* Put the MAC port in GMII mode and clear stale events. */
	tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32(MAC_EVENT, 0);

	tw32_f(MAC_STATUS,
	     (MAC_STATUS_SYNC_CHANGED |
	      MAC_STATUS_CFG_CHANGED |
	      MAC_STATUS_MI_COMPLETION |
	      MAC_STATUS_LNKSTATE_CHANGED));
	udelay(40);

	if (force_reset)
		tg3_phy_reset(tp);

	current_link_up = 0;
	current_speed = SPEED_INVALID;
	current_duplex = DUPLEX_INVALID;

	/* BMSR latches link-down; read twice for the current state. */
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	err |= tg3_readphy(tp, MII_BMSR, &bmsr);
	/* 5714: trust the MAC's TX status over BMSR for link state. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
		if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	err |= tg3_readphy(tp, MII_BMCR, &bmcr);

	if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
	    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		/* do nothing, just check for link up at the end */
	} else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
		u32 adv, new_adv;

		/* Build the 1000BASE-X advertisement from the configured
		 * flow-control and speed/duplex settings.
		 */
		err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
		new_adv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
				  ADVERTISE_1000XPAUSE |
				  ADVERTISE_1000XPSE_ASYM |
				  ADVERTISE_SLCT);

		new_adv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);

		if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
			new_adv |= ADVERTISE_1000XHALF;
		if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
			new_adv |= ADVERTISE_1000XFULL;

		/* (Re)start autoneg when the advertisement changed or
		 * autoneg is off; the outcome is picked up on a later
		 * poll, so return early here.
		 */
		if ((new_adv != adv) || !(bmcr & BMCR_ANENABLE)) {
			tg3_writephy(tp, MII_ADVERTISE, new_adv);
			bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
			tg3_writephy(tp, MII_BMCR, bmcr);

			tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
			tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

			return err;
		}
	} else {
		/* Forced speed/duplex mode. */
		u32 new_bmcr;

		bmcr &= ~BMCR_SPEED1000;
		new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);

		if (tp->link_config.duplex == DUPLEX_FULL)
			new_bmcr |= BMCR_FULLDPLX;

		if (new_bmcr != bmcr) {
			/* BMCR_SPEED1000 is a reserved bit that needs
			 * to be set on write.
			 */
			new_bmcr |= BMCR_SPEED1000;

			/* Force a linkdown */
			if (netif_carrier_ok(tp->dev)) {
				u32 adv;

				/* Clear speed advertisements and restart
				 * autoneg so the partner sees the drop.
				 */
				err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
				adv &= ~(ADVERTISE_1000XFULL |
					 ADVERTISE_1000XHALF |
					 ADVERTISE_SLCT);
				tg3_writephy(tp, MII_ADVERTISE, adv);
				tg3_writephy(tp, MII_BMCR, bmcr |
							   BMCR_ANRESTART |
							   BMCR_ANENABLE);
				udelay(10);
				netif_carrier_off(tp->dev);
			}
			tg3_writephy(tp, MII_BMCR, new_bmcr);
			bmcr = new_bmcr;
			/* Latched BMSR: read twice, then apply the 5714
			 * TX-status override again.
			 */
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			err |= tg3_readphy(tp, MII_BMSR, &bmsr);
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5714) {
				if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
					bmsr |= BMSR_LSTATUS;
				else
					bmsr &= ~BMSR_LSTATUS;
			}
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
	}

	if (bmsr & BMSR_LSTATUS) {
		current_speed = SPEED_1000;
		current_link_up = 1;
		if (bmcr & BMCR_FULLDPLX)
			current_duplex = DUPLEX_FULL;
		else
			current_duplex = DUPLEX_HALF;

		local_adv = 0;
		remote_adv = 0;

		if (bmcr & BMCR_ANENABLE) {
			u32 common;

			/* Resolve duplex from the intersection of our
			 * advertisement and the partner's abilities.
			 */
			err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
			err |= tg3_readphy(tp, MII_LPA, &remote_adv);
			common = local_adv & remote_adv;
			if (common & (ADVERTISE_1000XHALF |
				      ADVERTISE_1000XFULL)) {
				if (common & ADVERTISE_1000XFULL)
					current_duplex = DUPLEX_FULL;
				else
					current_duplex = DUPLEX_HALF;
			} else if (!tg3_flag(tp, 5780_CLASS)) {
				/* Link is up via parallel detect */
			} else {
				current_link_up = 0;
			}
		}
	}

	if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
		tg3_setup_flow_control(tp, local_adv, remote_adv);

	/* NOTE(review): this tests the *previous* active_duplex; the new
	 * value is only assigned below.  This matches long-standing
	 * upstream behavior -- confirm before changing.
	 */
	tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
	if (tp->link_config.active_duplex == DUPLEX_HALF)
		tp->mac_mode |= MAC_MODE_HALF_DUPLEX;

	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);

	tp->link_config.active_speed = current_speed;
	tp->link_config.active_duplex = current_duplex;

	/* Toggle carrier and report; losing carrier also clears the
	 * parallel-detect flag.
	 */
	if (current_link_up != netif_carrier_ok(tp->dev)) {
		if (current_link_up)
			netif_carrier_on(tp->dev);
		else {
			netif_carrier_off(tp->dev);
			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
		}
		tg3_link_report(tp);
	}
	return err;
}
4375
/* Periodic helper for MII-serdes parts: detect link partners that do
 * not autonegotiate and force 1000FD (parallel detection), or re-enable
 * autoneg once config code words reappear on the wire.
 */
static void tg3_serdes_parallel_detect(struct tg3 *tp)
{
	if (tp->serdes_counter) {
		/* Give autoneg time to complete. */
		tp->serdes_counter--;
		return;
	}

	if (!netif_carrier_ok(tp->dev) &&
	    (tp->link_config.autoneg == AUTONEG_ENABLE)) {
		u32 bmcr;

		tg3_readphy(tp, MII_BMCR, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			u32 phy1, phy2;

			/* Select shadow register 0x1f */
			tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
			tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);

			/* Select expansion interrupt status register */
			tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
					 MII_TG3_DSP_EXP1_INT_STAT);
			/* NOTE(review): read twice -- presumably the first
			 * read returns a latched value; confirm against the
			 * PHY datasheet.
			 */
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
			tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);

			if ((phy1 & 0x10) && !(phy2 & 0x20)) {
				/* We have signal detect and not receiving
				 * config code words, link is up by parallel
				 * detection.
				 */

				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				tg3_writephy(tp, MII_BMCR, bmcr);
				tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
			}
		}
	} else if (netif_carrier_ok(tp->dev) &&
		   (tp->link_config.autoneg == AUTONEG_ENABLE) &&
		   (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Select expansion interrupt status register */
		tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
				 MII_TG3_DSP_EXP1_INT_STAT);
		tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			/* Config code words received, turn on autoneg. */
			tg3_readphy(tp, MII_BMCR, &bmcr);
			tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);

			tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;

		}
	}
}
4435
/* Top-level PHY/link setup dispatcher.
 *
 * Selects the media-specific setup routine, then applies post-link
 * fixups: 5784-AX clock prescaler, TX inter-packet gap/slot time,
 * statistics coalescing, and the ASPM power-management threshold.
 * Returns the error from the media-specific routine.
 */
static int tg3_setup_phy(struct tg3 *tp, int force_reset)
{
	u32 val;
	int err;

	if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
		err = tg3_setup_fiber_phy(tp, force_reset);
	else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
		err = tg3_setup_fiber_mii_phy(tp, force_reset);
	else
		err = tg3_setup_copper_phy(tp, force_reset);

	/* 5784-AX: scale the GRC prescaler to the current MAC clock. */
	if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
		u32 scale;

		val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
		if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
			scale = 65;
		else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
			scale = 6;
		else
			scale = 12;

		val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
		val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
		tw32(GRC_MISC_CFG, val);
	}

	/* Program the inter-packet gap; 5720 keeps its jumbo-frame and
	 * countdown fields intact.
	 */
	val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
	      (6 << TX_LENGTHS_IPG_SHIFT);
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
		val |= tr32(MAC_TX_LENGTHS) &
		       (TX_LENGTHS_JMB_FRM_LEN_MSK |
			TX_LENGTHS_CNT_DWN_VAL_MSK);

	/* Half-duplex gigabit uses the large 0xff slot time. */
	if (tp->link_config.active_speed == SPEED_1000 &&
	    tp->link_config.active_duplex == DUPLEX_HALF)
		tw32(MAC_TX_LENGTHS, val |
		     (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
	else
		tw32(MAC_TX_LENGTHS, val |
		     (32 << TX_LENGTHS_SLOT_TIME_SHIFT));

	/* Pre-5705 parts: coalesce statistics only while link is up. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		if (netif_carrier_ok(tp->dev)) {
			tw32(HOSTCC_STAT_COAL_TICKS,
			     tp->coal.stats_block_coalesce_usecs);
		} else {
			tw32(HOSTCC_STAT_COAL_TICKS, 0);
		}
	}

	/* ASPM workaround: use the configured L1 threshold while the
	 * link is down, saturate the threshold while it is up.
	 */
	if (tg3_flag(tp, ASPM_WORKAROUND)) {
		val = tr32(PCIE_PWR_MGMT_THRESH);
		if (!netif_carrier_ok(tp->dev))
			val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
			      tp->pwrmgmt_thresh;
		else
			val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
		tw32(PCIE_PWR_MGMT_THRESH, val);
	}

	return err;
}
4500
/* Report the irq_sync flag.  Nonzero indicates interrupt processing is
 * being synchronized elsewhere in the driver.  NOTE(review): semantics
 * inferred from the name; confirm against the code that sets
 * tp->irq_sync.
 */
static inline int tg3_irq_sync(struct tg3 *tp)
{
	return tp->irq_sync;
}
4505
4506 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
4507 {
4508         int i;
4509
4510         dst = (u32 *)((u8 *)dst + off);
4511         for (i = 0; i < len; i += sizeof(u32))
4512                 *dst++ = tr32(off + i);
4513 }
4514
/* Fill 'regs' (a TG3_REG_BLK_SIZE buffer) from the legacy (non-PCIe)
 * register blocks.  Each tg3_rd32_loop() call gives a block's base
 * offset and its length in bytes; words land at the matching byte
 * offsets inside 'regs'.
 */
static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
{
	tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
	tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
	tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
	tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
	tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
	tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
	tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
	tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
	tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
	tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
	tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
	tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
	tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);

	/* Per-vector coalescing registers exist only with MSI-X. */
	if (tg3_flag(tp, SUPPORT_MSIX))
		tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);

	tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
	tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
	tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
	tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
	tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);

	/* The dedicated TX CPU is absent on 5705 and later parts. */
	if (!tg3_flag(tp, 5705_PLUS)) {
		tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
		tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
	}

	tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
	tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
	tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
	tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
	tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);

	/* NVRAM interface registers, when an NVRAM is attached. */
	if (tg3_flag(tp, NVRAM))
		tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
}
4564
/* Dump device registers, per-vector status blocks and NAPI state to
 * the kernel log for post-mortem debugging.  May be called from atomic
 * context -- hence GFP_ATOMIC.
 */
static void tg3_dump_state(struct tg3 *tp)
{
	int i;
	u32 *regs;

	regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
	if (!regs) {
		netdev_err(tp->dev, "Failed allocating register dump buffer\n");
		return;
	}

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* Read up to but not including private PCI registers */
		for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
			regs[i / sizeof(u32)] = tr32(i);
	} else
		tg3_dump_legacy_regs(tp, regs);

	/* Print four words per line, skipping all-zero groups. */
	for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
		if (!regs[i + 0] && !regs[i + 1] &&
		    !regs[i + 2] && !regs[i + 3])
			continue;

		netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
			   i * 4,
			   regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
	}

	kfree(regs);

	/* Per-vector software view of the hardware status block plus
	 * the driver's own NAPI/ring indices.
	 */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		/* SW status block */
		netdev_err(tp->dev,
			 "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
			   i,
			   tnapi->hw_status->status,
			   tnapi->hw_status->status_tag,
			   tnapi->hw_status->rx_jumbo_consumer,
			   tnapi->hw_status->rx_consumer,
			   tnapi->hw_status->rx_mini_consumer,
			   tnapi->hw_status->idx[0].rx_producer,
			   tnapi->hw_status->idx[0].tx_consumer);

		netdev_err(tp->dev,
		"%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
			   i,
			   tnapi->last_tag, tnapi->last_irq_tag,
			   tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
			   tnapi->rx_rcb_ptr,
			   tnapi->prodring.rx_std_prod_idx,
			   tnapi->prodring.rx_std_cons_idx,
			   tnapi->prodring.rx_jmb_prod_idx,
			   tnapi->prodring.rx_jmb_cons_idx);
	}
}
4622
/* This is called whenever we suspect that the system chipset is re-
 * ordering the sequence of MMIO to the tx send mailbox. The symptom
 * is bogus tx completions. We try to recover by setting the
 * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
 * in the workqueue.
 */
static void tg3_tx_recover(struct tg3 *tp)
{
	/* With the reorder workaround already active (flag set, or
	 * indirect mailbox writes in use) this path must never fire.
	 */
	BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
	       tp->write32_tx_mbox == tg3_write_indirect_mbox);

	netdev_warn(tp->dev,
		    "The system may be re-ordering memory-mapped I/O "
		    "cycles to the network device, attempting to recover. "
		    "Please report the problem to the driver maintainer "
		    "and include system chipset information.\n");

	/* The chip reset itself happens later from the workqueue. */
	spin_lock(&tp->lock);
	tg3_flag_set(tp, TX_RECOVERY_PENDING);
	spin_unlock(&tp->lock);
}
4644
4645 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
4646 {
4647         /* Tell compiler to fetch tx indices from memory. */
4648         barrier();
4649         return tnapi->tx_pending -
4650                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
4651 }
4652
/* Tigon3 never reports partial packet sends.  So we do not
 * need special logic to handle SKBs that have not had all
 * of their frags sent yet, like SunGEM does.
 */
static void tg3_tx(struct tg3_napi *tnapi)
{
	struct tg3 *tp = tnapi->tp;
	/* Hardware consumer index from the status block vs. our software
	 * consumer index; everything in between has been transmitted.
	 */
	u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
	u32 sw_idx = tnapi->tx_cons;
	struct netdev_queue *txq;
	int index = tnapi - tp->napi;

	/* With TSS the queue index is shifted down by one relative to
	 * the napi context index.
	 */
	if (tg3_flag(tp, ENABLE_TSS))
		index--;

	txq = netdev_get_tx_queue(tp->dev, index);

	while (sw_idx != hw_idx) {
		struct ring_info *ri = &tnapi->tx_buffers[sw_idx];
		struct sk_buff *skb = ri->skb;
		int i, tx_bug = 0;

		/* A completed slot without an skb means the hardware and
		 * software views have diverged; trigger recovery.
		 */
		if (unlikely(skb == NULL)) {
			tg3_tx_recover(tp);
			return;
		}

		/* Unmap the linear part of the skb... */
		pci_unmap_single(tp->pdev,
				 dma_unmap_addr(ri, mapping),
				 skb_headlen(skb),
				 PCI_DMA_TODEVICE);

		ri->skb = NULL;

		sw_idx = NEXT_TX(sw_idx);

		/* ...then each fragment, one descriptor slot apiece.
		 * Hitting an occupied slot or the hardware index mid-skb
		 * indicates reordered mailbox writes (see tg3_tx_recover).
		 */
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			ri = &tnapi->tx_buffers[sw_idx];
			if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
				tx_bug = 1;

			pci_unmap_page(tp->pdev,
				       dma_unmap_addr(ri, mapping),
				       skb_shinfo(skb)->frags[i].size,
				       PCI_DMA_TODEVICE);
			sw_idx = NEXT_TX(sw_idx);
		}

		dev_kfree_skb(skb);

		if (unlikely(tx_bug)) {
			tg3_tx_recover(tp);
			return;
		}
	}

	tnapi->tx_cons = sw_idx;

	/* Need to make the tx_cons update visible to tg3_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that tg3_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Re-check under the tx lock to close the race with a concurrent
	 * tg3_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
		__netif_tx_lock(txq, smp_processor_id());
		if (netif_tx_queue_stopped(txq) &&
		    (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}
}
4727
4728 static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4729 {
4730         if (!ri->skb)
4731                 return;
4732
4733         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
4734                          map_sz, PCI_DMA_FROMDEVICE);
4735         dev_kfree_skb_any(ri->skb);
4736         ri->skb = NULL;
4737 }
4738
/* Returns size of skb allocated or < 0 on error.
 *
 * We only need to fill in the address because the other members
 * of the RX descriptor are invariant, see tg3_init_rings.
 *
 * Note the purposeful assymetry of cpu vs. chip accesses.  For
 * posting buffers we only dirty the first cache line of the RX
 * descriptor (containing the address).  Whereas for the RX status
 * buffers the cpu only reads the last cacheline of the RX descriptor
 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
 */
static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
			    u32 opaque_key, u32 dest_idx_unmasked)
{
	struct tg3_rx_buffer_desc *desc;
	struct ring_info *map;
	struct sk_buff *skb;
	dma_addr_t mapping;
	int skb_size, dest_idx;

	/* Pick descriptor, bookkeeping slot and buffer size according to
	 * which producer ring (standard or jumbo) is being refilled.
	 */
	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		desc = &tpr->rx_std[dest_idx];
		map = &tpr->rx_std_buffers[dest_idx];
		skb_size = tp->rx_pkt_map_sz;
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		desc = &tpr->rx_jmb[dest_idx].std;
		map = &tpr->rx_jmb_buffers[dest_idx];
		skb_size = TG3_RX_JMB_MAP_SZ;
		break;

	default:
		return -EINVAL;
	}

	/* Do not overwrite any of the map or rp information
	 * until we are sure we can commit to a new buffer.
	 *
	 * Callers depend upon this behavior and assume that
	 * we leave everything unchanged if we fail.
	 */
	skb = netdev_alloc_skb(tp->dev, skb_size + tp->rx_offset);
	if (skb == NULL)
		return -ENOMEM;

	skb_reserve(skb, tp->rx_offset);

	mapping = pci_map_single(tp->pdev, skb->data, skb_size,
				 PCI_DMA_FROMDEVICE);
	/* Mapping can fail (e.g. IOMMU exhaustion); back out cleanly. */
	if (pci_dma_mapping_error(tp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Commit: record the new skb/mapping, then publish the 64-bit
	 * DMA address to the hardware descriptor.
	 */
	map->skb = skb;
	dma_unmap_addr_set(map, mapping, mapping);

	desc->addr_hi = ((u64)mapping >> 32);
	desc->addr_lo = ((u64)mapping & 0xffffffff);

	return skb_size;
}
4805
/* We only need to move over in the address because the other
 * members of the RX descriptor are invariant.  See notes above
 * tg3_alloc_rx_skb for full details.
 *
 * Re-post an already-allocated rx buffer: move the skb, DMA mapping
 * and descriptor address from the source slot (always in napi[0]'s
 * producer ring set) to the destination slot in @dpr.
 */
static void tg3_recycle_rx(struct tg3_napi *tnapi,
			   struct tg3_rx_prodring_set *dpr,
			   u32 opaque_key, int src_idx,
			   u32 dest_idx_unmasked)
{
	struct tg3 *tp = tnapi->tp;
	struct tg3_rx_buffer_desc *src_desc, *dest_desc;
	struct ring_info *src_map, *dest_map;
	/* Buffers always originate from napi[0]'s producer rings. */
	struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
	int dest_idx;

	switch (opaque_key) {
	case RXD_OPAQUE_RING_STD:
		dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
		dest_desc = &dpr->rx_std[dest_idx];
		dest_map = &dpr->rx_std_buffers[dest_idx];
		src_desc = &spr->rx_std[src_idx];
		src_map = &spr->rx_std_buffers[src_idx];
		break;

	case RXD_OPAQUE_RING_JUMBO:
		dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
		dest_desc = &dpr->rx_jmb[dest_idx].std;
		dest_map = &dpr->rx_jmb_buffers[dest_idx];
		src_desc = &spr->rx_jmb[src_idx].std;
		src_map = &spr->rx_jmb_buffers[src_idx];
		break;

	default:
		return;
	}

	/* Transfer ownership of the skb and mapping to the new slot. */
	dest_map->skb = src_map->skb;
	dma_unmap_addr_set(dest_map, mapping,
			   dma_unmap_addr(src_map, mapping));
	dest_desc->addr_hi = src_desc->addr_hi;
	dest_desc->addr_lo = src_desc->addr_lo;

	/* Ensure that the update to the skb happens after the physical
	 * addresses have been transferred to the new BD location.
	 */
	smp_wmb();

	src_map->skb = NULL;
}
4855
/* The RX ring scheme is composed of multiple rings which post fresh
 * buffers to the chip, and one special ring the chip uses to report
 * status back to the host.
 *
 * The special ring reports the status of received packets to the
 * host.  The chip does not write into the original descriptor the
 * RX buffer was obtained from.  The chip simply takes the original
 * descriptor as provided by the host, updates the status and length
 * field, then writes this into the next status ring entry.
 *
 * Each ring the host uses to post buffers to the chip is described
 * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
 * it is first placed into the on-chip ram.  When the packet's length
 * is known, it walks down the TG3_BDINFO entries to select the ring.
 * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
 * which is within the range of the new packet's length is chosen.
 *
 * The "separate ring for rx status" scheme may sound queer, but it makes
 * sense from a cache coherency perspective.  If only the host writes
 * to the buffer post rings, and only the chip writes to the rx status
 * rings, then cache lines never move beyond shared-modified state.
 * If both the host and chip were to write into the same ring, cache line
 * eviction could occur since both entities want it in an exclusive state.
 */
static int tg3_rx(struct tg3_napi *tnapi, int budget)
{
	struct tg3 *tp = tnapi->tp;
	u32 work_mask, rx_std_posted = 0;
	u32 std_prod_idx, jmb_prod_idx;
	u32 sw_idx = tnapi->rx_rcb_ptr;
	u16 hw_idx;
	int received;
	struct tg3_rx_prodring_set *tpr = &tnapi->prodring;

	hw_idx = *(tnapi->rx_rcb_prod_idx);
	/*
	 * We need to order the read of hw_idx and the read of
	 * the opaque cookie.
	 */
	rmb();
	work_mask = 0;
	received = 0;
	std_prod_idx = tpr->rx_std_prod_idx;
	jmb_prod_idx = tpr->rx_jmb_prod_idx;
	/* Walk the return ring up to the hardware producer index, bounded
	 * by the NAPI budget.
	 */
	while (sw_idx != hw_idx && budget > 0) {
		struct ring_info *ri;
		struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
		unsigned int len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u32 opaque_key, desc_idx, *post_ptr;

		/* The opaque cookie identifies which producer ring and
		 * slot this packet's buffer came from.
		 */
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
		if (opaque_key == RXD_OPAQUE_RING_STD) {
			ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &std_prod_idx;
			rx_std_posted++;
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
			dma_addr = dma_unmap_addr(ri, mapping);
			skb = ri->skb;
			post_ptr = &jmb_prod_idx;
		} else
			goto next_pkt_nopost;

		work_mask |= opaque_key;

		/* Drop errored frames (except the benign odd-nibble MII
		 * condition), recycling the buffer back to the chip.
		 */
		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
		drop_it:
			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);
		drop_it_no_recycle:
			/* Other statistics kept track of by card. */
			tp->rx_dropped++;
			goto next_pkt;
		}

		len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
		      ETH_FCS_LEN;

		if (len > TG3_RX_COPY_THRESH(tp)) {
			/* Large packet: hand the DMA buffer straight up
			 * the stack and post a freshly allocated
			 * replacement buffer in its place.
			 */
			int skb_size;

			skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
						    *post_ptr);
			if (skb_size < 0)
				goto drop_it;

			pci_unmap_single(tp->pdev, dma_addr, skb_size,
					 PCI_DMA_FROMDEVICE);

			/* Ensure that the update to the skb happens
			 * after the usage of the old DMA mapping.
			 */
			smp_wmb();

			ri->skb = NULL;

			skb_put(skb, len);
		} else {
			/* Small packet: copy it into a fresh skb and
			 * recycle the original DMA buffer.
			 */
			struct sk_buff *copy_skb;

			tg3_recycle_rx(tnapi, tpr, opaque_key,
				       desc_idx, *post_ptr);

			copy_skb = netdev_alloc_skb(tp->dev, len +
						    TG3_RAW_IP_ALIGN);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			skb_reserve(copy_skb, TG3_RAW_IP_ALIGN);
			skb_put(copy_skb, len);
			pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);

			/* We'll reuse the original ring buffer. */
			skb = copy_skb;
		}

		/* Trust the hardware checksum only when rx checksumming
		 * is enabled and the chip reports a valid TCP/UDP sum.
		 */
		if ((tp->dev->features & NETIF_F_RXCSUM) &&
		    (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
		    (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
		      >> RXD_TCPCSUM_SHIFT) == 0xffff))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb_checksum_none_assert(skb);

		skb->protocol = eth_type_trans(skb, tp->dev);

		/* Drop oversized frames (VLAN frames get ETH_HLEN slack). */
		if (len > (tp->dev->mtu + ETH_HLEN) &&
		    skb->protocol != htons(ETH_P_8021Q)) {
			dev_kfree_skb(skb);
			goto drop_it_no_recycle;
		}

		if (desc->type_flags & RXD_FLAG_VLAN &&
		    !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
			__vlan_hwaccel_put_tag(skb,
					       desc->err_vlan & RXD_VLAN_MASK);

		napi_gro_receive(&tnapi->napi, skb);

		received++;
		budget--;

next_pkt:
		(*post_ptr)++;

		/* Periodically tell the chip about replenished standard
		 * buffers so it does not run dry mid-loop.
		 */
		if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
			work_mask &= ~RXD_OPAQUE_RING_STD;
			rx_std_posted = 0;
		}
next_pkt_nopost:
		sw_idx++;
		sw_idx &= tp->rx_ret_ring_mask;

		/* Refresh hw_idx to see if there is new work */
		if (sw_idx == hw_idx) {
			hw_idx = *(tnapi->rx_rcb_prod_idx);
			rmb();
		}
	}

	/* ACK the status ring. */
	tnapi->rx_rcb_ptr = sw_idx;
	tw32_rx_mbox(tnapi->consmbox, sw_idx);

	/* Refill RX ring(s). */
	if (!tg3_flag(tp, ENABLE_RSS)) {
		if (work_mask & RXD_OPAQUE_RING_STD) {
			tpr->rx_std_prod_idx = std_prod_idx &
					       tp->rx_std_ring_mask;
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     tpr->rx_std_prod_idx);
		}
		if (work_mask & RXD_OPAQUE_RING_JUMBO) {
			tpr->rx_jmb_prod_idx = jmb_prod_idx &
					       tp->rx_jmb_ring_mask;
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     tpr->rx_jmb_prod_idx);
		}
		mmiowb();
	} else if (work_mask) {
		/* rx_std_buffers[] and rx_jmb_buffers[] entries must be
		 * updated before the producer indices can be updated.
		 */
		smp_wmb();

		tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
		tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;

		/* napi[1] performs the cross-ring transfer; make sure it
		 * runs (see tg3_poll_work()).
		 */
		if (tnapi != &tp->napi[1])
			napi_schedule(&tp->napi[1].napi);
	}

	return received;
}
5062
/* Service link-change events reported through the status block.  Does
 * nothing when the driver monitors the link some other way
 * (USE_LINKCHG_REG or POLL_SERDES).
 */
static void tg3_poll_link(struct tg3 *tp)
{
	/* handle link change and other phy events */
	if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
		struct tg3_hw_status *sblk = tp->napi[0].hw_status;

		if (sblk->status & SD_STATUS_LINK_CHG) {
			/* Ack the link-change bit in the status block. */
			sblk->status = SD_STATUS_UPDATED |
				       (sblk->status & ~SD_STATUS_LINK_CHG);
			spin_lock(&tp->lock);
			if (tg3_flag(tp, USE_PHYLIB)) {
				/* phylib manages the PHY; just clear the
				 * MAC attention sources.
				 */
				tw32_f(MAC_STATUS,
				     (MAC_STATUS_SYNC_CHANGED |
				      MAC_STATUS_CFG_CHANGED |
				      MAC_STATUS_MI_COMPLETION |
				      MAC_STATUS_LNKSTATE_CHANGED));
				udelay(40);
			} else
				tg3_setup_phy(tp, 0);
			spin_unlock(&tp->lock);
		}
	}
}
5086
/* Transfer recycled rx buffers from a source producer ring set @spr
 * (a per-vector ring) into the destination set @dpr (in practice
 * napi[0]'s rings; see tg3_poll_work()).  Both the ring_info
 * bookkeeping and the hardware descriptor addresses are copied, for
 * the standard ring first and then the jumbo ring.
 *
 * Returns 0 on success, or -ENOSPC if a destination slot was still
 * occupied (the remaining buffers stay queued on the source ring).
 */
static int tg3_rx_prodring_xfer(struct tg3 *tp,
				struct tg3_rx_prodring_set *dpr,
				struct tg3_rx_prodring_set *spr)
{
	u32 si, di, cpycnt, src_prod_idx;
	int i, err = 0;

	while (1) {
		src_prod_idx = spr->rx_std_prod_idx;

		/* Make sure updates to the rx_std_buffers[] entries and the
		 * standard producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_std_cons_idx == src_prod_idx)
			break;

		/* Contiguous run length; a wrapped producer is handled by
		 * copying up to the end of the ring this pass.
		 */
		if (spr->rx_std_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_std_cons_idx;
		else
			cpycnt = tp->rx_std_ring_mask + 1 -
				 spr->rx_std_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);

		si = spr->rx_std_cons_idx;
		di = dpr->rx_std_prod_idx;

		/* Shrink the copy to stop before any destination slot
		 * that still holds an skb.
		 */
		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_std_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_std_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_std_buffers[di],
		       &spr->rx_std_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_std[si];
			dbd = &dpr->rx_std[di];
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
				       tp->rx_std_ring_mask;
		dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
				       tp->rx_std_ring_mask;
	}

	/* Same procedure for the jumbo ring. */
	while (1) {
		src_prod_idx = spr->rx_jmb_prod_idx;

		/* Make sure updates to the rx_jmb_buffers[] entries and
		 * the jumbo producer index are seen in the correct order.
		 */
		smp_rmb();

		if (spr->rx_jmb_cons_idx == src_prod_idx)
			break;

		if (spr->rx_jmb_cons_idx < src_prod_idx)
			cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
		else
			cpycnt = tp->rx_jmb_ring_mask + 1 -
				 spr->rx_jmb_cons_idx;

		cpycnt = min(cpycnt,
			     tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);

		si = spr->rx_jmb_cons_idx;
		di = dpr->rx_jmb_prod_idx;

		for (i = di; i < di + cpycnt; i++) {
			if (dpr->rx_jmb_buffers[i].skb) {
				cpycnt = i - di;
				err = -ENOSPC;
				break;
			}
		}

		if (!cpycnt)
			break;

		/* Ensure that updates to the rx_jmb_buffers ring and the
		 * shadowed hardware producer ring from tg3_recycle_skb() are
		 * ordered correctly WRT the skb check above.
		 */
		smp_rmb();

		memcpy(&dpr->rx_jmb_buffers[di],
		       &spr->rx_jmb_buffers[si],
		       cpycnt * sizeof(struct ring_info));

		for (i = 0; i < cpycnt; i++, di++, si++) {
			struct tg3_rx_buffer_desc *sbd, *dbd;
			sbd = &spr->rx_jmb[si].std;
			dbd = &dpr->rx_jmb[di].std;
			dbd->addr_hi = sbd->addr_hi;
			dbd->addr_lo = sbd->addr_lo;
		}

		spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
		dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
				       tp->rx_jmb_ring_mask;
	}

	return err;
}
5212
/* Core per-vector work: reap tx completions, receive packets within
 * the remaining NAPI budget, and (for RSS on napi[1]) replenish
 * napi[0]'s producer rings from the per-vector rings.  Returns the
 * updated work_done; bails out early if tg3_tx() flagged
 * TX_RECOVERY_PENDING so the caller can schedule a reset.
 */
static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
{
	struct tg3 *tp = tnapi->tp;

	/* run TX completion thread */
	if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
		tg3_tx(tnapi);
		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			return work_done;
	}

	/* Vectors without an rx return ring have nothing more to do. */
	if (!tnapi->rx_rcb_prod_idx)
		return work_done;

	/* run RX thread, within the bounds set by NAPI.
	 * All RX "locking" is done by ensuring outside
	 * code synchronizes with tg3->napi.poll()
	 */
	if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
		work_done += tg3_rx(tnapi, budget - work_done);

	/* With RSS, napi[1] alone moves recycled buffers from every
	 * per-vector producer ring back into napi[0]'s rings and tells
	 * the chip about the new producer indices.
	 */
	if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
		struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
		int i, err = 0;
		u32 std_prod_idx = dpr->rx_std_prod_idx;
		u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;

		for (i = 1; i < tp->irq_cnt; i++)
			err |= tg3_rx_prodring_xfer(tp, dpr,
						    &tp->napi[i].prodring);

		/* Order the ring updates before the mailbox writes. */
		wmb();

		if (std_prod_idx != dpr->rx_std_prod_idx)
			tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
				     dpr->rx_std_prod_idx);

		if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
			tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
				     dpr->rx_jmb_prod_idx);

		mmiowb();

		/* A failed transfer (-ENOSPC) leaves buffers pending;
		 * kick the coalescing engine so we get polled again.
		 */
		if (err)
			tw32_f(HOSTCC_MODE, tp->coal_now);
	}

	return work_done;
}
5262
/* NAPI poll handler for MSI-X vectors other than vector 0.  Link and
 * chip-error processing is left to tg3_poll() on vector 0; this loop
 * only does tx/rx work and interrupt re-enabling via the tagged
 * status mechanism.
 */
static int tg3_poll_msix(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		/* tp->last_tag is used in tg3_int_reenable() below
		 * to tell the hw how much work has been processed,
		 * so we must read it before checking for more work.
		 */
		tnapi->last_tag = sblk->status_tag;
		tnapi->last_irq_tag = tnapi->last_tag;
		rmb();

		/* check for RX/TX work to do */
		if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
			   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
			napi_complete(napi);
			/* Reenable interrupts. */
			tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
			mmiowb();
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5306
5307 static void tg3_process_error(struct tg3 *tp)
5308 {
5309         u32 val;
5310         bool real_error = false;
5311
5312         if (tg3_flag(tp, ERROR_PROCESSED))
5313                 return;
5314
5315         /* Check Flow Attention register */
5316         val = tr32(HOSTCC_FLOW_ATTN);
5317         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
5318                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
5319                 real_error = true;
5320         }
5321
5322         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
5323                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
5324                 real_error = true;
5325         }
5326
5327         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
5328                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
5329                 real_error = true;
5330         }
5331
5332         if (!real_error)
5333                 return;
5334
5335         tg3_dump_state(tp);
5336
5337         tg3_flag_set(tp, ERROR_PROCESSED);
5338         schedule_work(&tp->reset_task);
5339 }
5340
/* NAPI poll handler for vector 0.  In addition to the tx/rx work done
 * by tg3_poll_work(), this vector also services chip error attention
 * and link-change events, and supports both the tagged and untagged
 * status-block acknowledgement schemes.
 */
static int tg3_poll(struct napi_struct *napi, int budget)
{
	struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
	struct tg3 *tp = tnapi->tp;
	int work_done = 0;
	struct tg3_hw_status *sblk = tnapi->hw_status;

	while (1) {
		if (sblk->status & SD_STATUS_ERROR)
			tg3_process_error(tp);

		tg3_poll_link(tp);

		work_done = tg3_poll_work(tnapi, work_done, budget);

		if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
			goto tx_recovery;

		if (unlikely(work_done >= budget))
			break;

		if (tg3_flag(tp, TAGGED_STATUS)) {
			/* tp->last_tag is used in tg3_int_reenable() below
			 * to tell the hw how much work has been processed,
			 * so we must read it before checking for more work.
			 */
			tnapi->last_tag = sblk->status_tag;
			tnapi->last_irq_tag = tnapi->last_tag;
			rmb();
		} else
			sblk->status &= ~SD_STATUS_UPDATED;

		if (likely(!tg3_has_work(tnapi))) {
			napi_complete(napi);
			tg3_int_reenable(tnapi);
			break;
		}
	}

	return work_done;

tx_recovery:
	/* work_done is guaranteed to be less than budget. */
	napi_complete(napi);
	schedule_work(&tp->reset_task);
	return work_done;
}
5388
5389 static void tg3_napi_disable(struct tg3 *tp)
5390 {
5391         int i;
5392
5393         for (i = tp->irq_cnt - 1; i >= 0; i--)
5394                 napi_disable(&tp->napi[i].napi);
5395 }
5396
5397 static void tg3_napi_enable(struct tg3 *tp)
5398 {
5399         int i;
5400
5401         for (i = 0; i < tp->irq_cnt; i++)
5402                 napi_enable(&tp->napi[i].napi);
5403 }
5404
5405 static void tg3_napi_init(struct tg3 *tp)
5406 {
5407         int i;
5408
5409         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
5410         for (i = 1; i < tp->irq_cnt; i++)
5411                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
5412 }
5413
5414 static void tg3_napi_fini(struct tg3 *tp)
5415 {
5416         int i;
5417
5418         for (i = 0; i < tp->irq_cnt; i++)
5419                 netif_napi_del(&tp->napi[i].napi);
5420 }
5421
/* Halt all traffic processing: quiesce NAPI and disable the tx
 * queues.  trans_start is refreshed first so the netdev watchdog does
 * not fire a spurious tx timeout while traffic is deliberately
 * stopped.
 */
static inline void tg3_netif_stop(struct tg3 *tp)
{
	tp->dev->trans_start = jiffies; /* prevent tx timeout */
	tg3_napi_disable(tp);
	netif_tx_disable(tp->dev);
}
5428
/* Resume traffic processing after tg3_netif_stop(): wake the tx
 * queues, re-enable NAPI, force a status-block update, and unmask
 * interrupts.
 */
static inline void tg3_netif_start(struct tg3 *tp)
{
	/* NOTE: unconditional netif_tx_wake_all_queues is only
	 * appropriate so long as all callers are assured to
	 * have free tx slots (such as after tg3_init_hw)
	 */
	netif_tx_wake_all_queues(tp->dev);

	tg3_napi_enable(tp);
	/* Mark the status block updated so the next poll sees work. */
	tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
	tg3_enable_ints(tp);
}
5441
/* Set irq_sync and wait for all in-flight interrupt handlers on every
 * vector to finish.  The smp_mb() makes the irq_sync store visible
 * before we start waiting (presumably the handlers observe it via
 * tg3_irq_sync() — see tg3_msi()/tg3_msi_1shot() below).
 */
static void tg3_irq_quiesce(struct tg3 *tp)
{
	int i;

	BUG_ON(tp->irq_sync);

	tp->irq_sync = 1;
	smp_mb();

	for (i = 0; i < tp->irq_cnt; i++)
		synchronize_irq(tp->napi[i].irq_vec);
}
5454
/* Fully shutdown all tg3 driver activity elsewhere in the system.
 * If irq_sync is non-zero, then the IRQ handler must be synchronized
 * with as well.  Most of the time, this is not necessary except when
 * shutting down the device.
 *
 * Pairs with tg3_full_unlock().
 */
static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
{
	spin_lock_bh(&tp->lock);
	if (irq_sync)
		tg3_irq_quiesce(tp);
}
5466
/* Release the lock taken by tg3_full_lock(). */
static inline void tg3_full_unlock(struct tg3 *tp)
{
	spin_unlock_bh(&tp->lock);
}
5471
5472 /* One-shot MSI handler - Chip automatically disables interrupt
5473  * after sending MSI so driver doesn't have to do it.
5474  */
static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;

        /* Warm the cache lines NAPI poll will touch first. */
        prefetch(tnapi->hw_status);
        if (tnapi->rx_rcb)
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        /* Skip scheduling if the driver has quiesced interrupts. */
        if (likely(!tg3_irq_sync(tp)))
                napi_schedule(&tnapi->napi);

        return IRQ_HANDLED;
}
5489
5490 /* MSI ISR - No need to check for interrupt sharing and no need to
5491  * flush status block and interrupt mailbox. PCI ordering rules
5492  * guarantee that MSI will arrive after the status block.
5493  */
5494 static irqreturn_t tg3_msi(int irq, void *dev_id)
5495 {
5496         struct tg3_napi *tnapi = dev_id;
5497         struct tg3 *tp = tnapi->tp;
5498
5499         prefetch(tnapi->hw_status);
5500         if (tnapi->rx_rcb)
5501                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
5502         /*
5503          * Writing any value to intr-mbox-0 clears PCI INTA# and
5504          * chip-internal interrupt pending events.
5505          * Writing non-zero to intr-mbox-0 additional tells the
5506          * NIC to stop sending us irqs, engaging "in-intr-handler"
5507          * event coalescing.
5508          */
5509         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
5510         if (likely(!tg3_irq_sync(tp)))
5511                 napi_schedule(&tnapi->napi);
5512
5513         return IRQ_RETVAL(1);
5514 }
5515
/* Legacy INTx interrupt handler (non-tagged status mode).  Determines
 * whether the interrupt is ours, de-asserts INTA# via the interrupt
 * mailbox, and defers all RX/TX work to NAPI.
 */
static irqreturn_t tg3_interrupt(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * Writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * Writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
        if (tg3_irq_sync(tp))
                goto out;
        sblk->status &= ~SD_STATUS_UPDATED;
        if (likely(tg3_has_work(tnapi))) {
                prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
                napi_schedule(&tnapi->napi);
        } else {
                /* No work, shared interrupt perhaps?  re-enable
                 * interrupts, and flush that PCI write
                 */
                tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
                               0x00000000);
        }
out:
        return IRQ_RETVAL(handled);
}
5564
/* Legacy INTx interrupt handler for chips using tagged status blocks.
 * A repeated status tag means no new events since our last pass, which
 * lets the handler distinguish its own interrupts from a screaming
 * shared line.
 */
static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int handled = 1;

        /* In INTx mode, it is possible for the interrupt to arrive at
         * the CPU before the status block posted prior to the interrupt.
         * Reading the PCI State register will confirm whether the
         * interrupt is ours and will flush the status block.
         */
        if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
                if (tg3_flag(tp, CHIP_RESETTING) ||
                    (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                        handled = 0;
                        goto out;
                }
        }

        /*
         * writing any value to intr-mbox-0 clears PCI INTA# and
         * chip-internal interrupt pending events.
         * writing non-zero to intr-mbox-0 additionally tells the
         * NIC to stop sending us irqs, engaging "in-intr-handler"
         * event coalescing.
         *
         * Flush the mailbox to de-assert the IRQ immediately to prevent
         * spurious interrupts.  The flush impacts performance but
         * excessive spurious interrupts can be worse in some cases.
         */
        tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);

        /*
         * In a shared interrupt configuration, sometimes other devices'
         * interrupts will scream.  We record the current status tag here
         * so that the above check can report that the screaming interrupts
         * are unhandled.  Eventually they will be silenced.
         */
        tnapi->last_irq_tag = sblk->status_tag;

        if (tg3_irq_sync(tp))
                goto out;

        prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);

        napi_schedule(&tnapi->napi);

out:
        return IRQ_RETVAL(handled);
}
5616
5617 /* ISR for interrupt test */
/* Minimal handler installed while the driver verifies interrupt
 * delivery: report "handled" (and mask further interrupts) as soon as
 * either the status block shows an update or INTA# is asserted.
 */
static irqreturn_t tg3_test_isr(int irq, void *dev_id)
{
        struct tg3_napi *tnapi = dev_id;
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;

        if ((sblk->status & SD_STATUS_UPDATED) ||
            !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
                tg3_disable_ints(tp);
                return IRQ_RETVAL(1);
        }
        return IRQ_RETVAL(0);
}
5631
5632 static int tg3_init_hw(struct tg3 *, int);
5633 static int tg3_halt(struct tg3 *, int, int);
5634
5635 /* Restart hardware after configuration changes, self-test, etc.
5636  * Invoked with tp->lock held.
5637  */
/* Re-initialize the hardware after a configuration change.  On
 * failure the device is halted and closed; dev_close() must run
 * without tp->lock, hence the temporary unlock/relock (annotated for
 * sparse on the declaration lines below).  Returns 0 or the
 * tg3_init_hw() error code.
 */
static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
        __releases(tp->lock)
        __acquires(tp->lock)
{
        int err;

        err = tg3_init_hw(tp, reset_phy);
        if (err) {
                netdev_err(tp->dev,
                           "Failed to re-initialize device, aborting\n");
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                tg3_full_unlock(tp);
                del_timer_sync(&tp->timer);
                tp->irq_sync = 0;
                tg3_napi_enable(tp);
                dev_close(tp->dev);
                tg3_full_lock(tp, 0);
        }
        return err;
}
5658
5659 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: with normal interrupt delivery unavailable, invoke the
 * INTx handler for every vector so pending events still get serviced.
 */
static void tg3_poll_controller(struct net_device *dev)
{
        int i;
        struct tg3 *tp = netdev_priv(dev);

        for (i = 0; i < tp->irq_cnt; i++)
                tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
}
5668 #endif
5669
/* Workqueue handler that fully resets the chip (scheduled e.g. from
 * tg3_tx_timeout()).  Quiesces PHY, netif and IRQs, reprograms the
 * hardware, then restarts everything.
 */
static void tg3_reset_task(struct work_struct *work)
{
        struct tg3 *tp = container_of(work, struct tg3, reset_task);
        int err;
        unsigned int restart_timer;

        tg3_full_lock(tp, 0);

        /* Device was closed between scheduling and execution. */
        if (!netif_running(tp->dev)) {
                tg3_full_unlock(tp);
                return;
        }

        tg3_full_unlock(tp);

        /* tg3_phy_stop()/tg3_netif_stop() may sleep or flush work, so
         * they run without tp->lock held.
         */
        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        tg3_full_lock(tp, 1);

        restart_timer = tg3_flag(tp, RESTART_TIMER);
        tg3_flag_clear(tp, RESTART_TIMER);

        /* TX recovery: fall back to flushed mailbox writes.
         * NOTE(review): presumably this works around lost TX mailbox
         * writes on chips with write-reordering issues — confirm.
         */
        if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
                tp->write32_tx_mbox = tg3_write32_tx_mbox;
                tp->write32_rx_mbox = tg3_write_flush_reg32;
                tg3_flag_set(tp, MBOX_WRITE_REORDER);
                tg3_flag_clear(tp, TX_RECOVERY_PENDING);
        }

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
        err = tg3_init_hw(tp, 1);
        if (err)
                goto out;

        tg3_netif_start(tp);

        if (restart_timer)
                mod_timer(&tp->timer, jiffies + 1);

out:
        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);
}
5717
5718 static void tg3_tx_timeout(struct net_device *dev)
5719 {
5720         struct tg3 *tp = netdev_priv(dev);
5721
5722         if (netif_msg_tx_err(tp)) {
5723                 netdev_err(dev, "transmit timed out, resetting\n");
5724                 tg3_dump_state(tp);
5725         }
5726
5727         schedule_work(&tp->reset_task);
5728 }
5729
5730 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
5731 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
5732 {
5733         u32 base = (u32) mapping & 0xffffffff;
5734
5735         return (base > 0xffffdcc0) && (base + len + 8 < base);
5736 }
5737
5738 /* Test for DMA addresses > 40-bit */
static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
                                          int len)
{
/* Addresses above 40 bits can only occur on 64-bit kernels; the check
 * is compiled out everywhere else.
 */
#if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
        if (tg3_flag(tp, 40BIT_DMA_BUG))
                return ((u64) mapping + len) > DMA_BIT_MASK(40);
        return 0;
#else
        return 0;
#endif
}
5750
/* Fill TX descriptor @entry in @tnapi's ring.  @mss_and_is_end packs
 * the MSS in bits 31:1 and the "last descriptor of this packet" flag
 * in bit 0.
 */
static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
                        dma_addr_t mapping, int len, u32 flags,
                        u32 mss_and_is_end)
{
        struct tg3_tx_buffer_desc *txd = &tnapi->tx_ring[entry];
        int is_end = (mss_and_is_end & 0x1);
        u32 mss = (mss_and_is_end >> 1);
        u32 vlan_tag = 0;

        if (is_end)
                flags |= TXD_FLAG_END;
        /* Callers smuggle the VLAN tag in the upper 16 bits of flags. */
        if (flags & TXD_FLAG_VLAN) {
                vlan_tag = flags >> 16;
                flags &= 0xffff;
        }
        vlan_tag |= (mss << TXD_MSS_SHIFT);

        txd->addr_hi = ((u64) mapping >> 32);
        txd->addr_lo = ((u64) mapping & 0xffffffff);
        txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
        txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
}
5773
/* Undo the DMA mappings made for @skb on an error path: the linear
 * head mapped at tnapi->tx_prod, plus the first @last page fragments
 * occupying the ring entries that follow it.
 */
static void tg3_skb_error_unmap(struct tg3_napi *tnapi,
                                struct sk_buff *skb, int last)
{
        int i;
        u32 entry = tnapi->tx_prod;
        struct ring_info *txb = &tnapi->tx_buffers[entry];

        pci_unmap_single(tnapi->tp->pdev,
                         dma_unmap_addr(txb, mapping),
                         skb_headlen(skb),
                         PCI_DMA_TODEVICE);
        for (i = 0; i < last; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                entry = NEXT_TX(entry);
                txb = &tnapi->tx_buffers[entry];

                pci_unmap_page(tnapi->tp->pdev,
                               dma_unmap_addr(txb, mapping),
                               frag->size, PCI_DMA_TODEVICE);
        }
}
5796
5797 /* Workaround 4GB and 40-bit hardware DMA bugs. */
/* Copy @skb into a freshly-allocated linear skb whose buffer avoids
 * the 4G-boundary / 40-bit DMA hardware bugs, map it, and write a
 * single TX descriptor for it at the current producer index.  The
 * original @skb is always freed.  Returns 0 on success, -1 when the
 * packet must be dropped (allocation/mapping failure, or the copy
 * still crosses a 4GB boundary).
 */
static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
                                       struct sk_buff *skb,
                                       u32 base_flags, u32 mss)
{
        struct tg3 *tp = tnapi->tp;
        struct sk_buff *new_skb;
        dma_addr_t new_addr = 0;
        u32 entry = tnapi->tx_prod;
        int ret = 0;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                new_skb = skb_copy(skb, GFP_ATOMIC);
        else {
                /* 5701: grow headroom so data can be 4-byte aligned
                 * (presumably an alignment quirk of that ASIC — see
                 * the 5701_DMA_BUG flag in tg3_start_xmit()).
                 */
                int more_headroom = 4 - ((unsigned long)skb->data & 3);

                new_skb = skb_copy_expand(skb,
                                          skb_headroom(skb) + more_headroom,
                                          skb_tailroom(skb), GFP_ATOMIC);
        }

        if (!new_skb) {
                ret = -1;
        } else {
                /* New SKB is guaranteed to be linear. */
                new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
                                          PCI_DMA_TODEVICE);
                /* Make sure the mapping succeeded */
                if (pci_dma_mapping_error(tp->pdev, new_addr)) {
                        ret = -1;
                        dev_kfree_skb(new_skb);

                /* Make sure new skb does not cross any 4G boundaries.
                 * Drop the packet if it does.
                 */
                } else if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
                           tg3_4g_overflow_test(new_addr, new_skb->len)) {
                        pci_unmap_single(tp->pdev, new_addr, new_skb->len,
                                         PCI_DMA_TODEVICE);
                        ret = -1;
                        dev_kfree_skb(new_skb);
                } else {
                        tnapi->tx_buffers[entry].skb = new_skb;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry],
                                           mapping, new_addr);

                        tg3_set_txd(tnapi, entry, new_addr, new_skb->len,
                                    base_flags, 1 | (mss << 1));
                }
        }

        /* The original skb is consumed on every path. */
        dev_kfree_skb(skb);

        return ret;
}
5852
5853 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
5854
5855 /* Use GSO to workaround a rare TSO bug that may be triggered when the
5856  * TSO header is greater than 80 bytes.
5857  */
static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
{
        struct sk_buff *segs, *nskb;
        /* Worst-case descriptor estimate: ~3 ring entries per segment. */
        u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;

        /* Estimate the number of fragments in the worst case */
        if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
                netif_stop_queue(tp->dev);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
                        return NETDEV_TX_BUSY;

                netif_wake_queue(tp->dev);
        }

        /* Let GSO segment the packet in software, then transmit each
         * (now small-header) segment through the normal xmit path.
         */
        segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto tg3_tso_bug_end;

        do {
                nskb = segs;
                segs = segs->next;
                nskb->next = NULL;
                tg3_start_xmit(nskb, tp->dev);
        } while (segs);

tg3_tso_bug_end:
        dev_kfree_skb(skb);

        return NETDEV_TX_OK;
}
5895
5896 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
5897  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
5898  */
static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 len, entry, base_flags, mss;
        int i = -1, would_hit_hwbug;
        dma_addr_t mapping;
        struct tg3_napi *tnapi;
        struct netdev_queue *txq;
        unsigned int last;

        txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
        tnapi = &tp->napi[skb_get_queue_mapping(skb)];
        /* NOTE(review): with TSS the TX contexts appear to start one
         * past the queue index (vector 0 handles other events) —
         * confirm against the IRQ setup code.
         */
        if (tg3_flag(tp, ENABLE_TSS))
                tnapi++;

        /* We are running in BH disabled context with netif_tx_lock
         * and TX reclaim runs via tp->napi.poll inside of a software
         * interrupt.  Furthermore, IRQ processing runs lockless so we have
         * no IRQ context deadlocks to worry about either.  Rejoice!
         */
        if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev,
                                   "BUG! Tx Ring full when queue awake!\n");
                }
                return NETDEV_TX_BUSY;
        }

        entry = tnapi->tx_prod;
        base_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                base_flags |= TXD_FLAG_TCPUDP_CSUM;

        /* TSO setup: compute header length, fix up IP/TCP checksum
         * fields as each hardware TSO generation expects, and encode
         * the header length into mss/base_flags.
         */
        mss = skb_shinfo(skb)->gso_size;
        if (mss) {
                struct iphdr *iph;
                u32 tcp_opt_len, hdr_len;

                if (skb_header_cloned(skb) &&
                    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
                        dev_kfree_skb(skb);
                        goto out_unlock;
                }

                iph = ip_hdr(skb);
                tcp_opt_len = tcp_optlen(skb);

                if (skb_is_gso_v6(skb)) {
                        hdr_len = skb_headlen(skb) - ETH_HLEN;
                } else {
                        u32 ip_tcp_len;

                        ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
                        hdr_len = ip_tcp_len + tcp_opt_len;

                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }

                /* Headers over 80 bytes trip a TSO chip bug; fall back
                 * to software GSO (see tg3_tso_bug()).
                 */
                if (unlikely((ETH_HLEN + hdr_len) > 80) &&
                    tg3_flag(tp, TSO_BUG))
                        return tg3_tso_bug(tp, skb);

                base_flags |= (TXD_FLAG_CPU_PRE_DMA |
                               TXD_FLAG_CPU_POST_DMA);

                if (tg3_flag(tp, HW_TSO_1) ||
                    tg3_flag(tp, HW_TSO_2) ||
                    tg3_flag(tp, HW_TSO_3)) {
                        tcp_hdr(skb)->check = 0;
                        base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
                } else
                        tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
                                                                 iph->daddr, 0,
                                                                 IPPROTO_TCP,
                                                                 0);

                /* Encode hdr_len per TSO hardware generation. */
                if (tg3_flag(tp, HW_TSO_3)) {
                        mss |= (hdr_len & 0xc) << 12;
                        if (hdr_len & 0x10)
                                base_flags |= 0x00000010;
                        base_flags |= (hdr_len & 0x3e0) << 5;
                } else if (tg3_flag(tp, HW_TSO_2))
                        mss |= hdr_len << 9;
                else if (tg3_flag(tp, HW_TSO_1) ||
                         GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                mss |= (tsflags << 11);
                        }
                } else {
                        if (tcp_opt_len || iph->ihl > 5) {
                                int tsflags;

                                tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
                                base_flags |= tsflags << 12;
                        }
                }
        }

        /* VLAN tag rides in the upper 16 bits of base_flags; see
         * tg3_set_txd().
         */
        if (vlan_tx_tag_present(skb))
                base_flags |= (TXD_FLAG_VLAN |
                               (vlan_tx_tag_get(skb) << 16));

        if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
            !mss && skb->len > VLAN_ETH_FRAME_LEN)
                base_flags |= TXD_FLAG_JMB_PKT;

        len = skb_headlen(skb);

        mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(tp->pdev, mapping)) {
                dev_kfree_skb(skb);
                goto out_unlock;
        }

        tnapi->tx_buffers[entry].skb = skb;
        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);

        /* Check the head mapping against every DMA erratum; if any
         * trips, the packet is re-sent through the copy workaround.
         */
        would_hit_hwbug = 0;

        if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
                would_hit_hwbug = 1;

        if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
            tg3_4g_overflow_test(mapping, len))
                would_hit_hwbug = 1;

        if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
            tg3_40bit_overflow_test(tp, mapping, len))
                would_hit_hwbug = 1;

        if (tg3_flag(tp, 5701_DMA_BUG))
                would_hit_hwbug = 1;

        tg3_set_txd(tnapi, entry, mapping, len, base_flags,
                    (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));

        entry = NEXT_TX(entry);

        /* Now loop through additional data fragments, and queue them. */
        if (skb_shinfo(skb)->nr_frags > 0) {
                last = skb_shinfo(skb)->nr_frags - 1;
                for (i = 0; i <= last; i++) {
                        skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                        len = frag->size;
                        mapping = pci_map_page(tp->pdev,
                                               frag->page,
                                               frag->page_offset,
                                               len, PCI_DMA_TODEVICE);

                        tnapi->tx_buffers[entry].skb = NULL;
                        dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
                                           mapping);
                        if (pci_dma_mapping_error(tp->pdev, mapping))
                                goto dma_error;

                        if (tg3_flag(tp, SHORT_DMA_BUG) &&
                            len <= 8)
                                would_hit_hwbug = 1;

                        if (tg3_flag(tp, 4G_DMA_BNDRY_BUG) &&
                            tg3_4g_overflow_test(mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_flag(tp, 40BIT_DMA_LIMIT_BUG) &&
                            tg3_40bit_overflow_test(tp, mapping, len))
                                would_hit_hwbug = 1;

                        if (tg3_flag(tp, HW_TSO_1) ||
                            tg3_flag(tp, HW_TSO_2) ||
                            tg3_flag(tp, HW_TSO_3))
                                tg3_set_txd(tnapi, entry, mapping, len,
                                            base_flags, (i == last)|(mss << 1));
                        else
                                tg3_set_txd(tnapi, entry, mapping, len,
                                            base_flags, (i == last));

                        entry = NEXT_TX(entry);
                }
        }

        if (would_hit_hwbug) {
                tg3_skb_error_unmap(tnapi, skb, i);

                /* If the workaround fails due to memory/mapping
                 * failure, silently drop this packet.
                 */
                if (tigon3_dma_hwbug_workaround(tnapi, skb, base_flags, mss))
                        goto out_unlock;

                /* Workaround wrote exactly one descriptor at tx_prod. */
                entry = NEXT_TX(tnapi->tx_prod);
        }

        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);

        tnapi->tx_prod = entry;
        if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
                netif_tx_stop_queue(txq);

                /* netif_tx_stop_queue() must be done before checking
                 * tx index in tg3_tx_avail() below, because in
                 * tg3_tx(), we update tx index before checking for
                 * netif_tx_queue_stopped().
                 */
                smp_mb();
                if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
                        netif_tx_wake_queue(txq);
        }

out_unlock:
        mmiowb();

        return NETDEV_TX_OK;

dma_error:
        /* Unmap the head plus the i fragments mapped so far, drop. */
        tg3_skb_error_unmap(tnapi, skb, i);
        dev_kfree_skb(skb);
        tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
        return NETDEV_TX_OK;
}
6127
/* Enable or disable internal MAC loopback according to the
 * NETIF_F_LOOPBACK bit in @features.  tp->lock guards the MAC_MODE
 * register update.
 */
static void tg3_set_loopback(struct net_device *dev, u32 features)
{
        struct tg3 *tp = netdev_priv(dev);

        if (features & NETIF_F_LOOPBACK) {
                /* Already in loopback mode; nothing to do. */
                if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
                        return;

                /*
                 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
                 * loopback mode if Half-Duplex mode was negotiated earlier.
                 */
                tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;

                /* Enable internal MAC loopback mode */
                tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
                spin_lock_bh(&tp->lock);
                tw32(MAC_MODE, tp->mac_mode);
                netif_carrier_on(tp->dev);
                spin_unlock_bh(&tp->lock);
                netdev_info(dev, "Internal MAC loopback mode enabled.\n");
        } else {
                /* Already out of loopback mode; nothing to do. */
                if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
                        return;

                /* Disable internal MAC loopback mode */
                tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
                spin_lock_bh(&tp->lock);
                tw32(MAC_MODE, tp->mac_mode);
                /* Force link status check */
                tg3_setup_phy(tp, 1);
                spin_unlock_bh(&tp->lock);
                netdev_info(dev, "Internal MAC loopback mode disabled.\n");
        }
}
6163
6164 static u32 tg3_fix_features(struct net_device *dev, u32 features)
6165 {
6166         struct tg3 *tp = netdev_priv(dev);
6167
6168         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
6169                 features &= ~NETIF_F_ALL_TSO;
6170
6171         return features;
6172 }
6173
6174 static int tg3_set_features(struct net_device *dev, u32 features)
6175 {
6176         u32 changed = dev->features ^ features;
6177
6178         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
6179                 tg3_set_loopback(dev, features);
6180
6181         return 0;
6182 }
6183
/* Record @new_mtu and adjust the jumbo-ring / TSO flags to match.
 * 5780-class chips trade TSO for jumbo frames (see tg3_fix_features),
 * so netdev_update_features() is invoked around the TSO_CAPABLE flip.
 */
static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
                               int new_mtu)
{
        dev->mtu = new_mtu;

        if (new_mtu > ETH_DATA_LEN) {
                if (tg3_flag(tp, 5780_CLASS)) {
                        netdev_update_features(dev);
                        tg3_flag_clear(tp, TSO_CAPABLE);
                } else {
                        tg3_flag_set(tp, JUMBO_RING_ENABLE);
                }
        } else {
                if (tg3_flag(tp, 5780_CLASS)) {
                        tg3_flag_set(tp, TSO_CAPABLE);
                        netdev_update_features(dev);
                }
                tg3_flag_clear(tp, JUMBO_RING_ENABLE);
        }
}
6204
/* ndo_change_mtu hook: validate the new MTU, then (if the interface is
 * up) halt the chip, apply the MTU, and restart the hardware.
 * Returns 0 or a negative errno.
 */
static int tg3_change_mtu(struct net_device *dev, int new_mtu)
{
        struct tg3 *tp = netdev_priv(dev);
        int err;

        if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
                return -EINVAL;

        if (!netif_running(dev)) {
                /* We'll just catch it later when the
                 * device is up'd.
                 */
                tg3_set_mtu(dev, tp, new_mtu);
                return 0;
        }

        tg3_phy_stop(tp);

        tg3_netif_stop(tp);

        /* irq_sync=1: also quiesce interrupt handlers before halting. */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);

        tg3_set_mtu(dev, tp, new_mtu);

        err = tg3_restart_hw(tp, 0);

        if (!err)
                tg3_netif_start(tp);

        tg3_full_unlock(tp);

        if (!err)
                tg3_phy_start(tp);

        return err;
}
6243
/* Free every RX SKB held by producer-ring set @tpr.  For the default
 * ring (napi[0]) all slots are walked; for the extra per-vector rings
 * only the occupied span between consumer and producer indices needs
 * freeing.
 */
static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
{
        int i;

        if (tpr != &tp->napi[0].prodring) {
                for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
                     i = (i + 1) & tp->rx_std_ring_mask)
                        tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
                                        tp->rx_pkt_map_sz);

                if (tg3_flag(tp, JUMBO_CAPABLE)) {
                        for (i = tpr->rx_jmb_cons_idx;
                             i != tpr->rx_jmb_prod_idx;
                             i = (i + 1) & tp->rx_jmb_ring_mask) {
                                tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
                                                TG3_RX_JMB_MAP_SZ);
                        }
                }

                return;
        }

        /* Default ring: every slot may hold a buffer. */
        for (i = 0; i <= tp->rx_std_ring_mask; i++)
                tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
                                tp->rx_pkt_map_sz);

        if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
                for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
                        tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
                                        TG3_RX_JMB_MAP_SZ);
        }
}
6277
/* Initialize rx rings for packet processing.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock are held and thus
 * we may not sleep.
 *
 * Returns 0 on success or -ENOMEM when not even a single buffer
 * could be posted; on partial SKB allocation failure the pending
 * counts are trimmed and 0 is still returned.
 */
static int tg3_rx_prodring_alloc(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	u32 i, rx_pkt_dma_sz;

	/* Reset both rings to the empty state. */
	tpr->rx_std_cons_idx = 0;
	tpr->rx_std_prod_idx = 0;
	tpr->rx_jmb_cons_idx = 0;
	tpr->rx_jmb_prod_idx = 0;

	if (tpr != &tp->napi[0].prodring) {
		/* Secondary ring sets only need their shadow buffer
		 * arrays cleared here; descriptors and SKBs are handled
		 * through the primary set below.
		 */
		memset(&tpr->rx_std_buffers[0], 0,
		       TG3_RX_STD_BUFF_RING_SIZE(tp));
		if (tpr->rx_jmb_buffers)
			memset(&tpr->rx_jmb_buffers[0], 0,
			       TG3_RX_JMB_BUFF_RING_SIZE(tp));
		goto done;
	}

	/* Zero out all descriptors. */
	memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));

	/* 5780-class chips carry jumbo frames on the standard ring,
	 * so enlarge the per-packet DMA size when the MTU requires it.
	 */
	rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
	if (tg3_flag(tp, 5780_CLASS) &&
	    tp->dev->mtu > ETH_DATA_LEN)
		rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
	tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);

	/* Initialize invariants of the rings, we only set this
	 * stuff once.  This works because the card does not
	 * write into the rx buffer posting rings.
	 */
	for (i = 0; i <= tp->rx_std_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_std[i];
		rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
		rxd->opaque = (RXD_OPAQUE_RING_STD |
			       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	/* Now allocate fresh SKBs for each rx ring. */
	for (i = 0; i < tp->rx_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX standard ring. Only "
				    "%d out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_pending);
			if (i == 0)
				goto initfail;
			/* Run with a shorter ring rather than failing. */
			tp->rx_pending = i;
			break;
		}
	}

	if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
		goto done;

	memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));

	if (!tg3_flag(tp, JUMBO_RING_ENABLE))
		goto done;

	/* Same once-only descriptor setup for the jumbo ring. */
	for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
		struct tg3_rx_buffer_desc *rxd;

		rxd = &tpr->rx_jmb[i].std;
		rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
		rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
				  RXD_FLAG_JUMBO;
		rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
		       (i << RXD_OPAQUE_INDEX_SHIFT));
	}

	for (i = 0; i < tp->rx_jumbo_pending; i++) {
		if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
			netdev_warn(tp->dev,
				    "Using a smaller RX jumbo ring. Only %d "
				    "out of %d buffers were allocated "
				    "successfully\n", i, tp->rx_jumbo_pending);
			if (i == 0)
				goto initfail;
			tp->rx_jumbo_pending = i;
			break;
		}
	}

done:
	return 0;

initfail:
	tg3_rx_prodring_free(tp, tpr);	/* unwind partial allocations */
	return -ENOMEM;
}
6380
/* Tear down a producer ring set: free the shadow buffer arrays and
 * return the descriptor rings to the DMA allocator.  Safe on a
 * partially initialized set; every freed pointer is NULLed so a
 * second call is harmless.
 */
static void tg3_rx_prodring_fini(struct tg3 *tp,
				 struct tg3_rx_prodring_set *tpr)
{
	kfree(tpr->rx_std_buffers);	/* kfree(NULL) is a no-op */
	tpr->rx_std_buffers = NULL;
	kfree(tpr->rx_jmb_buffers);
	tpr->rx_jmb_buffers = NULL;
	if (tpr->rx_std) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
				  tpr->rx_std, tpr->rx_std_mapping);
		tpr->rx_std = NULL;
	}
	if (tpr->rx_jmb) {
		dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
				  tpr->rx_jmb, tpr->rx_jmb_mapping);
		tpr->rx_jmb = NULL;
	}
}
6399
/* Allocate the shadow buffer arrays and DMA descriptor rings for a
 * producer ring set.  Jumbo resources are only allocated on chips
 * that are jumbo capable and not 5780-class.
 *
 * Returns 0 on success or -ENOMEM after releasing anything that
 * was partially allocated.
 */
static int tg3_rx_prodring_init(struct tg3 *tp,
				struct tg3_rx_prodring_set *tpr)
{
	tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
				      GFP_KERNEL);
	if (!tpr->rx_std_buffers)
		return -ENOMEM;

	tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
					 TG3_RX_STD_RING_BYTES(tp),
					 &tpr->rx_std_mapping,
					 GFP_KERNEL);
	if (!tpr->rx_std)
		goto err_out;

	if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
		tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
					      GFP_KERNEL);
		if (!tpr->rx_jmb_buffers)
			goto err_out;

		tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
						 TG3_RX_JMB_RING_BYTES(tp),
						 &tpr->rx_jmb_mapping,
						 GFP_KERNEL);
		if (!tpr->rx_jmb)
			goto err_out;
	}

	return 0;

err_out:
	tg3_rx_prodring_fini(tp, tpr);	/* unwind partial allocations */
	return -ENOMEM;
}
6435
/* Free up pending packets in all rx/tx rings.
 *
 * The chip has been shut down and the driver detached from
 * the networking, so no interrupts or new tx packets will
 * end up in the driver.  tp->{tx,}lock is not held and we are not
 * in an interrupt context and thus may sleep.
 */
static void tg3_free_rings(struct tg3 *tp)
{
	int i, j;

	for (j = 0; j < tp->irq_cnt; j++) {
		struct tg3_napi *tnapi = &tp->napi[j];

		tg3_rx_prodring_free(tp, &tnapi->prodring);

		/* Vectors without a tx ring have no tx buffers to free. */
		if (!tnapi->tx_buffers)
			continue;

		for (i = 0; i < TG3_TX_RING_SIZE; ) {
			struct ring_info *txp;
			struct sk_buff *skb;
			unsigned int k;

			txp = &tnapi->tx_buffers[i];
			skb = txp->skb;

			/* Empty slot, just advance. */
			if (skb == NULL) {
				i++;
				continue;
			}

			/* Unmap the linear (head) part of the SKB... */
			pci_unmap_single(tp->pdev,
					 dma_unmap_addr(txp, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);
			txp->skb = NULL;

			i++;

			/* ...then each paged fragment, which occupies the
			 * following slots, wrapping at the end of the ring.
			 */
			for (k = 0; k < skb_shinfo(skb)->nr_frags; k++) {
				txp = &tnapi->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
				pci_unmap_page(tp->pdev,
					       dma_unmap_addr(txp, mapping),
					       skb_shinfo(skb)->frags[k].size,
					       PCI_DMA_TODEVICE);
				i++;
			}

			/* _any variant: callable from any context. */
			dev_kfree_skb_any(skb);
		}
	}
}
6489
6490 /* Initialize tx/rx rings for packet processing.
6491  *
6492  * The chip has been shut down and the driver detached from
6493  * the networking, so no interrupts or new tx packets will
6494  * end up in the driver.  tp->{tx,}lock are held and thus
6495  * we may not sleep.
6496  */
6497 static int tg3_init_rings(struct tg3 *tp)
6498 {
6499         int i;
6500
6501         /* Free up all the SKBs. */
6502         tg3_free_rings(tp);
6503
6504         for (i = 0; i < tp->irq_cnt; i++) {
6505                 struct tg3_napi *tnapi = &tp->napi[i];
6506
6507                 tnapi->last_tag = 0;
6508                 tnapi->last_irq_tag = 0;
6509                 tnapi->hw_status->status = 0;
6510                 tnapi->hw_status->status_tag = 0;
6511                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
6512
6513                 tnapi->tx_prod = 0;
6514                 tnapi->tx_cons = 0;
6515                 if (tnapi->tx_ring)
6516                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
6517
6518                 tnapi->rx_rcb_ptr = 0;
6519                 if (tnapi->rx_rcb)
6520                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6521
6522                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
6523                         tg3_free_rings(tp);
6524                         return -ENOMEM;
6525                 }
6526         }
6527
6528         return 0;
6529 }
6530
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.
 *
 * Releases every DMA-coherent region and kmalloc'ed array obtained
 * by tg3_alloc_consistent(), NULLing each pointer so the function
 * is safe to call on a partially allocated device.
 */
static void tg3_free_consistent(struct tg3 *tp)
{
	int i;

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];

		if (tnapi->tx_ring) {
			dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
				tnapi->tx_ring, tnapi->tx_desc_mapping);
			tnapi->tx_ring = NULL;
		}

		/* kfree(NULL) is a no-op, so no guard is needed. */
		kfree(tnapi->tx_buffers);
		tnapi->tx_buffers = NULL;

		if (tnapi->rx_rcb) {
			dma_free_coherent(&tp->pdev->dev,
					  TG3_RX_RCB_RING_BYTES(tp),
					  tnapi->rx_rcb,
					  tnapi->rx_rcb_mapping);
			tnapi->rx_rcb = NULL;
		}

		tg3_rx_prodring_fini(tp, &tnapi->prodring);

		if (tnapi->hw_status) {
			dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
					  tnapi->hw_status,
					  tnapi->status_mapping);
			tnapi->hw_status = NULL;
		}
	}

	if (tp->hw_stats) {
		dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
				  tp->hw_stats, tp->stats_mapping);
		tp->hw_stats = NULL;
	}
}
6575
/*
 * Must not be invoked with interrupt sources disabled and
 * the hardware shutdown down.  Can sleep.
 *
 * Allocates the hardware stats block plus, per interrupt vector,
 * a status block, a producer ring set, and (subject to the TSS/RSS
 * rules below) a tx ring and an rx return ring.  On any failure
 * everything already obtained is released via tg3_free_consistent()
 * and -ENOMEM is returned; otherwise 0.
 */
static int tg3_alloc_consistent(struct tg3 *tp)
{
	int i;

	tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
					  sizeof(struct tg3_hw_stats),
					  &tp->stats_mapping,
					  GFP_KERNEL);
	if (!tp->hw_stats)
		goto err_out;

	memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		struct tg3_hw_status *sblk;

		tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
						      TG3_HW_STATUS_SIZE,
						      &tnapi->status_mapping,
						      GFP_KERNEL);
		if (!tnapi->hw_status)
			goto err_out;

		memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
		sblk = tnapi->hw_status;

		if (tg3_rx_prodring_init(tp, &tnapi->prodring))
			goto err_out;

		/* If multivector TSS is enabled, vector 0 does not handle
		 * tx interrupts.  Don't allocate any resources for it.
		 */
		if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
		    (i && tg3_flag(tp, ENABLE_TSS))) {
			tnapi->tx_buffers = kzalloc(sizeof(struct ring_info) *
						    TG3_TX_RING_SIZE,
						    GFP_KERNEL);
			if (!tnapi->tx_buffers)
				goto err_out;

			tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
							    TG3_TX_RING_BYTES,
							&tnapi->tx_desc_mapping,
							    GFP_KERNEL);
			if (!tnapi->tx_ring)
				goto err_out;
		}

		/*
		 * When RSS is enabled, the status block format changes
		 * slightly.  The "rx_jumbo_consumer", "reserved",
		 * and "rx_mini_consumer" members get mapped to the
		 * other three rx return ring producer indexes.
		 */
		switch (i) {
		default:
			/* With RSS, vectors > 4 have no rx return ring. */
			if (tg3_flag(tp, ENABLE_RSS)) {
				tnapi->rx_rcb_prod_idx = NULL;
				break;
			}
			/* Fall through */
		case 1:
			tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
			break;
		case 2:
			tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
			break;
		case 3:
			tnapi->rx_rcb_prod_idx = &sblk->reserved;
			break;
		case 4:
			tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
			break;
		}

		/*
		 * If multivector RSS is enabled, vector 0 does not handle
		 * rx or tx interrupts.  Don't allocate any resources for it.
		 */
		if (!i && tg3_flag(tp, ENABLE_RSS))
			continue;

		tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
						   TG3_RX_RCB_RING_BYTES(tp),
						   &tnapi->rx_rcb_mapping,
						   GFP_KERNEL);
		if (!tnapi->rx_rcb)
			goto err_out;

		memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
	}

	return 0;

err_out:
	tg3_free_consistent(tp);
	return -ENOMEM;
}
6679
6680 #define MAX_WAIT_CNT 1000
6681
/* To stop a block, clear the enable bit and poll till it
 * clears.  tp->lock is held.
 *
 * Returns 0 when the block stopped (or the register is one that
 * cannot be toggled on this chip).  On timeout, returns -ENODEV
 * unless @silent is set, in which case 0 is returned without
 * logging.
 */
static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
{
	unsigned int i;
	u32 val;

	if (tg3_flag(tp, 5705_PLUS)) {
		switch (ofs) {
		case RCVLSC_MODE:
		case DMAC_MODE:
		case MBFREE_MODE:
		case BUFMGR_MODE:
		case MEMARB_MODE:
			/* We can't enable/disable these bits of the
			 * 5705/5750, just say success.
			 */
			return 0;

		default:
			break;
		}
	}

	/* Clear the enable bit and flush the posted write. */
	val = tr32(ofs);
	val &= ~enable_bit;
	tw32_f(ofs, val);

	/* Poll up to MAX_WAIT_CNT * 100us for the bit to clear. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		val = tr32(ofs);
		if ((val & enable_bit) == 0)
			break;
	}

	if (i == MAX_WAIT_CNT && !silent) {
		dev_err(&tp->pdev->dev,
			"tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
			ofs, enable_bit);
		return -ENODEV;
	}

	return 0;
}
6727
/* tp->lock is held.
 *
 * Quiesce the hardware: disable interrupts, stop the rx path, stop
 * the individual rx/tx/DMA engines, reset the FTQs, and clear the
 * status and stats blocks.  Stop failures are OR-ed into @err, so
 * the sequence always runs to completion; @silent suppresses the
 * per-block timeout logging.
 */
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
	int i, err;

	tg3_disable_ints(tp);

	/* Stop accepting packets before shutting down the engines. */
	tp->rx_mode &= ~RX_MODE_ENABLE;
	tw32_f(MAC_RX_MODE, tp->rx_mode);
	udelay(10);

	/* Receive-path blocks. */
	err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);

	/* Transmit-path and DMA blocks. */
	err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);

	tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
	tw32_f(MAC_MODE, tp->mac_mode);
	udelay(40);

	tp->tx_mode &= ~TX_MODE_ENABLE;
	tw32_f(MAC_TX_MODE, tp->tx_mode);

	/* Wait up to MAX_WAIT_CNT * 100us for MAC tx to drain. */
	for (i = 0; i < MAX_WAIT_CNT; i++) {
		udelay(100);
		if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
			break;
	}
	if (i >= MAX_WAIT_CNT) {
		dev_err(&tp->pdev->dev,
			"%s timed out, TX_MODE_ENABLE will not clear "
			"MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
		err |= -ENODEV;
	}

	err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);

	/* Pulse the flow-through queue reset. */
	tw32(FTQ_RESET, 0xffffffff);
	tw32(FTQ_RESET, 0x00000000);

	err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
	err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);

	/* Clear the shared-memory state now that DMA is quiesced. */
	for (i = 0; i < tp->irq_cnt; i++) {
		struct tg3_napi *tnapi = &tp->napi[i];
		if (tnapi->hw_status)
			memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
	}
	if (tp->hw_stats)
		memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));

	return err;
}
6793
/* Post @event to the APE firmware's event status register and ring
 * its doorbell.
 *
 * Silently does nothing if the firmware signature or ready bit is
 * missing, or on NCSI firmware.  Waits up to ~1ms (10 x 100us) for
 * any previously posted event to be consumed.
 */
static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
	int i;
	u32 apedata;

	/* NCSI does not support APE events */
	if (tg3_flag(tp, APE_HAS_NCSI))
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
	if (apedata != APE_SEG_SIG_MAGIC)
		return;

	apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
	if (!(apedata & APE_FW_STATUS_READY))
		return;

	/* Wait for up to 1 millisecond for APE to service previous event. */
	for (i = 0; i < 10; i++) {
		if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
			return;

		apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

		/* Slot is free: queue our event while holding the lock. */
		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
					event | APE_EVENT_STATUS_EVENT_PENDING);

		tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

		if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
			break;

		udelay(100);
	}

	/* Ring the doorbell only if the event was actually queued. */
	if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
		tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}
6833
/* Tell the APE management firmware about a driver state transition
 * (@kind is a RESET_KIND_* value) by updating the host segment of
 * APE shared memory and sending the matching state-change event.
 * No-op unless the APE is enabled or for unknown @kind values.
 */
static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
	u32 event;
	u32 apedata;

	if (!tg3_flag(tp, ENABLE_APE))
		return;

	switch (kind) {
	case RESET_KIND_INIT:
		/* Publish the host segment: signature, length, bumped
		 * init count, driver id and behavior flags.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
				APE_HOST_SEG_SIG_MAGIC);
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
				APE_HOST_SEG_LEN_MAGIC);
		apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
		tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
		tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
			APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
		tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
				APE_HOST_BEHAV_NO_PHYLOCK);
		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
				    TG3_APE_HOST_DRVR_STATE_START);

		event = APE_EVENT_STATUS_STATE_START;
		break;
	case RESET_KIND_SHUTDOWN:
		/* With the interface we are currently using,
		 * APE does not track driver state.  Wiping
		 * out the HOST SEGMENT SIGNATURE forces
		 * the APE to assume OS absent status.
		 */
		tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

		if (device_may_wakeup(&tp->pdev->dev) &&
		    tg3_flag(tp, WOL_ENABLE)) {
			tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
					    TG3_APE_HOST_WOL_SPEED_AUTO);
			apedata = TG3_APE_HOST_DRVR_STATE_WOL;
		} else
			apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

		tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

		event = APE_EVENT_STATUS_STATE_UNLOAD;
		break;
	case RESET_KIND_SUSPEND:
		event = APE_EVENT_STATUS_STATE_SUSPEND;
		break;
	default:
		return;
	}

	event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

	tg3_ape_send_event(tp, event);
}
6890
6891 /* tp->lock is held. */
6892 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
6893 {
6894         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
6895                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
6896
6897         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6898                 switch (kind) {
6899                 case RESET_KIND_INIT:
6900                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6901                                       DRV_STATE_START);
6902                         break;
6903
6904                 case RESET_KIND_SHUTDOWN:
6905                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6906                                       DRV_STATE_UNLOAD);
6907                         break;
6908
6909                 case RESET_KIND_SUSPEND:
6910                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6911                                       DRV_STATE_SUSPEND);
6912                         break;
6913
6914                 default:
6915                         break;
6916                 }
6917         }
6918
6919         if (kind == RESET_KIND_INIT ||
6920             kind == RESET_KIND_SUSPEND)
6921                 tg3_ape_driver_state_change(tp, kind);
6922 }
6923
6924 /* tp->lock is held. */
6925 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
6926 {
6927         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
6928                 switch (kind) {
6929                 case RESET_KIND_INIT:
6930                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6931                                       DRV_STATE_START_DONE);
6932                         break;
6933
6934                 case RESET_KIND_SHUTDOWN:
6935                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6936                                       DRV_STATE_UNLOAD_DONE);
6937                         break;
6938
6939                 default:
6940                         break;
6941                 }
6942         }
6943
6944         if (kind == RESET_KIND_SHUTDOWN)
6945                 tg3_ape_driver_state_change(tp, kind);
6946 }
6947
6948 /* tp->lock is held. */
6949 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
6950 {
6951         if (tg3_flag(tp, ENABLE_ASF)) {
6952                 switch (kind) {
6953                 case RESET_KIND_INIT:
6954                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6955                                       DRV_STATE_START);
6956                         break;
6957
6958                 case RESET_KIND_SHUTDOWN:
6959                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6960                                       DRV_STATE_UNLOAD);
6961                         break;
6962
6963                 case RESET_KIND_SUSPEND:
6964                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
6965                                       DRV_STATE_SUSPEND);
6966                         break;
6967
6968                 default:
6969                         break;
6970                 }
6971         }
6972 }
6973
/* Wait for the on-chip firmware to finish initializing after reset.
 *
 * Returns 0 on success.  -ENODEV is returned only when the 5906
 * VCPU fails to signal init-done; on other chips a firmware that
 * never answers is logged once (some Sun onboard parts ship
 * without firmware) but not treated as an error.
 */
static int tg3_poll_fw(struct tg3 *tp)
{
	int i;
	u32 val;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		/* Wait up to 20ms for init done. */
		for (i = 0; i < 200; i++) {
			if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
				return 0;
			udelay(100);
		}
		return -ENODEV;
	}

	/* Wait for firmware initialization to complete. */
	for (i = 0; i < 100000; i++) {
		tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
		/* Firmware acknowledges by writing back the one's
		 * complement of the magic value.
		 */
		if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
			break;
		udelay(10);
	}

	/* Chip might not be fitted with firmware.  Some Sun onboard
	 * parts are configured like that.  So don't signal the timeout
	 * of the above loop as an error, but do report the lack of
	 * running firmware once.
	 */
	if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
		tg3_flag_set(tp, NO_FWARE_REPORTED);

		netdev_info(tp->dev, "No firmware running\n");
	}

	if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
		/* The 57765 A0 needs a little more
		 * time to do some important work.
		 */
		mdelay(10);
	}

	return 0;
}
7017
/* Save PCI command register before chip reset */
static void tg3_save_pci_state(struct tg3 *tp)
{
	/* The GRC core-clock reset clobbers PCI_COMMAND (memory
	 * enable etc.); keep a copy so tg3_restore_pci_state() can
	 * write it back afterwards.
	 */
	pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
}
7023
/* Restore PCI state after chip reset.
 *
 * Re-arms indirect register access, PCI state, the saved command
 * register, bus timing parameters, PCI-X ordering, and (on 5780
 * class) the MSI enable bit that the reset cleared.
 */
static void tg3_restore_pci_state(struct tg3 *tp)
{
	u32 val;

	/* Re-enable indirect register accesses. */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* Set MAX PCI retry to zero. */
	val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
	if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
	    tg3_flag(tp, PCIX_MODE))
		val |= PCISTATE_RETRY_SAME_DMA;
	/* Allow reads and writes to the APE register and memory space. */
	if (tg3_flag(tp, ENABLE_APE))
		val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
		       PCISTATE_ALLOW_APE_SHMEM_WR |
		       PCISTATE_ALLOW_APE_PSPACE_WR;
	pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);

	/* Put back the command register saved in tg3_save_pci_state(). */
	pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
		if (tg3_flag(tp, PCI_EXPRESS))
			pcie_set_readrq(tp->pdev, tp->pcie_readrq);
		else {
			pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
					      tp->pci_cacheline_sz);
			pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
					      tp->pci_lat_timer);
		}
	}

	/* Make sure PCI-X relaxed ordering bit is clear. */
	if (tg3_flag(tp, PCIX_MODE)) {
		u16 pcix_cmd;

		pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				     &pcix_cmd);
		pcix_cmd &= ~PCI_X_CMD_ERO;
		pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
				      pcix_cmd);
	}

	if (tg3_flag(tp, 5780_CLASS)) {

		/* Chip reset on 5780 will reset MSI enable bit,
		 * so need to restore it.
		 */
		if (tg3_flag(tp, USING_MSI)) {
			u16 ctrl;

			pci_read_config_word(tp->pdev,
					     tp->msi_cap + PCI_MSI_FLAGS,
					     &ctrl);
			pci_write_config_word(tp->pdev,
					      tp->msi_cap + PCI_MSI_FLAGS,
					      ctrl | PCI_MSI_FLAGS_ENABLE);
			val = tr32(MSGINT_MODE);
			tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
		}
	}
}
7088
7089 static void tg3_stop_fw(struct tg3 *);
7090
/* tp->lock is held. */
/*
 * tg3_chip_reset - reset the chip core via a GRC_MISC_CFG core-clock reset.
 *
 * Saves the PCI config state the reset will clobber, quiesces the irq
 * handlers, issues the core clock reset, then restores PCI/PCIe state
 * and re-enables the blocks the reset disturbs (memory arbiter, GRC
 * mode, MAC mode).  Finally re-reads NIC SRAM to reprobe whether ASF
 * firmware is enabled.
 *
 * Returns 0 on success, or the negative errno from tg3_poll_fw() if
 * the on-chip firmware does not come back after the reset.
 */
static int tg3_chip_reset(struct tg3 *tp)
{
        u32 val;
        void (*write_op)(struct tg3 *, u32, u32);
        int i, err;

        tg3_nvram_lock(tp);

        tg3_ape_lock(tp, TG3_APE_LOCK_GRC);

        /* No matching tg3_nvram_unlock() after this because
         * chip reset below will undo the nvram lock.
         */
        tp->nvram_lock_cnt = 0;

        /* GRC_MISC_CFG core clock reset will clear the memory
         * enable bit in PCI register 4 and the MSI enable bit
         * on some chips, so we save relevant registers here.
         */
        tg3_save_pci_state(tp);

        /* Clear the fastboot program counter before the reset. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
            tg3_flag(tp, 5755_PLUS))
                tw32(GRC_FASTBOOT_PC, 0);

        /*
         * We must avoid the readl() that normally takes place.
         * It locks machines, causes machine checks, and other
         * fun things.  So, temporarily disable the 5701
         * hardware workaround, while we do the reset.
         */
        write_op = tp->write32;
        if (write_op == tg3_write_flush_reg32)
                tp->write32 = tg3_write32;

        /* Prevent the irq handler from reading or writing PCI registers
         * during chip reset when the memory enable bit in the PCI command
         * register may be cleared.  The chip does not generate interrupt
         * at this time, but the irq handler may still be called due to irq
         * sharing or irqpoll.
         */
        tg3_flag_set(tp, CHIP_RESETTING);
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];
                if (tnapi->hw_status) {
                        tnapi->hw_status->status = 0;
                        tnapi->hw_status->status_tag = 0;
                }
                tnapi->last_tag = 0;
                tnapi->last_irq_tag = 0;
        }
        smp_mb();

        /* Wait until no irq handler is still running on any vector. */
        for (i = 0; i < tp->irq_cnt; i++)
                synchronize_irq(tp->napi[i].irq_vec);

        /* 57780: keep the PCIe L1 PLL from powering down across the reset. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
                val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
                tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
        }

        /* do the reset */
        val = GRC_MISC_CFG_CORECLK_RESET;

        if (tg3_flag(tp, PCI_EXPRESS)) {
                /* Force PCIe 1.0a mode */
                if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
                    !tg3_flag(tp, 57765_PLUS) &&
                    tr32(TG3_PCIE_PHY_TSTCTL) ==
                    (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
                        tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);

                /* NOTE(review): bit 29 of GRC_MISC_CFG is set alongside
                 * the core-clock reset on everything but 5750 A0; the
                 * bit has no symbolic name here.
                 */
                if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
                        tw32(GRC_MISC_CFG, (1 << 29));
                        val |= (1 << 29);
                }
        }

        /* 5906: flag a driver-initiated reset to the virtual CPU and
         * make sure the VCPU is not left halted.
         */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
                tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
                tw32(GRC_VCPU_EXT_CTRL,
                     tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
        }

        /* Manage gphy power for all CPMU absent PCIe devices. */
        if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
                val |= GRC_MISC_CFG_KEEP_GPHY_POWER;

        tw32(GRC_MISC_CFG, val);

        /* restore 5701 hardware bug workaround write method */
        tp->write32 = write_op;

        /* Unfortunately, we have to delay before the PCI read back.
         * Some 575X chips even will not respond to a PCI cfg access
         * when the reset command is given to the chip.
         *
         * How do these hardware designers expect things to work
         * properly if the PCI write is posted for a long period
         * of time?  It is always necessary to have some method by
         * which a register read back can occur to push the write
         * out which does the reset.
         *
         * For most tg3 variants the trick below was working.
         * Ho hum...
         */
        udelay(120);

        /* Flush PCI posted writes.  The normal MMIO registers
         * are inaccessible at this time so this is the only
         * way to make this reliably (actually, this is no longer
         * the case, see above).  I tried to use indirect
         * register read/write but this upset some 5701 variants.
         */
        pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);

        udelay(120);

        if (tg3_flag(tp, PCI_EXPRESS) && tp->pcie_cap) {
                u16 val16;

                if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
                        int i;
                        u32 cfg_val;

                        /* Wait for link training to complete.  */
                        for (i = 0; i < 5000; i++)
                                udelay(100);

                        /* Set bit 15 of (unnamed) PCIe config reg 0xc4. */
                        pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
                        pci_write_config_dword(tp->pdev, 0xc4,
                                               cfg_val | (1 << 15));
                }

                /* Clear the "no snoop" and "relaxed ordering" bits. */
                pci_read_config_word(tp->pdev,
                                     tp->pcie_cap + PCI_EXP_DEVCTL,
                                     &val16);
                val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
                           PCI_EXP_DEVCTL_NOSNOOP_EN);
                /*
                 * Older PCIe devices only support the 128 byte
                 * MPS setting.  Enforce the restriction.
                 */
                if (!tg3_flag(tp, CPMU_PRESENT))
                        val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
                pci_write_config_word(tp->pdev,
                                      tp->pcie_cap + PCI_EXP_DEVCTL,
                                      val16);

                pcie_set_readrq(tp->pdev, tp->pcie_readrq);

                /* Clear error status */
                pci_write_config_word(tp->pdev,
                                      tp->pcie_cap + PCI_EXP_DEVSTA,
                                      PCI_EXP_DEVSTA_CED |
                                      PCI_EXP_DEVSTA_NFED |
                                      PCI_EXP_DEVSTA_FED |
                                      PCI_EXP_DEVSTA_URD);
        }

        /* Put back the PCI state saved before the reset. */
        tg3_restore_pci_state(tp);

        tg3_flag_clear(tp, CHIP_RESETTING);
        tg3_flag_clear(tp, ERROR_PROCESSED);

        /* Re-enable the memory arbiter; 5780-class parts keep the
         * other MEMARB_MODE bits they had before the reset.
         */
        val = 0;
        if (tg3_flag(tp, 5780_CLASS))
                val = tr32(MEMARB_MODE);
        tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

        /* 5750 A3: stop the firmware and poke register 0x5000
         * (chip-specific post-reset step, no symbolic name).
         */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
                tg3_stop_fw(tp);
                tw32(0x5000, 0x400);
        }

        tw32(GRC_MODE, tp->grc_mode);

        /* 5705 A0: set bit 15 of (unnamed) register 0xc4. */
        if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
                val = tr32(0xc4);

                tw32(0xc4, val | (1 << 15));
        }

        /* Mini-PCI 5705 boards: enable the CLKRUN output (force it
         * on A0 silicon).
         */
        if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
                if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
                        tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
                tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
        }

        /* Pick the MAC port mode matching the PHY type; copper PHYs
         * leave MAC_MODE cleared here.
         */
        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
                val = tp->mac_mode;
        } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
                tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
                val = tp->mac_mode;
        } else
                val = 0;

        tw32_f(MAC_MODE, val);
        udelay(40);

        tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);

        /* Wait for the on-chip firmware to report it is back up. */
        err = tg3_poll_fw(tp);
        if (err)
                return err;

        tg3_mdio_start(tp);

        /* PCIe (except 5750 A0, 5785, 57765+): set bit 25 of
         * (unnamed) register 0x7c00 after reset.
         */
        if (tg3_flag(tp, PCI_EXPRESS) &&
            tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
            !tg3_flag(tp, 57765_PLUS)) {
                val = tr32(0x7c00);

                tw32(0x7c00, val | (1 << 25));
        }

        /* 5720: drop the CPMU MAC clock override after reset. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                val = tr32(TG3_CPMU_CLCK_ORIDE);
                tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
        }

        /* Reprobe ASF enable state.  */
        tg3_flag_clear(tp, ENABLE_ASF);
        tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
        tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
        if (val == NIC_SRAM_DATA_SIG_MAGIC) {
                u32 nic_cfg;

                tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
                if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
                        tg3_flag_set(tp, ENABLE_ASF);
                        tp->last_event_jiffies = jiffies;
                        if (tg3_flag(tp, 5750_PLUS))
                                tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
                }
        }

        return 0;
}
7336
7337 /* tp->lock is held. */
7338 static void tg3_stop_fw(struct tg3 *tp)
7339 {
7340         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
7341                 /* Wait for RX cpu to ACK the previous event. */
7342                 tg3_wait_for_event_ack(tp);
7343
7344                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
7345
7346                 tg3_generate_fw_event(tp);
7347
7348                 /* Wait for RX cpu to ACK this event. */
7349                 tg3_wait_for_event_ack(tp);
7350         }
7351 }
7352
/* tp->lock is held. */
/*
 * tg3_halt - stop the firmware and hardware, then reset the chip.
 * @kind:   reset kind passed to the pre/post reset signature writers
 * @silent: suppress diagnostics in tg3_abort_hw()
 *
 * Returns the result of tg3_chip_reset(); the MAC address and reset
 * signatures are restored even when the chip reset reports failure.
 */
static int tg3_halt(struct tg3 *tp, int kind, int silent)
{
        int err;

        tg3_stop_fw(tp);

        tg3_write_sig_pre_reset(tp, kind);

        tg3_abort_hw(tp, silent);
        err = tg3_chip_reset(tp);

        /* Re-program the MAC address registers lost in the reset. */
        __tg3_set_mac_addr(tp, 0);

        tg3_write_sig_legacy(tp, kind);
        tg3_write_sig_post_reset(tp, kind);

        return err;
}
7375
7376 #define RX_CPU_SCRATCH_BASE     0x30000
7377 #define RX_CPU_SCRATCH_SIZE     0x04000
7378 #define TX_CPU_SCRATCH_BASE     0x34000
7379 #define TX_CPU_SCRATCH_SIZE     0x04000
7380
7381 /* tp->lock is held. */
7382 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
7383 {
7384         int i;
7385
7386         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
7387
7388         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7389                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
7390
7391                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
7392                 return 0;
7393         }
7394         if (offset == RX_CPU_BASE) {
7395                 for (i = 0; i < 10000; i++) {
7396                         tw32(offset + CPU_STATE, 0xffffffff);
7397                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7398                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7399                                 break;
7400                 }
7401
7402                 tw32(offset + CPU_STATE, 0xffffffff);
7403                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
7404                 udelay(10);
7405         } else {
7406                 for (i = 0; i < 10000; i++) {
7407                         tw32(offset + CPU_STATE, 0xffffffff);
7408                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
7409                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
7410                                 break;
7411                 }
7412         }
7413
7414         if (i >= 10000) {
7415                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
7416                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
7417                 return -ENODEV;
7418         }
7419
7420         /* Clear firmware's nvram arbitration. */
7421         if (tg3_flag(tp, NVRAM))
7422                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
7423         return 0;
7424 }
7425
/* Describes a firmware image to be loaded into an on-chip cpu. */
struct fw_info {
        unsigned int fw_base;           /* load address of the image */
        unsigned int fw_len;            /* image length in bytes */
        const __be32 *fw_data;          /* big-endian firmware words */
};
7431
/* tp->lock is held. */
/*
 * tg3_load_firmware_cpu - copy a firmware image into a cpu's scratchpad.
 * @cpu_base:         register base of the target cpu (RX_CPU_BASE/TX_CPU_BASE)
 * @cpu_scratch_base: base address of that cpu's scratch memory
 * @cpu_scratch_size: bytes of scratch memory to clear before loading
 * @info:             image base, length and data words
 *
 * Halts the target cpu (taking the nvram lock first, see below), zeroes
 * the scratchpad, keeps the cpu halted, and writes the image words at
 * the offset given by the low 16 bits of info->fw_base.  Returns 0 on
 * success or a negative errno.
 */
static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
                                 int cpu_scratch_size, struct fw_info *info)
{
        int err, lock_err, i;
        void (*write_op)(struct tg3 *, u32, u32);

        /* There is no TX cpu on 5705 and later chips. */
        if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
                netdev_err(tp->dev,
                           "%s: Trying to load TX cpu firmware which is 5705\n",
                           __func__);
                return -EINVAL;
        }

        /* 5705+ scratch memory is reached via NIC-memory writes;
         * older chips use indirect register writes.
         */
        if (tg3_flag(tp, 5705_PLUS))
                write_op = tg3_write_mem;
        else
                write_op = tg3_write_indirect_reg32;

        /* It is possible that bootcode is still loading at this point.
         * Get the nvram lock first before halting the cpu.
         */
        lock_err = tg3_nvram_lock(tp);
        err = tg3_halt_cpu(tp, cpu_base);
        if (!lock_err)
                tg3_nvram_unlock(tp);
        if (err)
                goto out;

        /* Zero the scratchpad, re-assert halt, then copy the image. */
        for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
                write_op(tp, cpu_scratch_base + i, 0);
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
        for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
                write_op(tp, (cpu_scratch_base +
                              (info->fw_base & 0xffff) +
                              (i * sizeof(u32))),
                              be32_to_cpu(info->fw_data[i]));

        err = 0;

out:
        return err;
}
7476
7477 /* tp->lock is held. */
7478 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
7479 {
7480         struct fw_info info;
7481         const __be32 *fw_data;
7482         int err, i;
7483
7484         fw_data = (void *)tp->fw->data;
7485
7486         /* Firmware blob starts with version numbers, followed by
7487            start address and length. We are setting complete length.
7488            length = end_address_of_bss - start_address_of_text.
7489            Remainder is the blob to be loaded contiguously
7490            from start address. */
7491
7492         info.fw_base = be32_to_cpu(fw_data[1]);
7493         info.fw_len = tp->fw->size - 12;
7494         info.fw_data = &fw_data[3];
7495
7496         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
7497                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
7498                                     &info);
7499         if (err)
7500                 return err;
7501
7502         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
7503                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
7504                                     &info);
7505         if (err)
7506                 return err;
7507
7508         /* Now startup only the RX cpu. */
7509         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7510         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7511
7512         for (i = 0; i < 5; i++) {
7513                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
7514                         break;
7515                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7516                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
7517                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
7518                 udelay(1000);
7519         }
7520         if (i >= 5) {
7521                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
7522                            "should be %08x\n", __func__,
7523                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
7524                 return -ENODEV;
7525         }
7526         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
7527         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
7528
7529         return 0;
7530 }
7531
/* tp->lock is held. */
/*
 * tg3_load_tso_firmware - load the TSO firmware and start its cpu.
 *
 * Chips with hardware TSO (HW_TSO_1/2/3 flags) need no firmware and
 * return 0 immediately.  On 5705 the image runs on the RX cpu with
 * part of the mbuf pool used as scratch space; other chips use the TX
 * cpu scratchpad.  After loading, the cpu PC is pointed at the image
 * base and the cpu is released from halt.  Returns 0 on success or a
 * negative errno.
 */
static int tg3_load_tso_firmware(struct tg3 *tp)
{
        struct fw_info info;
        const __be32 *fw_data;
        unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
        int err, i;

        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3))
                return 0;

        fw_data = (void *)tp->fw->data;

        /* Firmware blob starts with version numbers, followed by
           start address and length. We are setting complete length.
           length = end_address_of_bss - start_address_of_text.
           Remainder is the blob to be loaded contiguously
           from start address. */

        info.fw_base = be32_to_cpu(fw_data[1]);
        cpu_scratch_size = tp->fw_len;
        info.fw_len = tp->fw->size - 12;
        info.fw_data = &fw_data[3];

        /* Choose the cpu and scratch area per chip family. */
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
                cpu_base = RX_CPU_BASE;
                cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
        } else {
                cpu_base = TX_CPU_BASE;
                cpu_scratch_base = TX_CPU_SCRATCH_BASE;
                cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
        }

        err = tg3_load_firmware_cpu(tp, cpu_base,
                                    cpu_scratch_base, cpu_scratch_size,
                                    &info);
        if (err)
                return err;

        /* Now startup the cpu. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_PC, info.fw_base);

        /* Retry until the PC reads back as the image base. */
        for (i = 0; i < 5; i++) {
                if (tr32(cpu_base + CPU_PC) == info.fw_base)
                        break;
                tw32(cpu_base + CPU_STATE, 0xffffffff);
                tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
                tw32_f(cpu_base + CPU_PC, info.fw_base);
                udelay(1000);
        }
        if (i >= 5) {
                netdev_err(tp->dev,
                           "%s fails to set CPU PC, is %08x should be %08x\n",
                           __func__, tr32(cpu_base + CPU_PC), info.fw_base);
                return -ENODEV;
        }
        /* Release the cpu from halt. */
        tw32(cpu_base + CPU_STATE, 0xffffffff);
        tw32_f(cpu_base + CPU_MODE,  0x00000000);
        return 0;
}
7595
7596
7597 static int tg3_set_mac_addr(struct net_device *dev, void *p)
7598 {
7599         struct tg3 *tp = netdev_priv(dev);
7600         struct sockaddr *addr = p;
7601         int err = 0, skip_mac_1 = 0;
7602
7603         if (!is_valid_ether_addr(addr->sa_data))
7604                 return -EINVAL;
7605
7606         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7607
7608         if (!netif_running(dev))
7609                 return 0;
7610
7611         if (tg3_flag(tp, ENABLE_ASF)) {
7612                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
7613
7614                 addr0_high = tr32(MAC_ADDR_0_HIGH);
7615                 addr0_low = tr32(MAC_ADDR_0_LOW);
7616                 addr1_high = tr32(MAC_ADDR_1_HIGH);
7617                 addr1_low = tr32(MAC_ADDR_1_LOW);
7618
7619                 /* Skip MAC addr 1 if ASF is using it. */
7620                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
7621                     !(addr1_high == 0 && addr1_low == 0))
7622                         skip_mac_1 = 1;
7623         }
7624         spin_lock_bh(&tp->lock);
7625         __tg3_set_mac_addr(tp, skip_mac_1);
7626         spin_unlock_bh(&tp->lock);
7627
7628         return err;
7629 }
7630
7631 /* tp->lock is held. */
7632 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
7633                            dma_addr_t mapping, u32 maxlen_flags,
7634                            u32 nic_addr)
7635 {
7636         tg3_write_mem(tp,
7637                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
7638                       ((u64) mapping >> 32));
7639         tg3_write_mem(tp,
7640                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
7641                       ((u64) mapping & 0xffffffff));
7642         tg3_write_mem(tp,
7643                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
7644                        maxlen_flags);
7645
7646         if (!tg3_flag(tp, 5705_PLUS))
7647                 tg3_write_mem(tp,
7648                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
7649                               nic_addr);
7650 }
7651
7652 static void __tg3_set_rx_mode(struct net_device *);
/*
 * __tg3_set_coalesce - program host coalescing registers from @ec.
 *
 * With TSS/RSS enabled the global TX/RX coalescing registers are
 * zeroed and the per-vector registers (stride 0x18) are used instead;
 * otherwise the globals carry the settings.  Per-vector registers for
 * vectors beyond irq_cnt are cleared up to irq_max.
 */
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
        int i;

        /* Global TX coalescing, unless TSS routes TX per-vector. */
        if (!tg3_flag(tp, ENABLE_TSS)) {
                tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
                tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
                tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_TXCOL_TICKS, 0);
                tw32(HOSTCC_TXMAX_FRAMES, 0);
                tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
        }

        /* Global RX coalescing, unless RSS routes RX per-vector. */
        if (!tg3_flag(tp, ENABLE_RSS)) {
                tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
                tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
                tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
        } else {
                tw32(HOSTCC_RXCOL_TICKS, 0);
                tw32(HOSTCC_RXMAX_FRAMES, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
        }

        /* Pre-5705 chips have irq-tick and stats coalescing registers. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 val = ec->stats_block_coalesce_usecs;

                tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
                tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);

                /* No stats updates while the link is down. */
                if (!netif_carrier_ok(tp->dev))
                        val = 0;

                tw32(HOSTCC_STAT_COAL_TICKS, val);
        }

        /* Per-vector registers for active extra vectors. */
        for (i = 0; i < tp->irq_cnt - 1; i++) {
                u32 reg;

                reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
                tw32(reg, ec->rx_coalesce_usecs);
                reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames);
                reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
                tw32(reg, ec->rx_max_coalesced_frames_irq);

                if (tg3_flag(tp, ENABLE_TSS)) {
                        reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_coalesce_usecs);
                        reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames);
                        reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
                        tw32(reg, ec->tx_max_coalesced_frames_irq);
                }
        }

        /* Clear per-vector registers for the unused vectors. */
        for (; i < tp->irq_max - 1; i++) {
                tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
                tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);

                if (tg3_flag(tp, ENABLE_TSS)) {
                        tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
                        tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
                        tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
                }
        }
}
7721
/* tp->lock is held. */
/*
 * tg3_rings_reset - return all rings and mailboxes to a clean state.
 *
 * Disables every TX and RX-return ring control block beyond the first
 * (the number of blocks in NIC SRAM varies per chip family), clears
 * the mailbox registers, zeroes the host status blocks, then
 * re-programs the status block DMA address and the ring control
 * blocks for the first vector plus one set per additional IRQ vector.
 */
static void tg3_rings_reset(struct tg3 *tp)
{
        int i;
        u32 stblk, txrcb, rxrcb, limit;
        struct tg3_napi *tnapi = &tp->napi[0];

        /* Disable all transmit rings but the first. */
        if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
        else if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
        else
                limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;

        for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
             txrcb < limit; txrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);


        /* Disable all receive return rings but the first. */
        if (tg3_flag(tp, 5717_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
        else if (!tg3_flag(tp, 5705_PLUS))
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
        else
                limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;

        for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
             rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
                tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
                              BDINFO_FLAGS_DISABLED);

        /* Disable interrupts */
        tw32_mailbox_f(tp->napi[0].int_mbox, 1);

        /* Zero mailbox registers. */
        if (tg3_flag(tp, SUPPORT_MSIX)) {
                for (i = 1; i < tp->irq_max; i++) {
                        tp->napi[i].tx_prod = 0;
                        tp->napi[i].tx_cons = 0;
                        /* With TSS each vector has its own TX producer
                         * mailbox.
                         */
                        if (tg3_flag(tp, ENABLE_TSS))
                                tw32_mailbox(tp->napi[i].prodmbox, 0);
                        tw32_rx_mbox(tp->napi[i].consmbox, 0);
                        tw32_mailbox_f(tp->napi[i].int_mbox, 1);
                }
                if (!tg3_flag(tp, ENABLE_TSS))
                        tw32_mailbox(tp->napi[0].prodmbox, 0);
        } else {
                tp->napi[0].tx_prod = 0;
                tp->napi[0].tx_cons = 0;
                tw32_mailbox(tp->napi[0].prodmbox, 0);
                tw32_rx_mbox(tp->napi[0].consmbox, 0);
        }

        /* Make sure the NIC-based send BD rings are disabled. */
        if (!tg3_flag(tp, 5705_PLUS)) {
                u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
                for (i = 0; i < 16; i++)
                        tw32_tx_mbox(mbox + i * 8, 0);
        }

        txrcb = NIC_SRAM_SEND_RCB;
        rxrcb = NIC_SRAM_RCV_RET_RCB;

        /* Clear status block in ram. */
        memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

        /* Set status block DMA address */
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
             ((u64) tnapi->status_mapping >> 32));
        tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
             ((u64) tnapi->status_mapping & 0xffffffff));

        /* Program the first vector's TX ring control block. */
        if (tnapi->tx_ring) {
                tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
                               (TG3_TX_RING_SIZE <<
                                BDINFO_FLAGS_MAXLEN_SHIFT),
                               NIC_SRAM_TX_BUFFER_DESC);
                txrcb += TG3_BDINFO_SIZE;
        }

        /* Program the first vector's RX return ring control block. */
        if (tnapi->rx_rcb) {
                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               (tp->rx_ret_ring_mask + 1) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT, 0);
                rxrcb += TG3_BDINFO_SIZE;
        }

        stblk = HOSTCC_STATBLCK_RING1;

        /* Status block and ring control blocks for vectors 1..n. */
        for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
                u64 mapping = (u64)tnapi->status_mapping;
                tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
                tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);

                /* Clear status block in ram. */
                memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);

                if (tnapi->tx_ring) {
                        tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
                                       (TG3_TX_RING_SIZE <<
                                        BDINFO_FLAGS_MAXLEN_SHIFT),
                                       NIC_SRAM_TX_BUFFER_DESC);
                        txrcb += TG3_BDINFO_SIZE;
                }

                tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
                               ((tp->rx_ret_ring_mask + 1) <<
                                BDINFO_FLAGS_MAXLEN_SHIFT), 0);

                stblk += 8;
                rxrcb += TG3_BDINFO_SIZE;
        }
}
7843
/*
 * tg3_setup_rxbd_thresholds - program RX BD replenish thresholds.
 *
 * Picks the per-chip-family BD cache size, programs the standard ring
 * replenish threshold (and the replenish low-water mark on 57765+),
 * then does the same for the jumbo ring when the device supports it.
 */
static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
{
        u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;

        /* Standard ring BD cache size per chip family. */
        if (!tg3_flag(tp, 5750_PLUS) ||
            tg3_flag(tp, 5780_CLASS) ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
                bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
        else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
                 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
                bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
        else
                bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;

        /* Threshold is the smaller of the NIC-side and host-side limits
         * (host side: an eighth of the configured ring, at least 1).
         */
        nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
        host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);

        val = min(nic_rep_thresh, host_rep_thresh);
        tw32(RCVBDI_STD_THRESH, val);

        if (tg3_flag(tp, 57765_PLUS))
                tw32(STD_REPLENISH_LWM, bdcache_maxcnt);

        /* Jumbo ring only where supported (not on 5780-class parts). */
        if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
                return;

        if (!tg3_flag(tp, 5705_PLUS))
                bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
        else
                bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5717;

        host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);

        val = min(bdcache_maxcnt / 2, host_rep_thresh);
        tw32(RCVBDI_JUMBO_THRESH, val);

        if (tg3_flag(tp, 57765_PLUS))
                tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
}
7884
7885 /* tp->lock is held. */
7886 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7887 {
7888         u32 val, rdmac_mode;
7889         int i, err, limit;
7890         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
7891
7892         tg3_disable_ints(tp);
7893
7894         tg3_stop_fw(tp);
7895
7896         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
7897
7898         if (tg3_flag(tp, INIT_COMPLETE))
7899                 tg3_abort_hw(tp, 1);
7900
7901         /* Enable MAC control of LPI */
7902         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7903                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7904                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7905                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
7906
7907                 tw32_f(TG3_CPMU_EEE_CTRL,
7908                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7909
7910                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7911                       TG3_CPMU_EEEMD_LPI_IN_TX |
7912                       TG3_CPMU_EEEMD_LPI_IN_RX |
7913                       TG3_CPMU_EEEMD_EEE_ENABLE;
7914
7915                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7916                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7917
7918                 if (tg3_flag(tp, ENABLE_APE))
7919                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7920
7921                 tw32_f(TG3_CPMU_EEE_MODE, val);
7922
7923                 tw32_f(TG3_CPMU_EEE_DBTMR1,
7924                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7925                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7926
7927                 tw32_f(TG3_CPMU_EEE_DBTMR2,
7928                        TG3_CPMU_DBTMR2_APE_TX_2047US |
7929                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7930         }
7931
7932         if (reset_phy)
7933                 tg3_phy_reset(tp);
7934
7935         err = tg3_chip_reset(tp);
7936         if (err)
7937                 return err;
7938
7939         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
7940
7941         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
7942                 val = tr32(TG3_CPMU_CTRL);
7943                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
7944                 tw32(TG3_CPMU_CTRL, val);
7945
7946                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7947                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
7948                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
7949                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7950
7951                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
7952                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
7953                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
7954                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
7955
7956                 val = tr32(TG3_CPMU_HST_ACC);
7957                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
7958                 val |= CPMU_HST_ACC_MACCLK_6_25;
7959                 tw32(TG3_CPMU_HST_ACC, val);
7960         }
7961
7962         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7963                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
7964                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
7965                        PCIE_PWR_MGMT_L1_THRESH_4MS;
7966                 tw32(PCIE_PWR_MGMT_THRESH, val);
7967
7968                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
7969                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7970
7971                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7972
7973                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7974                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7975         }
7976
7977         if (tg3_flag(tp, L1PLLPD_EN)) {
7978                 u32 grc_mode = tr32(GRC_MODE);
7979
7980                 /* Access the lower 1K of PL PCIE block registers. */
7981                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7982                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7983
7984                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
7985                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
7986                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
7987
7988                 tw32(GRC_MODE, grc_mode);
7989         }
7990
7991         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7992                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7993                         u32 grc_mode = tr32(GRC_MODE);
7994
7995                         /* Access the lower 1K of PL PCIE block registers. */
7996                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7997                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7998
7999                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8000                                    TG3_PCIE_PL_LO_PHYCTL5);
8001                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8002                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8003
8004                         tw32(GRC_MODE, grc_mode);
8005                 }
8006
8007                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8008                         u32 grc_mode = tr32(GRC_MODE);
8009
8010                         /* Access the lower 1K of DL PCIE block registers. */
8011                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8012                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8013
8014                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8015                                    TG3_PCIE_DL_LO_FTSMAX);
8016                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8017                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8018                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8019
8020                         tw32(GRC_MODE, grc_mode);
8021                 }
8022
8023                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8024                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8025                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8026                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8027         }
8028
8029         /* This works around an issue with Athlon chipsets on
8030          * B3 tigon3 silicon.  This bit has no effect on any
8031          * other revision.  But do not set this on PCI Express
8032          * chips and don't even touch the clocks if the CPMU is present.
8033          */
8034         if (!tg3_flag(tp, CPMU_PRESENT)) {
8035                 if (!tg3_flag(tp, PCI_EXPRESS))
8036                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8037                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8038         }
8039
8040         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8041             tg3_flag(tp, PCIX_MODE)) {
8042                 val = tr32(TG3PCI_PCISTATE);
8043                 val |= PCISTATE_RETRY_SAME_DMA;
8044                 tw32(TG3PCI_PCISTATE, val);
8045         }
8046
8047         if (tg3_flag(tp, ENABLE_APE)) {
8048                 /* Allow reads and writes to the
8049                  * APE register and memory space.
8050                  */
8051                 val = tr32(TG3PCI_PCISTATE);
8052                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8053                        PCISTATE_ALLOW_APE_SHMEM_WR |
8054                        PCISTATE_ALLOW_APE_PSPACE_WR;
8055                 tw32(TG3PCI_PCISTATE, val);
8056         }
8057
8058         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8059                 /* Enable some hw fixes.  */
8060                 val = tr32(TG3PCI_MSI_DATA);
8061                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8062                 tw32(TG3PCI_MSI_DATA, val);
8063         }
8064
8065         /* Descriptor ring init may make accesses to the
8066          * NIC SRAM area to setup the TX descriptors, so we
8067          * can only do this after the hardware has been
8068          * successfully reset.
8069          */
8070         err = tg3_init_rings(tp);
8071         if (err)
8072                 return err;
8073
8074         if (tg3_flag(tp, 57765_PLUS)) {
8075                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8076                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8077                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8078                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8079                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57765 &&
8080                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8081                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8082                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8083         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8084                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8085                 /* This value is determined during the probe time DMA
8086                  * engine test, tg3_test_dma.
8087                  */
8088                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8089         }
8090
8091         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8092                           GRC_MODE_4X_NIC_SEND_RINGS |
8093                           GRC_MODE_NO_TX_PHDR_CSUM |
8094                           GRC_MODE_NO_RX_PHDR_CSUM);
8095         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8096
8097         /* Pseudo-header checksum is done by hardware logic and not
8098          * the offload processers, so make the chip do the pseudo-
8099          * header checksums on receive.  For transmit it is more
8100          * convenient to do the pseudo-header checksum in software
8101          * as Linux does that on transmit for us in all cases.
8102          */
8103         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8104
8105         tw32(GRC_MODE,
8106              tp->grc_mode |
8107              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8108
8109         /* Setup the timer prescalar register.  Clock is always 66Mhz. */
8110         val = tr32(GRC_MISC_CFG);
8111         val &= ~0xff;
8112         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8113         tw32(GRC_MISC_CFG, val);
8114
8115         /* Initialize MBUF/DESC pool. */
8116         if (tg3_flag(tp, 5750_PLUS)) {
8117                 /* Do nothing.  */
8118         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8119                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8120                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8121                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8122                 else
8123                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8124                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8125                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8126         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8127                 int fw_len;
8128
8129                 fw_len = tp->fw_len;
8130                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8131                 tw32(BUFMGR_MB_POOL_ADDR,
8132                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8133                 tw32(BUFMGR_MB_POOL_SIZE,
8134                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8135         }
8136
8137         if (tp->dev->mtu <= ETH_DATA_LEN) {
8138                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8139                      tp->bufmgr_config.mbuf_read_dma_low_water);
8140                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8141                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8142                 tw32(BUFMGR_MB_HIGH_WATER,
8143                      tp->bufmgr_config.mbuf_high_water);
8144         } else {
8145                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8146                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8147                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8148                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8149                 tw32(BUFMGR_MB_HIGH_WATER,
8150                      tp->bufmgr_config.mbuf_high_water_jumbo);
8151         }
8152         tw32(BUFMGR_DMA_LOW_WATER,
8153              tp->bufmgr_config.dma_low_water);
8154         tw32(BUFMGR_DMA_HIGH_WATER,
8155              tp->bufmgr_config.dma_high_water);
8156
8157         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8158         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8159                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8160         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8161             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8162             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8163                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8164         tw32(BUFMGR_MODE, val);
8165         for (i = 0; i < 2000; i++) {
8166                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8167                         break;
8168                 udelay(10);
8169         }
8170         if (i >= 2000) {
8171                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8172                 return -ENODEV;
8173         }
8174
8175         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8176                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8177
8178         tg3_setup_rxbd_thresholds(tp);
8179
8180         /* Initialize TG3_BDINFO's at:
8181          *  RCVDBDI_STD_BD:     standard eth size rx ring
8182          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8183          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8184          *
8185          * like so:
8186          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8187          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8188          *                              ring attribute flags
8189          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8190          *
8191          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8192          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8193          *
8194          * The size of each ring is fixed in the firmware, but the location is
8195          * configurable.
8196          */
8197         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8198              ((u64) tpr->rx_std_mapping >> 32));
8199         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8200              ((u64) tpr->rx_std_mapping & 0xffffffff));
8201         if (!tg3_flag(tp, 5717_PLUS))
8202                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8203                      NIC_SRAM_RX_BUFFER_DESC);
8204
8205         /* Disable the mini ring */
8206         if (!tg3_flag(tp, 5705_PLUS))
8207                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8208                      BDINFO_FLAGS_DISABLED);
8209
8210         /* Program the jumbo buffer descriptor ring control
8211          * blocks on those devices that have them.
8212          */
8213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8214             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8215
8216                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8217                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8218                              ((u64) tpr->rx_jmb_mapping >> 32));
8219                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8220                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8221                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8222                               BDINFO_FLAGS_MAXLEN_SHIFT;
8223                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8224                              val | BDINFO_FLAGS_USE_EXT_RECV);
8225                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8226                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8227                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8228                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8229                 } else {
8230                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8231                              BDINFO_FLAGS_DISABLED);
8232                 }
8233
8234                 if (tg3_flag(tp, 57765_PLUS)) {
8235                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8236                                 val = TG3_RX_STD_MAX_SIZE_5700;
8237                         else
8238                                 val = TG3_RX_STD_MAX_SIZE_5717;
8239                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8240                         val |= (TG3_RX_STD_DMA_SZ << 2);
8241                 } else
8242                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8243         } else
8244                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8245
8246         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8247
8248         tpr->rx_std_prod_idx = tp->rx_pending;
8249         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8250
8251         tpr->rx_jmb_prod_idx =
8252                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8253         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8254
8255         tg3_rings_reset(tp);
8256
8257         /* Initialize MAC address and backoff seed. */
8258         __tg3_set_mac_addr(tp, 0);
8259
8260         /* MTU + ethernet header + FCS + optional VLAN tag */
8261         tw32(MAC_RX_MTU_SIZE,
8262              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8263
8264         /* The slot time is changed by tg3_setup_phy if we
8265          * run at gigabit with half duplex.
8266          */
8267         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8268               (6 << TX_LENGTHS_IPG_SHIFT) |
8269               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8270
8271         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8272                 val |= tr32(MAC_TX_LENGTHS) &
8273                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8274                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8275
8276         tw32(MAC_TX_LENGTHS, val);
8277
8278         /* Receive rules. */
8279         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8280         tw32(RCVLPC_CONFIG, 0x0181);
8281
8282         /* Calculate RDMAC_MODE setting early, we need it to determine
8283          * the RCVLPC_STATE_ENABLE mask.
8284          */
8285         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8286                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8287                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8288                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8289                       RDMAC_MODE_LNGREAD_ENAB);
8290
8291         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8292                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8293
8294         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8295             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8296             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8297                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8298                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8299                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8300
8301         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8302             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8303                 if (tg3_flag(tp, TSO_CAPABLE) &&
8304                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8305                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8306                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8307                            !tg3_flag(tp, IS_5788)) {
8308                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8309                 }
8310         }
8311
8312         if (tg3_flag(tp, PCI_EXPRESS))
8313                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8314
8315         if (tg3_flag(tp, HW_TSO_1) ||
8316             tg3_flag(tp, HW_TSO_2) ||
8317             tg3_flag(tp, HW_TSO_3))
8318                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8319
8320         if (tg3_flag(tp, 57765_PLUS) ||
8321             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8322             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8323                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8324
8325         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8326                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8327
8328         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8329             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8330             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8331             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8332             tg3_flag(tp, 57765_PLUS)) {
8333                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8334                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8335                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8336                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8337                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8338                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8339                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8340                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8341                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8342                 }
8343                 tw32(TG3_RDMA_RSRVCTRL_REG,
8344                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8345         }
8346
8347         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8348             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8349                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8350                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8351                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8352                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8353         }
8354
8355         /* Receive/send statistics. */
8356         if (tg3_flag(tp, 5750_PLUS)) {
8357                 val = tr32(RCVLPC_STATS_ENABLE);
8358                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8359                 tw32(RCVLPC_STATS_ENABLE, val);
8360         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8361                    tg3_flag(tp, TSO_CAPABLE)) {
8362                 val = tr32(RCVLPC_STATS_ENABLE);
8363                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8364                 tw32(RCVLPC_STATS_ENABLE, val);
8365         } else {
8366                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8367         }
8368         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8369         tw32(SNDDATAI_STATSENAB, 0xffffff);
8370         tw32(SNDDATAI_STATSCTRL,
8371              (SNDDATAI_SCTRL_ENABLE |
8372               SNDDATAI_SCTRL_FASTUPD));
8373
8374         /* Setup host coalescing engine. */
8375         tw32(HOSTCC_MODE, 0);
8376         for (i = 0; i < 2000; i++) {
8377                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8378                         break;
8379                 udelay(10);
8380         }
8381
8382         __tg3_set_coalesce(tp, &tp->coal);
8383
8384         if (!tg3_flag(tp, 5705_PLUS)) {
8385                 /* Status/statistics block address.  See tg3_timer,
8386                  * the tg3_periodic_fetch_stats call there, and
8387                  * tg3_get_stats to see how this works for 5705/5750 chips.
8388                  */
8389                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8390                      ((u64) tp->stats_mapping >> 32));
8391                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8392                      ((u64) tp->stats_mapping & 0xffffffff));
8393                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8394
8395                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8396
8397                 /* Clear statistics and status block memory areas */
8398                 for (i = NIC_SRAM_STATS_BLK;
8399                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8400                      i += sizeof(u32)) {
8401                         tg3_write_mem(tp, i, 0);
8402                         udelay(40);
8403                 }
8404         }
8405
8406         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8407
8408         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8409         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8410         if (!tg3_flag(tp, 5705_PLUS))
8411                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8412
8413         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8414                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8415                 /* reset to prevent losing 1st rx packet intermittently */
8416                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8417                 udelay(10);
8418         }
8419
8420         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
8421                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
8422                         MAC_MODE_FHDE_ENABLE;
8423         if (tg3_flag(tp, ENABLE_APE))
8424                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8425         if (!tg3_flag(tp, 5705_PLUS) &&
8426             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8427             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
8428                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
8429         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
8430         udelay(40);
8431
8432         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
8433          * If TG3_FLAG_IS_NIC is zero, we should read the
8434          * register to preserve the GPIO settings for LOMs. The GPIOs,
8435          * whether used as inputs or outputs, are set by boot code after
8436          * reset.
8437          */
8438         if (!tg3_flag(tp, IS_NIC)) {
8439                 u32 gpio_mask;
8440
8441                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
8442                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
8443                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
8444
8445                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
8446                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
8447                                      GRC_LCLCTRL_GPIO_OUTPUT3;
8448
8449                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
8450                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
8451
8452                 tp->grc_local_ctrl &= ~gpio_mask;
8453                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
8454
8455                 /* GPIO1 must be driven high for eeprom write protect */
8456                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
8457                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
8458                                                GRC_LCLCTRL_GPIO_OUTPUT1);
8459         }
8460         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8461         udelay(100);
8462
8463         if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
8464                 val = tr32(MSGINT_MODE);
8465                 val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
8466                 tw32(MSGINT_MODE, val);
8467         }
8468
8469         if (!tg3_flag(tp, 5705_PLUS)) {
8470                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
8471                 udelay(40);
8472         }
8473
8474         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
8475                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
8476                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
8477                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
8478                WDMAC_MODE_LNGREAD_ENAB);
8479
8480         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8481             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8482                 if (tg3_flag(tp, TSO_CAPABLE) &&
8483                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
8484                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
8485                         /* nothing */
8486                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8487                            !tg3_flag(tp, IS_5788)) {
8488                         val |= WDMAC_MODE_RX_ACCEL;
8489                 }
8490         }
8491
8492         /* Enable host coalescing bug fix */
8493         if (tg3_flag(tp, 5755_PLUS))
8494                 val |= WDMAC_MODE_STATUS_TAG_FIX;
8495
8496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
8497                 val |= WDMAC_MODE_BURST_ALL_DATA;
8498
8499         tw32_f(WDMAC_MODE, val);
8500         udelay(40);
8501
8502         if (tg3_flag(tp, PCIX_MODE)) {
8503                 u16 pcix_cmd;
8504
8505                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8506                                      &pcix_cmd);
8507                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
8508                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
8509                         pcix_cmd |= PCI_X_CMD_READ_2K;
8510                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8511                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
8512                         pcix_cmd |= PCI_X_CMD_READ_2K;
8513                 }
8514                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8515                                       pcix_cmd);
8516         }
8517
8518         tw32_f(RDMAC_MODE, rdmac_mode);
8519         udelay(40);
8520
8521         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
8522         if (!tg3_flag(tp, 5705_PLUS))
8523                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
8524
8525         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
8526                 tw32(SNDDATAC_MODE,
8527                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
8528         else
8529                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
8530
8531         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
8532         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
8533         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
8534         if (tg3_flag(tp, LRG_PROD_RING_CAP))
8535                 val |= RCVDBDI_MODE_LRG_RING_SZ;
8536         tw32(RCVDBDI_MODE, val);
8537         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
8538         if (tg3_flag(tp, HW_TSO_1) ||
8539             tg3_flag(tp, HW_TSO_2) ||
8540             tg3_flag(tp, HW_TSO_3))
8541                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
8542         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
8543         if (tg3_flag(tp, ENABLE_TSS))
8544                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
8545         tw32(SNDBDI_MODE, val);
8546         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
8547
8548         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
8549                 err = tg3_load_5701_a0_firmware_fix(tp);
8550                 if (err)
8551                         return err;
8552         }
8553
8554         if (tg3_flag(tp, TSO_CAPABLE)) {
8555                 err = tg3_load_tso_firmware(tp);
8556                 if (err)
8557                         return err;
8558         }
8559
8560         tp->tx_mode = TX_MODE_ENABLE;
8561
8562         if (tg3_flag(tp, 5755_PLUS) ||
8563             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
8564                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
8565
8566         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8567                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
8568                 tp->tx_mode &= ~val;
8569                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
8570         }
8571
8572         tw32_f(MAC_TX_MODE, tp->tx_mode);
8573         udelay(100);
8574
8575         if (tg3_flag(tp, ENABLE_RSS)) {
8576                 u32 reg = MAC_RSS_INDIR_TBL_0;
8577                 u8 *ent = (u8 *)&val;
8578
8579                 /* Setup the indirection table */
8580                 for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8581                         int idx = i % sizeof(val);
8582
8583                         ent[idx] = i % (tp->irq_cnt - 1);
8584                         if (idx == sizeof(val) - 1) {
8585                                 tw32(reg, val);
8586                                 reg += 4;
8587                         }
8588                 }
8589
8590                 /* Setup the "secret" hash key. */
8591                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
8592                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
8593                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
8594                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
8595                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
8596                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
8597                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
8598                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
8599                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
8600                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
8601         }
8602
8603         tp->rx_mode = RX_MODE_ENABLE;
8604         if (tg3_flag(tp, 5755_PLUS))
8605                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
8606
8607         if (tg3_flag(tp, ENABLE_RSS))
8608                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
8609                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
8610                                RX_MODE_RSS_IPV6_HASH_EN |
8611                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
8612                                RX_MODE_RSS_IPV4_HASH_EN |
8613                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
8614
8615         tw32_f(MAC_RX_MODE, tp->rx_mode);
8616         udelay(10);
8617
8618         tw32(MAC_LED_CTRL, tp->led_ctrl);
8619
8620         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
8621         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8622                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8623                 udelay(10);
8624         }
8625         tw32_f(MAC_RX_MODE, tp->rx_mode);
8626         udelay(10);
8627
8628         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
8629                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
8630                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
8631                         /* Set drive transmission level to 1.2V  */
8632                         /* only if the signal pre-emphasis bit is not set  */
8633                         val = tr32(MAC_SERDES_CFG);
8634                         val &= 0xfffff000;
8635                         val |= 0x880;
8636                         tw32(MAC_SERDES_CFG, val);
8637                 }
8638                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
8639                         tw32(MAC_SERDES_CFG, 0x616000);
8640         }
8641
8642         /* Prevent chip from dropping frames when flow control
8643          * is enabled.
8644          */
8645         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
8646                 val = 1;
8647         else
8648                 val = 2;
8649         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
8650
8651         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8652             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
8653                 /* Use hardware link auto-negotiation */
8654                 tg3_flag_set(tp, HW_AUTONEG);
8655         }
8656
8657         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8658             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
8659                 u32 tmp;
8660
8661                 tmp = tr32(SERDES_RX_CTRL);
8662                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
8663                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
8664                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
8665                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
8666         }
8667
8668         if (!tg3_flag(tp, USE_PHYLIB)) {
8669                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
8670                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
8671                         tp->link_config.speed = tp->link_config.orig_speed;
8672                         tp->link_config.duplex = tp->link_config.orig_duplex;
8673                         tp->link_config.autoneg = tp->link_config.orig_autoneg;
8674                 }
8675
8676                 err = tg3_setup_phy(tp, 0);
8677                 if (err)
8678                         return err;
8679
8680                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
8681                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8682                         u32 tmp;
8683
8684                         /* Clear CRC stats. */
8685                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
8686                                 tg3_writephy(tp, MII_TG3_TEST1,
8687                                              tmp | MII_TG3_TEST1_CRC_EN);
8688                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
8689                         }
8690                 }
8691         }
8692
8693         __tg3_set_rx_mode(tp->dev);
8694
8695         /* Initialize receive rules. */
8696         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
8697         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
8698         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
8699         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
8700
8701         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
8702                 limit = 8;
8703         else
8704                 limit = 16;
8705         if (tg3_flag(tp, ENABLE_ASF))
8706                 limit -= 4;
8707         switch (limit) {
8708         case 16:
8709                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
8710         case 15:
8711                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
8712         case 14:
8713                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
8714         case 13:
8715                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
8716         case 12:
8717                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
8718         case 11:
8719                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
8720         case 10:
8721                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
8722         case 9:
8723                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
8724         case 8:
8725                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
8726         case 7:
8727                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
8728         case 6:
8729                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
8730         case 5:
8731                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
8732         case 4:
8733                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
8734         case 3:
8735                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
8736         case 2:
8737         case 1:
8738
8739         default:
8740                 break;
8741         }
8742
8743         if (tg3_flag(tp, ENABLE_APE))
8744                 /* Write our heartbeat update interval to APE. */
8745                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
8746                                 APE_HOST_HEARTBEAT_INT_DISABLE);
8747
8748         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
8749
8750         return 0;
8751 }
8752
/* Called at device open time to get the chip ready for
 * packet processing.  Invoked with tp->lock held.
 */
static int tg3_init_hw(struct tg3 *tp, int reset_phy)
{
        /* Make sure a valid core clock source is selected before any
         * further register programming.
         */
        tg3_switch_clocks(tp);

        /* Reset the memory window base so subsequent windowed
         * register/SRAM accesses start from a known offset.
         */
        tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);

        /* All remaining hardware bring-up is done by tg3_reset_hw();
         * propagate its error code to the caller.
         */
        return tg3_reset_hw(tp, reset_phy);
}
8764
/* Fold the 32-bit hardware counter register REG into the 64-bit
 * statistic PSTAT (a two-word value with .low/.high halves): read the
 * register, add it to .low, and carry into .high when .low wraps
 * (detected by the sum being smaller than the addend).
 */
#define TG3_STAT_ADD32(PSTAT, REG) \
do {    u32 __val = tr32(REG); \
        (PSTAT)->low += __val; \
        if ((PSTAT)->low < __val) \
                (PSTAT)->high += 1; \
} while (0)
8771
/* Accumulate the chip's 32-bit MAC TX/RX and receive-list statistics
 * registers into the 64-bit counters in tp->hw_stats.  Called from the
 * once-per-second portion of tg3_timer() (tp->lock held) so counters
 * are sampled before they can wrap.  Does nothing while the link is
 * down.
 */
static void tg3_periodic_fetch_stats(struct tg3 *tp)
{
        struct tg3_hw_stats *sp = tp->hw_stats;

        if (!netif_carrier_ok(tp->dev))
                return;

        TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
        TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
        TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
        TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
        TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
        TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
        TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
        TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);

        TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
        TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
        TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
        TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
        TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
        TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
        TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
        TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
        TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
        TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
        TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
        TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
        TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);

        TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
            tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
                TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
        } else {
                /* On 5717 and the A0 steppings of 5719/5720 the driver
                 * avoids RCVLPC_IN_DISCARDS_CNT (presumably a chip
                 * erratum — TODO confirm) and instead counts mbuf
                 * low-watermark attention events: at most one per poll,
                 * clearing the latched attention bit each time.
                 */
                u32 val = tr32(HOSTCC_FLOW_ATTN);
                val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
                if (val) {
                        tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
                        sp->rx_discards.low += val;
                        if (sp->rx_discards.low < val)
                                sp->rx_discards.high += 1;
                }
                /* Mirror the discard count into the low-watermark stat. */
                sp->mbuf_lwm_thresh_hit = sp->rx_discards;
        }
        TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
}
8826
/* Driver watchdog timer, re-armed every tp->timer_offset jiffies.
 * Handles the non-tagged-status interrupt race workaround on every
 * tick, link polling and statistics collection once per second
 * (timer_counter), and the ASF firmware heartbeat every two seconds
 * (asf_counter).  Takes tp->lock for the body and always reschedules
 * itself before returning (except when it hands off to reset_task).
 */
static void tg3_timer(unsigned long __opaque)
{
        struct tg3 *tp = (struct tg3 *) __opaque;

        /* Interrupts are being synchronized (device reset/teardown in
         * progress) — don't touch the hardware, just re-arm.
         */
        if (tp->irq_sync)
                goto restart_timer;

        spin_lock(&tp->lock);

        if (!tg3_flag(tp, TAGGED_STATUS)) {
                /* All of this garbage is because when using non-tagged
                 * IRQ status the mailbox/status_block protocol the chip
                 * uses with the cpu is race prone.
                 */
                if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
                        /* Status block updated but possibly unserviced:
                         * force an interrupt via the GRC local control
                         * register so it gets handled.
                         */
                        tw32(GRC_LOCAL_CTRL,
                             tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
                } else {
                        /* Kick host coalescing to refresh the status
                         * block immediately.
                         */
                        tw32(HOSTCC_MODE, tp->coalesce_mode |
                             HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
                }

                if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
                        /* Write DMA engine no longer enabled — the chip
                         * is wedged; defer a full reset to the
                         * workqueue and bail out without re-arming
                         * (reset_task takes over).
                         */
                        tg3_flag_set(tp, RESTART_TIMER);
                        spin_unlock(&tp->lock);
                        schedule_work(&tp->reset_task);
                        return;
                }
        }

        /* This part only runs once per second. */
        if (!--tp->timer_counter) {
                if (tg3_flag(tp, 5705_PLUS))
                        tg3_periodic_fetch_stats(tp);

                /* Delayed EEE (low-power idle) enable countdown. */
                if (tp->setlpicnt && !--tp->setlpicnt)
                        tg3_phy_eee_enable(tp);

                if (tg3_flag(tp, USE_LINKCHG_REG)) {
                        /* Poll MAC_STATUS for link/PHY events instead of
                         * relying on a link-change interrupt.
                         */
                        u32 mac_stat;
                        int phy_event;

                        mac_stat = tr32(MAC_STATUS);

                        phy_event = 0;
                        if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
                                if (mac_stat & MAC_STATUS_MI_INTERRUPT)
                                        phy_event = 1;
                        } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
                                phy_event = 1;

                        if (phy_event)
                                tg3_setup_phy(tp, 0);
                } else if (tg3_flag(tp, POLL_SERDES)) {
                        u32 mac_stat = tr32(MAC_STATUS);
                        int need_setup = 0;

                        /* Link up but state changed, or link down with
                         * sync/signal present: renegotiate.
                         */
                        if (netif_carrier_ok(tp->dev) &&
                            (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
                                need_setup = 1;
                        }
                        if (!netif_carrier_ok(tp->dev) &&
                            (mac_stat & (MAC_STATUS_PCS_SYNCED |
                                         MAC_STATUS_SIGNAL_DET))) {
                                need_setup = 1;
                        }
                        if (need_setup) {
                                if (!tp->serdes_counter) {
                                        /* Momentarily clear the port
                                         * mode bits to bounce the
                                         * SERDES link state machine.
                                         */
                                        tw32_f(MAC_MODE,
                                             (tp->mac_mode &
                                              ~MAC_MODE_PORT_MODE_MASK));
                                        udelay(40);
                                        tw32_f(MAC_MODE, tp->mac_mode);
                                        udelay(40);
                                }
                                tg3_setup_phy(tp, 0);
                        }
                } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
                           tg3_flag(tp, 5780_CLASS)) {
                        tg3_serdes_parallel_detect(tp);
                }

                tp->timer_counter = tp->timer_multiplier;
        }

        /* Heartbeat is only sent once every 2 seconds.
         *
         * The heartbeat is to tell the ASF firmware that the host
         * driver is still alive.  In the event that the OS crashes,
         * ASF needs to reset the hardware to free up the FIFO space
         * that may be filled with rx packets destined for the host.
         * If the FIFO is full, ASF will no longer function properly.
         *
         * Unintended resets have been reported on real time kernels
         * where the timer doesn't run on time.  Netpoll will also have
         * same problem.
         *
         * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
         * to check the ring condition when the heartbeat is expiring
         * before doing the reset.  This will prevent most unintended
         * resets.
         */
        if (!--tp->asf_counter) {
                if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
                        tg3_wait_for_event_ack(tp);

                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
                                      FWCMD_NICDRV_ALIVE3);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
                        tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
                                      TG3_FW_UPDATE_TIMEOUT_SEC);

                        tg3_generate_fw_event(tp);
                }
                tp->asf_counter = tp->asf_multiplier;
        }

        spin_unlock(&tp->lock);

restart_timer:
        tp->timer.expires = jiffies + tp->timer_offset;
        add_timer(&tp->timer);
}
8950
8951 static int tg3_request_irq(struct tg3 *tp, int irq_num)
8952 {
8953         irq_handler_t fn;
8954         unsigned long flags;
8955         char *name;
8956         struct tg3_napi *tnapi = &tp->napi[irq_num];
8957
8958         if (tp->irq_cnt == 1)
8959                 name = tp->dev->name;
8960         else {
8961                 name = &tnapi->irq_lbl[0];
8962                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
8963                 name[IFNAMSIZ-1] = 0;
8964         }
8965
8966         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
8967                 fn = tg3_msi;
8968                 if (tg3_flag(tp, 1SHOT_MSI))
8969                         fn = tg3_msi_1shot;
8970                 flags = 0;
8971         } else {
8972                 fn = tg3_interrupt;
8973                 if (tg3_flag(tp, TAGGED_STATUS))
8974                         fn = tg3_interrupt_tagged;
8975                 flags = IRQF_SHARED;
8976         }
8977
8978         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
8979 }
8980
/* Verify that the chip can actually deliver an interrupt on vector 0.
 * Temporarily swaps in tg3_test_isr, forces a host-coalescing "now"
 * event, and polls for up to ~50ms for evidence of delivery (non-zero
 * interrupt mailbox, or the PCI-int mask bit in MISC_HOST_CTRL).  The
 * normal handler is re-requested before returning.  Returns 0 if an
 * interrupt was observed, -EIO if not, or a negative errno on setup
 * failure.  Device must be running.
 */
static int tg3_test_interrupt(struct tg3 *tp)
{
        struct tg3_napi *tnapi = &tp->napi[0];
        struct net_device *dev = tp->dev;
        int err, i, intr_ok = 0;
        u32 val;

        if (!netif_running(dev))
                return -ENODEV;

        tg3_disable_ints(tp);

        /* Release vector 0 so it can be re-requested with the test ISR. */
        free_irq(tnapi->irq_vec, tnapi);

        /*
         * Turn off MSI one shot mode.  Otherwise this test has no
         * observable way to know whether the interrupt was delivered.
         */
        if (tg3_flag(tp, 57765_PLUS)) {
                val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
        }

        err = request_irq(tnapi->irq_vec, tg3_test_isr,
                          IRQF_SHARED | IRQF_SAMPLE_RANDOM, dev->name, tnapi);
        if (err)
                return err;

        tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
        tg3_enable_ints(tp);

        /* Force an immediate status-block update to trigger an IRQ. */
        tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
               tnapi->coal_now);

        for (i = 0; i < 5; i++) {
                u32 int_mbox, misc_host_ctrl;

                int_mbox = tr32_mailbox(tnapi->int_mbox);
                misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);

                if ((int_mbox != 0) ||
                    (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
                        intr_ok = 1;
                        break;
                }

                /* Acknowledge the latest tag so a pending update can
                 * generate a fresh interrupt.
                 */
                if (tg3_flag(tp, 57765_PLUS) &&
                    tnapi->hw_status->status_tag != tnapi->last_tag)
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                msleep(10);
        }

        tg3_disable_ints(tp);

        /* Swap the test ISR back out for the normal handler. */
        free_irq(tnapi->irq_vec, tnapi);

        err = tg3_request_irq(tp, 0);

        if (err)
                return err;

        if (intr_ok) {
                /* Reenable MSI one shot mode. */
                if (tg3_flag(tp, 57765_PLUS)) {
                        val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
                        tw32(MSGINT_MODE, val);
                }
                return 0;
        }

        return -EIO;
}
9054
/* Returns 0 if MSI test succeeds or MSI test fails and INTx mode is
 * successfully restored
 */
static int tg3_test_msi(struct tg3 *tp)
{
        int err;
        u16 pci_cmd;

        /* Nothing to test when not running in MSI mode. */
        if (!tg3_flag(tp, USING_MSI))
                return 0;

        /* Turn off SERR reporting in case MSI terminates with Master
         * Abort.
         */
        pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
        pci_write_config_word(tp->pdev, PCI_COMMAND,
                              pci_cmd & ~PCI_COMMAND_SERR);

        err = tg3_test_interrupt(tp);

        /* Restore the saved PCI command word (re-enables SERR). */
        pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);

        if (!err)
                return 0;

        /* other failures */
        if (err != -EIO)
                return err;

        /* MSI test failed, go back to INTx mode */
        netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
                    "to INTx mode. Please report this failure to the PCI "
                    "maintainer and include system chipset information\n");

        free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        pci_disable_msi(tp->pdev);

        /* Fall back to the legacy interrupt line for vector 0. */
        tg3_flag_clear(tp, USING_MSI);
        tp->napi[0].irq_vec = tp->pdev->irq;

        err = tg3_request_irq(tp, 0);
        if (err)
                return err;

        /* Need to reset the chip because the MSI cycle may have terminated
         * with Master Abort.
         */
        tg3_full_lock(tp, 1);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        err = tg3_init_hw(tp, 1);

        tg3_full_unlock(tp);

        if (err)
                free_irq(tp->napi[0].irq_vec, &tp->napi[0]);

        return err;
}
9115
9116 static int tg3_request_firmware(struct tg3 *tp)
9117 {
9118         const __be32 *fw_data;
9119
9120         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9121                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9122                            tp->fw_needed);
9123                 return -ENOENT;
9124         }
9125
9126         fw_data = (void *)tp->fw->data;
9127
9128         /* Firmware blob starts with version numbers, followed by
9129          * start address and _full_ length including BSS sections
9130          * (which must be longer than the actual data, of course
9131          */
9132
9133         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9134         if (tp->fw_len < (tp->fw->size - 12)) {
9135                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9136                            tp->fw_len, tp->fw_needed);
9137                 release_firmware(tp->fw);
9138                 tp->fw = NULL;
9139                 return -EINVAL;
9140         }
9141
9142         /* We no longer need firmware; we have it. */
9143         tp->fw_needed = NULL;
9144         return 0;
9145 }
9146
/* Try to put the device into MSI-X mode with one RX vector per online
 * cpu plus one extra vector (vector 0 handles link interrupts etc.),
 * capped at tp->irq_max.  On success sets tp->irq_cnt, fills the
 * per-napi irq_vec fields, configures the real RX/TX queue counts, and
 * enables RSS (and TSS on 5719/5720) when multiple vectors were
 * granted.  Returns true if MSI-X was enabled, false to fall back to
 * MSI/INTx.
 */
static bool tg3_enable_msix(struct tg3 *tp)
{
        int i, rc, cpus = num_online_cpus();
        struct msix_entry msix_ent[tp->irq_max];

        if (cpus == 1)
                /* Just fallback to the simpler MSI mode. */
                return false;

        /*
         * We want as many rx rings enabled as there are cpus.
         * The first MSIX vector only deals with link interrupts, etc,
         * so we add one to the number of vectors we are requesting.
         */
        tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);

        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
                msix_ent[i].vector = 0;
        }

        rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
        if (rc < 0) {
                return false;
        } else if (rc != 0) {
                /* A positive return is the number of vectors actually
                 * available; retry the request with that count.
                 */
                if (pci_enable_msix(tp->pdev, msix_ent, rc))
                        return false;
                netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
                              tp->irq_cnt, rc);
                tp->irq_cnt = rc;
        }

        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].irq_vec = msix_ent[i].vector;

        netif_set_real_num_tx_queues(tp->dev, 1);
        /* RX queues = vectors minus the link vector (at least 1). */
        rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
        if (netif_set_real_num_rx_queues(tp->dev, rc)) {
                pci_disable_msix(tp->pdev);
                return false;
        }

        if (tp->irq_cnt > 1) {
                tg3_flag_set(tp, ENABLE_RSS);

                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
                        tg3_flag_set(tp, ENABLE_TSS);
                        netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
                }
        }

        return true;
}
9201
/* Choose and enable the interrupt mode for the device: MSI-X if
 * supported and successfully negotiated, else MSI, else legacy INTx.
 * Programs MSGINT_MODE for message-signaled modes and, for anything
 * other than MSI-X, collapses to a single vector/queue configuration.
 */
static void tg3_ints_init(struct tg3 *tp)
{
        if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
            !tg3_flag(tp, TAGGED_STATUS)) {
                /* All MSI supporting chips should support tagged
                 * status.  Assert that this is the case.
                 */
                netdev_warn(tp->dev,
                            "MSI without TAGGED_STATUS? Not using MSI\n");
                goto defcfg;
        }

        if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
                tg3_flag_set(tp, USING_MSIX);
        else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
                tg3_flag_set(tp, USING_MSI);

        if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
                u32 msi_mode = tr32(MSGINT_MODE);
                /* Multi-vector mode needs the chip's multivec enable bit. */
                if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
                        msi_mode |= MSGINT_MODE_MULTIVEC_EN;
                tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
        }
defcfg:
        /* MSI and INTx both use a single vector and single RX/TX queue. */
        if (!tg3_flag(tp, USING_MSIX)) {
                tp->irq_cnt = 1;
                tp->napi[0].irq_vec = tp->pdev->irq;
                netif_set_real_num_tx_queues(tp->dev, 1);
                netif_set_real_num_rx_queues(tp->dev, 1);
        }
}
9233
9234 static void tg3_ints_fini(struct tg3 *tp)
9235 {
9236         if (tg3_flag(tp, USING_MSIX))
9237                 pci_disable_msix(tp->pdev);
9238         else if (tg3_flag(tp, USING_MSI))
9239                 pci_disable_msi(tp->pdev);
9240         tg3_flag_clear(tp, USING_MSI);
9241         tg3_flag_clear(tp, USING_MSIX);
9242         tg3_flag_clear(tp, ENABLE_RSS);
9243         tg3_flag_clear(tp, ENABLE_TSS);
9244 }
9245
9246 static int tg3_open(struct net_device *dev)
9247 {
9248         struct tg3 *tp = netdev_priv(dev);
9249         int i, err;
9250
9251         if (tp->fw_needed) {
9252                 err = tg3_request_firmware(tp);
9253                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9254                         if (err)
9255                                 return err;
9256                 } else if (err) {
9257                         netdev_warn(tp->dev, "TSO capability disabled\n");
9258                         tg3_flag_clear(tp, TSO_CAPABLE);
9259                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9260                         netdev_notice(tp->dev, "TSO capability restored\n");
9261                         tg3_flag_set(tp, TSO_CAPABLE);
9262                 }
9263         }
9264
9265         netif_carrier_off(tp->dev);
9266
9267         err = tg3_power_up(tp);
9268         if (err)
9269                 return err;
9270
9271         tg3_full_lock(tp, 0);
9272
9273         tg3_disable_ints(tp);
9274         tg3_flag_clear(tp, INIT_COMPLETE);
9275
9276         tg3_full_unlock(tp);
9277
9278         /*
9279          * Setup interrupts first so we know how
9280          * many NAPI resources to allocate
9281          */
9282         tg3_ints_init(tp);
9283
9284         /* The placement of this call is tied
9285          * to the setup and use of Host TX descriptors.
9286          */
9287         err = tg3_alloc_consistent(tp);
9288         if (err)
9289                 goto err_out1;
9290
9291         tg3_napi_init(tp);
9292
9293         tg3_napi_enable(tp);
9294
9295         for (i = 0; i < tp->irq_cnt; i++) {
9296                 struct tg3_napi *tnapi = &tp->napi[i];
9297                 err = tg3_request_irq(tp, i);
9298                 if (err) {
9299                         for (i--; i >= 0; i--)
9300                                 free_irq(tnapi->irq_vec, tnapi);
9301                         break;
9302                 }
9303         }
9304
9305         if (err)
9306                 goto err_out2;
9307
9308         tg3_full_lock(tp, 0);
9309
9310         err = tg3_init_hw(tp, 1);
9311         if (err) {
9312                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9313                 tg3_free_rings(tp);
9314         } else {
9315                 if (tg3_flag(tp, TAGGED_STATUS))
9316                         tp->timer_offset = HZ;
9317                 else
9318                         tp->timer_offset = HZ / 10;
9319
9320                 BUG_ON(tp->timer_offset > HZ);
9321                 tp->timer_counter = tp->timer_multiplier =
9322                         (HZ / tp->timer_offset);
9323                 tp->asf_counter = tp->asf_multiplier =
9324                         ((HZ / tp->timer_offset) * 2);
9325
9326                 init_timer(&tp->timer);
9327                 tp->timer.expires = jiffies + tp->timer_offset;
9328                 tp->timer.data = (unsigned long) tp;
9329                 tp->timer.function = tg3_timer;
9330         }
9331
9332         tg3_full_unlock(tp);
9333
9334         if (err)
9335                 goto err_out3;
9336
9337         if (tg3_flag(tp, USING_MSI)) {
9338                 err = tg3_test_msi(tp);
9339
9340                 if (err) {
9341                         tg3_full_lock(tp, 0);
9342                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9343                         tg3_free_rings(tp);
9344                         tg3_full_unlock(tp);
9345
9346                         goto err_out2;
9347                 }
9348
9349                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
9350                         u32 val = tr32(PCIE_TRANSACTION_CFG);
9351
9352                         tw32(PCIE_TRANSACTION_CFG,
9353                              val | PCIE_TRANS_CFG_1SHOT_MSI);
9354                 }
9355         }
9356
9357         tg3_phy_start(tp);
9358
9359         tg3_full_lock(tp, 0);
9360
9361         add_timer(&tp->timer);
9362         tg3_flag_set(tp, INIT_COMPLETE);
9363         tg3_enable_ints(tp);
9364
9365         tg3_full_unlock(tp);
9366
9367         netif_tx_start_all_queues(dev);
9368
9369         /*
9370          * Reset loopback feature if it was turned on while the device was down
9371          * make sure that it's installed properly now.
9372          */
9373         if (dev->features & NETIF_F_LOOPBACK)
9374                 tg3_set_loopback(dev, dev->features);
9375
9376         return 0;
9377
9378 err_out3:
9379         for (i = tp->irq_cnt - 1; i >= 0; i--) {
9380                 struct tg3_napi *tnapi = &tp->napi[i];
9381                 free_irq(tnapi->irq_vec, tnapi);
9382         }
9383
9384 err_out2:
9385         tg3_napi_disable(tp);
9386         tg3_napi_fini(tp);
9387         tg3_free_consistent(tp);
9388
9389 err_out1:
9390         tg3_ints_fini(tp);
9391         return err;
9392 }
9393
9394 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
9395                                                  struct rtnl_link_stats64 *);
9396 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
9397
/* net_device_ops .ndo_stop: quiesce the device, snapshot statistics,
 * release rings/IRQs and drop into low-power state.  Always returns 0.
 */
static int tg3_close(struct net_device *dev)
{
        int i;
        struct tg3 *tp = netdev_priv(dev);

        /* Stop NAPI polling and any pending reset work before teardown. */
        tg3_napi_disable(tp);
        cancel_work_sync(&tp->reset_task);

        netif_tx_stop_all_queues(dev);

        del_timer_sync(&tp->timer);

        tg3_phy_stop(tp);

        /* irq_sync=1: wait for in-flight interrupt handlers to finish. */
        tg3_full_lock(tp, 1);

        tg3_disable_ints(tp);

        tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
        tg3_free_rings(tp);
        tg3_flag_clear(tp, INIT_COMPLETE);

        tg3_full_unlock(tp);

        for (i = tp->irq_cnt - 1; i >= 0; i--) {
                struct tg3_napi *tnapi = &tp->napi[i];
                free_irq(tnapi->irq_vec, tnapi);
        }

        tg3_ints_fini(tp);

        /* Snapshot counters into *_prev so totals survive a close/open
         * cycle; the hardware stats block is freed just below.
         */
        tg3_get_stats64(tp->dev, &tp->net_stats_prev);

        memcpy(&tp->estats_prev, tg3_get_estats(tp),
               sizeof(tp->estats_prev));

        tg3_napi_fini(tp);

        tg3_free_consistent(tp);

        tg3_power_down(tp);

        netif_carrier_off(tp->dev);

        return 0;
}
9444
/* Combine the two 32-bit halves of a hardware statistics counter into
 * a single 64-bit value.
 */
static inline u64 get_stat64(tg3_stat64_t *val)
{
        u64 stat = val->high;

        return (stat << 32) | val->low;
}
9449
/* Return the cumulative receive CRC error count.  5700/5701 copper
 * devices report CRC errors via a PHY test register rather than the
 * hardware stats block, so for them the PHY counter is read under the
 * lock and accumulated in tp->phy_crc_errors; all other devices use
 * the MAC's rx_fcs_errors statistic.
 */
static u64 calc_crc_errors(struct tg3 *tp)
{
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
            (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;

                spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        /* Enable the CRC counter, then read it. */
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;
                spin_unlock_bh(&tp->lock);

                tp->phy_crc_errors += val;

                return tp->phy_crc_errors;
        }

        return get_stat64(&hw_stats->rx_fcs_errors);
}
9475
/* Add the live hardware counter for @member to the snapshot taken at
 * the last device close, storing the running total in 'estats'.
 */
#define ESTAT_ADD(member) \
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)

/* Fill tp->estats with cumulative ethtool statistics (pre-close
 * snapshot plus current hardware counters).  If the hardware stats
 * block is gone (device closed), return the snapshot unchanged.
 */
static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
{
        struct tg3_ethtool_stats *estats = &tp->estats;
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_estats;

        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
        ESTAT_ADD(rx_ucast_packets);
        ESTAT_ADD(rx_mcast_packets);
        ESTAT_ADD(rx_bcast_packets);
        ESTAT_ADD(rx_fcs_errors);
        ESTAT_ADD(rx_align_errors);
        ESTAT_ADD(rx_xon_pause_rcvd);
        ESTAT_ADD(rx_xoff_pause_rcvd);
        ESTAT_ADD(rx_mac_ctrl_rcvd);
        ESTAT_ADD(rx_xoff_entered);
        ESTAT_ADD(rx_frame_too_long_errors);
        ESTAT_ADD(rx_jabbers);
        ESTAT_ADD(rx_undersize_packets);
        ESTAT_ADD(rx_in_length_errors);
        ESTAT_ADD(rx_out_length_errors);
        ESTAT_ADD(rx_64_or_less_octet_packets);
        ESTAT_ADD(rx_65_to_127_octet_packets);
        ESTAT_ADD(rx_128_to_255_octet_packets);
        ESTAT_ADD(rx_256_to_511_octet_packets);
        ESTAT_ADD(rx_512_to_1023_octet_packets);
        ESTAT_ADD(rx_1024_to_1522_octet_packets);
        ESTAT_ADD(rx_1523_to_2047_octet_packets);
        ESTAT_ADD(rx_2048_to_4095_octet_packets);
        ESTAT_ADD(rx_4096_to_8191_octet_packets);
        ESTAT_ADD(rx_8192_to_9022_octet_packets);

        ESTAT_ADD(tx_octets);
        ESTAT_ADD(tx_collisions);
        ESTAT_ADD(tx_xon_sent);
        ESTAT_ADD(tx_xoff_sent);
        ESTAT_ADD(tx_flow_control);
        ESTAT_ADD(tx_mac_errors);
        ESTAT_ADD(tx_single_collisions);
        ESTAT_ADD(tx_mult_collisions);
        ESTAT_ADD(tx_deferred);
        ESTAT_ADD(tx_excessive_collisions);
        ESTAT_ADD(tx_late_collisions);
        ESTAT_ADD(tx_collide_2times);
        ESTAT_ADD(tx_collide_3times);
        ESTAT_ADD(tx_collide_4times);
        ESTAT_ADD(tx_collide_5times);
        ESTAT_ADD(tx_collide_6times);
        ESTAT_ADD(tx_collide_7times);
        ESTAT_ADD(tx_collide_8times);
        ESTAT_ADD(tx_collide_9times);
        ESTAT_ADD(tx_collide_10times);
        ESTAT_ADD(tx_collide_11times);
        ESTAT_ADD(tx_collide_12times);
        ESTAT_ADD(tx_collide_13times);
        ESTAT_ADD(tx_collide_14times);
        ESTAT_ADD(tx_collide_15times);
        ESTAT_ADD(tx_ucast_packets);
        ESTAT_ADD(tx_mcast_packets);
        ESTAT_ADD(tx_bcast_packets);
        ESTAT_ADD(tx_carrier_sense_errors);
        ESTAT_ADD(tx_discards);
        ESTAT_ADD(tx_errors);

        ESTAT_ADD(dma_writeq_full);
        ESTAT_ADD(dma_write_prioq_full);
        ESTAT_ADD(rxbds_empty);
        ESTAT_ADD(rx_discards);
        ESTAT_ADD(rx_errors);
        ESTAT_ADD(rx_threshold_hit);

        ESTAT_ADD(dma_readq_full);
        ESTAT_ADD(dma_read_prioq_full);
        ESTAT_ADD(tx_comp_queue_full);

        ESTAT_ADD(ring_set_send_prod_index);
        ESTAT_ADD(ring_status_update);
        ESTAT_ADD(nic_irqs);
        ESTAT_ADD(nic_avoided_irqs);
        ESTAT_ADD(nic_tx_threshold_hit);

        ESTAT_ADD(mbuf_lwm_thresh_hit);

        return estats;
}
9569
/* net_device_ops .ndo_get_stats64: fill @stats with cumulative link
 * statistics (pre-close snapshot plus live hardware counters).  If the
 * hardware stats block is unavailable, return the snapshot instead.
 */
static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
                                                 struct rtnl_link_stats64 *stats)
{
        struct tg3 *tp = netdev_priv(dev);
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;

        if (!hw_stats)
                return old_stats;

        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
                get_stat64(&hw_stats->rx_bcast_packets);

        stats->tx_packets = old_stats->tx_packets +
                get_stat64(&hw_stats->tx_ucast_packets) +
                get_stat64(&hw_stats->tx_mcast_packets) +
                get_stat64(&hw_stats->tx_bcast_packets);

        stats->rx_bytes = old_stats->rx_bytes +
                get_stat64(&hw_stats->rx_octets);
        stats->tx_bytes = old_stats->tx_bytes +
                get_stat64(&hw_stats->tx_octets);

        stats->rx_errors = old_stats->rx_errors +
                get_stat64(&hw_stats->rx_errors);
        stats->tx_errors = old_stats->tx_errors +
                get_stat64(&hw_stats->tx_errors) +
                get_stat64(&hw_stats->tx_mac_errors) +
                get_stat64(&hw_stats->tx_carrier_sense_errors) +
                get_stat64(&hw_stats->tx_discards);

        stats->multicast = old_stats->multicast +
                get_stat64(&hw_stats->rx_mcast_packets);
        stats->collisions = old_stats->collisions +
                get_stat64(&hw_stats->tx_collisions);

        stats->rx_length_errors = old_stats->rx_length_errors +
                get_stat64(&hw_stats->rx_frame_too_long_errors) +
                get_stat64(&hw_stats->rx_undersize_packets);

        stats->rx_over_errors = old_stats->rx_over_errors +
                get_stat64(&hw_stats->rxbds_empty);
        stats->rx_frame_errors = old_stats->rx_frame_errors +
                get_stat64(&hw_stats->rx_align_errors);
        stats->tx_aborted_errors = old_stats->tx_aborted_errors +
                get_stat64(&hw_stats->tx_discards);
        stats->tx_carrier_errors = old_stats->tx_carrier_errors +
                get_stat64(&hw_stats->tx_carrier_sense_errors);

        /* CRC errors may come from the PHY on 5700/5701 -- see
         * calc_crc_errors().
         */
        stats->rx_crc_errors = old_stats->rx_crc_errors +
                calc_crc_errors(tp);

        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);

        /* Software drop count maintained by the rx path. */
        stats->rx_dropped = tp->rx_dropped;

        return stats;
}
9631
/* Bit-reflected CRC-32 (polynomial 0xedb88320) over @len bytes of @buf,
 * as used by the Ethernet FCS; the result feeds the MAC multicast hash
 * filter in __tg3_set_rx_mode().
 */
static inline u32 calc_crc(unsigned char *buf, int len)
{
        u32 crc = 0xffffffff;
        int i, bit;

        for (i = 0; i < len; i++) {
                crc ^= buf[i];

                for (bit = 0; bit < 8; bit++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }

        return ~crc;
}
9655
9656 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9657 {
9658         /* accept or reject all multicast frames */
9659         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9660         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9661         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9662         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9663 }
9664
/* Program the receive filters (promiscuous/multicast/VLAN-tag modes)
 * from dev->flags and the device multicast list.  Caller must hold the
 * full lock (see tg3_set_rx_mode()).
 */
static void __tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 rx_mode;

        rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
                                  RX_MODE_KEEP_VLAN_TAG);

#if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
        /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
         * flag clear.
         */
        if (!tg3_flag(tp, ENABLE_ASF))
                rx_mode |= RX_MODE_KEEP_VLAN_TAG;
#endif

        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= RX_MODE_PROMISC;
        } else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast. */
                tg3_set_multi(tp, 1);
        } else if (netdev_mc_empty(dev)) {
                /* Reject all multicast. */
                tg3_set_multi(tp, 0);
        } else {
                /* Accept one or more multicast(s). */
                struct netdev_hw_addr *ha;
                u32 mc_filter[4] = { 0, };
                u32 regidx;
                u32 bit;
                u32 crc;

                /* Hash each address into one bit of the 128-bit filter:
                 * the low 7 bits of the inverted CRC select register
                 * (top 2 bits) and bit position (low 5 bits).
                 */
                netdev_for_each_mc_addr(ha, dev) {
                        crc = calc_crc(ha->addr, ETH_ALEN);
                        bit = ~crc & 0x7f;
                        regidx = (bit & 0x60) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                tw32(MAC_HASH_REG_0, mc_filter[0]);
                tw32(MAC_HASH_REG_1, mc_filter[1]);
                tw32(MAC_HASH_REG_2, mc_filter[2]);
                tw32(MAC_HASH_REG_3, mc_filter[3]);
        }

        /* Only touch the RX mode register when something changed. */
        if (rx_mode != tp->rx_mode) {
                tp->rx_mode = rx_mode;
                tw32_f(MAC_RX_MODE, rx_mode);
                udelay(10);
        }
}
9718
/* net_device_ops .ndo_set_rx_mode: take the full lock and reprogram
 * the receive filters.  A no-op while the interface is down -- the
 * filters are programmed from scratch on the next open.
 */
static void tg3_set_rx_mode(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        if (netif_running(dev)) {
                tg3_full_lock(tp, 0);
                __tg3_set_rx_mode(dev);
                tg3_full_unlock(tp);
        }
}
9730
/* ethtool .get_regs_len: size in bytes of the register dump produced
 * by tg3_get_regs().
 */
static int tg3_get_regs_len(struct net_device *dev)
{
        return TG3_REG_BLK_SIZE;
}
9735
/* ethtool .get_regs: copy a snapshot of the chip registers into @_p.
 * The buffer is zeroed first; registers are not readable while the
 * device is in low-power state, so the dump is left all-zero then.
 */
static void tg3_get_regs(struct net_device *dev,
                struct ethtool_regs *regs, void *_p)
{
        struct tg3 *tp = netdev_priv(dev);

        regs->version = 0;

        memset(_p, 0, TG3_REG_BLK_SIZE);

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return;

        tg3_full_lock(tp, 0);

        tg3_dump_legacy_regs(tp, (u32 *)_p);

        tg3_full_unlock(tp);
}
9754
/* ethtool .get_eeprom_len: report the NVRAM size probed at init time,
 * which bounds the EEPROM window exposed through ethtool.
 */
static int tg3_get_eeprom_len(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);

        return tp->nvram_size;
}
9761
/* ethtool .get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into @data.  NVRAM is accessed in 32-bit words, so an
 * unaligned head and tail are handled by reading the enclosing word.
 * On a mid-stream failure, eeprom->len reflects the bytes actually
 * copied.  Returns 0 or a negative errno.
 */
static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u8  *pd;
        u32 i, offset, len, b_offset, b_count;
        __be32 val;

        if (tg3_flag(tp, NO_NVRAM))
                return -EINVAL;

        /* NVRAM is not accessible while the chip is powered down. */
        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        offset = eeprom->offset;
        len = eeprom->len;
        eeprom->len = 0;

        eeprom->magic = TG3_EEPROM_MAGIC;

        if (offset & 3) {
                /* adjustments to start on required 4 byte boundary */
                b_offset = offset & 3;
                b_count = 4 - b_offset;
                if (b_count > len) {
                        /* i.e. offset=1 len=2 */
                        b_count = len;
                }
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
                if (ret)
                        return ret;
                memcpy(data, ((char *)&val) + b_offset, b_count);
                len -= b_count;
                offset += b_count;
                eeprom->len += b_count;
        }

        /* read bytes up to the last 4 byte boundary */
        pd = &data[eeprom->len];
        for (i = 0; i < (len - (len & 3)); i += 4) {
                ret = tg3_nvram_read_be32(tp, offset + i, &val);
                if (ret) {
                        eeprom->len += i;
                        return ret;
                }
                memcpy(pd + i, &val, 4);
        }
        eeprom->len += i;

        if (len & 3) {
                /* read last bytes not ending on 4 byte boundary */
                pd = &data[eeprom->len];
                b_count = len & 3;
                b_offset = offset + len - b_count;
                ret = tg3_nvram_read_be32(tp, b_offset, &val);
                if (ret)
                        return ret;
                memcpy(pd, &val, b_count);
                eeprom->len += b_count;
        }
        return 0;
}
9824
9825 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf);
9826
/* ethtool .set_eeprom: write eeprom->len bytes from @data to NVRAM at
 * eeprom->offset.  NVRAM writes are word-based, so unaligned head/tail
 * bytes force a read-modify-write through a temporary bounce buffer
 * that preserves the neighbouring bytes.  Returns 0 or negative errno.
 */
static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
{
        struct tg3 *tp = netdev_priv(dev);
        int ret;
        u32 offset, len, b_offset, odd_len;
        u8 *buf;
        __be32 start, end;

        if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
                return -EAGAIN;

        /* The caller must echo back the magic read via get_eeprom. */
        if (tg3_flag(tp, NO_NVRAM) ||
            eeprom->magic != TG3_EEPROM_MAGIC)
                return -EINVAL;

        offset = eeprom->offset;
        len = eeprom->len;

        if ((b_offset = (offset & 3))) {
                /* adjustments to start on required 4 byte boundary */
                ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
                if (ret)
                        return ret;
                len += b_offset;
                offset &= ~3;
                if (len < 4)
                        len = 4;
        }

        odd_len = 0;
        if (len & 3) {
                /* adjustments to end on required 4 byte boundary */
                odd_len = 1;
                len = (len + 3) & ~3;
                ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
                if (ret)
                        return ret;
        }

        buf = data;
        if (b_offset || odd_len) {
                /* Bounce buffer: saved first/last words around the
                 * caller's payload so partial words are preserved.
                 */
                buf = kmalloc(len, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;
                if (b_offset)
                        memcpy(buf, &start, 4);
                if (odd_len)
                        memcpy(buf+len-4, &end, 4);
                memcpy(buf + b_offset, data, eeprom->len);
        }

        ret = tg3_nvram_write_block(tp, offset, len, buf);

        if (buf != data)
                kfree(buf);

        return ret;
}
9885
/* ethtool .get_settings: report supported/advertised link modes and the
 * current speed/duplex.  When phylib manages the PHY, defer to it.
 */
static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_gset(phydev, cmd);
        }

        cmd->supported = (SUPPORTED_Autoneg);

        if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                cmd->supported |= (SUPPORTED_1000baseT_Half |
                                   SUPPORTED_1000baseT_Full);

        /* Copper devices add 10/100 + TP; serdes devices are fibre. */
        if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
                cmd->supported |= (SUPPORTED_100baseT_Half |
                                  SUPPORTED_100baseT_Full |
                                  SUPPORTED_10baseT_Half |
                                  SUPPORTED_10baseT_Full |
                                  SUPPORTED_TP);
                cmd->port = PORT_TP;
        } else {
                cmd->supported |= SUPPORTED_FIBRE;
                cmd->port = PORT_FIBRE;
        }

        cmd->advertising = tp->link_config.advertising;
        /* Speed/duplex are only meaningful while the interface is up. */
        if (netif_running(dev)) {
                ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
                cmd->duplex = tp->link_config.active_duplex;
        } else {
                ethtool_cmd_speed_set(cmd, SPEED_INVALID);
                cmd->duplex = DUPLEX_INVALID;
        }
        cmd->phy_address = tp->phy_addr;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = tp->link_config.autoneg;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 0;
        return 0;
}
9931
/* ethtool .set_settings: validate and apply the requested autoneg /
 * speed / duplex / advertising configuration, then restart the PHY if
 * the interface is up.  When phylib manages the PHY, defer to it.
 * Returns 0 or -EINVAL/-EAGAIN.
 */
static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct tg3 *tp = netdev_priv(dev);
        u32 speed = ethtool_cmd_speed(cmd);

        if (tg3_flag(tp, USE_PHYLIB)) {
                struct phy_device *phydev;
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
                return phy_ethtool_sset(phydev, cmd);
        }

        if (cmd->autoneg != AUTONEG_ENABLE &&
            cmd->autoneg != AUTONEG_DISABLE)
                return -EINVAL;

        /* Forced mode needs an explicit duplex. */
        if (cmd->autoneg == AUTONEG_DISABLE &&
            cmd->duplex != DUPLEX_FULL &&
            cmd->duplex != DUPLEX_HALF)
                return -EINVAL;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                /* Build the mask of modes this device can advertise and
                 * reject requests outside it.
                 */
                u32 mask = ADVERTISED_Autoneg |
                           ADVERTISED_Pause |
                           ADVERTISED_Asym_Pause;

                if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
                        mask |= ADVERTISED_1000baseT_Half |
                                ADVERTISED_1000baseT_Full;

                if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
                        mask |= ADVERTISED_100baseT_Half |
                                ADVERTISED_100baseT_Full |
                                ADVERTISED_10baseT_Half |
                                ADVERTISED_10baseT_Full |
                                ADVERTISED_TP;
                else
                        mask |= ADVERTISED_FIBRE;

                if (cmd->advertising & ~mask)
                        return -EINVAL;

                /* Strip everything except the speed/duplex modes before
                 * storing the advertising word.
                 */
                mask &= (ADVERTISED_1000baseT_Half |
                         ADVERTISED_1000baseT_Full |
                         ADVERTISED_100baseT_Half |
                         ADVERTISED_100baseT_Full |
                         ADVERTISED_10baseT_Half |
                         ADVERTISED_10baseT_Full);

                cmd->advertising &= mask;
        } else {
                /* Forced mode: serdes parts are 1000/full only; copper
                 * parts may be forced to 10 or 100 only.
                 */
                if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
                        if (speed != SPEED_1000)
                                return -EINVAL;

                        if (cmd->duplex != DUPLEX_FULL)
                                return -EINVAL;
                } else {
                        if (speed != SPEED_100 &&
                            speed != SPEED_10)
                                return -EINVAL;
                }
        }

        tg3_full_lock(tp, 0);

        tp->link_config.autoneg = cmd->autoneg;
        if (cmd->autoneg == AUTONEG_ENABLE) {
                tp->link_config.advertising = (cmd->advertising |
                                              ADVERTISED_Autoneg);
                tp->link_config.speed = SPEED_INVALID;
                tp->link_config.duplex = DUPLEX_INVALID;
        } else {
                tp->link_config.advertising = 0;
                tp->link_config.speed = speed;
                tp->link_config.duplex = cmd->duplex;
        }

        /* Remember the requested configuration so it can be restored
         * after power transitions.
         */
        tp->link_config.orig_speed = tp->link_config.speed;
        tp->link_config.orig_duplex = tp->link_config.duplex;
        tp->link_config.orig_autoneg = tp->link_config.autoneg;

        if (netif_running(dev))
                tg3_setup_phy(tp, 1);

        tg3_full_unlock(tp);

        return 0;
}
10022
10023 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10024 {
10025         struct tg3 *tp = netdev_priv(dev);
10026
10027         strcpy(info->driver, DRV_MODULE_NAME);
10028         strcpy(info->version, DRV_MODULE_VERSION);
10029         strcpy(info->fw_version, tp->fw_ver);
10030         strcpy(info->bus_info, pci_name(tp->pdev));
10031 }
10032
10033 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10034 {
10035         struct tg3 *tp = netdev_priv(dev);
10036
10037         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10038                 wol->supported = WAKE_MAGIC;
10039         else
10040                 wol->supported = 0;
10041         wol->wolopts = 0;
10042         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10043                 wol->wolopts = WAKE_MAGIC;
10044         memset(&wol->sopass, 0, sizeof(wol->sopass));
10045 }
10046
/* ethtool .set_wol: enable or disable magic-packet Wake-on-LAN.  Only
 * WAKE_MAGIC is supported, and only when the hardware and platform both
 * allow wakeup.  Returns 0 or -EINVAL.
 */
static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
        struct tg3 *tp = netdev_priv(dev);
        struct device *dp = &tp->pdev->dev;

        if (wol->wolopts & ~WAKE_MAGIC)
                return -EINVAL;
        if ((wol->wolopts & WAKE_MAGIC) &&
            !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
                return -EINVAL;

        /* Record the policy with the PM core, then mirror it into the
         * driver flag under the lock.
         */
        device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);

        spin_lock_bh(&tp->lock);
        if (device_may_wakeup(dp))
                tg3_flag_set(tp, WOL_ENABLE);
        else
                tg3_flag_clear(tp, WOL_ENABLE);
        spin_unlock_bh(&tp->lock);

        return 0;
}
10069
10070 static u32 tg3_get_msglevel(struct net_device *dev)
10071 {
10072         struct tg3 *tp = netdev_priv(dev);
10073         return tp->msg_enable;
10074 }
10075
10076 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10077 {
10078         struct tg3 *tp = netdev_priv(dev);
10079         tp->msg_enable = value;
10080 }
10081
/* ethtool .nway_reset: restart link autonegotiation.  Only valid while
 * the interface is up and on non-serdes PHYs.  Returns 0, -EAGAIN or
 * -EINVAL.
 */
static int tg3_nway_reset(struct net_device *dev)
{
        struct tg3 *tp = netdev_priv(dev);
        int r;

        if (!netif_running(dev))
                return -EAGAIN;

        if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
                return -EINVAL;

        if (tg3_flag(tp, USE_PHYLIB)) {
                if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
                        return -EAGAIN;
                r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
        } else {
                u32 bmcr;

                spin_lock_bh(&tp->lock);
                r = -EINVAL;
                /* BMCR is read twice and the first value discarded.
                 * NOTE(review): looks like a deliberate double read to
                 * obtain a settled value from the PHY -- confirm before
                 * removing.
                 */
                tg3_readphy(tp, MII_BMCR, &bmcr);
                if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
                    ((bmcr & BMCR_ANENABLE) ||
                     (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
                        /* Restart autoneg even in parallel-detect mode. */
                        tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
                                                   BMCR_ANENABLE);
                        r = 0;
                }
                spin_unlock_bh(&tp->lock);
        }

        return r;
}
10115
10116 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10117 {
10118         struct tg3 *tp = netdev_priv(dev);
10119
10120         ering->rx_max_pending = tp->rx_std_ring_mask;
10121         ering->rx_mini_max_pending = 0;
10122         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10123                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10124         else
10125                 ering->rx_jumbo_max_pending = 0;
10126
10127         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10128
10129         ering->rx_pending = tp->rx_pending;
10130         ering->rx_mini_pending = 0;
10131         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10132                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10133         else
10134                 ering->rx_jumbo_pending = 0;
10135
10136         ering->tx_pending = tp->napi[0].tx_pending;
10137 }
10138
/* ethtool .set_ringparam: validate and apply new ring sizes.  If the
 * interface is up, the device is stopped, reconfigured and restarted.
 * Returns 0 or a negative errno.
 */
static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
        struct tg3 *tp = netdev_priv(dev);
        int i, irq_sync = 0, err = 0;

        /* The tx ring must hold at least one maximally-fragmented skb
         * (3x on TSO_BUG chips that linearize large packets).
         */
        if ((ering->rx_pending > tp->rx_std_ring_mask) ||
            (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
            (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
            (ering->tx_pending <= MAX_SKB_FRAGS) ||
            (tg3_flag(tp, TSO_BUG) &&
             (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
                return -EINVAL;

        if (netif_running(dev)) {
                tg3_phy_stop(tp);
                tg3_netif_stop(tp);
                irq_sync = 1;
        }

        tg3_full_lock(tp, irq_sync);

        tp->rx_pending = ering->rx_pending;

        /* Some chips can only post up to 64 standard rx descriptors. */
        if (tg3_flag(tp, MAX_RXPEND_64) &&
            tp->rx_pending > 63)
                tp->rx_pending = 63;
        tp->rx_jumbo_pending = ering->rx_jumbo_pending;

        for (i = 0; i < tp->irq_max; i++)
                tp->napi[i].tx_pending = ering->tx_pending;

        if (netif_running(dev)) {
                tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
                err = tg3_restart_hw(tp, 1);
                if (!err)
                        tg3_netif_start(tp);
        }

        tg3_full_unlock(tp);

        if (irq_sync && !err)
                tg3_phy_start(tp);

        return err;
}
10184
10185 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10186 {
10187         struct tg3 *tp = netdev_priv(dev);
10188
10189         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10190
10191         if (tp->link_config.active_flowctrl & FLOW_CTRL_RX)
10192                 epause->rx_pause = 1;
10193         else
10194                 epause->rx_pause = 0;
10195
10196         if (tp->link_config.active_flowctrl & FLOW_CTRL_TX)
10197                 epause->tx_pause = 1;
10198         else
10199                 epause->tx_pause = 0;
10200 }
10201
/* ethtool set_pauseparam handler: configure RX/TX flow control and
 * pause autonegotiation, via phylib when USE_PHYLIB is set, otherwise
 * through the driver's own link management.
 *
 * Returns 0 on success, -EINVAL if the PHY cannot advertise the
 * requested combination, or an error from phy_start_aneg() /
 * tg3_restart_hw().
 */
static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct tg3 *tp = netdev_priv(dev);
	int err = 0;

	if (tg3_flag(tp, USE_PHYLIB)) {
		u32 newadv;
		struct phy_device *phydev;

		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];

		/* Asymmetric (rx != tx) pause needs SUPPORTED_Asym_Pause. */
		if (!(phydev->supported & SUPPORTED_Pause) ||
		    (!(phydev->supported & SUPPORTED_Asym_Pause) &&
		     (epause->rx_pause != epause->tx_pause)))
			return -EINVAL;

		/* Translate the rx/tx pause request into 802.3 pause
		 * advertisement bits.
		 */
		tp->link_config.flowctrl = 0;
		if (epause->rx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_RX;

			if (epause->tx_pause) {
				tp->link_config.flowctrl |= FLOW_CTRL_TX;
				newadv = ADVERTISED_Pause;
			} else
				newadv = ADVERTISED_Pause |
					 ADVERTISED_Asym_Pause;
		} else if (epause->tx_pause) {
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
			newadv = ADVERTISED_Asym_Pause;
		} else
			newadv = 0;

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);

		if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
			u32 oldadv = phydev->advertising &
				     (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
			if (oldadv != newadv) {
				phydev->advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
				phydev->advertising |= newadv;
				if (phydev->autoneg) {
					/*
					 * Always renegotiate the link to
					 * inform our link partner of our
					 * flow control settings, even if the
					 * flow control is forced.  Let
					 * tg3_adjust_link() do the final
					 * flow control setup.
					 */
					return phy_start_aneg(phydev);
				}
			}

			if (!epause->autoneg)
				tg3_setup_flow_control(tp, 0, 0);
		} else {
			/* PHY not connected yet: stash the advertisement
			 * until the link is brought up.
			 */
			tp->link_config.orig_advertising &=
					~(ADVERTISED_Pause |
					  ADVERTISED_Asym_Pause);
			tp->link_config.orig_advertising |= newadv;
		}
	} else {
		int irq_sync = 0;

		if (netif_running(dev)) {
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		if (epause->autoneg)
			tg3_flag_set(tp, PAUSE_AUTONEG);
		else
			tg3_flag_clear(tp, PAUSE_AUTONEG);
		if (epause->rx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_RX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
		if (epause->tx_pause)
			tp->link_config.flowctrl |= FLOW_CTRL_TX;
		else
			tp->link_config.flowctrl &= ~FLOW_CTRL_TX;

		if (netif_running(dev)) {
			/* Reset and re-init the chip so the new flow
			 * control settings take effect.
			 */
			tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
			err = tg3_restart_hw(tp, 1);
			if (!err)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);
	}

	return err;
}
10303
10304 static int tg3_get_sset_count(struct net_device *dev, int sset)
10305 {
10306         switch (sset) {
10307         case ETH_SS_TEST:
10308                 return TG3_NUM_TEST;
10309         case ETH_SS_STATS:
10310                 return TG3_NUM_STATS;
10311         default:
10312                 return -EOPNOTSUPP;
10313         }
10314 }
10315
10316 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10317 {
10318         switch (stringset) {
10319         case ETH_SS_STATS:
10320                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10321                 break;
10322         case ETH_SS_TEST:
10323                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10324                 break;
10325         default:
10326                 WARN_ON(1);     /* we need a WARN() */
10327                 break;
10328         }
10329 }
10330
/* ethtool set_phys_id handler: blink the port LEDs so the NIC can be
 * physically identified.  Returning 1 for ETHTOOL_ID_ACTIVE asks the
 * ethtool core to call back once per second alternating ON/OFF.
 */
static int tg3_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct tg3 *tp = netdev_priv(dev);

	if (!netif_running(tp->dev))
		return -EAGAIN;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		/* Override link/traffic LEDs: force all speed LEDs and
		 * the traffic LED on.
		 */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_1000MBPS_ON |
		     LED_CTRL_100MBPS_ON |
		     LED_CTRL_10MBPS_ON |
		     LED_CTRL_TRAFFIC_OVERRIDE |
		     LED_CTRL_TRAFFIC_BLINK |
		     LED_CTRL_TRAFFIC_LED);
		break;

	case ETHTOOL_ID_OFF:
		/* Override with everything off. */
		tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
		     LED_CTRL_TRAFFIC_OVERRIDE);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore the normal hardware-driven LED setting. */
		tw32(MAC_LED_CTRL, tp->led_ctrl);
		break;
	}

	return 0;
}
10365
10366 static void tg3_get_ethtool_stats(struct net_device *dev,
10367                                    struct ethtool_stats *estats, u64 *tmp_stats)
10368 {
10369         struct tg3 *tp = netdev_priv(dev);
10370         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
10371 }
10372
/* Read the device's VPD (Vital Product Data) block.
 *
 * If the NVRAM directory carries an extended-VPD entry, the VPD is
 * read from the offset/length that entry points at; otherwise the
 * fixed legacy window (TG3_NVM_VPD_OFF / TG3_NVM_VPD_LEN) is used.
 * Devices without a usable NVRAM image fall back to the PCI VPD
 * capability.
 *
 * Returns a kmalloc'd buffer the caller must kfree(), or NULL on any
 * failure.
 */
static __be32 * tg3_vpd_readblock(struct tg3 *tp)
{
	int i;
	__be32 *buf;
	u32 offset = 0, len = 0;
	u32 magic, val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		/* Scan the NVRAM directory for an extended-VPD entry. */
		for (offset = TG3_NVM_DIR_START;
		     offset < TG3_NVM_DIR_END;
		     offset += TG3_NVM_DIRENT_SIZE) {
			if (tg3_nvram_read(tp, offset, &val))
				return NULL;

			if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
			    TG3_NVM_DIRTYPE_EXTVPD)
				break;
		}

		if (offset != TG3_NVM_DIR_END) {
			/* Directory length field is in 32-bit words. */
			len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
			if (tg3_nvram_read(tp, offset + 4, &offset))
				return NULL;

			offset = tg3_nvram_logical_addr(tp, offset);
		}
	}

	if (!offset || !len) {
		/* No extended-VPD entry found: use the legacy window. */
		offset = TG3_NVM_VPD_OFF;
		len = TG3_NVM_VPD_LEN;
	}

	buf = kmalloc(len, GFP_KERNEL);
	if (buf == NULL)
		return NULL;

	if (magic == TG3_EEPROM_MAGIC) {
		for (i = 0; i < len; i += 4) {
			/* The data is in little-endian format in NVRAM.
			 * Use the big-endian read routines to preserve
			 * the byte order as it exists in NVRAM.
			 */
			if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
				goto error;
		}
	} else {
		u8 *ptr;
		ssize_t cnt;
		unsigned int pos = 0;

		/* No NVRAM image: read through the PCI VPD capability.
		 * Retry up to 3 times; timeout/interrupt yields a
		 * zero-length read rather than a hard failure.
		 */
		ptr = (u8 *)&buf[0];
		for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
			cnt = pci_read_vpd(tp->pdev, pos,
					   len - pos, ptr);
			if (cnt == -ETIMEDOUT || cnt == -EINTR)
				cnt = 0;
			else if (cnt < 0)
				goto error;
		}
		if (pos != len)
			goto error;
	}

	return buf;

error:
	kfree(buf);
	return NULL;
}
10446
#define NVRAM_TEST_SIZE 0x100
#define NVRAM_SELFBOOT_FORMAT1_0_SIZE	0x14
#define NVRAM_SELFBOOT_FORMAT1_2_SIZE	0x18
#define NVRAM_SELFBOOT_FORMAT1_3_SIZE	0x1c
#define NVRAM_SELFBOOT_HW_SIZE 0x20
#define NVRAM_SELFBOOT_DATA_SIZE 0x1c

/* ethtool self-test: verify the NVRAM image.
 *
 * Handles three layouts, distinguished by the magic word: the standard
 * EEPROM image (CRC-protected bootstrap and manufacturing blocks plus
 * a VPD checksum keyword), selfboot firmware images (8-bit additive
 * checksum), and the hardware selfboot format (per-byte parity bits).
 * Returns 0 if the image checks out, -EIO on checksum/parity/read
 * failure, -ENOMEM on allocation failure.
 */
static int tg3_test_nvram(struct tg3 *tp)
{
	u32 csum, magic;
	__be32 *buf;
	int i, j, k, err = 0, size;

	if (tg3_flag(tp, NO_NVRAM))
		return 0;

	if (tg3_nvram_read(tp, 0, &magic) != 0)
		return -EIO;

	/* Determine how many bytes the checksum covers from the magic
	 * word's format/revision fields.  Unknown selfboot revisions
	 * pass trivially (return 0); unknown magics fail.
	 */
	if (magic == TG3_EEPROM_MAGIC)
		size = NVRAM_TEST_SIZE;
	else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
		if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
		    TG3_EEPROM_SB_FORMAT_1) {
			switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
			case TG3_EEPROM_SB_REVISION_0:
				size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_2:
				size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
				break;
			case TG3_EEPROM_SB_REVISION_3:
				size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
				break;
			default:
				return 0;
			}
		} else
			return 0;
	} else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
		size = NVRAM_SELFBOOT_HW_SIZE;
	else
		return -EIO;

	buf = kmalloc(size, GFP_KERNEL);
	if (buf == NULL)
		return -ENOMEM;

	err = -EIO;
	for (i = 0, j = 0; i < size; i += 4, j++) {
		err = tg3_nvram_read_be32(tp, i, &buf[j]);
		if (err)
			break;
	}
	if (i < size)
		goto out;

	/* Selfboot format */
	magic = be32_to_cpu(buf[0]);
	if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
	    TG3_EEPROM_MAGIC_FW) {
		u8 *buf8 = (u8 *) buf, csum8 = 0;

		if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
		    TG3_EEPROM_SB_REVISION_2) {
			/* For rev 2, the csum doesn't include the MBA. */
			for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
				csum8 += buf8[i];
			for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
				csum8 += buf8[i];
		} else {
			for (i = 0; i < size; i++)
				csum8 += buf8[i];
		}

		/* A valid image sums to zero (mod 256). */
		if (csum8 == 0) {
			err = 0;
			goto out;
		}

		err = -EIO;
		goto out;
	}

	if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
	    TG3_EEPROM_MAGIC_HW) {
		u8 data[NVRAM_SELFBOOT_DATA_SIZE];
		u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
		u8 *buf8 = (u8 *) buf;

		/* Separate the parity bits and the data bytes.  */
		for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
			if ((i == 0) || (i == 8)) {
				int l;
				u8 msk;

				/* Bytes 0 and 8 each carry 7 parity bits. */
				for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			} else if (i == 16) {
				int l;
				u8 msk;

				/* Bytes 16 and 17 carry 6 + 8 parity bits. */
				for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;

				for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
					parity[k++] = buf8[i] & msk;
				i++;
			}
			data[j++] = buf8[i];
		}

		/* Each data byte together with its parity bit must have
		 * odd population count.
		 */
		err = -EIO;
		for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
			u8 hw8 = hweight8(data[i]);

			if ((hw8 & 0x1) && parity[i])
				goto out;
			else if (!(hw8 & 0x1) && !parity[i])
				goto out;
		}
		err = 0;
		goto out;
	}

	err = -EIO;

	/* Bootstrap checksum at offset 0x10 */
	csum = calc_crc((unsigned char *) buf, 0x10);
	if (csum != le32_to_cpu(buf[0x10/4]))
		goto out;

	/* Manufacturing block starts at offset 0x74, checksum at 0xfc */
	csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
	if (csum != le32_to_cpu(buf[0xfc/4]))
		goto out;

	kfree(buf);

	/* Finally, validate the VPD read-only section's checksum (RV)
	 * keyword, if present.
	 */
	buf = tg3_vpd_readblock(tp);
	if (!buf)
		return -ENOMEM;

	i = pci_vpd_find_tag((u8 *)buf, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i > 0) {
		j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
		if (j < 0)
			goto out;

		if (i + PCI_VPD_LRDT_TAG_SIZE + j > TG3_NVM_VPD_LEN)
			goto out;

		i += PCI_VPD_LRDT_TAG_SIZE;
		j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
					      PCI_VPD_RO_KEYWORD_CHKSUM);
		if (j > 0) {
			u8 csum8 = 0;

			j += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* Bytes 0..j inclusive must sum to zero. */
			for (i = 0; i <= j; i++)
				csum8 += ((u8 *)buf)[i];

			if (csum8)
				goto out;
		}
	}

	err = 0;

out:
	kfree(buf);
	return err;
}
10624
10625 #define TG3_SERDES_TIMEOUT_SEC  2
10626 #define TG3_COPPER_TIMEOUT_SEC  6
10627
10628 static int tg3_test_link(struct tg3 *tp)
10629 {
10630         int i, max;
10631
10632         if (!netif_running(tp->dev))
10633                 return -ENODEV;
10634
10635         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
10636                 max = TG3_SERDES_TIMEOUT_SEC;
10637         else
10638                 max = TG3_COPPER_TIMEOUT_SEC;
10639
10640         for (i = 0; i < max; i++) {
10641                 if (netif_carrier_ok(tp->dev))
10642                         return 0;
10643
10644                 if (msleep_interruptible(1000))
10645                         break;
10646         }
10647
10648         return -EIO;
10649 }
10650
/* Only test the commonly used registers */
/* ethtool self-test: for each applicable register, write all-zeros and
 * then all-ones through its read/write mask and verify that read-only
 * bits are unaffected and read/write bits latch.  The original value
 * is restored afterwards.  Returns 0 on success, -EIO on the first
 * mismatch.
 */
static int tg3_test_registers(struct tg3 *tp)
{
	int i, is_5705, is_5750;
	u32 offset, read_mask, write_mask, val, save_val, read_val;
	/* Each entry: register offset, chip-applicability flags, mask of
	 * read-only bits, mask of read/write bits.  Terminated by the
	 * 0xffff offset sentinel.
	 */
	static struct {
		u16 offset;
		u16 flags;
#define TG3_FL_5705	0x1
#define TG3_FL_NOT_5705	0x2
#define TG3_FL_NOT_5788	0x4
#define TG3_FL_NOT_5750	0x8
		u32 read_mask;
		u32 write_mask;
	} reg_tbl[] = {
		/* MAC Control Registers */
		{ MAC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00ef6f8c },
		{ MAC_MODE, TG3_FL_5705,
			0x00000000, 0x01ef6b8c },
		{ MAC_STATUS, TG3_FL_NOT_5705,
			0x03800107, 0x00000000 },
		{ MAC_STATUS, TG3_FL_5705,
			0x03800100, 0x00000000 },
		{ MAC_ADDR_0_HIGH, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_ADDR_0_LOW, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_RX_MTU_SIZE, 0x0000,
			0x00000000, 0x0000ffff },
		{ MAC_TX_MODE, 0x0000,
			0x00000000, 0x00000070 },
		{ MAC_TX_LENGTHS, 0x0000,
			0x00000000, 0x00003fff },
		{ MAC_RX_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x000007fc },
		{ MAC_RX_MODE, TG3_FL_5705,
			0x00000000, 0x000007dc },
		{ MAC_HASH_REG_0, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_1, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_2, 0x0000,
			0x00000000, 0xffffffff },
		{ MAC_HASH_REG_3, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive Data and Receive BD Initiator Control Registers. */
		{ RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
			0x00000000, 0x00000003 },
		{ RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+0, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+4, 0x0000,
			0x00000000, 0xffffffff },
		{ RCVDBDI_STD_BD+8, 0x0000,
			0x00000000, 0xffff0002 },
		{ RCVDBDI_STD_BD+0xc, 0x0000,
			0x00000000, 0xffffffff },

		/* Receive BD Initiator Control Registers. */
		{ RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ RCVBDI_STD_THRESH, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },

		/* Host Coalescing Control Registers. */
		{ HOSTCC_MODE, TG3_FL_NOT_5705,
			0x00000000, 0x00000004 },
		{ HOSTCC_MODE, TG3_FL_5705,
			0x00000000, 0x000000f6 },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOL_TICKS, TG3_FL_5705,
			0x00000000, 0x000003ff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
			0x00000000, 0x000000ff },
		{ HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
			0x00000000, 0xffffffff },
		{ HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },
		{ HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
			0xffffffff, 0x00000000 },

		/* Buffer Manager Control Registers. */
		{ BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
			0x00000000, 0x007fff80 },
		{ BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
			0x00000000, 0x007fffff },
		{ BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
			0x00000000, 0x0000003f },
		{ BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_MB_HIGH_WATER, 0x0000,
			0x00000000, 0x000001ff },
		{ BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },
		{ BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
			0xffffffff, 0x00000000 },

		/* Mailbox Registers */
		{ GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
			0x00000000, 0x000001ff },
		{ GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
			0x00000000, 0x000007ff },
		{ GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
			0x00000000, 0x000001ff },

		{ 0xffff, 0x0000, 0x00000000, 0x00000000 },
	};

	is_5705 = is_5750 = 0;
	if (tg3_flag(tp, 5705_PLUS)) {
		is_5705 = 1;
		if (tg3_flag(tp, 5750_PLUS))
			is_5750 = 1;
	}

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		/* Skip entries that don't apply to this chip family. */
		if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
			continue;

		if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
			continue;

		if (tg3_flag(tp, IS_5788) &&
		    (reg_tbl[i].flags & TG3_FL_NOT_5788))
			continue;

		if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
			continue;

		offset = (u32) reg_tbl[i].offset;
		read_mask = reg_tbl[i].read_mask;
		write_mask = reg_tbl[i].write_mask;

		/* Save the original register content */
		save_val = tr32(offset);

		/* Determine the read-only value. */
		read_val = save_val & read_mask;

		/* Write zero to the register, then make sure the read-only bits
		 * are not changed and the read/write bits are all zeros.
		 */
		tw32(offset, 0);

		val = tr32(offset);

		/* Test the read-only and read/write bits. */
		if (((val & read_mask) != read_val) || (val & write_mask))
			goto out;

		/* Write ones to all the bits defined by RdMask and WrMask, then
		 * make sure the read-only bits are not changed and the
		 * read/write bits are all ones.
		 */
		tw32(offset, read_mask | write_mask);

		val = tr32(offset);

		/* Test the read-only bits. */
		if ((val & read_mask) != read_val)
			goto out;

		/* Test the read/write bits. */
		if ((val & write_mask) != write_mask)
			goto out;

		/* Restore the original value before moving on. */
		tw32(offset, save_val);
	}

	return 0;

out:
	if (netif_msg_hw(tp))
		netdev_err(tp->dev,
			   "Register test failed at offset %x\n", offset);
	tw32(offset, save_val);
	return -EIO;
}
10871
10872 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
10873 {
10874         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
10875         int i;
10876         u32 j;
10877
10878         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
10879                 for (j = 0; j < len; j += 4) {
10880                         u32 val;
10881
10882                         tg3_write_mem(tp, offset + j, test_pattern[i]);
10883                         tg3_read_mem(tp, offset + j, &val);
10884                         if (val != test_pattern[i])
10885                                 return -EIO;
10886                 }
10887         }
10888         return 0;
10889 }
10890
/* ethtool self-test: pattern-test the NIC's internal memory regions.
 * The table of testable {offset, len} windows is chosen by ASIC
 * generation; each table is terminated by an 0xffffffff offset
 * sentinel.  Returns 0 on success or the -EIO from the first failing
 * region.
 */
static int tg3_test_memory(struct tg3 *tp)
{
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_570x[] = {
		{ 0x00000000, 0x00b50},
		{ 0x00002000, 0x1c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5705[] = {
		{ 0x00000100, 0x0000c},
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x01000},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0e000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5755[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x00800},
		{ 0x00008000, 0x02000},
		{ 0x00010000, 0x0c000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5906[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00400},
		{ 0x00006000, 0x00400},
		{ 0x00008000, 0x01000},
		{ 0x00010000, 0x01000},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_5717[] = {
		{ 0x00000200, 0x00008},
		{ 0x00010000, 0x0a000},
		{ 0x00020000, 0x13c00},
		{ 0xffffffff, 0x00000}
	}, mem_tbl_57765[] = {
		{ 0x00000200, 0x00008},
		{ 0x00004000, 0x00800},
		{ 0x00006000, 0x09800},
		{ 0x00010000, 0x0a000},
		{ 0xffffffff, 0x00000}
	};
	struct mem_entry *mem_tbl;
	int err = 0;
	int i;

	/* Select the region table; the most specific / newest chip
	 * families must be matched before the broader *_PLUS checks.
	 */
	if (tg3_flag(tp, 5717_PLUS))
		mem_tbl = mem_tbl_5717;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
		mem_tbl = mem_tbl_57765;
	else if (tg3_flag(tp, 5755_PLUS))
		mem_tbl = mem_tbl_5755;
	else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mem_tbl = mem_tbl_5906;
	else if (tg3_flag(tp, 5705_PLUS))
		mem_tbl = mem_tbl_5705;
	else
		mem_tbl = mem_tbl_570x;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
		if (err)
			break;
	}

	return err;
}
10959
/* Loopback test variants run by tg3_test_loopback(). */
#define TG3_MAC_LOOPBACK	0
#define TG3_PHY_LOOPBACK	1
#define TG3_TSO_LOOPBACK	2

/* Segment size used when exercising the TSO loopback path. */
#define TG3_TSO_MSS		500

/* Header lengths of the canned TSO frame below. */
#define TG3_TSO_IP_HDR_LEN	20
#define TG3_TSO_TCP_HDR_LEN	20
#define TG3_TSO_TCP_OPT_LEN	12

/* Canned frame contents for the TSO loopback test: everything after the
 * two MAC addresses, i.e. EtherType + IPv4 header + TCP header with a
 * 12-byte option block.  The IP tot_len field (bytes 4-5 of the IP
 * header, zero here) is patched at run time in tg3_run_loopback().
 */
static const u8 tg3_tso_header[] = {
0x08, 0x00,			/* EtherType: IPv4 (0x0800) */
0x45, 0x00, 0x00, 0x00,		/* IP: version 4, IHL 5, tot_len patched later */
0x00, 0x00, 0x40, 0x00,		/* IP: id 0, flags/frag 0x4000 (DF) */
0x40, 0x06, 0x00, 0x00,		/* IP: TTL 64, protocol 6 (TCP), csum 0 */
0x0a, 0x00, 0x00, 0x01,		/* IP: source 10.0.0.1 */
0x0a, 0x00, 0x00, 0x02,		/* IP: destination 10.0.0.2 */
0x0d, 0x00, 0xe0, 0x00,		/* TCP: source/destination ports */
0x00, 0x00, 0x01, 0x00,		/* TCP: sequence number */
0x00, 0x00, 0x02, 0x00,		/* TCP: acknowledgment number */
0x80, 0x10, 0x10, 0x00,		/* TCP: data offset 8 (32-byte hdr), ACK, window */
0x14, 0x09, 0x00, 0x00,		/* TCP: checksum, urgent pointer */
0x01, 0x01, 0x08, 0x0a,		/* TCP opts: NOP, NOP, timestamp (kind 8, len 10) */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp value (filler) */
0x11, 0x11, 0x11, 0x11,		/* TCP opts: timestamp echo reply (filler) */
};
10986
/* tg3_run_loopback - transmit one test frame and verify it loops back.
 *
 * @tp:            device state
 * @pktsz:         total transmit frame length in bytes
 * @loopback_mode: TG3_MAC_LOOPBACK, TG3_PHY_LOOPBACK or TG3_TSO_LOOPBACK
 *
 * Configures the MAC (and, for PHY/TSO modes, the PHY) for internal
 * loopback, builds a pattern-filled frame, posts it on the tx ring and
 * polls the status block until the frame has been consumed and the
 * expected number of rx packets has arrived.  The received payload is
 * then compared byte-for-byte against the transmitted pattern.
 *
 * Returns 0 on success, -ENOMEM on skb allocation failure, -EIO on any
 * mapping, timeout or data-mismatch error.  Must be called with the
 * device quiesced (called from the ethtool offline self-test path).
 */
static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, int loopback_mode)
{
	u32 mac_mode, rx_start_idx, rx_idx, tx_idx, opaque_key;
	u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
	struct sk_buff *skb, *rx_skb;
	u8 *tx_data;
	dma_addr_t map;
	int num_pkts, tx_len, rx_len, i, err;
	struct tg3_rx_buffer_desc *desc;
	struct tg3_napi *tnapi, *rnapi;
	struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;

	/* Pick the napi contexts that carry tx completions and rx packets;
	 * with RSS/TSS enabled, queue 0 is reserved and queue 1 is used.
	 */
	tnapi = &tp->napi[0];
	rnapi = &tp->napi[0];
	if (tp->irq_cnt > 1) {
		if (tg3_flag(tp, ENABLE_RSS))
			rnapi = &tp->napi[1];
		if (tg3_flag(tp, ENABLE_TSS))
			tnapi = &tp->napi[1];
	}
	coal_now = tnapi->coal_now | rnapi->coal_now;

	if (loopback_mode == TG3_MAC_LOOPBACK) {
		/* HW errata - mac loopback fails in some cases on 5780.
		 * Normal traffic and PHY loopback are not affected by
		 * errata.  Also, the MAC loopback test is deprecated for
		 * all newer ASIC revisions.
		 */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
		    tg3_flag(tp, CPMU_PRESENT))
			return 0;

		/* Route tx back into rx inside the MAC. */
		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		mac_mode |= MAC_MODE_PORT_INT_LPBACK;
		if (!tg3_flag(tp, 5705_PLUS))
			mac_mode |= MAC_MODE_LINK_POLARITY;
		if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;
		tw32(MAC_MODE, mac_mode);
	} else {
		/* PHY/TSO loopback: force the PHY into loopback at full
		 * duplex (100 Mb/s on FET-class PHYs, 1000 Mb/s otherwise).
		 */
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_phy_fet_toggle_apd(tp, false);
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED100;
		} else
			val = BMCR_LOOPBACK | BMCR_FULLDPLX | BMCR_SPEED1000;

		tg3_phy_toggle_automdix(tp, 0);

		tg3_writephy(tp, MII_BMCR, val);
		udelay(40);

		mac_mode = tp->mac_mode &
			   ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
		if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
			tg3_writephy(tp, MII_TG3_FET_PTEST,
				     MII_TG3_FET_PTEST_FRC_TX_LINK |
				     MII_TG3_FET_PTEST_FRC_TX_LOCK);
			/* The write needs to be flushed for the AC131 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
				tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
			mac_mode |= MAC_MODE_PORT_MODE_MII;
		} else
			mac_mode |= MAC_MODE_PORT_MODE_GMII;

		/* reset to prevent losing 1st rx packet intermittently */
		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
			tw32_f(MAC_RX_MODE, RX_MODE_RESET);
			udelay(10);
			tw32_f(MAC_RX_MODE, tp->rx_mode);
		}
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
			u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
			if (masked_phy_id == TG3_PHY_ID_BCM5401)
				mac_mode &= ~MAC_MODE_LINK_POLARITY;
			else if (masked_phy_id == TG3_PHY_ID_BCM5411)
				mac_mode |= MAC_MODE_LINK_POLARITY;
			tg3_writephy(tp, MII_TG3_EXT_CTRL,
				     MII_TG3_EXT_CTRL_LNK3_LED_MODE);
		}
		tw32(MAC_MODE, mac_mode);

		/* Wait for link */
		for (i = 0; i < 100; i++) {
			if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
				break;
			mdelay(1);
		}
	}

	err = -EIO;

	/* Build the test frame: dest MAC = our own address, zeroed source. */
	tx_len = pktsz;
	skb = netdev_alloc_skb(tp->dev, tx_len);
	if (!skb)
		return -ENOMEM;

	tx_data = skb_put(skb, tx_len);
	memcpy(tx_data, tp->dev->dev_addr, 6);
	memset(tx_data + 6, 0x0, 8);

	tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);

	if (loopback_mode == TG3_TSO_LOOPBACK) {
		struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];

		u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
			      TG3_TSO_TCP_OPT_LEN;

		/* Insert the canned IP+TCP headers after the MAC addresses. */
		memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
		       sizeof(tg3_tso_header));
		mss = TG3_TSO_MSS;

		/* The hardware will segment the payload into this many
		 * MSS-sized packets; we expect that many rx completions.
		 */
		val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
		num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);

		/* Set the total length field in the IP header */
		iph->tot_len = htons((u16)(mss + hdr_len));

		base_flags = (TXD_FLAG_CPU_PRE_DMA |
			      TXD_FLAG_CPU_POST_DMA);

		if (tg3_flag(tp, HW_TSO_1) ||
		    tg3_flag(tp, HW_TSO_2) ||
		    tg3_flag(tp, HW_TSO_3)) {
			/* HW TSO engines expect a zeroed TCP checksum. */
			struct tcphdr *th;
			val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
			th = (struct tcphdr *)&tx_data[val];
			th->check = 0;
		} else
			base_flags |= TXD_FLAG_TCPUDP_CSUM;

		/* Encode the header length into mss/base_flags in the
		 * format each TSO hardware generation expects.
		 */
		if (tg3_flag(tp, HW_TSO_3)) {
			mss |= (hdr_len & 0xc) << 12;
			if (hdr_len & 0x10)
				base_flags |= 0x00000010;
			base_flags |= (hdr_len & 0x3e0) << 5;
		} else if (tg3_flag(tp, HW_TSO_2))
			mss |= hdr_len << 9;
		else if (tg3_flag(tp, HW_TSO_1) ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
			mss |= (TG3_TSO_TCP_OPT_LEN << 9);
		} else {
			base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
		}

		data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
	} else {
		num_pkts = 1;
		data_off = ETH_HLEN;
	}

	/* Fill the payload with a predictable byte pattern. */
	for (i = data_off; i < tx_len; i++)
		tx_data[i] = (u8) (i & 0xff);

	map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(tp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force an immediate status-block update so rx_start_idx is fresh. */
	tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
	       rnapi->coal_now);

	udelay(10);

	rx_start_idx = rnapi->hw_status->idx[0].rx_producer;

	/* Post the frame and kick the tx mailbox. */
	tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len,
		    base_flags, (mss << 1) | 1);

	tnapi->tx_prod++;

	tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
	tr32_mailbox(tnapi->prodmbox);

	udelay(10);

	/* 350 usec to allow enough time on some 10/100 Mbps devices.  */
	for (i = 0; i < 35; i++) {
		tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
		       coal_now);

		udelay(10);

		tx_idx = tnapi->hw_status->idx[0].tx_consumer;
		rx_idx = rnapi->hw_status->idx[0].rx_producer;
		if ((tx_idx == tnapi->tx_prod) &&
		    (rx_idx == (rx_start_idx + num_pkts)))
			break;
	}

	pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Timed out waiting for tx completion or rx arrival? */
	if (tx_idx != tnapi->tx_prod)
		goto out;

	if (rx_idx != rx_start_idx + num_pkts)
		goto out;

	/* Walk every rx completion and verify length, ring and payload. */
	val = data_off;
	while (rx_idx != rx_start_idx) {
		desc = &rnapi->rx_rcb[rx_start_idx++];
		desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
		opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;

		if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
		    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
			goto out;

		rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
			 - ETH_FCS_LEN;

		if (loopback_mode != TG3_TSO_LOOPBACK) {
			if (rx_len != tx_len)
				goto out;

			/* The frame must land in the ring matching its size. */
			if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
				if (opaque_key != RXD_OPAQUE_RING_STD)
					goto out;
			} else {
				if (opaque_key != RXD_OPAQUE_RING_JUMBO)
					goto out;
			}
		} else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
			   (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
			    >> RXD_TCPCSUM_SHIFT != 0xffff) {
			goto out;
		}

		if (opaque_key == RXD_OPAQUE_RING_STD) {
			rx_skb = tpr->rx_std_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
					     mapping);
		} else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
			rx_skb = tpr->rx_jmb_buffers[desc_idx].skb;
			map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
					     mapping);
		} else
			goto out;

		pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
					    PCI_DMA_FROMDEVICE);

		/* val carries the expected pattern across TSO segments. */
		for (i = data_off; i < rx_len; i++, val++) {
			if (*(rx_skb->data + i) != (u8) (val & 0xff))
				goto out;
		}
	}

	err = 0;

	/* tg3_free_rings will unmap and free the rx_skb */
out:
	return err;
}
11246
/* Per-size failure bits for one loopback group (standard, jumbo, TSO)... */
#define TG3_STD_LOOPBACK_FAILED		1
#define TG3_JMB_LOOPBACK_FAILED		2
#define TG3_TSO_LOOPBACK_FAILED		4

/* ...shifted into the MAC or PHY nibble of the result word. */
#define TG3_MAC_LOOPBACK_SHIFT		0
#define TG3_PHY_LOOPBACK_SHIFT		4
/* All failure bits of both nibbles set: blanket "everything failed". */
#define TG3_LOOPBACK_FAILED		0x00000077
11254
/* tg3_test_loopback - run the MAC/PHY/TSO loopback self-tests.
 *
 * Resets the hardware, temporarily disables features that interfere
 * with loopback (EEE advertisement, gphy autopowerdown, CPMU
 * link-based power management), then runs tg3_run_loopback() for each
 * applicable combination of loopback mode and frame size.
 *
 * Returns 0 on success or a bitmask of TG3_*_LOOPBACK_FAILED bits
 * shifted by TG3_MAC/PHY_LOOPBACK_SHIFT.  Caller holds tp->lock with
 * the netif stopped (ethtool offline self-test path).
 */
static int tg3_test_loopback(struct tg3 *tp)
{
	int err = 0;
	u32 eee_cap, cpmuctrl = 0;

	if (!netif_running(tp->dev))
		return TG3_LOOPBACK_FAILED;

	/* Mask EEE capability for the duration of the test; restored
	 * unconditionally at "done".
	 */
	eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
	tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;

	err = tg3_reset_hw(tp, 1);
	if (err) {
		err = TG3_LOOPBACK_FAILED;
		goto done;
	}

	if (tg3_flag(tp, ENABLE_RSS)) {
		int i;

		/* Reroute all rx packets to the 1st queue */
		for (i = MAC_RSS_INDIR_TBL_0;
		     i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
			tw32(i, 0x0);
	}

	/* Turn off gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, false);

	if (tg3_flag(tp, CPMU_PRESENT)) {
		int i;
		u32 status;

		/* Request the CPMU hardware mutex before touching
		 * CPMU_CTRL; the loop below always runs at least once,
		 * so status is always assigned before the check.
		 */
		tw32(TG3_CPMU_MUTEX_REQ, CPMU_MUTEX_REQ_DRIVER);

		/* Wait for up to 40 microseconds to acquire lock. */
		for (i = 0; i < 4; i++) {
			status = tr32(TG3_CPMU_MUTEX_GNT);
			if (status == CPMU_MUTEX_GNT_DRIVER)
				break;
			udelay(10);
		}

		if (status != CPMU_MUTEX_GNT_DRIVER) {
			err = TG3_LOOPBACK_FAILED;
			goto done;
		}

		/* Turn off link-based power management. */
		cpmuctrl = tr32(TG3_CPMU_CTRL);
		tw32(TG3_CPMU_CTRL,
		     cpmuctrl & ~(CPMU_CTRL_LINK_SPEED_MODE |
				  CPMU_CTRL_LINK_AWARE_MODE));
	}

	/* MAC loopback: standard frame, plus jumbo when enabled.
	 * tg3_run_loopback() itself skips MAC loopback on CPMU parts.
	 */
	if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_MAC_LOOPBACK))
		err |= TG3_STD_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
	    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_MAC_LOOPBACK))
		err |= TG3_JMB_LOOPBACK_FAILED << TG3_MAC_LOOPBACK_SHIFT;

	/* Restore CPMU power management before the PHY tests. */
	if (tg3_flag(tp, CPMU_PRESENT)) {
		tw32(TG3_CPMU_CTRL, cpmuctrl);

		/* Release the mutex */
		tw32(TG3_CPMU_MUTEX_GNT, CPMU_MUTEX_GNT_DRIVER);
	}

	/* PHY loopback only applies when we drive the PHY ourselves. */
	if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
	    !tg3_flag(tp, USE_PHYLIB)) {
		if (tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_PHY_LOOPBACK))
			err |= TG3_STD_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, TSO_CAPABLE) &&
		    tg3_run_loopback(tp, ETH_FRAME_LEN, TG3_TSO_LOOPBACK))
			err |= TG3_TSO_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
		if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
		    tg3_run_loopback(tp, 9000 + ETH_HLEN, TG3_PHY_LOOPBACK))
			err |= TG3_JMB_LOOPBACK_FAILED <<
			       TG3_PHY_LOOPBACK_SHIFT;
	}

	/* Re-enable gphy autopowerdown. */
	if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
		tg3_phy_toggle_apd(tp, true);

done:
	tp->phy_flags |= eee_cap;

	return err;
}
11349
/* tg3_self_test - ethtool .self_test handler.
 *
 * Fills data[0..5] with per-test results (0 = pass, non-zero = fail):
 * nvram, link, registers, memory, loopback, interrupt.  Offline tests
 * halt the chip, run with tp->lock held, then restore the previous
 * operating state.  Sets ETH_TEST_FL_FAILED in etest->flags on any
 * failure.
 */
static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
			  u64 *data)
{
	struct tg3 *tp = netdev_priv(dev);

	/* Wake a sleeping chip so registers/NVRAM are accessible. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_up(tp);

	memset(data, 0, sizeof(u64) * TG3_NUM_TEST);

	/* Online tests: safe while traffic may be flowing. */
	if (tg3_test_nvram(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[0] = 1;
	}
	if (tg3_test_link(tp) != 0) {
		etest->flags |= ETH_TEST_FL_FAILED;
		data[1] = 1;
	}
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int err, err2 = 0, irq_sync = 0;

		/* Quiesce the interface before disruptive tests. */
		if (netif_running(dev)) {
			tg3_phy_stop(tp);
			tg3_netif_stop(tp);
			irq_sync = 1;
		}

		tg3_full_lock(tp, irq_sync);

		/* Halt the chip and its on-board CPUs; take the NVRAM
		 * lock around the CPU halts so firmware isn't mid-access.
		 */
		tg3_halt(tp, RESET_KIND_SUSPEND, 1);
		err = tg3_nvram_lock(tp);
		tg3_halt_cpu(tp, RX_CPU_BASE);
		if (!tg3_flag(tp, 5705_PLUS))
			tg3_halt_cpu(tp, TX_CPU_BASE);
		if (!err)
			tg3_nvram_unlock(tp);

		if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
			tg3_phy_reset(tp);

		if (tg3_test_registers(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[2] = 1;
		}
		if (tg3_test_memory(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[3] = 1;
		}
		/* Loopback returns a failure bitmask, recorded verbatim. */
		if ((data[4] = tg3_test_loopback(tp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Interrupt test needs the lock dropped. */
		tg3_full_unlock(tp);

		if (tg3_test_interrupt(tp) != 0) {
			etest->flags |= ETH_TEST_FL_FAILED;
			data[5] = 1;
		}

		tg3_full_lock(tp, 0);

		/* Restore normal operation. */
		tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
		if (netif_running(dev)) {
			tg3_flag_set(tp, INIT_COMPLETE);
			err2 = tg3_restart_hw(tp, 1);
			if (!err2)
				tg3_netif_start(tp);
		}

		tg3_full_unlock(tp);

		if (irq_sync && !err2)
			tg3_phy_start(tp);
	}
	/* Put the chip back to sleep if it was in low-power on entry. */
	if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
		tg3_power_down(tp);

}
11427
/* tg3_ioctl - net_device .ndo_do_ioctl handler for MII ioctls.
 *
 * Delegates to phylib when it manages the PHY; otherwise services
 * SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG directly under tp->lock.
 * Returns 0/-errno; -EOPNOTSUPP for anything else (including MII
 * ioctls on SERDES parts, which have no MII PHY).
 */
static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	if (tg3_flag(tp, USE_PHYLIB)) {
		struct phy_device *phydev;
		if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
			return -EAGAIN;
		phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
		return phy_mii_ioctl(phydev, ifr, cmd);
	}

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = tp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		/* tp->lock serializes MDIO accesses. */
		spin_lock_bh(&tp->lock);
		err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&tp->lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
			break;			/* We have no PHY */

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&tp->lock);
		err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&tp->lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
11484
/* ethtool .get_coalesce: report the driver's cached coalescing params. */
static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
{
	struct tg3 *tp = netdev_priv(dev);

	memcpy(ec, &tp->coal, sizeof(*ec));
	return 0;
}
11492
11493 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
11494 {
11495         struct tg3 *tp = netdev_priv(dev);
11496         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
11497         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
11498
11499         if (!tg3_flag(tp, 5705_PLUS)) {
11500                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
11501                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
11502                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
11503                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
11504         }
11505
11506         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
11507             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
11508             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
11509             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
11510             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
11511             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
11512             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
11513             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
11514             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
11515             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
11516                 return -EINVAL;
11517
11518         /* No rx interrupts will be generated if both are zero */
11519         if ((ec->rx_coalesce_usecs == 0) &&
11520             (ec->rx_max_coalesced_frames == 0))
11521                 return -EINVAL;
11522
11523         /* No tx interrupts will be generated if both are zero */
11524         if ((ec->tx_coalesce_usecs == 0) &&
11525             (ec->tx_max_coalesced_frames == 0))
11526                 return -EINVAL;
11527
11528         /* Only copy relevant parameters, ignore all others. */
11529         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
11530         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
11531         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
11532         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
11533         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
11534         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
11535         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
11536         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
11537         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
11538
11539         if (netif_running(dev)) {
11540                 tg3_full_lock(tp, 0);
11541                 __tg3_set_coalesce(tp, &tp->coal);
11542                 tg3_full_unlock(tp);
11543         }
11544         return 0;
11545 }
11546
/* ethtool entry points; installed on the net_device at probe time. */
static const struct ethtool_ops tg3_ethtool_ops = {
	.get_settings		= tg3_get_settings,
	.set_settings		= tg3_set_settings,
	.get_drvinfo		= tg3_get_drvinfo,
	.get_regs_len		= tg3_get_regs_len,
	.get_regs		= tg3_get_regs,
	.get_wol		= tg3_get_wol,
	.set_wol		= tg3_set_wol,
	.get_msglevel		= tg3_get_msglevel,
	.set_msglevel		= tg3_set_msglevel,
	.nway_reset		= tg3_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= tg3_get_eeprom_len,
	.get_eeprom		= tg3_get_eeprom,
	.set_eeprom		= tg3_set_eeprom,
	.get_ringparam		= tg3_get_ringparam,
	.set_ringparam		= tg3_set_ringparam,
	.get_pauseparam		= tg3_get_pauseparam,
	.set_pauseparam		= tg3_set_pauseparam,
	.self_test		= tg3_self_test,
	.get_strings		= tg3_get_strings,
	.set_phys_id		= tg3_set_phys_id,
	.get_ethtool_stats	= tg3_get_ethtool_stats,
	.get_coalesce		= tg3_get_coalesce,
	.set_coalesce		= tg3_set_coalesce,
	.get_sset_count		= tg3_get_sset_count,
};
11574
11575 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
11576 {
11577         u32 cursize, val, magic;
11578
11579         tp->nvram_size = EEPROM_CHIP_SIZE;
11580
11581         if (tg3_nvram_read(tp, 0, &magic) != 0)
11582                 return;
11583
11584         if ((magic != TG3_EEPROM_MAGIC) &&
11585             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
11586             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
11587                 return;
11588
11589         /*
11590          * Size the chip by reading offsets at increasing powers of two.
11591          * When we encounter our validation signature, we know the addressing
11592          * has wrapped around, and thus have our chip size.
11593          */
11594         cursize = 0x10;
11595
11596         while (cursize < tp->nvram_size) {
11597                 if (tg3_nvram_read(tp, cursize, &val) != 0)
11598                         return;
11599
11600                 if (val == magic)
11601                         break;
11602
11603                 cursize <<= 1;
11604         }
11605
11606         tp->nvram_size = cursize;
11607 }
11608
/* tg3_get_nvram_size - determine the installed NVRAM capacity.
 *
 * Non-standard (selfboot) images are sized via tg3_get_eeprom_size().
 * For standard images, the size in KB is stored at offset 0xf2; if it
 * reads back as zero, fall back to the 512KB default.  Does nothing on
 * parts flagged NO_NVRAM or when the initial read fails.
 */
static void __devinit tg3_get_nvram_size(struct tg3 *tp)
{
	u32 val;

	if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
		return;

	/* Selfboot format */
	if (val != TG3_EEPROM_MAGIC) {
		tg3_get_eeprom_size(tp);
		return;
	}

	if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
		if (val != 0) {
			/* This is confusing.  We want to operate on the
			 * 16-bit value at offset 0xf2.  The tg3_nvram_read()
			 * call will read from NVRAM and byteswap the data
			 * according to the byteswapping settings for all
			 * other register accesses.  This ensures the data we
			 * want will always reside in the lower 16-bits.
			 * However, the data in NVRAM is in LE format, which
			 * means the data from the NVRAM read will always be
			 * opposite the endianness of the CPU.  The 16-bit
			 * byteswap then brings the data to CPU endianness.
			 */
			tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
			return;
		}
	}
	tp->nvram_size = TG3_NVRAM_SIZE_512KB;
}
11641
/* tg3_get_nvram_info - decode NVRAM_CFG1 into jedecnum/pagesize/flags.
 *
 * Sets the FLASH flag (or disables compat bypass for EEPROM parts),
 * then, on 5750/5780-class chips, maps the vendor field to the JEDEC
 * id, page size and NVRAM_BUFFERED flag.  Other chips get the Atmel
 * buffered-flash defaults.
 */
static void __devinit tg3_get_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);
	if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
		tg3_flag_set(tp, FLASH);
	} else {
		/* EEPROM interface: make sure compat bypass is off. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
	    tg3_flag(tp, 5780_CLASS)) {
		switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
		case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
			break;
		case FLASH_VENDOR_ATMEL_EEPROM:
			tp->nvram_jedecnum = JEDEC_ATMEL;
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_ST:
			tp->nvram_jedecnum = JEDEC_ST;
			tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
			tg3_flag_set(tp, NVRAM_BUFFERED);
			break;
		case FLASH_VENDOR_SAIFUN:
			tp->nvram_jedecnum = JEDEC_SAIFUN;
			tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
			break;
		case FLASH_VENDOR_SST_SMALL:
		case FLASH_VENDOR_SST_LARGE:
			tp->nvram_jedecnum = JEDEC_SST;
			tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
			break;
		}
		/* NOTE(review): unknown vendor codes fall through without
		 * setting jedecnum/pagesize — presumably unreachable on
		 * these ASICs; confirm before adding a default.
		 */
	} else {
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
		tg3_flag_set(tp, NVRAM_BUFFERED);
	}
}
11692
11693 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
11694 {
11695         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
11696         case FLASH_5752PAGE_SIZE_256:
11697                 tp->nvram_pagesize = 256;
11698                 break;
11699         case FLASH_5752PAGE_SIZE_512:
11700                 tp->nvram_pagesize = 512;
11701                 break;
11702         case FLASH_5752PAGE_SIZE_1K:
11703                 tp->nvram_pagesize = 1024;
11704                 break;
11705         case FLASH_5752PAGE_SIZE_2K:
11706                 tp->nvram_pagesize = 2048;
11707                 break;
11708         case FLASH_5752PAGE_SIZE_4K:
11709                 tp->nvram_pagesize = 4096;
11710                 break;
11711         case FLASH_5752PAGE_SIZE_264:
11712                 tp->nvram_pagesize = 264;
11713                 break;
11714         case FLASH_5752PAGE_SIZE_528:
11715                 tp->nvram_pagesize = 528;
11716                 break;
11717         }
11718 }
11719
/* NVRAM probe for the 5752: decode the vendor field of NVRAM_CFG1 and
 * set JEDEC id, buffering, flash mode, and page size accordingly.
 */
static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27))
		tg3_flag_set(tp, PROTECTED_NVRAM);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		break;
	}

	if (tg3_flag(tp, FLASH)) {
		tg3_nvram_get_pagesize(tp, nvcfg1);
	} else {
		/* For eeprom, set pagesize to maximum eeprom size */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Route EEPROM accesses through the normal state machine. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
	}
}
11760
/* NVRAM probe for the 5755: decode the vendor field of NVRAM_CFG1.
 * When the TPM protection bit is set, only part of the device is usable,
 * so the reported nvram_size is capped below the physical size.
 */
static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		/* Protected parts expose a reduced usable window. */
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		/* ST parts halve the usable size when protected. */
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}
11816
/* NVRAM probe for the 5787 (also used for 5784/5785): decode the vendor
 * field of NVRAM_CFG1 into JEDEC id, flash/buffered mode, and page size.
 */
static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		/* EEPROM: page size is the whole chip. */
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* Route EEPROM accesses through the normal state machine. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}
11854
/* NVRAM probe for the 5761: decode the vendor field of NVRAM_CFG1.
 * When the TPM protection bit is set, the usable size comes from the
 * NVRAM_ADDR_LOCKOUT register; otherwise it is derived from the part id.
 */
static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		/* Atmel parts here use linear addressing. */
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		/* Usable size is whatever the lockout register allows. */
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}
11929
11930 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
11931 {
11932         tp->nvram_jedecnum = JEDEC_ATMEL;
11933         tg3_flag_set(tp, NVRAM_BUFFERED);
11934         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
11935 }
11936
/* NVRAM probe for the 57780 (also used for 57765): decode vendor and
 * size from NVRAM_CFG1.  Unknown vendor codes mean no usable NVRAM.
 */
static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path: no page-size decode needed, return early. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch picks the size for the matched Atmel part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch picks the size for the matched ST part. */
		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the AT45DB address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12008
12009
/* NVRAM probe for the 5717 (also used for 5719): decode vendor and size
 * from NVRAM_CFG1.  Some parts leave nvram_size at 0 so the generic
 * size-detection path runs afterwards.  Unknown codes mean no NVRAM.
 */
static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		/* EEPROM path: no page-size decode needed, return early. */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the AT45DB address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12087
/* NVRAM probe for the 5720: decode the pin-strap/vendor field of
 * NVRAM_CFG1 into JEDEC id, flash mode, page size, and device size.
 * Unknown codes mean no usable NVRAM.
 */
static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		/* EEPROM path: route through the state machine and pick the
		 * chip size by density strap, then return early.
		 */
		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch picks the size for the matched Atmel part. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		/* Inner switch picks the size for the matched ST part. */
		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	/* Only 264/528-byte pages use the AT45DB address translation. */
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}
12199
/* Chips other than 5700/5701 use the NVRAM for fetching info. */
/* Reset the EEPROM state machine, enable serial-EEPROM access, then
 * dispatch to the per-ASIC NVRAM decode routine.  5700/5701 have no
 * NVRAM interface and fall back to direct EEPROM sizing.
 */
static void __devinit tg3_nvram_init(struct tg3 *tp)
{
	/* Reset the access state machine and program the clock period. */
	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		/* The NVRAM is shared with firmware; take the hardware
		 * arbitration lock before touching it.
		 */
		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		/* Zero means "unknown": probed below or auto-detected. */
		tp->nvram_size = 0;

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
			tg3_get_57780_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
			 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}
12265
/* Write @len bytes from @buf to the serial EEPROM at @offset, one dword
 * at a time through the GRC_EEPROM_ADDR/DATA registers.  @offset and
 * @len are dword aligned.  Returns 0 on success, -EBUSY if a word write
 * does not complete within the polling window (~1s).
 */
static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
				    u32 offset, u32 len, u8 *buf)
{
	int i, j, rc = 0;
	u32 val;

	for (i = 0; i < len; i += 4) {
		u32 addr;
		__be32 data;

		addr = offset + i;

		memcpy(&data, buf + i, 4);

		/*
		 * The SEEPROM interface expects the data to always be opposite
		 * the native endian format.  We accomplish this by reversing
		 * all the operations that would have been performed on the
		 * data from a call to tg3_nvram_read_be32().
		 */
		tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));

		/* Ack any stale completion before starting the write. */
		val = tr32(GRC_EEPROM_ADDR);
		tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);

		val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
			EEPROM_ADDR_READ);
		tw32(GRC_EEPROM_ADDR, val |
			(0 << EEPROM_ADDR_DEVID_SHIFT) |
			(addr & EEPROM_ADDR_ADDR_MASK) |
			EEPROM_ADDR_START |
			EEPROM_ADDR_WRITE);

		/* Poll for completion, up to 1000 x 1ms. */
		for (j = 0; j < 1000; j++) {
			val = tr32(GRC_EEPROM_ADDR);

			if (val & EEPROM_ADDR_COMPLETE)
				break;
			msleep(1);
		}
		if (!(val & EEPROM_ADDR_COMPLETE)) {
			rc = -EBUSY;
			break;
		}
	}

	return rc;
}
12314
12315 /* offset and length are dword aligned */
12316 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
12317                 u8 *buf)
12318 {
12319         int ret = 0;
12320         u32 pagesize = tp->nvram_pagesize;
12321         u32 pagemask = pagesize - 1;
12322         u32 nvram_cmd;
12323         u8 *tmp;
12324
12325         tmp = kmalloc(pagesize, GFP_KERNEL);
12326         if (tmp == NULL)
12327                 return -ENOMEM;
12328
12329         while (len) {
12330                 int j;
12331                 u32 phy_addr, page_off, size;
12332
12333                 phy_addr = offset & ~pagemask;
12334
12335                 for (j = 0; j < pagesize; j += 4) {
12336                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
12337                                                   (__be32 *) (tmp + j));
12338                         if (ret)
12339                                 break;
12340                 }
12341                 if (ret)
12342                         break;
12343
12344                 page_off = offset & pagemask;
12345                 size = pagesize;
12346                 if (len < size)
12347                         size = len;
12348
12349                 len -= size;
12350
12351                 memcpy(tmp + page_off, buf, size);
12352
12353                 offset = offset + (pagesize - page_off);
12354
12355                 tg3_enable_nvram_access(tp);
12356
12357                 /*
12358                  * Before we can erase the flash page, we need
12359                  * to issue a special "write enable" command.
12360                  */
12361                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12362
12363                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12364                         break;
12365
12366                 /* Erase the target page */
12367                 tw32(NVRAM_ADDR, phy_addr);
12368
12369                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
12370                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
12371
12372                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12373                         break;
12374
12375                 /* Issue another write enable to start the write. */
12376                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12377
12378                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
12379                         break;
12380
12381                 for (j = 0; j < pagesize; j += 4) {
12382                         __be32 data;
12383
12384                         data = *((__be32 *) (tmp + j));
12385
12386                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
12387
12388                         tw32(NVRAM_ADDR, phy_addr + j);
12389
12390                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
12391                                 NVRAM_CMD_WR;
12392
12393                         if (j == 0)
12394                                 nvram_cmd |= NVRAM_CMD_FIRST;
12395                         else if (j == (pagesize - 4))
12396                                 nvram_cmd |= NVRAM_CMD_LAST;
12397
12398                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12399                                 break;
12400                 }
12401                 if (ret)
12402                         break;
12403         }
12404
12405         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
12406         tg3_nvram_exec_cmd(tp, nvram_cmd);
12407
12408         kfree(tmp);
12409
12410         return ret;
12411 }
12412
12413 /* offset and length are dword aligned */
12414 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
12415                 u8 *buf)
12416 {
12417         int i, ret = 0;
12418
12419         for (i = 0; i < len; i += 4, offset += 4) {
12420                 u32 page_off, phy_addr, nvram_cmd;
12421                 __be32 data;
12422
12423                 memcpy(&data, buf + i, 4);
12424                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
12425
12426                 page_off = offset % tp->nvram_pagesize;
12427
12428                 phy_addr = tg3_nvram_phys_addr(tp, offset);
12429
12430                 tw32(NVRAM_ADDR, phy_addr);
12431
12432                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
12433
12434                 if (page_off == 0 || i == 0)
12435                         nvram_cmd |= NVRAM_CMD_FIRST;
12436                 if (page_off == (tp->nvram_pagesize - 4))
12437                         nvram_cmd |= NVRAM_CMD_LAST;
12438
12439                 if (i == (len - 4))
12440                         nvram_cmd |= NVRAM_CMD_LAST;
12441
12442                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
12443                     !tg3_flag(tp, 5755_PLUS) &&
12444                     (tp->nvram_jedecnum == JEDEC_ST) &&
12445                     (nvram_cmd & NVRAM_CMD_FIRST)) {
12446
12447                         if ((ret = tg3_nvram_exec_cmd(tp,
12448                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
12449                                 NVRAM_CMD_DONE)))
12450
12451                                 break;
12452                 }
12453                 if (!tg3_flag(tp, FLASH)) {
12454                         /* We always do complete word writes to eeprom. */
12455                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
12456                 }
12457
12458                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
12459                         break;
12460         }
12461         return ret;
12462 }
12463
12464 /* offset and length are dword aligned */
12465 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
12466 {
12467         int ret;
12468
12469         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12470                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
12471                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
12472                 udelay(40);
12473         }
12474
12475         if (!tg3_flag(tp, NVRAM)) {
12476                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
12477         } else {
12478                 u32 grc_mode;
12479
12480                 ret = tg3_nvram_lock(tp);
12481                 if (ret)
12482                         return ret;
12483
12484                 tg3_enable_nvram_access(tp);
12485                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
12486                         tw32(NVRAM_WRITE1, 0x406);
12487
12488                 grc_mode = tr32(GRC_MODE);
12489                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
12490
12491                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
12492                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
12493                                 buf);
12494                 } else {
12495                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
12496                                 buf);
12497                 }
12498
12499                 grc_mode = tr32(GRC_MODE);
12500                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
12501
12502                 tg3_disable_nvram_access(tp);
12503                 tg3_nvram_unlock(tp);
12504         }
12505
12506         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
12507                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
12508                 udelay(40);
12509         }
12510
12511         return ret;
12512 }
12513
/* Maps a PCI subsystem (vendor, device) pair to the PHY known to be on
 * that board; consulted by tg3_phy_probe() when neither MDIO nor NVRAM
 * yields a usable PHY ID.
 */
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;	/* TG3_PHY_ID_* value; 0 triggers serdes handling
			 * in tg3_phy_probe() */
};
12518
/* Hard-coded board -> PHY lookup table, searched by tg3_lookup_by_subsys()
 * against the PCI subsystem IDs as a last-resort PHY identification.
 */
static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};
12582
12583 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
12584 {
12585         int i;
12586
12587         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
12588                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
12589                      tp->pdev->subsystem_vendor) &&
12590                     (subsys_id_to_phy_id[i].subsys_devid ==
12591                      tp->pdev->subsystem_device))
12592                         return &subsys_id_to_phy_id[i];
12593         }
12594         return NULL;
12595 }
12596
/* Pull the hardware configuration the bootcode left in NIC SRAM (PHY id,
 * LED mode, WOL/ASF/APE capability bits, serdes settings, ...) and fold
 * it into tp->phy_id, tp->led_ctrl, tp->phy_flags and the tg3 flag bits.
 * Also forces the device into D0 and enables the memory arbiter so the
 * SRAM reads below can succeed.
 */
static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;
	u16 pmcsr;

	/* On some early chips the SRAM cannot be accessed in D3hot state,
	 * so need make sure we're in D0.
	 */
	pci_read_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, &pmcsr);
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
	pci_write_config_word(tp->pdev, tp->pm_cap + PCI_PM_CTRL, pmcsr);
	msleep(1);

	/* Make sure register accesses (indirect or otherwise)
	 * will function correctly.
	 */
	pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
			       tp->misc_host_ctrl);

	/* The memory arbiter has to be enabled in order for SRAM accesses
	 * to succeed.  Normally on powerup the tg3 chip firmware will make
	 * sure it is enabled, but other entities such as system netboot
	 * code might disable it.
	 */
	val = tr32(MEMARB_MODE);
	tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device and WOL capable by default.  */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	/* 5906 keeps its config in the VCPU shadow register rather than
	 * SRAM; handle it separately and skip the SRAM parsing below.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	/* Only trust the SRAM contents if the bootcode signature is there. */
	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		/* Repack the SRAM-encoded PHY id into the driver's
		 * TG3_PHY_ID_* layout.
		 */
		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is
			 * read on some older 5700/5701 bootcode.
			 */
			if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5700 ||
			    GET_ASIC_REV(tp->pci_chip_rev_id) ==
			    ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
			    tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		/* Board-specific LED overrides. */
		if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
		     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			/* NOTE(review): specific Arima boards are exempted
			 * from write protection -- presumably a quirk for
			 * those subsystem IDs; confirm before touching.
			 */
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* serdes signal pre-emphasis in register 0x590 set by */
		/* bootcode if bit 18 is set */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
		      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS) &&
		    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
		    !tg3_flag(tp, 57765_PLUS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
				tg3_flag_set(tp, ASPM_WORKAROUND);
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	/* Publish the final WOL capability/enable state to the PM core. */
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}
12819
12820 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
12821 {
12822         int i;
12823         u32 val;
12824
12825         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
12826         tw32(OTP_CTRL, cmd);
12827
12828         /* Wait for up to 1 ms for command to execute. */
12829         for (i = 0; i < 100; i++) {
12830                 val = tr32(OTP_STATUS);
12831                 if (val & OTP_STATUS_CMD_DONE)
12832                         break;
12833                 udelay(10);
12834         }
12835
12836         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
12837 }
12838
/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 * Returns 0 if any OTP command fails, so 0 doubles as the "no valid
 * config" sentinel for callers.
 */
static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	/* Route OTP accesses through the GRC register interface. */
	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	/* Low half of the first word becomes the high half of the result;
	 * high half of the second word becomes the low half.
	 */
	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}
12868
12869 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
12870 {
12871         u32 adv = ADVERTISED_Autoneg |
12872                   ADVERTISED_Pause;
12873
12874         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
12875                 adv |= ADVERTISED_1000baseT_Half |
12876                        ADVERTISED_1000baseT_Full;
12877
12878         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
12879                 adv |= ADVERTISED_100baseT_Half |
12880                        ADVERTISED_100baseT_Full |
12881                        ADVERTISED_10baseT_Half |
12882                        ADVERTISED_10baseT_Full |
12883                        ADVERTISED_TP;
12884         else
12885                 adv |= ADVERTISED_FIBRE;
12886
12887         tp->link_config.advertising = adv;
12888         tp->link_config.speed = SPEED_INVALID;
12889         tp->link_config.duplex = DUPLEX_INVALID;
12890         tp->link_config.autoneg = AUTONEG_ENABLE;
12891         tp->link_config.active_speed = SPEED_INVALID;
12892         tp->link_config.active_duplex = DUPLEX_INVALID;
12893         tp->link_config.orig_speed = SPEED_INVALID;
12894         tp->link_config.orig_duplex = DUPLEX_INVALID;
12895         tp->link_config.orig_autoneg = AUTONEG_INVALID;
12896 }
12897
/* Identify the attached PHY and set up default link state.  Tries, in
 * order: the MDIO PHY ID registers (unless ASF/APE firmware owns the
 * PHY), the ID recorded by tg3_get_eeprom_hw_cfg(), and finally the
 * hard-coded subsystem-ID table.  When safe (copper, no management
 * firmware), also resets the PHY and restarts autonegotiation.
 * Returns 0 on success or a negative errno.
 */
static int __devinit tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* flow control autonegotiation is default behavior */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to either the hard-coded table based PHY_ID and failing
		 * that the value found in the eeprom area.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		/* Repack the two MII ID registers into the driver's
		 * TG3_PHY_ID_* layout (same packing as the NVRAM path).
		 */
		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (!p)
				return -ENODEV;

			tp->phy_id = p->phy_id;
			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	/* Mark EEE-capable copper devices (5718/5719/5720/57765, with
	 * early-revision exceptions).
	 */
	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
	     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
	     (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
	     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
	      tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, mask;

		/* BMSR is read twice -- presumably so the second read
		 * reflects current link state rather than an MII latched
		 * status; skip the reset if link is already up.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		/* If not everything is being advertised, reprogram the
		 * advertisement and restart autonegotiation.
		 */
		mask = (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
			ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
			ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full);
		if (!tg3_copper_is_advertising_all(tp, mask)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		/* NOTE(review): the DSP init is intentionally run a second
		 * time here -- presumably a settle/retry quirk of the 5401;
		 * confirm before collapsing to a single call.
		 */
		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}
13011
/* Parse the device's VPD (Vital Product Data) block to fill in
 * tp->board_part_number and, on Dell boards, prepend the firmware
 * version string to tp->fw_ver.  Falls back to a part number derived
 * from the PCI device ID when VPD is absent or unparsable.
 */
static void __devinit tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp);
	if (!vpd_data)
		goto out_no_vpd;

	/* Locate the read-only VPD section and bound all further
	 * accesses to it.
	 */
	i = pci_vpd_find_tag(vpd_data, 0, TG3_NVM_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > TG3_NVM_VPD_LEN)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		/* Only Dell boards (ASCII "1028" == PCI_VENDOR_ID_DELL)
		 * carry the firmware version in the V0 keyword.
		 */
		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		memcpy(tp->fw_ver, &vpd_data[j], len);
		strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > TG3_NVM_VPD_LEN)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	/* No usable VPD: synthesize a part number from the PCI IDs. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}
13120
13121 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13122 {
13123         u32 val;
13124
13125         if (tg3_nvram_read(tp, offset, &val) ||
13126             (val & 0xfc000000) != 0x0c000000 ||
13127             tg3_nvram_read(tp, offset + 4, &val) ||
13128             val != 0)
13129                 return 0;
13130
13131         return 1;
13132 }
13133
/* Append the bootcode firmware version from NVRAM to tp->fw_ver.
 * Newer images carry a 16-byte version string located via a pointer in
 * the image header; older images only provide a major/minor word at
 * TG3_NVM_PTREV_BCVER, formatted here as "vM.mm".
 */
static void __devinit tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	/* Word 0xc gives the image offset; word 0x4 (start) is the base
	 * subtracted below when translating the version pointer.
	 */
	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	/* 0x0c000000 in the top six bits followed by a zero second word
	 * marks the newer image format with an embedded version string.
	 */
	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	/* Version text is appended after whatever is already in fw_ver. */
	dst_off = strlen(tp->fw_ver);

	if (newver) {
		/* Need room for the full 16-byte string; the pointer to
		 * it sits at image offset 8.
		 */
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}
13185
13186 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13187 {
13188         u32 val, major, minor;
13189
13190         /* Use native endian representation */
13191         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13192                 return;
13193
13194         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13195                 TG3_NVM_HWSB_CFG1_MAJSFT;
13196         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13197                 TG3_NVM_HWSB_CFG1_MINSFT;
13198
13199         snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
13200 }
13201
/* Append the self-boot ("sb") firmware version to tp->fw_ver.  @val is
 * the magic word already read from NVRAM offset 0 by the caller; its
 * format and revision bits select where the edition header lives.
 */
static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
{
	u32 offset, major, minor, build;

	strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);

	/* Only format-1 images carry a version we know how to decode. */
	if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
		return;

	/* Each format-1 revision stores its edition header at a
	 * different offset.
	 */
	switch (val & TG3_EEPROM_SB_REVISION_MASK) {
	case TG3_EEPROM_SB_REVISION_0:
		offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_2:
		offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_3:
		offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_4:
		offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_5:
		offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
		break;
	case TG3_EEPROM_SB_REVISION_6:
		offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
		break;
	default:
		/* Unknown revision - leave just "sb" in fw_ver. */
		return;
	}

	if (tg3_nvram_read(tp, offset, &val))
		return;

	build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
		TG3_EEPROM_SB_EDH_BLD_SHFT;
	major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
		TG3_EEPROM_SB_EDH_MAJ_SHFT;
	minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;

	/* Sanity limits: minor is printed with two digits and build
	 * maps to a single letter 'a'..'z' below.
	 */
	if (minor > 99 || build > 26)
		return;

	offset = strlen(tp->fw_ver);
	snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
		 " v%d.%02d", major, minor);

	/* Non-zero builds append a letter: 1 -> 'a', 2 -> 'b', ... */
	if (build > 0) {
		offset = strlen(tp->fw_ver);
		if (offset < TG3_VER_SIZE - 1)
			tp->fw_ver[offset] = 'a' + build - 1;
	}
}
13256
13257 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13258 {
13259         u32 val, offset, start;
13260         int i, vlen;
13261
13262         for (offset = TG3_NVM_DIR_START;
13263              offset < TG3_NVM_DIR_END;
13264              offset += TG3_NVM_DIRENT_SIZE) {
13265                 if (tg3_nvram_read(tp, offset, &val))
13266                         return;
13267
13268                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13269                         break;
13270         }
13271
13272         if (offset == TG3_NVM_DIR_END)
13273                 return;
13274
13275         if (!tg3_flag(tp, 5705_PLUS))
13276                 start = 0x08000000;
13277         else if (tg3_nvram_read(tp, offset - 4, &start))
13278                 return;
13279
13280         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13281             !tg3_fw_img_is_valid(tp, offset) ||
13282             tg3_nvram_read(tp, offset + 8, &val))
13283                 return;
13284
13285         offset += val - start;
13286
13287         vlen = strlen(tp->fw_ver);
13288
13289         tp->fw_ver[vlen++] = ',';
13290         tp->fw_ver[vlen++] = ' ';
13291
13292         for (i = 0; i < 4; i++) {
13293                 __be32 v;
13294                 if (tg3_nvram_read_be32(tp, offset, &v))
13295                         return;
13296
13297                 offset += sizeof(v);
13298
13299                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13300                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13301                         break;
13302                 }
13303
13304                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13305                 vlen += sizeof(v);
13306         }
13307 }
13308
13309 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13310 {
13311         int vlen;
13312         u32 apedata;
13313         char *fwtype;
13314
13315         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13316                 return;
13317
13318         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13319         if (apedata != APE_SEG_SIG_MAGIC)
13320                 return;
13321
13322         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13323         if (!(apedata & APE_FW_STATUS_READY))
13324                 return;
13325
13326         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13327
13328         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13329                 tg3_flag_set(tp, APE_HAS_NCSI);
13330                 fwtype = "NCSI";
13331         } else {
13332                 fwtype = "DASH";
13333         }
13334
13335         vlen = strlen(tp->fw_ver);
13336
13337         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13338                  fwtype,
13339                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13340                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13341                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13342                  (apedata & APE_FW_VERSION_BLDMSK));
13343 }
13344
13345 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13346 {
13347         u32 val;
13348         bool vpd_vers = false;
13349
13350         if (tp->fw_ver[0] != 0)
13351                 vpd_vers = true;
13352
13353         if (tg3_flag(tp, NO_NVRAM)) {
13354                 strcat(tp->fw_ver, "sb");
13355                 return;
13356         }
13357
13358         if (tg3_nvram_read(tp, 0, &val))
13359                 return;
13360
13361         if (val == TG3_EEPROM_MAGIC)
13362                 tg3_read_bc_ver(tp);
13363         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13364                 tg3_read_sb_ver(tp, val);
13365         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13366                 tg3_read_hwsb_ver(tp);
13367         else
13368                 return;
13369
13370         if (!tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || vpd_vers)
13371                 goto done;
13372
13373         tg3_read_mgmtfw_ver(tp);
13374
13375 done:
13376         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13377 }
13378
13379 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *);
13380
13381 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13382 {
13383         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13384                 return TG3_RX_RET_MAX_SIZE_5717;
13385         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13386                 return TG3_RX_RET_MAX_SIZE_5700;
13387         else
13388                 return TG3_RX_RET_MAX_SIZE_5705;
13389 }
13390
/* Host chipsets known to reorder posted writes to the mailbox
 * registers; when one is present (and the NIC is not PCIe),
 * tg3_get_invariants() sets MBOX_WRITE_REORDER so every mailbox write
 * is read back to force ordering.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
	{ PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
	{ },
};
13397
13398 static int __devinit tg3_get_invariants(struct tg3 *tp)
13399 {
13400         u32 misc_ctrl_reg;
13401         u32 pci_state_reg, grc_misc_cfg;
13402         u32 val;
13403         u16 pci_cmd;
13404         int err;
13405
13406         /* Force memory write invalidate off.  If we leave it on,
13407          * then on 5700_BX chips we have to enable a workaround.
13408          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
13409          * to match the cacheline size.  The Broadcom driver have this
13410          * workaround but turns MWI off all the times so never uses
13411          * it.  This seems to suggest that the workaround is insufficient.
13412          */
13413         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13414         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
13415         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13416
13417         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
13418          * has the register indirect write enable bit set before
13419          * we try to access any of the MMIO registers.  It is also
13420          * critical that the PCI-X hw workaround situation is decided
13421          * before that as well.
13422          */
13423         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13424                               &misc_ctrl_reg);
13425
13426         tp->pci_chip_rev_id = (misc_ctrl_reg >>
13427                                MISC_HOST_CTRL_CHIPREV_SHIFT);
13428         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13429                 u32 prod_id_asic_rev;
13430
13431                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13432                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13433                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13434                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13435                         pci_read_config_dword(tp->pdev,
13436                                               TG3PCI_GEN2_PRODID_ASICREV,
13437                                               &prod_id_asic_rev);
13438                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13439                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13440                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13441                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13442                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13443                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13444                         pci_read_config_dword(tp->pdev,
13445                                               TG3PCI_GEN15_PRODID_ASICREV,
13446                                               &prod_id_asic_rev);
13447                 else
13448                         pci_read_config_dword(tp->pdev, TG3PCI_PRODID_ASICREV,
13449                                               &prod_id_asic_rev);
13450
13451                 tp->pci_chip_rev_id = prod_id_asic_rev;
13452         }
13453
13454         /* Wrong chip ID in 5752 A0. This code can be removed later
13455          * as A0 is not in production.
13456          */
13457         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13458                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13459
13460         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
13461          * we need to disable memory and use config. cycles
13462          * only to access all registers. The 5702/03 chips
13463          * can mistakenly decode the special cycles from the
13464          * ICH chipsets as memory write cycles, causing corruption
13465          * of register and memory space. Only certain ICH bridges
13466          * will drive special cycles with non-zero data during the
13467          * address phase which can fall within the 5703's address
13468          * range. This is not an ICH bug as the PCI spec allows
13469          * non-zero address during special cycles. However, only
13470          * these ICH bridges are known to drive non-zero addresses
13471          * during special cycles.
13472          *
13473          * Since special cycles do not cross PCI bridges, we only
13474          * enable this workaround if the 5703 is on the secondary
13475          * bus of these ICH bridges.
13476          */
13477         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
13478             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
13479                 static struct tg3_dev_id {
13480                         u32     vendor;
13481                         u32     device;
13482                         u32     rev;
13483                 } ich_chipsets[] = {
13484                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
13485                           PCI_ANY_ID },
13486                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
13487                           PCI_ANY_ID },
13488                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
13489                           0xa },
13490                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
13491                           PCI_ANY_ID },
13492                         { },
13493                 };
13494                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
13495                 struct pci_dev *bridge = NULL;
13496
13497                 while (pci_id->vendor != 0) {
13498                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
13499                                                 bridge);
13500                         if (!bridge) {
13501                                 pci_id++;
13502                                 continue;
13503                         }
13504                         if (pci_id->rev != PCI_ANY_ID) {
13505                                 if (bridge->revision > pci_id->rev)
13506                                         continue;
13507                         }
13508                         if (bridge->subordinate &&
13509                             (bridge->subordinate->number ==
13510                              tp->pdev->bus->number)) {
13511                                 tg3_flag_set(tp, ICH_WORKAROUND);
13512                                 pci_dev_put(bridge);
13513                                 break;
13514                         }
13515                 }
13516         }
13517
13518         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
13519                 static struct tg3_dev_id {
13520                         u32     vendor;
13521                         u32     device;
13522                 } bridge_chipsets[] = {
13523                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
13524                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
13525                         { },
13526                 };
13527                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
13528                 struct pci_dev *bridge = NULL;
13529
13530                 while (pci_id->vendor != 0) {
13531                         bridge = pci_get_device(pci_id->vendor,
13532                                                 pci_id->device,
13533                                                 bridge);
13534                         if (!bridge) {
13535                                 pci_id++;
13536                                 continue;
13537                         }
13538                         if (bridge->subordinate &&
13539                             (bridge->subordinate->number <=
13540                              tp->pdev->bus->number) &&
13541                             (bridge->subordinate->subordinate >=
13542                              tp->pdev->bus->number)) {
13543                                 tg3_flag_set(tp, 5701_DMA_BUG);
13544                                 pci_dev_put(bridge);
13545                                 break;
13546                         }
13547                 }
13548         }
13549
13550         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
13551          * DMA addresses > 40-bit. This bridge may have other additional
13552          * 57xx devices behind it in some 4-port NIC designs for example.
13553          * Any tg3 device found behind the bridge will also need the 40-bit
13554          * DMA workaround.
13555          */
13556         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13557             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
13558                 tg3_flag_set(tp, 5780_CLASS);
13559                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13560                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
13561         } else {
13562                 struct pci_dev *bridge = NULL;
13563
13564                 do {
13565                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
13566                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
13567                                                 bridge);
13568                         if (bridge && bridge->subordinate &&
13569                             (bridge->subordinate->number <=
13570                              tp->pdev->bus->number) &&
13571                             (bridge->subordinate->subordinate >=
13572                              tp->pdev->bus->number)) {
13573                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
13574                                 pci_dev_put(bridge);
13575                                 break;
13576                         }
13577                 } while (bridge);
13578         }
13579
13580         /* Initialize misc host control in PCI block. */
13581         tp->misc_host_ctrl |= (misc_ctrl_reg &
13582                                MISC_HOST_CTRL_CHIPREV);
13583         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
13584                                tp->misc_host_ctrl);
13585
13586         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
13587             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 ||
13588             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13589             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13590                 tp->pdev_peer = tg3_find_peer(tp);
13591
13592         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13593             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13594             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13595                 tg3_flag_set(tp, 5717_PLUS);
13596
13597         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13598             tg3_flag(tp, 5717_PLUS))
13599                 tg3_flag_set(tp, 57765_PLUS);
13600
13601         /* Intentionally exclude ASIC_REV_5906 */
13602         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13603             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13604             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13605             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13606             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13607             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13608             tg3_flag(tp, 57765_PLUS))
13609                 tg3_flag_set(tp, 5755_PLUS);
13610
13611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13612             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13613             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13614             tg3_flag(tp, 5755_PLUS) ||
13615             tg3_flag(tp, 5780_CLASS))
13616                 tg3_flag_set(tp, 5750_PLUS);
13617
13618         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13619             tg3_flag(tp, 5750_PLUS))
13620                 tg3_flag_set(tp, 5705_PLUS);
13621
13622         /* Determine TSO capabilities */
13623         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13624                 ; /* Do nothing. HW bug. */
13625         else if (tg3_flag(tp, 57765_PLUS))
13626                 tg3_flag_set(tp, HW_TSO_3);
13627         else if (tg3_flag(tp, 5755_PLUS) ||
13628                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13629                 tg3_flag_set(tp, HW_TSO_2);
13630         else if (tg3_flag(tp, 5750_PLUS)) {
13631                 tg3_flag_set(tp, HW_TSO_1);
13632                 tg3_flag_set(tp, TSO_BUG);
13633                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
13634                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
13635                         tg3_flag_clear(tp, TSO_BUG);
13636         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13637                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13638                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
13639                         tg3_flag_set(tp, TSO_BUG);
13640                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
13641                         tp->fw_needed = FIRMWARE_TG3TSO5;
13642                 else
13643                         tp->fw_needed = FIRMWARE_TG3TSO;
13644         }
13645
13646         /* Selectively allow TSO based on operating conditions */
13647         if (tg3_flag(tp, HW_TSO_1) ||
13648             tg3_flag(tp, HW_TSO_2) ||
13649             tg3_flag(tp, HW_TSO_3) ||
13650             (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
13651                 tg3_flag_set(tp, TSO_CAPABLE);
13652         else {
13653                 tg3_flag_clear(tp, TSO_CAPABLE);
13654                 tg3_flag_clear(tp, TSO_BUG);
13655                 tp->fw_needed = NULL;
13656         }
13657
13658         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
13659                 tp->fw_needed = FIRMWARE_TG3;
13660
13661         tp->irq_max = 1;
13662
13663         if (tg3_flag(tp, 5750_PLUS)) {
13664                 tg3_flag_set(tp, SUPPORT_MSI);
13665                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
13666                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
13667                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
13668                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
13669                      tp->pdev_peer == tp->pdev))
13670                         tg3_flag_clear(tp, SUPPORT_MSI);
13671
13672                 if (tg3_flag(tp, 5755_PLUS) ||
13673                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13674                         tg3_flag_set(tp, 1SHOT_MSI);
13675                 }
13676
13677                 if (tg3_flag(tp, 57765_PLUS)) {
13678                         tg3_flag_set(tp, SUPPORT_MSIX);
13679                         tp->irq_max = TG3_IRQ_MAX_VECS;
13680                 }
13681         }
13682
13683         /* All chips can get confused if TX buffers
13684          * straddle the 4GB address boundary.
13685          */
13686         tg3_flag_set(tp, 4G_DMA_BNDRY_BUG);
13687
13688         if (tg3_flag(tp, 5755_PLUS))
13689                 tg3_flag_set(tp, SHORT_DMA_BUG);
13690         else
13691                 tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG);
13692
13693         if (tg3_flag(tp, 5717_PLUS))
13694                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
13695
13696         if (tg3_flag(tp, 57765_PLUS) &&
13697             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5719)
13698                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
13699
13700         if (!tg3_flag(tp, 5705_PLUS) ||
13701             tg3_flag(tp, 5780_CLASS) ||
13702             tg3_flag(tp, USE_JUMBO_BDFLAG))
13703                 tg3_flag_set(tp, JUMBO_CAPABLE);
13704
13705         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
13706                               &pci_state_reg);
13707
13708         tp->pcie_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_EXP);
13709         if (tp->pcie_cap != 0) {
13710                 u16 lnkctl;
13711
13712                 tg3_flag_set(tp, PCI_EXPRESS);
13713
13714                 tp->pcie_readrq = 4096;
13715                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13716                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13717                         tp->pcie_readrq = 2048;
13718
13719                 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13720
13721                 pci_read_config_word(tp->pdev,
13722                                      tp->pcie_cap + PCI_EXP_LNKCTL,
13723                                      &lnkctl);
13724                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
13725                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13726                             ASIC_REV_5906) {
13727                                 tg3_flag_clear(tp, HW_TSO_2);
13728                                 tg3_flag_clear(tp, TSO_CAPABLE);
13729                         }
13730                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13731                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13732                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
13733                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
13734                                 tg3_flag_set(tp, CLKREQ_BUG);
13735                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
13736                         tg3_flag_set(tp, L1PLLPD_EN);
13737                 }
13738         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
13739                 tg3_flag_set(tp, PCI_EXPRESS);
13740         } else if (!tg3_flag(tp, 5705_PLUS) ||
13741                    tg3_flag(tp, 5780_CLASS)) {
13742                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
13743                 if (!tp->pcix_cap) {
13744                         dev_err(&tp->pdev->dev,
13745                                 "Cannot find PCI-X capability, aborting\n");
13746                         return -EIO;
13747                 }
13748
13749                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
13750                         tg3_flag_set(tp, PCIX_MODE);
13751         }
13752
13753         /* If we have an AMD 762 or VIA K8T800 chipset, write
13754          * reordering to the mailbox registers done by the host
13755          * controller can cause major troubles.  We read back from
13756          * every mailbox register write to force the writes to be
13757          * posted to the chip in order.
13758          */
13759         if (pci_dev_present(tg3_write_reorder_chipsets) &&
13760             !tg3_flag(tp, PCI_EXPRESS))
13761                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
13762
13763         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
13764                              &tp->pci_cacheline_sz);
13765         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13766                              &tp->pci_lat_timer);
13767         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
13768             tp->pci_lat_timer < 64) {
13769                 tp->pci_lat_timer = 64;
13770                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
13771                                       tp->pci_lat_timer);
13772         }
13773
13774         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
13775                 /* 5700 BX chips need to have their TX producer index
13776                  * mailboxes written twice to workaround a bug.
13777                  */
13778                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
13779
13780                 /* If we are in PCI-X mode, enable register write workaround.
13781                  *
13782                  * The workaround is to use indirect register accesses
13783                  * for all chip writes not to mailbox registers.
13784                  */
13785                 if (tg3_flag(tp, PCIX_MODE)) {
13786                         u32 pm_reg;
13787
13788                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
13789
13790                         /* The chip can have it's power management PCI config
13791                          * space registers clobbered due to this bug.
13792                          * So explicitly force the chip into D0 here.
13793                          */
13794                         pci_read_config_dword(tp->pdev,
13795                                               tp->pm_cap + PCI_PM_CTRL,
13796                                               &pm_reg);
13797                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
13798                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
13799                         pci_write_config_dword(tp->pdev,
13800                                                tp->pm_cap + PCI_PM_CTRL,
13801                                                pm_reg);
13802
13803                         /* Also, force SERR#/PERR# in PCI command. */
13804                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13805                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
13806                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13807                 }
13808         }
13809
13810         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
13811                 tg3_flag_set(tp, PCI_HIGH_SPEED);
13812         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
13813                 tg3_flag_set(tp, PCI_32BIT);
13814
13815         /* Chip-specific fixup from Broadcom driver */
13816         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
13817             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
13818                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
13819                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
13820         }
13821
13822         /* Default fast path register access methods */
13823         tp->read32 = tg3_read32;
13824         tp->write32 = tg3_write32;
13825         tp->read32_mbox = tg3_read32;
13826         tp->write32_mbox = tg3_write32;
13827         tp->write32_tx_mbox = tg3_write32;
13828         tp->write32_rx_mbox = tg3_write32;
13829
13830         /* Various workaround register access methods */
13831         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
13832                 tp->write32 = tg3_write_indirect_reg32;
13833         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
13834                  (tg3_flag(tp, PCI_EXPRESS) &&
13835                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
13836                 /*
13837                  * Back to back register writes can cause problems on these
13838                  * chips, the workaround is to read back all reg writes
13839                  * except those to mailbox regs.
13840                  *
13841                  * See tg3_write_indirect_reg32().
13842                  */
13843                 tp->write32 = tg3_write_flush_reg32;
13844         }
13845
13846         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
13847                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
13848                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
13849                         tp->write32_rx_mbox = tg3_write_flush_reg32;
13850         }
13851
13852         if (tg3_flag(tp, ICH_WORKAROUND)) {
13853                 tp->read32 = tg3_read_indirect_reg32;
13854                 tp->write32 = tg3_write_indirect_reg32;
13855                 tp->read32_mbox = tg3_read_indirect_mbox;
13856                 tp->write32_mbox = tg3_write_indirect_mbox;
13857                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
13858                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
13859
13860                 iounmap(tp->regs);
13861                 tp->regs = NULL;
13862
13863                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
13864                 pci_cmd &= ~PCI_COMMAND_MEMORY;
13865                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
13866         }
13867         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13868                 tp->read32_mbox = tg3_read32_mbox_5906;
13869                 tp->write32_mbox = tg3_write32_mbox_5906;
13870                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
13871                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
13872         }
13873
13874         if (tp->write32 == tg3_write_indirect_reg32 ||
13875             (tg3_flag(tp, PCIX_MODE) &&
13876              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13877               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
13878                 tg3_flag_set(tp, SRAM_USE_CONFIG);
13879
13880         /* Get eeprom hw config before calling tg3_set_power_state().
13881          * In particular, the TG3_FLAG_IS_NIC flag must be
13882          * determined before calling tg3_set_power_state() so that
13883          * we know whether or not to switch out of Vaux power.
13884          * When the flag is set, it means that GPIO1 is used for eeprom
13885          * write protect and also implies that it is a LOM where GPIOs
13886          * are not used to switch power.
13887          */
13888         tg3_get_eeprom_hw_cfg(tp);
13889
13890         if (tg3_flag(tp, ENABLE_APE)) {
13891                 /* Allow reads and writes to the
13892                  * APE register and memory space.
13893                  */
13894                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
13895                                  PCISTATE_ALLOW_APE_SHMEM_WR |
13896                                  PCISTATE_ALLOW_APE_PSPACE_WR;
13897                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
13898                                        pci_state_reg);
13899         }
13900
13901         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13902             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13903             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13904             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13905             tg3_flag(tp, 57765_PLUS))
13906                 tg3_flag_set(tp, CPMU_PRESENT);
13907
13908         /* Set up tp->grc_local_ctrl before calling tg3_power_up().
13909          * GPIO1 driven high will bring 5700's external PHY out of reset.
13910          * It is also used as eeprom write protect on LOMs.
13911          */
13912         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
13913         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13914             tg3_flag(tp, EEPROM_WRITE_PROT))
13915                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
13916                                        GRC_LCLCTRL_GPIO_OUTPUT1);
13917         /* Unused GPIO3 must be driven as output on 5752 because there
13918          * are no pull-up resistors on unused GPIO pins.
13919          */
13920         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
13921                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
13922
13923         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13924             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13925             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765)
13926                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13927
13928         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
13929             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
13930                 /* Turn off the debug UART. */
13931                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
13932                 if (tg3_flag(tp, IS_NIC))
13933                         /* Keep VMain power. */
13934                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
13935                                               GRC_LCLCTRL_GPIO_OUTPUT0;
13936         }
13937
13938         /* Force the chip into D0. */
13939         err = tg3_power_up(tp);
13940         if (err) {
13941                 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13942                 return err;
13943         }
13944
13945         /* Derive initial jumbo mode from MTU assigned in
13946          * ether_setup() via the alloc_etherdev() call
13947          */
13948         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
13949                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
13950
13951         /* Determine WakeOnLan speed to use. */
13952         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13953             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
13954             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
13955             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
13956                 tg3_flag_clear(tp, WOL_SPEED_100MB);
13957         } else {
13958                 tg3_flag_set(tp, WOL_SPEED_100MB);
13959         }
13960
13961         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
13962                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
13963
13964         /* A few boards don't want Ethernet@WireSpeed phy feature */
13965         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13966             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
13967              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
13968              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
13969             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
13970             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13971                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
13972
13973         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
13974             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
13975                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
13976         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
13977                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
13978
13979         if (tg3_flag(tp, 5705_PLUS) &&
13980             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
13981             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13982             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
13983             !tg3_flag(tp, 57765_PLUS)) {
13984                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13985                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13986                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13987                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
13988                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
13989                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
13990                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
13991                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
13992                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
13993                 } else
13994                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
13995         }
13996
13997         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13998             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
13999                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14000                 if (tp->phy_otp == 0)
14001                         tp->phy_otp = TG3_OTP_DEFAULT;
14002         }
14003
14004         if (tg3_flag(tp, CPMU_PRESENT))
14005                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14006         else
14007                 tp->mi_mode = MAC_MI_MODE_BASE;
14008
14009         tp->coalesce_mode = 0;
14010         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14011             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14012                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14013
14014         /* Set these bits to enable statistics workaround. */
14015         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14016             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14017             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14018                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14019                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14020         }
14021
14022         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14023             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14024                 tg3_flag_set(tp, USE_PHYLIB);
14025
14026         err = tg3_mdio_init(tp);
14027         if (err)
14028                 return err;
14029
14030         /* Initialize data/descriptor byte/word swapping. */
14031         val = tr32(GRC_MODE);
14032         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14033                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14034                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14035                         GRC_MODE_B2HRX_ENABLE |
14036                         GRC_MODE_HTX2B_ENABLE |
14037                         GRC_MODE_HOST_STACKUP);
14038         else
14039                 val &= GRC_MODE_HOST_STACKUP;
14040
14041         tw32(GRC_MODE, val | tp->grc_mode);
14042
14043         tg3_switch_clocks(tp);
14044
14045         /* Clear this out for sanity. */
14046         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14047
14048         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14049                               &pci_state_reg);
14050         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14051             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14052                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14053
14054                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14055                     chiprevid == CHIPREV_ID_5701_B0 ||
14056                     chiprevid == CHIPREV_ID_5701_B2 ||
14057                     chiprevid == CHIPREV_ID_5701_B5) {
14058                         void __iomem *sram_base;
14059
14060                         /* Write some dummy words into the SRAM status block
14061                          * area, see if it reads back correctly.  If the return
14062                          * value is bad, force enable the PCIX workaround.
14063                          */
14064                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14065
14066                         writel(0x00000000, sram_base);
14067                         writel(0x00000000, sram_base + 4);
14068                         writel(0xffffffff, sram_base + 4);
14069                         if (readl(sram_base) != 0x00000000)
14070                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14071                 }
14072         }
14073
14074         udelay(50);
14075         tg3_nvram_init(tp);
14076
14077         grc_misc_cfg = tr32(GRC_MISC_CFG);
14078         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14079
14080         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14081             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14082              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14083                 tg3_flag_set(tp, IS_5788);
14084
14085         if (!tg3_flag(tp, IS_5788) &&
14086             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14087                 tg3_flag_set(tp, TAGGED_STATUS);
14088         if (tg3_flag(tp, TAGGED_STATUS)) {
14089                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14090                                       HOSTCC_MODE_CLRTICK_TXBD);
14091
14092                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14093                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14094                                        tp->misc_host_ctrl);
14095         }
14096
14097         /* Preserve the APE MAC_MODE bits */
14098         if (tg3_flag(tp, ENABLE_APE))
14099                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14100         else
14101                 tp->mac_mode = TG3_DEF_MAC_MODE;
14102
14103         /* these are limited to 10/100 only */
14104         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14105              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14106             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14107              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14108              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14109               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14110               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14111             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14112              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14113               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14114               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14115             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14116             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14117             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14118             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14119                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14120
14121         err = tg3_phy_probe(tp);
14122         if (err) {
14123                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14124                 /* ... but do not return immediately ... */
14125                 tg3_mdio_fini(tp);
14126         }
14127
14128         tg3_read_vpd(tp);
14129         tg3_read_fw_ver(tp);
14130
14131         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14132                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14133         } else {
14134                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14135                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14136                 else
14137                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14138         }
14139
14140         /* 5700 {AX,BX} chips have a broken status block link
14141          * change bit implementation, so we must use the
14142          * status register in those cases.
14143          */
14144         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14145                 tg3_flag_set(tp, USE_LINKCHG_REG);
14146         else
14147                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14148
14149         /* The led_ctrl is set during tg3_phy_probe, here we might
14150          * have to force the link status polling mechanism based
14151          * upon subsystem IDs.
14152          */
14153         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14154             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14155             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14156                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14157                 tg3_flag_set(tp, USE_LINKCHG_REG);
14158         }
14159
14160         /* For all SERDES we poll the MAC status register. */
14161         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14162                 tg3_flag_set(tp, POLL_SERDES);
14163         else
14164                 tg3_flag_clear(tp, POLL_SERDES);
14165
14166         tp->rx_offset = NET_IP_ALIGN;
14167         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14168         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14169             tg3_flag(tp, PCIX_MODE)) {
14170                 tp->rx_offset = 0;
14171 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14172                 tp->rx_copy_thresh = ~(u16)0;
14173 #endif
14174         }
14175
14176         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14177         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14178         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14179
14180         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14181
14182         /* Increment the rx prod index on the rx std ring by at most
14183          * 8 for these chips to workaround hw errata.
14184          */
14185         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14186             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14187             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14188                 tp->rx_std_max_post = 8;
14189
14190         if (tg3_flag(tp, ASPM_WORKAROUND))
14191                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14192                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14193
14194         return err;
14195 }
14196
14197 #ifdef CONFIG_SPARC
14198 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14199 {
14200         struct net_device *dev = tp->dev;
14201         struct pci_dev *pdev = tp->pdev;
14202         struct device_node *dp = pci_device_to_OF_node(pdev);
14203         const unsigned char *addr;
14204         int len;
14205
14206         addr = of_get_property(dp, "local-mac-address", &len);
14207         if (addr && len == 6) {
14208                 memcpy(dev->dev_addr, addr, 6);
14209                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14210                 return 0;
14211         }
14212         return -ENODEV;
14213 }
14214
14215 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14216 {
14217         struct net_device *dev = tp->dev;
14218
14219         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14220         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14221         return 0;
14222 }
14223 #endif
14224
/* Determine the device's MAC address, trying sources in decreasing
 * order of trustworthiness:
 *
 *   1. (SPARC only) the OpenFirmware "local-mac-address" property;
 *   2. the MAC address mailbox in NIC SRAM, as left by bootcode;
 *   3. NVRAM, at a chip- and function-dependent offset;
 *   4. the live MAC_ADDR_0 hardware registers;
 *   5. (SPARC only) the system IDPROM.
 *
 * On success dev_addr and perm_addr are filled in and 0 is returned;
 * -EINVAL means no source yielded a valid unicast address.
 */
static int __devinit tg3_get_device_address(struct tg3 *tp)
{
	struct net_device *dev = tp->dev;
	u32 hi, lo, mac_offset;
	int addr_ok = 0;

#ifdef CONFIG_SPARC
	if (!tg3_get_macaddr_sparc(tp))
		return 0;
#endif

	/* Pick the NVRAM offset of the MAC address for this chip.  0x7c is
	 * the common case; dual-MAC (5704 / 5780-class) parts keep the
	 * second port's address at 0xcc, 5717+ parts shift per PCI
	 * function, and the 5906 uses offset 0x10.
	 */
	mac_offset = 0x7c;
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
	    tg3_flag(tp, 5780_CLASS)) {
		if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
			mac_offset = 0xcc;
		if (tg3_nvram_lock(tp))
			tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
		else
			tg3_nvram_unlock(tp);
	} else if (tg3_flag(tp, 5717_PLUS)) {
		if (PCI_FUNC(tp->pdev->devfn) & 1)
			mac_offset = 0xcc;
		if (PCI_FUNC(tp->pdev->devfn) > 1)
			mac_offset += 0x18c;
	} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
		mac_offset = 0x10;

	/* First try to get it from MAC address mailbox. */
	tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
	/* 0x484b is ASCII "HK" — the bootcode's signature marking a
	 * valid mailbox entry.
	 */
	if ((hi >> 16) == 0x484b) {
		dev->dev_addr[0] = (hi >>  8) & 0xff;
		dev->dev_addr[1] = (hi >>  0) & 0xff;

		tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
		dev->dev_addr[2] = (lo >> 24) & 0xff;
		dev->dev_addr[3] = (lo >> 16) & 0xff;
		dev->dev_addr[4] = (lo >>  8) & 0xff;
		dev->dev_addr[5] = (lo >>  0) & 0xff;

		/* Some old bootcode may report a 0 MAC address in SRAM */
		addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
	}
	if (!addr_ok) {
		/* Next, try NVRAM. */
		if (!tg3_flag(tp, NO_NVRAM) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
		    !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
			/* The address occupies the low 2 bytes of hi and
			 * all 4 bytes of lo, already in wire order.
			 */
			memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
			memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
		}
		/* Finally just fetch it out of the MAC control regs. */
		else {
			hi = tr32(MAC_ADDR_0_HIGH);
			lo = tr32(MAC_ADDR_0_LOW);

			/* Registers hold the address in reversed byte
			 * order relative to dev_addr[].
			 */
			dev->dev_addr[5] = lo & 0xff;
			dev->dev_addr[4] = (lo >> 8) & 0xff;
			dev->dev_addr[3] = (lo >> 16) & 0xff;
			dev->dev_addr[2] = (lo >> 24) & 0xff;
			dev->dev_addr[1] = hi & 0xff;
			dev->dev_addr[0] = (hi >> 8) & 0xff;
		}
	}

	if (!is_valid_ether_addr(&dev->dev_addr[0])) {
#ifdef CONFIG_SPARC
		if (!tg3_get_default_macaddr_sparc(tp))
			return 0;
#endif
		return -EINVAL;
	}
	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
	return 0;
}
14300
#define BOUNDARY_SINGLE_CACHELINE	1
#define BOUNDARY_MULTI_CACHELINE	2

/* Fold the DMA read/write boundary bits appropriate for this chip,
 * bus type and host cache-line size into @val (the DMA_RWCTRL image)
 * and return the result.
 *
 * The "goal" selected below is per-architecture: platforms whose PCI
 * host bridges disconnect bursts at cache-line boundaries want DMA
 * confined to one (or a few) cache lines; others leave the boundary
 * bits alone (goal == 0).
 */
static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
{
	int cacheline_size;
	u8 byte;
	int goal;

	/* PCI_CACHE_LINE_SIZE config byte is in 4-byte units; a value of
	 * 0 means "unspecified" and is treated as the 1024-byte worst case.
	 */
	pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
	if (byte == 0)
		cacheline_size = 1024;
	else
		cacheline_size = (int) byte * 4;

	/* On 5703 and later chips, the boundary bits have no
	 * effect.
	 */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
	    !tg3_flag(tp, PCI_EXPRESS))
		goto out;

#if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
	goal = BOUNDARY_MULTI_CACHELINE;
#else
#if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
	goal = BOUNDARY_SINGLE_CACHELINE;
#else
	goal = 0;
#endif
#endif

	/* 57765+ chips only expose a single cache-alignment disable bit
	 * rather than the full boundary encodings handled below.
	 */
	if (tg3_flag(tp, 57765_PLUS)) {
		val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
		goto out;
	}

	if (!goal)
		goto out;

	/* PCI controllers on most RISC systems tend to disconnect
	 * when a device tries to burst across a cache-line boundary.
	 * Therefore, letting tg3 do so just wastes PCI bandwidth.
	 *
	 * Unfortunately, for PCI-E there are only limited
	 * write-side controls for this, and thus for reads
	 * we will still get the disconnects.  We'll also waste
	 * these PCI cycles for both read and write for chips
	 * other than 5700 and 5701 which do not implement the
	 * boundary bits.
	 */
	if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				/* PCI-X minimum boundary is 128 bytes, so
				 * lines <= 128 all map to the 128 encoding.
				 */
				val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
			} else {
				val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
					DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			}
			break;

		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
			break;

		default:
			val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
				DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
			break;
		}
	} else if (tg3_flag(tp, PCI_EXPRESS)) {
		/* PCIe offers write-boundary control only (see comment
		 * above); reads cannot be bounded.
		 */
		switch (cacheline_size) {
		case 16:
		case 32:
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
				val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
				break;
			}
			/* fallthrough */
		case 128:
		default:
			val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
			val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
			break;
		}
	} else {
		/* Conventional PCI: for a single-cacheline goal, match the
		 * boundary to the exact line size via the case fallthrough
		 * ladder; for multi-cacheline, fall through to a larger
		 * boundary (256/512/1024) below.
		 */
		switch (cacheline_size) {
		case 16:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_16 |
					DMA_RWCTRL_WRITE_BNDRY_16);
				break;
			}
			/* fallthrough */
		case 32:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_32 |
					DMA_RWCTRL_WRITE_BNDRY_32);
				break;
			}
			/* fallthrough */
		case 64:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_64 |
					DMA_RWCTRL_WRITE_BNDRY_64);
				break;
			}
			/* fallthrough */
		case 128:
			if (goal == BOUNDARY_SINGLE_CACHELINE) {
				val |= (DMA_RWCTRL_READ_BNDRY_128 |
					DMA_RWCTRL_WRITE_BNDRY_128);
				break;
			}
			/* fallthrough */
		case 256:
			val |= (DMA_RWCTRL_READ_BNDRY_256 |
				DMA_RWCTRL_WRITE_BNDRY_256);
			break;
		case 512:
			val |= (DMA_RWCTRL_READ_BNDRY_512 |
				DMA_RWCTRL_WRITE_BNDRY_512);
			break;
		case 1024:
		default:
			val |= (DMA_RWCTRL_READ_BNDRY_1024 |
				DMA_RWCTRL_WRITE_BNDRY_1024);
			break;
		}
	}

out:
	return val;
}
14444
/* Run one host<->NIC test DMA transfer of @size bytes at bus address
 * @buf_dma, using the chip's internal DMA descriptor machinery.
 * @to_device selects direction: non-zero = read DMA (host -> NIC),
 * zero = write DMA (NIC -> host).
 *
 * A hand-built internal buffer descriptor is pushed into NIC SRAM
 * through the PCI memory window, then enqueued on the appropriate
 * FTQ; completion is detected by polling the matching completion
 * FIFO for the descriptor's SRAM address.
 *
 * Returns 0 if the transfer completed within the polling window,
 * -ENODEV on timeout.  Note the statement order here (FIFO/status
 * clears before programming, flush-writes with udelay(40) after
 * enabling the DMA engines) is part of the hardware protocol.
 */
static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
{
	struct tg3_internal_buffer_desc test_desc;
	u32 sram_dma_descs;
	int i, ret;

	sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;

	/* Clear out any stale completion/status state before the test. */
	tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
	tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
	tw32(RDMAC_STATUS, 0);
	tw32(WDMAC_STATUS, 0);

	tw32(BUFMGR_MODE, 0);
	tw32(FTQ_RESET, 0);

	/* Point the descriptor at the host buffer. */
	test_desc.addr_hi = ((u64) buf_dma) >> 32;
	test_desc.addr_lo = buf_dma & 0xffffffff;
	test_desc.nic_mbuf = 0x00002100;
	test_desc.len = size;

	/*
	 * HP ZX1 was seeing test failures for 5701 cards running at 33Mhz
	 * the *second* time the tg3 driver was getting loaded after an
	 * initial scan.
	 *
	 * Broadcom tells me:
	 *   ...the DMA engine is connected to the GRC block and a DMA
	 *   reset may affect the GRC block in some unpredictable way...
	 *   The behavior of resets to individual blocks has not been tested.
	 *
	 * Broadcom noted the GRC reset will also reset all sub-components.
	 */
	if (to_device) {
		test_desc.cqid_sqid = (13 << 8) | 2;

		tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
		udelay(40);
	} else {
		test_desc.cqid_sqid = (16 << 8) | 7;

		tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
		udelay(40);
	}
	test_desc.flags = 0x00000005;

	/* Copy the descriptor into NIC SRAM word by word via the
	 * PCI memory window in config space.
	 */
	for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
		u32 val;

		val = *(((u32 *)&test_desc) + i);
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
				       sram_dma_descs + (i * sizeof(u32)));
		pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
	}
	pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);

	/* Kick off the transfer by enqueueing the descriptor address. */
	if (to_device)
		tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
	else
		tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);

	/* Poll for completion: up to 40 * 100us = 4ms. */
	ret = -ENODEV;
	for (i = 0; i < 40; i++) {
		u32 val;

		if (to_device)
			val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
		else
			val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
		if ((val & 0xffff) == sram_dma_descs) {
			ret = 0;
			break;
		}

		udelay(100);
	}

	return ret;
}
14524
14525 #define TEST_BUFFER_SIZE        0x2000
14526
/* Host bridges known to expose the 5700/5701 write-DMA bug even when
 * the DMA loop test in tg3_test_dma() passes; matched there via
 * pci_dev_present() to force the 16-byte write boundary anyway.
 */
static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
	{ },
};
14531
/* Tune and verify the chip's DMA engine at probe time.
 *
 * Builds a chip-specific value for TG3PCI_DMA_RW_CTRL (PCI read/write
 * command codes, read/write watermarks, boundary bits), writes it to
 * the chip, and on 5700/5701 parts runs a write/read-back DMA loop
 * test to catch the known write-DMA corruption bug, retrying once with
 * a 16-byte write boundary if corruption is detected.
 *
 * Returns 0 on success, -ENOMEM if the test buffer cannot be
 * allocated, or a negative errno (-ENODEV) if the DMA test fails.
 */
static int __devinit tg3_test_dma(struct tg3 *tp)
{
	dma_addr_t buf_dma;
	u32 *buf, saved_dma_rwctrl;
	int ret = 0;

	buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
				 &buf_dma, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto out_nofree;
	}

	/* Start from the default PCI write/read command codes, then let
	 * tg3_calc_dma_bndry() fold in the boundary settings.
	 */
	tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
			  (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));

	tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);

	/* 57765+ chips need none of the bus-specific tuning below. */
	if (tg3_flag(tp, 57765_PLUS))
		goto out;

	if (tg3_flag(tp, PCI_EXPRESS)) {
		/* DMA read watermark not used on PCIE */
		tp->dma_rwctrl |= 0x00180000;
	} else if (!tg3_flag(tp, PCIX_MODE)) {
		/* Conventional PCI: watermark encoding differs by ASIC. */
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
			tp->dma_rwctrl |= 0x003f0000;
		else
			tp->dma_rwctrl |= 0x003f000f;
	} else {
		if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
		    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
			u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
			u32 read_water = 0x7;

			/* If the 5704 is behind the EPB bridge, we can
			 * do the less restrictive ONE_DMA workaround for
			 * better performance.
			 */
			if (tg3_flag(tp, 40BIT_DMA_BUG) &&
			    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
				tp->dma_rwctrl |= 0x8000;
			else if (ccval == 0x6 || ccval == 0x7)
				tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;

			if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
				read_water = 4;
			/* Set bit 23 to enable PCIX hw bug fix */
			tp->dma_rwctrl |=
				(read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
				(0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
				(1 << 23);
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
			/* 5780 always in PCIX mode */
			tp->dma_rwctrl |= 0x00144000;
		} else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
			/* 5714 always in PCIX mode */
			tp->dma_rwctrl |= 0x00148000;
		} else {
			tp->dma_rwctrl |= 0x001b000f;
		}
	}

	/* 5703/5704: clear the low nibble (boundary bits) again. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
		tp->dma_rwctrl &= 0xfffffff0;

	if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
	    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
		/* Remove this if it causes problems for some boards. */
		tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;

		/* On 5700/5701 chips, we need to set this bit.
		 * Otherwise the chip will issue cacheline transactions
		 * to streamable DMA memory with not all the byte
		 * enables turned on.  This is an error on several
		 * RISC PCI controllers, in particular sparc64.
		 *
		 * On 5703/5704 chips, this bit has been reassigned
		 * a different meaning.  In particular, it is used
		 * on those chips to enable a PCI-X workaround.
		 */
		tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
	}

	/* Commit the computed value to the chip. */
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

#if 0
	/* Unneeded, already done by tg3_get_invariants.  */
	tg3_switch_clocks(tp);
#endif

	/* Only 5700/5701 need the write-DMA loop test below. */
	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
	    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
		goto out;

	/* It is best to perform DMA test with maximum write burst size
	 * to expose the 5700/5701 write DMA bug.
	 */
	saved_dma_rwctrl = tp->dma_rwctrl;
	tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
	tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);

	/* Write a known pattern to the chip, read it back, and verify.
	 * On corruption, retry once with a 16-byte write boundary.
	 */
	while (1) {
		u32 *p = buf, i;

		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
			p[i] = i;

		/* Send the buffer to the chip. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
		if (ret) {
			dev_err(&tp->pdev->dev,
				"%s: Buffer write failed. err = %d\n",
				__func__, ret);
			break;
		}

#if 0
		/* validate data reached card RAM correctly. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			u32 val;
			tg3_read_mem(tp, 0x2100 + (i*4), &val);
			if (le32_to_cpu(val) != p[i]) {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on device! "
					"(%d != %d)\n", __func__, val, i);
				/* ret = -ENODEV here? */
			}
			p[i] = 0;
		}
#endif
		/* Now read it back. */
		ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
		if (ret) {
			dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
				"err = %d\n", __func__, ret);
			break;
		}

		/* Verify it. */
		for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
			if (p[i] == i)
				continue;

			if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
			    DMA_RWCTRL_WRITE_BNDRY_16) {
				/* First failure: switch to the 16-byte
				 * boundary workaround and retry the loop.
				 */
				tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
				tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
				tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
				break;
			} else {
				dev_err(&tp->pdev->dev,
					"%s: Buffer corrupted on read back! "
					"(%d != %d)\n", __func__, p[i], i);
				ret = -ENODEV;
				goto out;
			}
		}

		if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
			/* Success. */
			ret = 0;
			break;
		}
	}
	if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
	    DMA_RWCTRL_WRITE_BNDRY_16) {
		/* DMA test passed without adjusting DMA boundary,
		 * now look for chipsets that are known to expose the
		 * DMA bug without failing the test.
		 */
		if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
			tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
			tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
		} else {
			/* Safe to use the calculated DMA boundary. */
			tp->dma_rwctrl = saved_dma_rwctrl;
		}

		tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
	}

out:
	dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
out_nofree:
	return ret;
}
14721
14722 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
14723 {
14724         if (tg3_flag(tp, 57765_PLUS)) {
14725                 tp->bufmgr_config.mbuf_read_dma_low_water =
14726                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14727                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14728                         DEFAULT_MB_MACRX_LOW_WATER_57765;
14729                 tp->bufmgr_config.mbuf_high_water =
14730                         DEFAULT_MB_HIGH_WATER_57765;
14731
14732                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14733                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14734                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14735                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
14736                 tp->bufmgr_config.mbuf_high_water_jumbo =
14737                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
14738         } else if (tg3_flag(tp, 5705_PLUS)) {
14739                 tp->bufmgr_config.mbuf_read_dma_low_water =
14740                         DEFAULT_MB_RDMA_LOW_WATER_5705;
14741                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14742                         DEFAULT_MB_MACRX_LOW_WATER_5705;
14743                 tp->bufmgr_config.mbuf_high_water =
14744                         DEFAULT_MB_HIGH_WATER_5705;
14745                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14746                         tp->bufmgr_config.mbuf_mac_rx_low_water =
14747                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
14748                         tp->bufmgr_config.mbuf_high_water =
14749                                 DEFAULT_MB_HIGH_WATER_5906;
14750                 }
14751
14752                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14753                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
14754                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14755                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
14756                 tp->bufmgr_config.mbuf_high_water_jumbo =
14757                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
14758         } else {
14759                 tp->bufmgr_config.mbuf_read_dma_low_water =
14760                         DEFAULT_MB_RDMA_LOW_WATER;
14761                 tp->bufmgr_config.mbuf_mac_rx_low_water =
14762                         DEFAULT_MB_MACRX_LOW_WATER;
14763                 tp->bufmgr_config.mbuf_high_water =
14764                         DEFAULT_MB_HIGH_WATER;
14765
14766                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
14767                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
14768                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
14769                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
14770                 tp->bufmgr_config.mbuf_high_water_jumbo =
14771                         DEFAULT_MB_HIGH_WATER_JUMBO;
14772         }
14773
14774         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
14775         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
14776 }
14777
14778 static char * __devinit tg3_phy_string(struct tg3 *tp)
14779 {
14780         switch (tp->phy_id & TG3_PHY_ID_MASK) {
14781         case TG3_PHY_ID_BCM5400:        return "5400";
14782         case TG3_PHY_ID_BCM5401:        return "5401";
14783         case TG3_PHY_ID_BCM5411:        return "5411";
14784         case TG3_PHY_ID_BCM5701:        return "5701";
14785         case TG3_PHY_ID_BCM5703:        return "5703";
14786         case TG3_PHY_ID_BCM5704:        return "5704";
14787         case TG3_PHY_ID_BCM5705:        return "5705";
14788         case TG3_PHY_ID_BCM5750:        return "5750";
14789         case TG3_PHY_ID_BCM5752:        return "5752";
14790         case TG3_PHY_ID_BCM5714:        return "5714";
14791         case TG3_PHY_ID_BCM5780:        return "5780";
14792         case TG3_PHY_ID_BCM5755:        return "5755";
14793         case TG3_PHY_ID_BCM5787:        return "5787";
14794         case TG3_PHY_ID_BCM5784:        return "5784";
14795         case TG3_PHY_ID_BCM5756:        return "5722/5756";
14796         case TG3_PHY_ID_BCM5906:        return "5906";
14797         case TG3_PHY_ID_BCM5761:        return "5761";
14798         case TG3_PHY_ID_BCM5718C:       return "5718C";
14799         case TG3_PHY_ID_BCM5718S:       return "5718S";
14800         case TG3_PHY_ID_BCM57765:       return "57765";
14801         case TG3_PHY_ID_BCM5719C:       return "5719C";
14802         case TG3_PHY_ID_BCM5720C:       return "5720C";
14803         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
14804         case 0:                 return "serdes";
14805         default:                return "unknown";
14806         }
14807 }
14808
14809 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
14810 {
14811         if (tg3_flag(tp, PCI_EXPRESS)) {
14812                 strcpy(str, "PCI Express");
14813                 return str;
14814         } else if (tg3_flag(tp, PCIX_MODE)) {
14815                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
14816
14817                 strcpy(str, "PCIX:");
14818
14819                 if ((clock_ctrl == 7) ||
14820                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
14821                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
14822                         strcat(str, "133MHz");
14823                 else if (clock_ctrl == 0)
14824                         strcat(str, "33MHz");
14825                 else if (clock_ctrl == 2)
14826                         strcat(str, "50MHz");
14827                 else if (clock_ctrl == 4)
14828                         strcat(str, "66MHz");
14829                 else if (clock_ctrl == 6)
14830                         strcat(str, "100MHz");
14831         } else {
14832                 strcpy(str, "PCI:");
14833                 if (tg3_flag(tp, PCI_HIGH_SPEED))
14834                         strcat(str, "66MHz");
14835                 else
14836                         strcat(str, "33MHz");
14837         }
14838         if (tg3_flag(tp, PCI_32BIT))
14839                 strcat(str, ":32-bit");
14840         else
14841                 strcat(str, ":64-bit");
14842         return str;
14843 }
14844
/* Locate the other PCI function of a dual-port device (e.g. 5704).
 *
 * Scans all eight functions of tp->pdev's slot for a device other than
 * tp->pdev itself.  If none is found (single-port configuration),
 * tp->pdev itself is returned.  The returned pointer carries no extra
 * reference; see the comment below.
 *
 * NOTE(review): if the loop falls through with peer == tp->pdev (our
 * own function was the last slot probed), its pci_get_slot() reference
 * was already dropped inside the loop, so the final pci_dev_put()
 * would drop one reference too many -- presumably unreachable in
 * practice; verify before changing.
 */
static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
{
	struct pci_dev *peer;
	unsigned int func, devnr = tp->pdev->devfn & ~7;

	for (func = 0; func < 8; func++) {
		peer = pci_get_slot(tp->pdev->bus, devnr | func);
		if (peer && peer != tp->pdev)
			break;
		/* pci_dev_put(NULL) is a no-op, so this is safe for
		 * empty slots too.
		 */
		pci_dev_put(peer);
	}
	/* 5704 can be configured in single-port mode, set peer to
	 * tp->pdev in that case.
	 */
	if (!peer) {
		peer = tp->pdev;
		return peer;
	}

	/*
	 * We don't need to keep the refcount elevated; there's no way
	 * to remove one half of this device without removing the other
	 */
	pci_dev_put(peer);

	return peer;
}
14872
14873 static void __devinit tg3_init_coal(struct tg3 *tp)
14874 {
14875         struct ethtool_coalesce *ec = &tp->coal;
14876
14877         memset(ec, 0, sizeof(*ec));
14878         ec->cmd = ETHTOOL_GCOALESCE;
14879         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
14880         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
14881         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
14882         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
14883         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
14884         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
14885         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
14886         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
14887         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
14888
14889         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
14890                                  HOSTCC_MODE_CLRTICK_TXBD)) {
14891                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
14892                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
14893                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
14894                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
14895         }
14896
14897         if (tg3_flag(tp, 5705_PLUS)) {
14898                 ec->rx_coalesce_usecs_irq = 0;
14899                 ec->tx_coalesce_usecs_irq = 0;
14900                 ec->stats_block_coalesce_usecs = 0;
14901         }
14902 }
14903
/* Net device callback table, installed as dev->netdev_ops in
 * tg3_init_one().  The netpoll callback is only built when
 * CONFIG_NET_POLL_CONTROLLER is enabled.
 */
static const struct net_device_ops tg3_netdev_ops = {
	.ndo_open		= tg3_open,
	.ndo_stop		= tg3_close,
	.ndo_start_xmit		= tg3_start_xmit,
	.ndo_get_stats64	= tg3_get_stats64,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= tg3_set_rx_mode,
	.ndo_set_mac_address	= tg3_set_mac_addr,
	.ndo_do_ioctl		= tg3_ioctl,
	.ndo_tx_timeout		= tg3_tx_timeout,
	.ndo_change_mtu		= tg3_change_mtu,
	.ndo_fix_features	= tg3_fix_features,
	.ndo_set_features	= tg3_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tg3_poll_controller,
#endif
};
14921
14922 static int __devinit tg3_init_one(struct pci_dev *pdev,
14923                                   const struct pci_device_id *ent)
14924 {
14925         struct net_device *dev;
14926         struct tg3 *tp;
14927         int i, err, pm_cap;
14928         u32 sndmbx, rcvmbx, intmbx;
14929         char str[40];
14930         u64 dma_mask, persist_dma_mask;
14931         u32 features = 0;
14932
14933         printk_once(KERN_INFO "%s\n", version);
14934
14935         err = pci_enable_device(pdev);
14936         if (err) {
14937                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
14938                 return err;
14939         }
14940
14941         err = pci_request_regions(pdev, DRV_MODULE_NAME);
14942         if (err) {
14943                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
14944                 goto err_out_disable_pdev;
14945         }
14946
14947         pci_set_master(pdev);
14948
14949         /* Find power-management capability. */
14950         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
14951         if (pm_cap == 0) {
14952                 dev_err(&pdev->dev,
14953                         "Cannot find Power Management capability, aborting\n");
14954                 err = -EIO;
14955                 goto err_out_free_res;
14956         }
14957
14958         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
14959         if (!dev) {
14960                 dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
14961                 err = -ENOMEM;
14962                 goto err_out_free_res;
14963         }
14964
14965         SET_NETDEV_DEV(dev, &pdev->dev);
14966
14967         tp = netdev_priv(dev);
14968         tp->pdev = pdev;
14969         tp->dev = dev;
14970         tp->pm_cap = pm_cap;
14971         tp->rx_mode = TG3_DEF_RX_MODE;
14972         tp->tx_mode = TG3_DEF_TX_MODE;
14973
14974         if (tg3_debug > 0)
14975                 tp->msg_enable = tg3_debug;
14976         else
14977                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
14978
14979         /* The word/byte swap controls here control register access byte
14980          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
14981          * setting below.
14982          */
14983         tp->misc_host_ctrl =
14984                 MISC_HOST_CTRL_MASK_PCI_INT |
14985                 MISC_HOST_CTRL_WORD_SWAP |
14986                 MISC_HOST_CTRL_INDIR_ACCESS |
14987                 MISC_HOST_CTRL_PCISTATE_RW;
14988
14989         /* The NONFRM (non-frame) byte/word swap controls take effect
14990          * on descriptor entries, anything which isn't packet data.
14991          *
14992          * The StrongARM chips on the board (one for tx, one for rx)
14993          * are running in big-endian mode.
14994          */
14995         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
14996                         GRC_MODE_WSWAP_NONFRM_DATA);
14997 #ifdef __BIG_ENDIAN
14998         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
14999 #endif
15000         spin_lock_init(&tp->lock);
15001         spin_lock_init(&tp->indirect_lock);
15002         INIT_WORK(&tp->reset_task, tg3_reset_task);
15003
15004         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15005         if (!tp->regs) {
15006                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15007                 err = -ENOMEM;
15008                 goto err_out_free_dev;
15009         }
15010
15011         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15012         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15013
15014         dev->ethtool_ops = &tg3_ethtool_ops;
15015         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15016         dev->netdev_ops = &tg3_netdev_ops;
15017         dev->irq = pdev->irq;
15018
15019         err = tg3_get_invariants(tp);
15020         if (err) {
15021                 dev_err(&pdev->dev,
15022                         "Problem fetching invariants of chip, aborting\n");
15023                 goto err_out_iounmap;
15024         }
15025
15026         /* The EPB bridge inside 5714, 5715, and 5780 and any
15027          * device behind the EPB cannot support DMA addresses > 40-bit.
15028          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15029          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15030          * do DMA address check in tg3_start_xmit().
15031          */
15032         if (tg3_flag(tp, IS_5788))
15033                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15034         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15035                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15036 #ifdef CONFIG_HIGHMEM
15037                 dma_mask = DMA_BIT_MASK(64);
15038 #endif
15039         } else
15040                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15041
15042         /* Configure DMA attributes. */
15043         if (dma_mask > DMA_BIT_MASK(32)) {
15044                 err = pci_set_dma_mask(pdev, dma_mask);
15045                 if (!err) {
15046                         features |= NETIF_F_HIGHDMA;
15047                         err = pci_set_consistent_dma_mask(pdev,
15048                                                           persist_dma_mask);
15049                         if (err < 0) {
15050                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15051                                         "DMA for consistent allocations\n");
15052                                 goto err_out_iounmap;
15053                         }
15054                 }
15055         }
15056         if (err || dma_mask == DMA_BIT_MASK(32)) {
15057                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15058                 if (err) {
15059                         dev_err(&pdev->dev,
15060                                 "No usable DMA configuration, aborting\n");
15061                         goto err_out_iounmap;
15062                 }
15063         }
15064
15065         tg3_init_bufmgr_config(tp);
15066
15067         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15068
15069         /* 5700 B0 chips do not support checksumming correctly due
15070          * to hardware bugs.
15071          */
15072         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15073                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15074
15075                 if (tg3_flag(tp, 5755_PLUS))
15076                         features |= NETIF_F_IPV6_CSUM;
15077         }
15078
15079         /* TSO is on by default on chips that support hardware TSO.
15080          * Firmware TSO on older chips gives lower performance, so it
15081          * is off by default, but can be enabled using ethtool.
15082          */
15083         if ((tg3_flag(tp, HW_TSO_1) ||
15084              tg3_flag(tp, HW_TSO_2) ||
15085              tg3_flag(tp, HW_TSO_3)) &&
15086             (features & NETIF_F_IP_CSUM))
15087                 features |= NETIF_F_TSO;
15088         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15089                 if (features & NETIF_F_IPV6_CSUM)
15090                         features |= NETIF_F_TSO6;
15091                 if (tg3_flag(tp, HW_TSO_3) ||
15092                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15093                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15094                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15095                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15096                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15097                         features |= NETIF_F_TSO_ECN;
15098         }
15099
15100         dev->features |= features;
15101         dev->vlan_features |= features;
15102
15103         /*
15104          * Add loopback capability only for a subset of devices that support
15105          * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
15106          * loopback for the remaining devices.
15107          */
15108         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15109             !tg3_flag(tp, CPMU_PRESENT))
15110                 /* Add the loopback capability */
15111                 features |= NETIF_F_LOOPBACK;
15112
15113         dev->hw_features |= features;
15114
15115         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15116             !tg3_flag(tp, TSO_CAPABLE) &&
15117             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15118                 tg3_flag_set(tp, MAX_RXPEND_64);
15119                 tp->rx_pending = 63;
15120         }
15121
15122         err = tg3_get_device_address(tp);
15123         if (err) {
15124                 dev_err(&pdev->dev,
15125                         "Could not obtain valid ethernet address, aborting\n");
15126                 goto err_out_iounmap;
15127         }
15128
15129         if (tg3_flag(tp, ENABLE_APE)) {
15130                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15131                 if (!tp->aperegs) {
15132                         dev_err(&pdev->dev,
15133                                 "Cannot map APE registers, aborting\n");
15134                         err = -ENOMEM;
15135                         goto err_out_iounmap;
15136                 }
15137
15138                 tg3_ape_lock_init(tp);
15139
15140                 if (tg3_flag(tp, ENABLE_ASF))
15141                         tg3_read_dash_ver(tp);
15142         }
15143
15144         /*
15145          * Reset chip in case UNDI or EFI driver did not shutdown
15146          * DMA self test will enable WDMAC and we'll see (spurious)
15147          * pending DMA on the PCI bus at that point.
15148          */
15149         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15150             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15151                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15152                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15153         }
15154
15155         err = tg3_test_dma(tp);
15156         if (err) {
15157                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15158                 goto err_out_apeunmap;
15159         }
15160
15161         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15162         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15163         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15164         for (i = 0; i < tp->irq_max; i++) {
15165                 struct tg3_napi *tnapi = &tp->napi[i];
15166
15167                 tnapi->tp = tp;
15168                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15169
15170                 tnapi->int_mbox = intmbx;
15171                 if (i < 4)
15172                         intmbx += 0x8;
15173                 else
15174                         intmbx += 0x4;
15175
15176                 tnapi->consmbox = rcvmbx;
15177                 tnapi->prodmbox = sndmbx;
15178
15179                 if (i)
15180                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15181                 else
15182                         tnapi->coal_now = HOSTCC_MODE_NOW;
15183
15184                 if (!tg3_flag(tp, SUPPORT_MSIX))
15185                         break;
15186
15187                 /*
15188                  * If we support MSIX, we'll be using RSS.  If we're using
15189                  * RSS, the first vector only handles link interrupts and the
15190                  * remaining vectors handle rx and tx interrupts.  Reuse the
15191                  * mailbox values for the next iteration.  The values we setup
15192                  * above are still useful for the single vectored mode.
15193                  */
15194                 if (!i)
15195                         continue;
15196
15197                 rcvmbx += 0x8;
15198
15199                 if (sndmbx & 0x4)
15200                         sndmbx -= 0x4;
15201                 else
15202                         sndmbx += 0xc;
15203         }
15204
15205         tg3_init_coal(tp);
15206
15207         pci_set_drvdata(pdev, dev);
15208
15209         err = register_netdev(dev);
15210         if (err) {
15211                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15212                 goto err_out_apeunmap;
15213         }
15214
15215         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15216                     tp->board_part_number,
15217                     tp->pci_chip_rev_id,
15218                     tg3_bus_string(tp, str),
15219                     dev->dev_addr);
15220
15221         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15222                 struct phy_device *phydev;
15223                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15224                 netdev_info(dev,
15225                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15226                             phydev->drv->name, dev_name(&phydev->dev));
15227         } else {
15228                 char *ethtype;
15229
15230                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15231                         ethtype = "10/100Base-TX";
15232                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15233                         ethtype = "1000Base-SX";
15234                 else
15235                         ethtype = "10/100/1000Base-T";
15236
15237                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15238                             "(WireSpeed[%d], EEE[%d])\n",
15239                             tg3_phy_string(tp), ethtype,
15240                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15241                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15242         }
15243
15244         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15245                     (dev->features & NETIF_F_RXCSUM) != 0,
15246                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15247                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15248                     tg3_flag(tp, ENABLE_ASF) != 0,
15249                     tg3_flag(tp, TSO_CAPABLE) != 0);
15250         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15251                     tp->dma_rwctrl,
15252                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15253                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15254
15255         pci_save_state(pdev);
15256
15257         return 0;
15258
15259 err_out_apeunmap:
15260         if (tp->aperegs) {
15261                 iounmap(tp->aperegs);
15262                 tp->aperegs = NULL;
15263         }
15264
15265 err_out_iounmap:
15266         if (tp->regs) {
15267                 iounmap(tp->regs);
15268                 tp->regs = NULL;
15269         }
15270
15271 err_out_free_dev:
15272         free_netdev(dev);
15273
15274 err_out_free_res:
15275         pci_release_regions(pdev);
15276
15277 err_out_disable_pdev:
15278         pci_disable_device(pdev);
15279         pci_set_drvdata(pdev, NULL);
15280         return err;
15281 }
15282
15283 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15284 {
15285         struct net_device *dev = pci_get_drvdata(pdev);
15286
15287         if (dev) {
15288                 struct tg3 *tp = netdev_priv(dev);
15289
15290                 if (tp->fw)
15291                         release_firmware(tp->fw);
15292
15293                 cancel_work_sync(&tp->reset_task);
15294
15295                 if (tg3_flag(tp, USE_PHYLIB)) {
15296                         tg3_phy_fini(tp);
15297                         tg3_mdio_fini(tp);
15298                 }
15299
15300                 unregister_netdev(dev);
15301                 if (tp->aperegs) {
15302                         iounmap(tp->aperegs);
15303                         tp->aperegs = NULL;
15304                 }
15305                 if (tp->regs) {
15306                         iounmap(tp->regs);
15307                         tp->regs = NULL;
15308                 }
15309                 free_netdev(dev);
15310                 pci_release_regions(pdev);
15311                 pci_disable_device(pdev);
15312                 pci_set_drvdata(pdev, NULL);
15313         }
15314 }
15315
15316 #ifdef CONFIG_PM_SLEEP
/* Device PM suspend callback: quiesce the NIC and prepare it for a
 * low-power state.  If power-down preparation fails, the hardware is
 * restarted so the interface stays usable and the error is returned.
 */
static int tg3_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* Nothing to quiesce if the interface was never brought up. */
	if (!netif_running(dev))
		return 0;

	/* Let any queued reset task finish before we stop the device. */
	flush_work_sync(&tp->reset_task);
	tg3_phy_stop(tp);
	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);

	tg3_full_lock(tp, 1);
	tg3_disable_ints(tp);
	tg3_full_unlock(tp);

	netif_device_detach(dev);

	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
	tg3_flag_clear(tp, INIT_COMPLETE);
	tg3_full_unlock(tp);

	err = tg3_power_down_prepare(tp);
	if (err) {
		int err2;

		/* Power-down preparation failed: bring the hardware back
		 * up so the interface keeps working, then propagate the
		 * original error to the PM core.
		 */
		tg3_full_lock(tp, 0);

		tg3_flag_set(tp, INIT_COMPLETE);
		err2 = tg3_restart_hw(tp, 1);
		if (err2)
			goto out;

		tp->timer.expires = jiffies + tp->timer_offset;
		add_timer(&tp->timer);

		netif_device_attach(dev);
		tg3_netif_start(tp);

out:
		tg3_full_unlock(tp);

		/* Restart the PHY only if the hardware restart succeeded. */
		if (!err2)
			tg3_phy_start(tp);
	}

	return err;
}
15370
/* Device PM resume callback: reattach the interface and reinitialize
 * the hardware after system resume.  Returns 0 on success or the
 * tg3_restart_hw() error code.
 */
static int tg3_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct net_device *dev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(dev);
	int err;

	/* The interface was down across suspend; nothing to restore. */
	if (!netif_running(dev))
		return 0;

	netif_device_attach(dev);

	tg3_full_lock(tp, 0);

	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	if (err)
		goto out;

	/* Re-arm the periodic driver timer stopped in tg3_suspend(). */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

out:
	tg3_full_unlock(tp);

	/* PHY restart must happen outside the full lock. */
	if (!err)
		tg3_phy_start(tp);

	return err;
}
15403
/* Wire the suspend/resume callbacks into a dev_pm_ops structure; when
 * CONFIG_PM_SLEEP is disabled the driver advertises no PM operations.
 */
static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
#define TG3_PM_OPS (&tg3_pm_ops)

#else

#define TG3_PM_OPS NULL

#endif /* CONFIG_PM_SLEEP */
15412
15413 /**
15414  * tg3_io_error_detected - called when PCI error is detected
15415  * @pdev: Pointer to PCI device
15416  * @state: The current pci connection state
15417  *
15418  * This function is called after a PCI bus error affecting
15419  * this device has been detected.
15420  */
static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;

	netdev_info(netdev, "PCI I/O error detected\n");

	rtnl_lock();

	/* If the interface is down there is no driver state to quiesce. */
	if (!netif_running(netdev))
		goto done;

	tg3_phy_stop(tp);

	tg3_netif_stop(tp);

	del_timer_sync(&tp->timer);
	tg3_flag_clear(tp, RESTART_TIMER);

	/* Want to make sure that the reset task doesn't run */
	cancel_work_sync(&tp->reset_task);
	tg3_flag_clear(tp, TX_RECOVERY_PENDING);
	/* Cleared again here, presumably in case the reset task set it
	 * before being cancelled -- NOTE(review): confirm intent.
	 */
	tg3_flag_clear(tp, RESTART_TIMER);

	netif_device_detach(netdev);

	/* Clean up software state, even if MMIO is blocked */
	tg3_full_lock(tp, 0);
	tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
	tg3_full_unlock(tp);

done:
	/* A permanent failure means the device is gone for good; any
	 * other channel state asks the core for a slot reset.
	 */
	if (state == pci_channel_io_perm_failure)
		err = PCI_ERS_RESULT_DISCONNECT;
	else
		pci_disable_device(pdev);

	rtnl_unlock();

	return err;
}
15464
15465 /**
15466  * tg3_io_slot_reset - called after the pci bus has been reset.
15467  * @pdev: Pointer to PCI device
15468  *
15469  * Restart the card from scratch, as if from a cold-boot.
 * At this point, the card has experienced a hard reset,
15471  * followed by fixups by BIOS, and has its config space
15472  * set up identically to what it was at cold boot.
15473  */
static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
	int err;

	rtnl_lock();

	/* Re-enable the device; if this fails the slot is unrecoverable
	 * and rc stays PCI_ERS_RESULT_DISCONNECT.
	 */
	if (pci_enable_device(pdev)) {
		netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
		goto done;
	}

	pci_set_master(pdev);
	/* Restore the config space saved earlier, then re-save it so a
	 * subsequent error recovery starts from this known-good state.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	/* Interface is down: nothing further to rebuild. */
	if (!netif_running(netdev)) {
		rc = PCI_ERS_RESULT_RECOVERED;
		goto done;
	}

	err = tg3_power_up(tp);
	if (err) {
		netdev_err(netdev, "Failed to restore register access.\n");
		goto done;
	}

	rc = PCI_ERS_RESULT_RECOVERED;

done:
	rtnl_unlock();

	return rc;
}
15510
15511 /**
15512  * tg3_io_resume - called when traffic can start flowing again.
15513  * @pdev: Pointer to PCI device
15514  *
15515  * This callback is called when the error recovery driver tells
 * us that it's OK to resume normal operation.
15517  */
static void tg3_io_resume(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct tg3 *tp = netdev_priv(netdev);
	int err;

	rtnl_lock();

	/* Nothing to restart if the interface is down. */
	if (!netif_running(netdev))
		goto done;

	tg3_full_lock(tp, 0);
	tg3_flag_set(tp, INIT_COMPLETE);
	err = tg3_restart_hw(tp, 1);
	tg3_full_unlock(tp);
	if (err) {
		netdev_err(netdev, "Cannot restart hardware after reset.\n");
		goto done;
	}

	netif_device_attach(netdev);

	/* Re-arm the periodic driver timer stopped during error handling. */
	tp->timer.expires = jiffies + tp->timer_offset;
	add_timer(&tp->timer);

	tg3_netif_start(tp);

	/* PHY restart must happen outside the full lock. */
	tg3_phy_start(tp);

done:
	rtnl_unlock();
}
15550
/* PCI error recovery (AER) callbacks for this driver. */
static struct pci_error_handlers tg3_err_handler = {
	.error_detected	= tg3_io_error_detected,
	.slot_reset	= tg3_io_slot_reset,
	.resume		= tg3_io_resume
};
15556
/* PCI driver descriptor: probe/remove entry points, supported device
 * ID table, AER error handlers and optional PM callbacks.
 */
static struct pci_driver tg3_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= tg3_pci_tbl,
	.probe		= tg3_init_one,
	.remove		= __devexit_p(tg3_remove_one),
	.err_handler	= &tg3_err_handler,
	.driver.pm	= TG3_PM_OPS,
};
15565
15566 static int __init tg3_init(void)
15567 {
15568         return pci_register_driver(&tg3_driver);
15569 }
15570
/* Module exit point: unregister the driver; the PCI core then invokes
 * tg3_remove_one() for every bound device.
 */
static void __exit tg3_cleanup(void)
{
	pci_unregister_driver(&tg3_driver);
}
15575
/* Hook module load/unload to the driver's init and cleanup routines. */
module_init(tg3_init);
module_exit(tg3_cleanup);