/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2013 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>
#include <linux/ssb/ssb_driver_gige.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#include <uapi/linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     132
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "May 21, 2013"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
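/* Worked example (illustrative, not part of the original source): because
 * TG3_TX_RING_SIZE is a power of two, the AND-mask above behaves exactly
 * like a modulo, e.g. NEXT_TX(511) == (511 + 1) & 511 == 0, the same
 * result as (511 + 1) % 512 but compiled down to a single AND.
 */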

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
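/* Sketch of how the threshold is consulted (the real check lives in the
 * rx path later in this file): frames no longer than
 * TG3_RX_COPY_THRESH(tp) are memcpy'd into a small freshly allocated
 * skb so the DMA-mapped rx buffer can be recycled in place, roughly:
 *
 *      if (len > TG3_RX_COPY_THRESH(tp))
 *              ... unmap the buffer and hand it up the stack ...
 *      else
 *              ... copy into a new skb and recycle the buffer ...
 */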

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096
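/* Worked example (illustrative): with the default tx_pending of
 * TG3_DEF_TX_RING_PENDING (511), the queue is woken once at least
 * 511 / 4 = 127 descriptors have been freed.
 */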

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG357766       "tigon/tg357766.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");

#define TG3_DRV_DATA_FLAG_10_100_ONLY   0x0001
#define TG3_DRV_DATA_FLAG_5705_10_100   0x0002

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY |
                        TG3_DRV_DATA_FLAG_5705_10_100},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5787M,
                        PCI_VENDOR_ID_LENOVO,
                        TG3PCI_SUBDEVICE_ID_LENOVO_5787M),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_A),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE_SUB(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780,
                        PCI_VENDOR_ID_AI, TG3PCI_SUBDEVICE_ID_ACER_57780_B),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717_C)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795),
         .driver_data = TG3_DRV_DATA_FLAG_10_100_ONLY},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57766)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5762)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5725)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5727)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)
#define TG3_NVRAM_TEST          0
#define TG3_LINK_TEST           1
#define TG3_REGISTER_TEST       2
#define TG3_MEMORY_TEST         3
#define TG3_MAC_LOOPB_TEST      4
#define TG3_PHY_LOOPB_TEST      5
#define TG3_EXT_LOOPB_TEST      6
#define TG3_INTERRUPT_TEST      7


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        [TG3_NVRAM_TEST]        = { "nvram test        (online) " },
        [TG3_LINK_TEST]         = { "link test         (online) " },
        [TG3_REGISTER_TEST]     = { "register test     (offline)" },
        [TG3_MEMORY_TEST]       = { "memory test       (offline)" },
        [TG3_MAC_LOOPB_TEST]    = { "mac loopback test (offline)" },
        [TG3_PHY_LOOPB_TEST]    = { "phy loopback test (offline)" },
        [TG3_EXT_LOOPB_TEST]    = { "ext loopback test (offline)" },
        [TG3_INTERRUPT_TEST]    = { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (tg3_flag(tp, FLUSH_POSTED_WRITES) ||
            (!tg3_flag(tp, MBOX_WRITE_REORDER) &&
             !tg3_flag(tp, ICH_WORKAROUND)))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER) ||
            tg3_flag(tp, FLUSH_POSTED_WRITES))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
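/* Usage sketch (illustrative): a plain posted write is tw32(reg, val); a
 * write that must be flushed before continuing uses tw32_f(reg, val);
 * and a write to a register that needs settling time, such as
 * TG3PCI_CLOCK_CTRL, uses the waiting form, e.g.
 *
 *      tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
 *
 * which flushes the write and then delays 40 usec (see the real call
 * sites in tg3_switch_clocks() below).
 */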

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (tg3_asic_rev(tp) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }
}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return 0;
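                /* Fall through: on non-5761 parts the GPIO lock is
                 * requested with the same per-function bit as the GRC
                 * and MEM locks below.
                 */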
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_REQ_DRIVER;
                break;
        default:
                return -EINVAL;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                if (pci_channel_offline(tp->pdev))
                        break;

                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (tg3_asic_rev(tp) == ASIC_REV_5761)
                        return;
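                /* Fall through: on non-5761 parts the GPIO lock is
                 * released with the same per-function grant bit as the
                 * GRC and MEM locks below.
                 */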
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        case TG3_APE_LOCK_PHY0:
        case TG3_APE_LOCK_PHY1:
        case TG3_APE_LOCK_PHY2:
        case TG3_APE_LOCK_PHY3:
                bit = APE_LOCK_GRANT_DRIVER;
                break;
        default:
                return;
        }

        if (tg3_asic_rev(tp) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static int tg3_ape_event_lock(struct tg3 *tp, u32 timeout_us)
{
        u32 apedata;

        while (timeout_us) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return -EBUSY;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);
                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                udelay(10);
                timeout_us -= (timeout_us > 10) ? 10 : timeout_us;
        }

        return timeout_us ? 0 : -EBUSY;
}

static int tg3_ape_wait_for_event(struct tg3 *tp, u32 timeout_us)
{
        u32 i, apedata;

        for (i = 0; i < timeout_us / 10; i++) {
                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(10);
        }

        return i == timeout_us / 10;
}

static int tg3_ape_scratchpad_read(struct tg3 *tp, u32 *data, u32 base_off,
                                   u32 len)
{
        int err;
        u32 i, bufoff, msgoff, maxlen, apedata;

        if (!tg3_flag(tp, APE_HAS_NCSI))
                return 0;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -ENODEV;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        bufoff = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_OFF) +
                 TG3_APE_SHMEM_BASE;
        msgoff = bufoff + 2 * sizeof(u32);
        maxlen = tg3_ape_read32(tp, TG3_APE_SEG_MSG_BUF_LEN);

        while (len) {
                u32 length;

                /* Cap xfer sizes to scratchpad limits. */
                length = (len > maxlen) ? maxlen : len;
                len -= length;

                apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
                if (!(apedata & APE_FW_STATUS_READY))
                        return -EAGAIN;

                /* Wait for up to 1 msec for APE to service previous event. */
                err = tg3_ape_event_lock(tp, 1000);
                if (err)
                        return err;

                apedata = APE_EVENT_STATUS_DRIVER_EVNT |
                          APE_EVENT_STATUS_SCRTCHPD_READ |
                          APE_EVENT_STATUS_EVENT_PENDING;
                tg3_ape_write32(tp, TG3_APE_EVENT_STATUS, apedata);

                tg3_ape_write32(tp, bufoff, base_off);
                tg3_ape_write32(tp, bufoff + sizeof(u32), length);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

                base_off += length;

                if (tg3_ape_wait_for_event(tp, 30000))
                        return -EAGAIN;

                for (i = 0; length; i += 4, length -= 4) {
                        u32 val = tg3_ape_read32(tp, msgoff + i);
                        memcpy(data, &val, sizeof(u32));
                        data++;
                }
        }

        return 0;
}

static int tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int err;
        u32 apedata;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return -EAGAIN;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return -EAGAIN;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        err = tg3_ape_event_lock(tp, 1000);
        if (err)
                return err;

        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                        event | APE_EVENT_STATUS_EVENT_PENDING);

        tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);
        tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);

        return 0;
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }

        /* check for TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons)
                work_exists = 1;

        /* check for RX work to do */
        if (tnapi->rx_rcb_prod_idx &&
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000
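/* Worked bound (illustrative): each MDIO poll iteration below delays
 * 10 usec, so PHY_BUSY_LOOPS caps a single PHY read or write at roughly
 * 5000 * 10 usec = 50 msec before the helper gives up with -EBUSY.
 */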

static int __tg3_readphy(struct tg3 *tp, unsigned int phy_addr, int reg,
                         u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        *val = 0x0;

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        return __tg3_readphy(tp, tp->phy_addr, reg, val);
}

static int __tg3_writephy(struct tg3 *tp, unsigned int phy_addr, int reg,
                          u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        tg3_ape_lock(tp, tp->phy_ape_lock);

        frame_val  = ((phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        tg3_ape_unlock(tp, tp->phy_ape_lock);

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        return __tg3_writephy(tp, tp->phy_addr, reg, val);
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

static int tg3_phy_toggle_auxctl_smdsp(struct tg3 *tp, bool enable)
{
        u32 val;
        int err;

        err = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);

        if (err)
                return err;
        if (enable)
                val |= MII_TG3_AUXCTL_ACTL_SMDSP_ENA;
        else
                val &= ~MII_TG3_AUXCTL_ACTL_SMDSP_ENA;

        err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
                                   val | MII_TG3_AUXCTL_ACTL_TX_6DB);
1328
1329         return err;
1330 }
1331
1332 static int tg3_bmcr_reset(struct tg3 *tp)
1333 {
1334         u32 phy_control;
1335         int limit, err;
1336
1337         /* OK, reset it, and poll the BMCR_RESET bit until it
1338          * clears or we time out.
1339          */
1340         phy_control = BMCR_RESET;
1341         err = tg3_writephy(tp, MII_BMCR, phy_control);
1342         if (err != 0)
1343                 return -EBUSY;
1344
1345         limit = 5000;
1346         while (limit--) {
1347                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
1348                 if (err != 0)
1349                         return -EBUSY;
1350
1351                 if ((phy_control & BMCR_RESET) == 0) {
1352                         udelay(40);
1353                         break;
1354                 }
1355                 udelay(10);
1356         }
1357         if (limit < 0)
1358                 return -EBUSY;
1359
1360         return 0;
1361 }
1362
1363 static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
1364 {
1365         struct tg3 *tp = bp->priv;
1366         u32 val;
1367
1368         spin_lock_bh(&tp->lock);
1369
1370         if (tg3_readphy(tp, reg, &val))
1371                 val = -EIO;
1372
1373         spin_unlock_bh(&tp->lock);
1374
1375         return val;
1376 }
1377
1378 static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
1379 {
1380         struct tg3 *tp = bp->priv;
1381         u32 ret = 0;
1382
1383         spin_lock_bh(&tp->lock);
1384
1385         if (tg3_writephy(tp, reg, val))
1386                 ret = -EIO;
1387
1388         spin_unlock_bh(&tp->lock);
1389
1390         return ret;
1391 }
1392
1393 static int tg3_mdio_reset(struct mii_bus *bp)
1394 {
1395         return 0;
1396 }
1397
1398 static void tg3_mdio_config_5785(struct tg3 *tp)
1399 {
1400         u32 val;
1401         struct phy_device *phydev;
1402
1403         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1404         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1405         case PHY_ID_BCM50610:
1406         case PHY_ID_BCM50610M:
1407                 val = MAC_PHYCFG2_50610_LED_MODES;
1408                 break;
1409         case PHY_ID_BCMAC131:
1410                 val = MAC_PHYCFG2_AC131_LED_MODES;
1411                 break;
1412         case PHY_ID_RTL8211C:
1413                 val = MAC_PHYCFG2_RTL8211C_LED_MODES;
1414                 break;
1415         case PHY_ID_RTL8201E:
1416                 val = MAC_PHYCFG2_RTL8201E_LED_MODES;
1417                 break;
1418         default:
1419                 return;
1420         }
1421
1422         if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
1423                 tw32(MAC_PHYCFG2, val);
1424
1425                 val = tr32(MAC_PHYCFG1);
1426                 val &= ~(MAC_PHYCFG1_RGMII_INT |
1427                          MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
1428                 val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
1429                 tw32(MAC_PHYCFG1, val);
1430
1431                 return;
1432         }
1433
1434         if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
1435                 val |= MAC_PHYCFG2_EMODE_MASK_MASK |
1436                        MAC_PHYCFG2_FMODE_MASK_MASK |
1437                        MAC_PHYCFG2_GMODE_MASK_MASK |
1438                        MAC_PHYCFG2_ACT_MASK_MASK   |
1439                        MAC_PHYCFG2_QUAL_MASK_MASK |
1440                        MAC_PHYCFG2_INBAND_ENABLE;
1441
1442         tw32(MAC_PHYCFG2, val);
1443
1444         val = tr32(MAC_PHYCFG1);
1445         val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
1446                  MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
1447         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1448                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1449                         val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
1450                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1451                         val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
1452         }
1453         val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
1454                MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
1455         tw32(MAC_PHYCFG1, val);
1456
1457         val = tr32(MAC_EXT_RGMII_MODE);
1458         val &= ~(MAC_RGMII_MODE_RX_INT_B |
1459                  MAC_RGMII_MODE_RX_QUALITY |
1460                  MAC_RGMII_MODE_RX_ACTIVITY |
1461                  MAC_RGMII_MODE_RX_ENG_DET |
1462                  MAC_RGMII_MODE_TX_ENABLE |
1463                  MAC_RGMII_MODE_TX_LOWPWR |
1464                  MAC_RGMII_MODE_TX_RESET);
1465         if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
1466                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1467                         val |= MAC_RGMII_MODE_RX_INT_B |
1468                                MAC_RGMII_MODE_RX_QUALITY |
1469                                MAC_RGMII_MODE_RX_ACTIVITY |
1470                                MAC_RGMII_MODE_RX_ENG_DET;
1471                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1472                         val |= MAC_RGMII_MODE_TX_ENABLE |
1473                                MAC_RGMII_MODE_TX_LOWPWR |
1474                                MAC_RGMII_MODE_TX_RESET;
1475         }
1476         tw32(MAC_EXT_RGMII_MODE, val);
1477 }
1478
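/* Take the MDIO interface out of hardware auto-poll mode so the driver
 * can perform its own PHY accesses.
 */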
1479 static void tg3_mdio_start(struct tg3 *tp)
1480 {
1481         tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
1482         tw32_f(MAC_MI_MODE, tp->mi_mode);
1483         udelay(80);
1484
1485         if (tg3_flag(tp, MDIOBUS_INITED) &&
1486             tg3_asic_rev(tp) == ASIC_REV_5785)
1487                 tg3_mdio_config_5785(tp);
1488 }
1489
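/* Work out the PHY address (function-dependent on 5717-plus parts) and,
 * when phylib is in use, allocate and register the MDIO bus.
 */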
1490 static int tg3_mdio_init(struct tg3 *tp)
1491 {
1492         int i;
1493         u32 reg;
1494         struct phy_device *phydev;
1495
1496         if (tg3_flag(tp, 5717_PLUS)) {
1497                 u32 is_serdes;
1498
1499                 tp->phy_addr = tp->pci_fn + 1;
1500
1501                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0)
1502                         is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
1503                 else
1504                         is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
1505                                     TG3_CPMU_PHY_STRAP_IS_SERDES;
1506                 if (is_serdes)
1507                         tp->phy_addr += 7;
1508         } else
1509                 tp->phy_addr = TG3_PHY_MII_ADDR;
1510
1511         tg3_mdio_start(tp);
1512
1513         if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
1514                 return 0;
1515
1516         tp->mdio_bus = mdiobus_alloc();
1517         if (tp->mdio_bus == NULL)
1518                 return -ENOMEM;
1519
1520         tp->mdio_bus->name     = "tg3 mdio bus";
1521         snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
1522                  (tp->pdev->bus->number << 8) | tp->pdev->devfn);
1523         tp->mdio_bus->priv     = tp;
1524         tp->mdio_bus->parent   = &tp->pdev->dev;
1525         tp->mdio_bus->read     = &tg3_mdio_read;
1526         tp->mdio_bus->write    = &tg3_mdio_write;
1527         tp->mdio_bus->reset    = &tg3_mdio_reset;
1528         tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1529         tp->mdio_bus->irq      = &tp->mdio_irq[0];
1530
1531         for (i = 0; i < PHY_MAX_ADDR; i++)
1532                 tp->mdio_bus->irq[i] = PHY_POLL;
1533
1534         /* The bus registration will look for all the PHYs on the mdio bus.
1535          * Unfortunately, it does not ensure the PHY is powered up before
1536          * accessing the PHY ID registers.  A chip reset is the
1537          * quickest way to bring the device back to an operational state.
1538          */
1539         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1540                 tg3_bmcr_reset(tp);
1541
1542         i = mdiobus_register(tp->mdio_bus);
1543         if (i) {
1544                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1545                 mdiobus_free(tp->mdio_bus);
1546                 return i;
1547         }
1548
1549         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1550
1551         if (!phydev || !phydev->drv) {
1552                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1553                 mdiobus_unregister(tp->mdio_bus);
1554                 mdiobus_free(tp->mdio_bus);
1555                 return -ENODEV;
1556         }
1557
1558         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1559         case PHY_ID_BCM57780:
1560                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1561                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1562                 break;
1563         case PHY_ID_BCM50610:
1564         case PHY_ID_BCM50610M:
1565                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1566                                      PHY_BRCM_RX_REFCLK_UNUSED |
1567                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1568                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1569                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1570                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1571                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1572                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1573                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1574                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1575                 /* fallthru */
1576         case PHY_ID_RTL8211C:
1577                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1578                 break;
1579         case PHY_ID_RTL8201E:
1580         case PHY_ID_BCMAC131:
1581                 phydev->interface = PHY_INTERFACE_MODE_MII;
1582                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1583                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1584                 break;
1585         }
1586
1587         tg3_flag_set(tp, MDIOBUS_INITED);
1588
1589         if (tg3_asic_rev(tp) == ASIC_REV_5785)
1590                 tg3_mdio_config_5785(tp);
1591
1592         return 0;
1593 }
1594
1595 static void tg3_mdio_fini(struct tg3 *tp)
1596 {
1597         if (tg3_flag(tp, MDIOBUS_INITED)) {
1598                 tg3_flag_clear(tp, MDIOBUS_INITED);
1599                 mdiobus_unregister(tp->mdio_bus);
1600                 mdiobus_free(tp->mdio_bus);
1601         }
1602 }
1603
1604 /* tp->lock is held. */
1605 static inline void tg3_generate_fw_event(struct tg3 *tp)
1606 {
1607         u32 val;
1608
1609         val = tr32(GRC_RX_CPU_EVENT);
1610         val |= GRC_RX_CPU_DRIVER_EVENT;
1611         tw32_f(GRC_RX_CPU_EVENT, val);
1612
1613         tp->last_event_jiffies = jiffies;
1614 }
1615
1616 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1617
1618 /* tp->lock is held. */
1619 static void tg3_wait_for_event_ack(struct tg3 *tp)
1620 {
1621         int i;
1622         unsigned int delay_cnt;
1623         long time_remain;
1624
1625         /* If enough time has passed, no wait is necessary. */
1626         time_remain = (long)(tp->last_event_jiffies + 1 +
1627                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1628                       (long)jiffies;
1629         if (time_remain < 0)
1630                 return;
1631
1632         /* Check if we can shorten the wait time. */
1633         delay_cnt = jiffies_to_usecs(time_remain);
1634         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1635                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1636         delay_cnt = (delay_cnt >> 3) + 1;
1637
1638         for (i = 0; i < delay_cnt; i++) {
1639                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1640                         break;
1641                 if (pci_channel_offline(tp->pdev))
1642                         break;
1643
1644                 udelay(8);
1645         }
1646 }
1647
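/* Pack BMCR/BMSR, ADVERTISE/LPA, CTRL1000/STAT1000 and PHYADDR into the
 * four 32-bit words the firmware expects in a link report.
 */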
1648 /* tp->lock is held. */
1649 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1650 {
1651         u32 reg, val;
1652
1653         val = 0;
1654         if (!tg3_readphy(tp, MII_BMCR, &reg))
1655                 val = reg << 16;
1656         if (!tg3_readphy(tp, MII_BMSR, &reg))
1657                 val |= (reg & 0xffff);
1658         *data++ = val;
1659
1660         val = 0;
1661         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1662                 val = reg << 16;
1663         if (!tg3_readphy(tp, MII_LPA, &reg))
1664                 val |= (reg & 0xffff);
1665         *data++ = val;
1666
1667         val = 0;
1668         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1669                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1670                         val = reg << 16;
1671                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1672                         val |= (reg & 0xffff);
1673         }
1674         *data++ = val;
1675
1676         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1677                 val = reg << 16;
1678         else
1679                 val = 0;
1680         *data++ = val;
1681 }
1682
1683 /* tp->lock is held. */
1684 static void tg3_ump_link_report(struct tg3 *tp)
1685 {
1686         u32 data[4];
1687
1688         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1689                 return;
1690
1691         tg3_phy_gather_ump_data(tp, data);
1692
1693         tg3_wait_for_event_ack(tp);
1694
1695         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1696         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1697         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1698         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1699         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1700         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1701
1702         tg3_generate_fw_event(tp);
1703 }
1704
1705 /* tp->lock is held. */
1706 static void tg3_stop_fw(struct tg3 *tp)
1707 {
1708         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1709                 /* Wait for RX cpu to ACK the previous event. */
1710                 tg3_wait_for_event_ack(tp);
1711
1712                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1713
1714                 tg3_generate_fw_event(tp);
1715
1716                 /* Wait for RX cpu to ACK this event. */
1717                 tg3_wait_for_event_ack(tp);
1718         }
1719 }
1720
1721 /* tp->lock is held. */
1722 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1723 {
1724         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1725                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1726
1727         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1728                 switch (kind) {
1729                 case RESET_KIND_INIT:
1730                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1731                                       DRV_STATE_START);
1732                         break;
1733
1734                 case RESET_KIND_SHUTDOWN:
1735                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1736                                       DRV_STATE_UNLOAD);
1737                         break;
1738
1739                 case RESET_KIND_SUSPEND:
1740                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1741                                       DRV_STATE_SUSPEND);
1742                         break;
1743
1744                 default:
1745                         break;
1746                 }
1747         }
1748
1749         if (kind == RESET_KIND_INIT ||
1750             kind == RESET_KIND_SUSPEND)
1751                 tg3_ape_driver_state_change(tp, kind);
1752 }
1753
1754 /* tp->lock is held. */
1755 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1756 {
1757         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1758                 switch (kind) {
1759                 case RESET_KIND_INIT:
1760                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1761                                       DRV_STATE_START_DONE);
1762                         break;
1763
1764                 case RESET_KIND_SHUTDOWN:
1765                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1766                                       DRV_STATE_UNLOAD_DONE);
1767                         break;
1768
1769                 default:
1770                         break;
1771                 }
1772         }
1773
1774         if (kind == RESET_KIND_SHUTDOWN)
1775                 tg3_ape_driver_state_change(tp, kind);
1776 }
1777
1778 /* tp->lock is held. */
1779 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1780 {
1781         if (tg3_flag(tp, ENABLE_ASF)) {
1782                 switch (kind) {
1783                 case RESET_KIND_INIT:
1784                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1785                                       DRV_STATE_START);
1786                         break;
1787
1788                 case RESET_KIND_SHUTDOWN:
1789                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1790                                       DRV_STATE_UNLOAD);
1791                         break;
1792
1793                 case RESET_KIND_SUSPEND:
1794                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1795                                       DRV_STATE_SUSPEND);
1796                         break;
1797
1798                 default:
1799                         break;
1800                 }
1801         }
1802 }
1803
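/* Wait for the chip's firmware to signal that initialization is done.
 * Parts with no firmware fitted (e.g. some Sun onboard devices) are
 * not treated as an error.
 */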
1804 static int tg3_poll_fw(struct tg3 *tp)
1805 {
1806         int i;
1807         u32 val;
1808
1809         if (tg3_flag(tp, NO_FWARE_REPORTED))
1810                 return 0;
1811
1812         if (tg3_flag(tp, IS_SSB_CORE)) {
1813                 /* We don't use firmware. */
1814                 return 0;
1815         }
1816
1817         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
1818                 /* Wait up to 20ms for init done. */
1819                 for (i = 0; i < 200; i++) {
1820                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1821                                 return 0;
1822                         if (pci_channel_offline(tp->pdev))
1823                                 return -ENODEV;
1824
1825                         udelay(100);
1826                 }
1827                 return -ENODEV;
1828         }
1829
1830         /* Wait for firmware initialization to complete. */
1831         for (i = 0; i < 100000; i++) {
1832                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1833                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1834                         break;
1835                 if (pci_channel_offline(tp->pdev)) {
1836                         if (!tg3_flag(tp, NO_FWARE_REPORTED)) {
1837                                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1838                                 netdev_info(tp->dev, "No firmware running\n");
1839                         }
1840
1841                         break;
1842                 }
1843
1844                 udelay(10);
1845         }
1846
1847         /* Chip might not be fitted with firmware.  Some Sun onboard
1848          * parts are configured like that.  So don't signal the timeout
1849          * of the above loop as an error, but do report the lack of
1850          * running firmware once.
1851          */
1852         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1853                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1854
1855                 netdev_info(tp->dev, "No firmware running\n");
1856         }
1857
1858         if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
1859                 /* The 57765 A0 needs a little more
1860                  * time to do some important work.
1861                  */
1862                 mdelay(10);
1863         }
1864
1865         return 0;
1866 }
1867
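/* Log the link state (speed, duplex, flow control, EEE) and relay it to
 * management firmware via tg3_ump_link_report().
 */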
1868 static void tg3_link_report(struct tg3 *tp)
1869 {
1870         if (!netif_carrier_ok(tp->dev)) {
1871                 netif_info(tp, link, tp->dev, "Link is down\n");
1872                 tg3_ump_link_report(tp);
1873         } else if (netif_msg_link(tp)) {
1874                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1875                             (tp->link_config.active_speed == SPEED_1000 ?
1876                              1000 :
1877                              (tp->link_config.active_speed == SPEED_100 ?
1878                               100 : 10)),
1879                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1880                              "full" : "half"));
1881
1882                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1883                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1884                             "on" : "off",
1885                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1886                             "on" : "off");
1887
1888                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1889                         netdev_info(tp->dev, "EEE is %s\n",
1890                                     tp->setlpicnt ? "enabled" : "disabled");
1891
1892                 tg3_ump_link_report(tp);
1893         }
1894
1895         tp->link_up = netif_carrier_ok(tp->dev);
1896 }
1897
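/* Translate copper (1000BASE-T) pause advertisement bits into
 * FLOW_CTRL_RX/FLOW_CTRL_TX flags.
 */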
1898 static u32 tg3_decode_flowctrl_1000T(u32 adv)
1899 {
1900         u32 flowctrl = 0;
1901
1902         if (adv & ADVERTISE_PAUSE_CAP) {
1903                 flowctrl |= FLOW_CTRL_RX;
1904                 if (!(adv & ADVERTISE_PAUSE_ASYM))
1905                         flowctrl |= FLOW_CTRL_TX;
1906         } else if (adv & ADVERTISE_PAUSE_ASYM)
1907                 flowctrl |= FLOW_CTRL_TX;
1908
1909         return flowctrl;
1910 }
1911
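/* Build the 1000BASE-X pause advertisement bits from the requested
 * flow-control flags.
 */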
1912 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1913 {
1914         u16 miireg;
1915
1916         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1917                 miireg = ADVERTISE_1000XPAUSE;
1918         else if (flow_ctrl & FLOW_CTRL_TX)
1919                 miireg = ADVERTISE_1000XPSE_ASYM;
1920         else if (flow_ctrl & FLOW_CTRL_RX)
1921                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1922         else
1923                 miireg = 0;
1924
1925         return miireg;
1926 }
1927
1928 static u32 tg3_decode_flowctrl_1000X(u32 adv)
1929 {
1930         u32 flowctrl = 0;
1931
1932         if (adv & ADVERTISE_1000XPAUSE) {
1933                 flowctrl |= FLOW_CTRL_RX;
1934                 if (!(adv & ADVERTISE_1000XPSE_ASYM))
1935                         flowctrl |= FLOW_CTRL_TX;
1936         } else if (adv & ADVERTISE_1000XPSE_ASYM)
1937                 flowctrl |= FLOW_CTRL_TX;
1938
1939         return flowctrl;
1940 }
1941
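/* Resolve the negotiated flow-control result from the local and
 * link-partner 1000BASE-X pause advertisements.
 */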
1942 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1943 {
1944         u8 cap = 0;
1945
1946         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1947                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1948         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1949                 if (lcladv & ADVERTISE_1000XPAUSE)
1950                         cap = FLOW_CTRL_RX;
1951                 if (rmtadv & ADVERTISE_1000XPAUSE)
1952                         cap = FLOW_CTRL_TX;
1953         }
1954
1955         return cap;
1956 }
1957
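/* Apply the negotiated (or statically configured) flow-control settings
 * to the MAC's RX and TX mode registers.
 */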
1958 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1959 {
1960         u8 autoneg;
1961         u8 flowctrl = 0;
1962         u32 old_rx_mode = tp->rx_mode;
1963         u32 old_tx_mode = tp->tx_mode;
1964
1965         if (tg3_flag(tp, USE_PHYLIB))
1966                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1967         else
1968                 autoneg = tp->link_config.autoneg;
1969
1970         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1971                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1972                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1973                 else
1974                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1975         } else
1976                 flowctrl = tp->link_config.flowctrl;
1977
1978         tp->link_config.active_flowctrl = flowctrl;
1979
1980         if (flowctrl & FLOW_CTRL_RX)
1981                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1982         else
1983                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1984
1985         if (old_rx_mode != tp->rx_mode)
1986                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1987
1988         if (flowctrl & FLOW_CTRL_TX)
1989                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1990         else
1991                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1992
1993         if (old_tx_mode != tp->tx_mode)
1994                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1995 }
1996
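/* phylib link-change callback: propagate the PHY's speed, duplex and
 * pause state into the MAC and report the link if anything changed.
 */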
1997 static void tg3_adjust_link(struct net_device *dev)
1998 {
1999         u8 oldflowctrl, linkmesg = 0;
2000         u32 mac_mode, lcl_adv, rmt_adv;
2001         struct tg3 *tp = netdev_priv(dev);
2002         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2003
2004         spin_lock_bh(&tp->lock);
2005
2006         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
2007                                     MAC_MODE_HALF_DUPLEX);
2008
2009         oldflowctrl = tp->link_config.active_flowctrl;
2010
2011         if (phydev->link) {
2012                 lcl_adv = 0;
2013                 rmt_adv = 0;
2014
2015                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
2016                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2017                 else if (phydev->speed == SPEED_1000 ||
2018                          tg3_asic_rev(tp) != ASIC_REV_5785)
2019                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
2020                 else
2021                         mac_mode |= MAC_MODE_PORT_MODE_MII;
2022
2023                 if (phydev->duplex == DUPLEX_HALF)
2024                         mac_mode |= MAC_MODE_HALF_DUPLEX;
2025                 else {
2026                         lcl_adv = mii_advertise_flowctrl(
2027                                   tp->link_config.flowctrl);
2028
2029                         if (phydev->pause)
2030                                 rmt_adv = LPA_PAUSE_CAP;
2031                         if (phydev->asym_pause)
2032                                 rmt_adv |= LPA_PAUSE_ASYM;
2033                 }
2034
2035                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
2036         } else
2037                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
2038
2039         if (mac_mode != tp->mac_mode) {
2040                 tp->mac_mode = mac_mode;
2041                 tw32_f(MAC_MODE, tp->mac_mode);
2042                 udelay(40);
2043         }
2044
2045         if (tg3_asic_rev(tp) == ASIC_REV_5785) {
2046                 if (phydev->speed == SPEED_10)
2047                         tw32(MAC_MI_STAT,
2048                              MAC_MI_STAT_10MBPS_MODE |
2049                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2050                 else
2051                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
2052         }
2053
2054         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
2055                 tw32(MAC_TX_LENGTHS,
2056                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2057                       (6 << TX_LENGTHS_IPG_SHIFT) |
2058                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2059         else
2060                 tw32(MAC_TX_LENGTHS,
2061                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2062                       (6 << TX_LENGTHS_IPG_SHIFT) |
2063                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2064
2065         if (phydev->link != tp->old_link ||
2066             phydev->speed != tp->link_config.active_speed ||
2067             phydev->duplex != tp->link_config.active_duplex ||
2068             oldflowctrl != tp->link_config.active_flowctrl)
2069                 linkmesg = 1;
2070
2071         tp->old_link = phydev->link;
2072         tp->link_config.active_speed = phydev->speed;
2073         tp->link_config.active_duplex = phydev->duplex;
2074
2075         spin_unlock_bh(&tp->lock);
2076
2077         if (linkmesg)
2078                 tg3_link_report(tp);
2079 }
2080
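/* Reset the PHY and connect it to the MAC through phylib, masking the
 * advertised features down to what the MAC interface supports.
 */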
2081 static int tg3_phy_init(struct tg3 *tp)
2082 {
2083         struct phy_device *phydev;
2084
2085         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
2086                 return 0;
2087
2088         /* Bring the PHY back to a known state. */
2089         tg3_bmcr_reset(tp);
2090
2091         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2092
2093         /* Attach the MAC to the PHY. */
2094         phydev = phy_connect(tp->dev, dev_name(&phydev->dev),
2095                              tg3_adjust_link, phydev->interface);
2096         if (IS_ERR(phydev)) {
2097                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
2098                 return PTR_ERR(phydev);
2099         }
2100
2101         /* Mask with MAC supported features. */
2102         switch (phydev->interface) {
2103         case PHY_INTERFACE_MODE_GMII:
2104         case PHY_INTERFACE_MODE_RGMII:
2105                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
2106                         phydev->supported &= (PHY_GBIT_FEATURES |
2107                                               SUPPORTED_Pause |
2108                                               SUPPORTED_Asym_Pause);
2109                         break;
2110                 }
2111                 /* fallthru */
2112         case PHY_INTERFACE_MODE_MII:
2113                 phydev->supported &= (PHY_BASIC_FEATURES |
2114                                       SUPPORTED_Pause |
2115                                       SUPPORTED_Asym_Pause);
2116                 break;
2117         default:
2118                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2119                 return -EINVAL;
2120         }
2121
2122         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
2123
2124         phydev->advertising = phydev->supported;
2125
2126         return 0;
2127 }
2128
2129 static void tg3_phy_start(struct tg3 *tp)
2130 {
2131         struct phy_device *phydev;
2132
2133         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2134                 return;
2135
2136         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2137
2138         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
2139                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
2140                 phydev->speed = tp->link_config.speed;
2141                 phydev->duplex = tp->link_config.duplex;
2142                 phydev->autoneg = tp->link_config.autoneg;
2143                 phydev->advertising = tp->link_config.advertising;
2144         }
2145
2146         phy_start(phydev);
2147
2148         phy_start_aneg(phydev);
2149 }
2150
2151 static void tg3_phy_stop(struct tg3 *tp)
2152 {
2153         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
2154                 return;
2155
2156         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2157 }
2158
2159 static void tg3_phy_fini(struct tg3 *tp)
2160 {
2161         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
2162                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
2163                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
2164         }
2165 }
2166
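/* Enable external loopback through the PHY's AUXCTL shadow register. */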
2167 static int tg3_phy_set_extloopbk(struct tg3 *tp)
2168 {
2169         int err;
2170         u32 val;
2171
2172         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
2173                 return 0;
2174
2175         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2176                 /* Cannot do read-modify-write on 5401 */
2177                 err = tg3_phy_auxctl_write(tp,
2178                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2179                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
2180                                            0x4c20);
2181                 goto done;
2182         }
2183
2184         err = tg3_phy_auxctl_read(tp,
2185                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2186         if (err)
2187                 return err;
2188
2189         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
2190         err = tg3_phy_auxctl_write(tp,
2191                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
2192
2193 done:
2194         return err;
2195 }
2196
2197 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
2198 {
2199         u32 phytest;
2200
2201         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2202                 u32 phy;
2203
2204                 tg3_writephy(tp, MII_TG3_FET_TEST,
2205                              phytest | MII_TG3_FET_SHADOW_EN);
2206                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
2207                         if (enable)
2208                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
2209                         else
2210                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
2211                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
2212                 }
2213                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2214         }
2215 }
2216
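/* Enable or disable the PHY's auto power-down (APD) feature through the
 * misc shadow registers.
 */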
2217 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
2218 {
2219         u32 reg;
2220
2221         if (!tg3_flag(tp, 5705_PLUS) ||
2222             (tg3_flag(tp, 5717_PLUS) &&
2223              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2224                 return;
2225
2226         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2227                 tg3_phy_fet_toggle_apd(tp, enable);
2228                 return;
2229         }
2230
2231         reg = MII_TG3_MISC_SHDW_WREN |
2232               MII_TG3_MISC_SHDW_SCR5_SEL |
2233               MII_TG3_MISC_SHDW_SCR5_LPED |
2234               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
2235               MII_TG3_MISC_SHDW_SCR5_SDTL |
2236               MII_TG3_MISC_SHDW_SCR5_C125OE;
2237         if (tg3_asic_rev(tp) != ASIC_REV_5784 || !enable)
2238                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
2239
2240         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2241
2242
2243         reg = MII_TG3_MISC_SHDW_WREN |
2244               MII_TG3_MISC_SHDW_APD_SEL |
2245               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2246         if (enable)
2247                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2248
2249         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2250 }
2251
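/* Enable or disable automatic MDI/MDI-X crossover, via the FET shadow
 * registers or the AUXCTL misc register depending on the PHY type.
 */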
2252 static void tg3_phy_toggle_automdix(struct tg3 *tp, bool enable)
2253 {
2254         u32 phy;
2255
2256         if (!tg3_flag(tp, 5705_PLUS) ||
2257             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2258                 return;
2259
2260         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2261                 u32 ephy;
2262
2263                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2264                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2265
2266                         tg3_writephy(tp, MII_TG3_FET_TEST,
2267                                      ephy | MII_TG3_FET_SHADOW_EN);
2268                         if (!tg3_readphy(tp, reg, &phy)) {
2269                                 if (enable)
2270                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2271                                 else
2272                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2273                                 tg3_writephy(tp, reg, phy);
2274                         }
2275                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2276                 }
2277         } else {
2278                 int ret;
2279
2280                 ret = tg3_phy_auxctl_read(tp,
2281                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2282                 if (!ret) {
2283                         if (enable)
2284                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2285                         else
2286                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2287                         tg3_phy_auxctl_write(tp,
2288                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2289                 }
2290         }
2291 }
2292
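/* Enable the PHY's ethernet@wirespeed feature (downshift to a lower
 * speed on marginal links) unless it is disabled for this board.
 */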
2293 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2294 {
2295         int ret;
2296         u32 val;
2297
2298         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2299                 return;
2300
2301         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2302         if (!ret)
2303                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2304                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2305 }
2306
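/* Load PHY DSP coefficients from the values the chip carries in
 * one-time-programmable (OTP) memory.
 */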
2307 static void tg3_phy_apply_otp(struct tg3 *tp)
2308 {
2309         u32 otp, phy;
2310
2311         if (!tp->phy_otp)
2312                 return;
2313
2314         otp = tp->phy_otp;
2315
2316         if (tg3_phy_toggle_auxctl_smdsp(tp, true))
2317                 return;
2318
2319         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2320         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2321         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2322
2323         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2324               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2325         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2326
2327         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2328         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2329         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2330
2331         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2332         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2333
2334         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2335         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2336
2337         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2338               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2339         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2340
2341         tg3_phy_toggle_auxctl_smdsp(tp, false);
2342 }
2343
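/* Re-evaluate EEE state after a link change: arm the LPI entry counter
 * when the link partner resolved EEE at 100/1000 full duplex, otherwise
 * make sure LPI is disabled.
 */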
2344 static void tg3_phy_eee_adjust(struct tg3 *tp, bool current_link_up)
2345 {
2346         u32 val;
2347
2348         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2349                 return;
2350
2351         tp->setlpicnt = 0;
2352
2353         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2354             current_link_up &&
2355             tp->link_config.active_duplex == DUPLEX_FULL &&
2356             (tp->link_config.active_speed == SPEED_100 ||
2357              tp->link_config.active_speed == SPEED_1000)) {
2358                 u32 eeectl;
2359
2360                 if (tp->link_config.active_speed == SPEED_1000)
2361                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2362                 else
2363                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2364
2365                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2366
2367                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2368                                   TG3_CL45_D7_EEERES_STAT, &val);
2369
2370                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2371                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2372                         tp->setlpicnt = 2;
2373         }
2374
2375         if (!tp->setlpicnt) {
2376                 if (current_link_up &&
2377                    !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2378                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2379                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2380                 }
2381
2382                 val = tr32(TG3_CPMU_EEE_MODE);
2383                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2384         }
2385 }
2386
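/* Enable LPI generation, first applying the extra DSP (TAP26) settings
 * required at 1000Mb on 5717/5719/57765-class devices.
 */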
2387 static void tg3_phy_eee_enable(struct tg3 *tp)
2388 {
2389         u32 val;
2390
2391         if (tp->link_config.active_speed == SPEED_1000 &&
2392             (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2393              tg3_asic_rev(tp) == ASIC_REV_5719 ||
2394              tg3_flag(tp, 57765_CLASS)) &&
2395             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2396                 val = MII_TG3_DSP_TAP26_ALNOKO |
2397                       MII_TG3_DSP_TAP26_RMRXSTO;
2398                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2399                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2400         }
2401
2402         val = tr32(TG3_CPMU_EEE_MODE);
2403         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2404 }
2405
2406 static int tg3_wait_macro_done(struct tg3 *tp)
2407 {
2408         int limit = 100;
2409
2410         while (limit--) {
2411                 u32 tmp32;
2412
2413                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2414                         if ((tmp32 & 0x1000) == 0)
2415                                 break;
2416                 }
2417         }
2418         if (limit < 0)
2419                 return -EBUSY;
2420
2421         return 0;
2422 }
2423
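/* Write a test pattern to each of the four DSP channels and read it
 * back.  Returns -EBUSY on a mismatch or timeout; timeouts also request
 * another PHY reset through *resetp.
 */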
2424 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2425 {
2426         static const u32 test_pat[4][6] = {
2427         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2428         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2429         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2430         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2431         };
2432         int chan;
2433
2434         for (chan = 0; chan < 4; chan++) {
2435                 int i;
2436
2437                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2438                              (chan * 0x2000) | 0x0200);
2439                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2440
2441                 for (i = 0; i < 6; i++)
2442                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2443                                      test_pat[chan][i]);
2444
2445                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2446                 if (tg3_wait_macro_done(tp)) {
2447                         *resetp = 1;
2448                         return -EBUSY;
2449                 }
2450
2451                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2452                              (chan * 0x2000) | 0x0200);
2453                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2454                 if (tg3_wait_macro_done(tp)) {
2455                         *resetp = 1;
2456                         return -EBUSY;
2457                 }
2458
2459                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2460                 if (tg3_wait_macro_done(tp)) {
2461                         *resetp = 1;
2462                         return -EBUSY;
2463                 }
2464
2465                 for (i = 0; i < 6; i += 2) {
2466                         u32 low, high;
2467
2468                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2469                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2470                             tg3_wait_macro_done(tp)) {
2471                                 *resetp = 1;
2472                                 return -EBUSY;
2473                         }
2474                         low &= 0x7fff;
2475                         high &= 0x000f;
2476                         if (low != test_pat[chan][i] ||
2477                             high != test_pat[chan][i+1]) {
2478                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2479                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2480                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2481
2482                                 return -EBUSY;
2483                         }
2484                 }
2485         }
2486
2487         return 0;
2488 }
2489
2490 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2491 {
2492         int chan;
2493
2494         for (chan = 0; chan < 4; chan++) {
2495                 int i;
2496
2497                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2498                              (chan * 0x2000) | 0x0200);
2499                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2500                 for (i = 0; i < 6; i++)
2501                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
2502                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2503                 if (tg3_wait_macro_done(tp))
2504                         return -EBUSY;
2505         }
2506
2507         return 0;
2508 }
2509
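/* DSP fixup used on 5703/5704/5705 resets: force 1000Mb full-duplex
 * master mode, rewrite the DSP channel test patterns, and retry with a
 * fresh BMCR reset until they verify.
 */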
2510 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2511 {
2512         u32 reg32, phy9_orig;
2513         int retries, do_phy_reset, err;
2514
2515         retries = 10;
2516         do_phy_reset = 1;
2517         do {
2518                 if (do_phy_reset) {
2519                         err = tg3_bmcr_reset(tp);
2520                         if (err)
2521                                 return err;
2522                         do_phy_reset = 0;
2523                 }
2524
2525                 /* Disable transmitter and interrupt.  */
2526                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2527                         continue;
2528
2529                 reg32 |= 0x3000;
2530                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2531
2532                 /* Set full-duplex, 1000 Mbps.  */
2533                 tg3_writephy(tp, MII_BMCR,
2534                              BMCR_FULLDPLX | BMCR_SPEED1000);
2535
2536                 /* Set to master mode.  */
2537                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2538                         continue;
2539
2540                 tg3_writephy(tp, MII_CTRL1000,
2541                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2542
2543                 err = tg3_phy_toggle_auxctl_smdsp(tp, true);
2544                 if (err)
2545                         return err;
2546
2547                 /* Block the PHY control access.  */
2548                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2549
2550                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2551                 if (!err)
2552                         break;
2553         } while (--retries);
2554
2555         err = tg3_phy_reset_chanpat(tp);
2556         if (err)
2557                 return err;
2558
2559         tg3_phydsp_write(tp, 0x8005, 0x0000);
2560
2561         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2562         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2563
2564         tg3_phy_toggle_auxctl_smdsp(tp, false);
2565
2566         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2567
2568         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2569                 reg32 &= ~0x3000;
2570                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2571         } else if (!err)
2572                 err = -EBUSY;
2573
2574         return err;
2575 }
2576
2577 static void tg3_carrier_off(struct tg3 *tp)
2578 {
2579         netif_carrier_off(tp->dev);
2580         tp->link_up = false;
2581 }
2582
2583 static void tg3_warn_mgmt_link_flap(struct tg3 *tp)
2584 {
2585         if (tg3_flag(tp, ENABLE_ASF))
2586                 netdev_warn(tp->dev,
2587                             "Management side-band traffic will be interrupted during phy settings change\n");
2588 }
2589
2590 /* Reset the tigon3 PHY and reapply the chip- and PHY-specific
2591  * workarounds that a reset clears.
2592  */
2593 static int tg3_phy_reset(struct tg3 *tp)
2594 {
2595         u32 val, cpmuctrl;
2596         int err;
2597
2598         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2599                 val = tr32(GRC_MISC_CFG);
2600                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2601                 udelay(40);
2602         }
2603         err  = tg3_readphy(tp, MII_BMSR, &val);
2604         err |= tg3_readphy(tp, MII_BMSR, &val);
2605         if (err != 0)
2606                 return -EBUSY;
2607
2608         if (netif_running(tp->dev) && tp->link_up) {
2609                 netif_carrier_off(tp->dev);
2610                 tg3_link_report(tp);
2611         }
2612
2613         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
2614             tg3_asic_rev(tp) == ASIC_REV_5704 ||
2615             tg3_asic_rev(tp) == ASIC_REV_5705) {
2616                 err = tg3_phy_reset_5703_4_5(tp);
2617                 if (err)
2618                         return err;
2619                 goto out;
2620         }
2621
2622         cpmuctrl = 0;
2623         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
2624             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
2625                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2626                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2627                         tw32(TG3_CPMU_CTRL,
2628                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2629         }
2630
2631         err = tg3_bmcr_reset(tp);
2632         if (err)
2633                 return err;
2634
2635         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2636                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2637                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2638
2639                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2640         }
2641
2642         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
2643             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
2644                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2645                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2646                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2647                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2648                         udelay(40);
2649                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2650                 }
2651         }
2652
2653         if (tg3_flag(tp, 5717_PLUS) &&
2654             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2655                 return 0;
2656
2657         tg3_phy_apply_otp(tp);
2658
2659         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2660                 tg3_phy_toggle_apd(tp, true);
2661         else
2662                 tg3_phy_toggle_apd(tp, false);
2663
2664 out:
2665         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2666             !tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2667                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2668                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2669                 tg3_phy_toggle_auxctl_smdsp(tp, false);
2670         }
2671
2672         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2673                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2674                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2675         }
2676
2677         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2678                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2679                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2680                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2681                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2682                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2683                 }
2684         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2685                 if (!tg3_phy_toggle_auxctl_smdsp(tp, true)) {
2686                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2687                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2688                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2689                                 tg3_writephy(tp, MII_TG3_TEST1,
2690                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2691                         } else
2692                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2693
2694                         tg3_phy_toggle_auxctl_smdsp(tp, false);
2695                 }
2696         }
2697
2698         /* Set Extended packet length bit (bit 14) on all chips that
2699          * support jumbo frames.  */
2700         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2701                 /* Cannot do read-modify-write on 5401 */
2702                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2703         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2704                 /* Set bit 14 with read-modify-write to preserve other bits */
2705                 err = tg3_phy_auxctl_read(tp,
2706                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2707                 if (!err)
2708                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2709                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2710         }
2711
2712         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to support
2713          * jumbo frame transmission.
2714          */
2715         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2716                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2717                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2718                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2719         }
2720
2721         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
2722                 /* adjust output voltage */
2723                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2724         }
2725
2726         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5762_A0)
2727                 tg3_phydsp_write(tp, 0xffb, 0x4000);
2728
2729         tg3_phy_toggle_automdix(tp, true);
2730         tg3_phy_set_wirespeed(tp);
2731         return 0;
2732 }
2733
2734 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2735 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2736 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2737                                           TG3_GPIO_MSG_NEED_VAUX)
2738 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2739         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2740          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2741          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2742          (TG3_GPIO_MSG_DRVR_PRES << 12))
2743
2744 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2745         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2746          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2747          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2748          (TG3_GPIO_MSG_NEED_VAUX << 12))
2749
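/* Publish this PCI function's driver-present/need-VAUX GPIO message
 * bits and return the combined status for all four functions.
 */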
2750 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2751 {
2752         u32 status, shift;
2753
2754         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2755             tg3_asic_rev(tp) == ASIC_REV_5719)
2756                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2757         else
2758                 status = tr32(TG3_CPMU_DRV_STATUS);
2759
2760         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2761         status &= ~(TG3_GPIO_MSG_MASK << shift);
2762         status |= (newstat << shift);
2763
2764         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2765             tg3_asic_rev(tp) == ASIC_REV_5719)
2766                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2767         else
2768                 tw32(TG3_CPMU_DRV_STATUS, status);
2769
2770         return status >> TG3_APE_GPIO_MSG_SHIFT;
2771 }
2772
2773 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2774 {
2775         if (!tg3_flag(tp, IS_NIC))
2776                 return 0;
2777
2778         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2779             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2780             tg3_asic_rev(tp) == ASIC_REV_5720) {
2781                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2782                         return -EIO;
2783
2784                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2785
2786                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2787                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2788
2789                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2790         } else {
2791                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2792                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2793         }
2794
2795         return 0;
2796 }
2797
2798 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2799 {
2800         u32 grc_local_ctrl;
2801
2802         if (!tg3_flag(tp, IS_NIC) ||
2803             tg3_asic_rev(tp) == ASIC_REV_5700 ||
2804             tg3_asic_rev(tp) == ASIC_REV_5701)
2805                 return;
2806
2807         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2808
2809         tw32_wait_f(GRC_LOCAL_CTRL,
2810                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2811                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2812
2813         tw32_wait_f(GRC_LOCAL_CTRL,
2814                     grc_local_ctrl,
2815                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2816
2817         tw32_wait_f(GRC_LOCAL_CTRL,
2818                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2819                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2820 }
2821
2822 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2823 {
2824         if (!tg3_flag(tp, IS_NIC))
2825                 return;
2826
2827         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
2828             tg3_asic_rev(tp) == ASIC_REV_5701) {
2829                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2830                             (GRC_LCLCTRL_GPIO_OE0 |
2831                              GRC_LCLCTRL_GPIO_OE1 |
2832                              GRC_LCLCTRL_GPIO_OE2 |
2833                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2834                              GRC_LCLCTRL_GPIO_OUTPUT1),
2835                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2836         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2837                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2838                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2839                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2840                                      GRC_LCLCTRL_GPIO_OE1 |
2841                                      GRC_LCLCTRL_GPIO_OE2 |
2842                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2843                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2844                                      tp->grc_local_ctrl;
2845                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2846                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2847
2848                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2849                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2850                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2851
2852                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2853                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2854                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2855         } else {
2856                 u32 no_gpio2;
2857                 u32 grc_local_ctrl = 0;
2858
2859                 /* Workaround to prevent drawing excess current. */
2860                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
2861                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2862                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2863                                     grc_local_ctrl,
2864                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2865                 }
2866
2867                 /* On 5753 and variants, GPIO2 cannot be used. */
2868                 no_gpio2 = tp->nic_sram_data_cfg &
2869                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2870
2871                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2872                                   GRC_LCLCTRL_GPIO_OE1 |
2873                                   GRC_LCLCTRL_GPIO_OE2 |
2874                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2875                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2876                 if (no_gpio2) {
2877                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2878                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2879                 }
2880                 tw32_wait_f(GRC_LOCAL_CTRL,
2881                             tp->grc_local_ctrl | grc_local_ctrl,
2882                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2883
2884                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2885
2886                 tw32_wait_f(GRC_LOCAL_CTRL,
2887                             tp->grc_local_ctrl | grc_local_ctrl,
2888                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2889
2890                 if (!no_gpio2) {
2891                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2892                         tw32_wait_f(GRC_LOCAL_CTRL,
2893                                     tp->grc_local_ctrl | grc_local_ctrl,
2894                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2895                 }
2896         }
2897 }
2898
2899 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2900 {
2901         u32 msg = 0;
2902
2903         /* Serialize power state transitions */
2904         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2905                 return;
2906
2907         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2908                 msg = TG3_GPIO_MSG_NEED_VAUX;
2909
2910         msg = tg3_set_function_status(tp, msg);
2911
2912         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2913                 goto done;
2914
2915         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2916                 tg3_pwrsrc_switch_to_vaux(tp);
2917         else
2918                 tg3_pwrsrc_die_with_vmain(tp);
2919
2920 done:
2921         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2922 }
2923
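/* Decide, in concert with the peer function if one exists, whether the
 * device needs auxiliary power (ASF/WoL) or can be left on VMAIN.
 */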
2924 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2925 {
2926         bool need_vaux = false;
2927
2928         /* The GPIOs do something completely different on 57765. */
2929         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2930                 return;
2931
2932         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
2933             tg3_asic_rev(tp) == ASIC_REV_5719 ||
2934             tg3_asic_rev(tp) == ASIC_REV_5720) {
2935                 tg3_frob_aux_power_5717(tp, include_wol ?
2936                                         tg3_flag(tp, WOL_ENABLE) != 0 : false);
2937                 return;
2938         }
2939
2940         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2941                 struct net_device *dev_peer;
2942
2943                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2944
2945                 /* remove_one() may have been run on the peer. */
2946                 if (dev_peer) {
2947                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2948
2949                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2950                                 return;
2951
2952                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2953                             tg3_flag(tp_peer, ENABLE_ASF))
2954                                 need_vaux = true;
2955                 }
2956         }
2957
2958         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2959             tg3_flag(tp, ENABLE_ASF))
2960                 need_vaux = true;
2961
2962         if (need_vaux)
2963                 tg3_pwrsrc_switch_to_vaux(tp);
2964         else
2965                 tg3_pwrsrc_die_with_vmain(tp);
2966 }
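
     /* Editor's note (illustrative summary, not driver code): the choice
      * above reduces to a small predicate, evaluated for this port and,
      * when present, its peer:
      *
      *     need_vaux = (include_wol && WOL_ENABLE) || ENABLE_ASF
      *
      * If any participating port needs standby power, switch to Vaux;
      * otherwise shut the auxiliary path down and stay on Vmain.
      */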
2967
2968 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2969 {
2970         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2971                 return 1;
2972         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2973                 if (speed != SPEED_10)
2974                         return 1;
2975         } else if (speed == SPEED_10)
2976                 return 1;
2977
2978         return 0;
2979 }
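
     /* Editor's note -- the polarity rule above, tabulated as a reading
      * aid (derived directly from the code, nothing new):
      *
      *     led_ctrl == LED_CTRL_MODE_PHY_2  -> polarity set at any speed
      *     PHY is a BCM5411                 -> set unless speed is 10Mbps
      *     any other PHY                    -> set only when speed is 10Mbps
      */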
2980
2981 static bool tg3_phy_power_bug(struct tg3 *tp)
2982 {
2983         switch (tg3_asic_rev(tp)) {
2984         case ASIC_REV_5700:
2985         case ASIC_REV_5704:
2986                 return true;
2987         case ASIC_REV_5780:
2988                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
2989                         return true;
2990                 return false;
2991         case ASIC_REV_5717:
2992                 if (!tp->pci_fn)
2993                         return true;
2994                 return false;
2995         case ASIC_REV_5719:
2996         case ASIC_REV_5720:
2997                 if ((tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
2998                     !tp->pci_fn)
2999                         return true;
3000                 return false;
3001         }
3002
3003         return false;
3004 }
3005
3006 static bool tg3_phy_led_bug(struct tg3 *tp)
3007 {
3008         switch (tg3_asic_rev(tp)) {
3009         case ASIC_REV_5719:
3010                 if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
3011                     !tp->pci_fn)
3012                         return true;
3013                 return false;
3014         }
3015
3016         return false;
3017 }
3018
3019 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
3020 {
3021         u32 val;
3022
3023         if (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)
3024                 return;
3025
3026         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
3027                 if (tg3_asic_rev(tp) == ASIC_REV_5704) {
3028                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
3029                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
3030
3031                         sg_dig_ctrl |=
3032                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
3033                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
3034                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
3035                 }
3036                 return;
3037         }
3038
3039         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3040                 tg3_bmcr_reset(tp);
3041                 val = tr32(GRC_MISC_CFG);
3042                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
3043                 udelay(40);
3044                 return;
3045         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3046                 u32 phytest;
3047                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
3048                         u32 phy;
3049
3050                         tg3_writephy(tp, MII_ADVERTISE, 0);
3051                         tg3_writephy(tp, MII_BMCR,
3052                                      BMCR_ANENABLE | BMCR_ANRESTART);
3053
3054                         tg3_writephy(tp, MII_TG3_FET_TEST,
3055                                      phytest | MII_TG3_FET_SHADOW_EN);
3056                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
3057                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
3058                                 tg3_writephy(tp,
3059                                              MII_TG3_FET_SHDW_AUXMODE4,
3060                                              phy);
3061                         }
3062                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
3063                 }
3064                 return;
3065         } else if (do_low_power) {
3066                 if (!tg3_phy_led_bug(tp))
3067                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
3068                                      MII_TG3_EXT_CTRL_FORCE_LED_OFF);
3069
3070                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3071                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
3072                       MII_TG3_AUXCTL_PCTL_VREG_11V;
3073                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
3074         }
3075
3076         /* The PHY should not be powered down on some chips because
3077          * of bugs.
3078          */
3079         if (tg3_phy_power_bug(tp))
3080                 return;
3081
3082         if (tg3_chip_rev(tp) == CHIPREV_5784_AX ||
3083             tg3_chip_rev(tp) == CHIPREV_5761_AX) {
3084                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
3085                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
3086                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
3087                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
3088         }
3089
3090         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
3091 }
3092
3093 /* tp->lock is held. */
3094 static int tg3_nvram_lock(struct tg3 *tp)
3095 {
3096         if (tg3_flag(tp, NVRAM)) {
3097                 int i;
3098
3099                 if (tp->nvram_lock_cnt == 0) {
3100                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3101                         for (i = 0; i < 8000; i++) {
3102                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3103                                         break;
3104                                 udelay(20);
3105                         }
3106                         if (i == 8000) {
3107                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
3108                                 return -ENODEV;
3109                         }
3110                 }
3111                 tp->nvram_lock_cnt++;
3112         }
3113         return 0;
3114 }
3115
3116 /* tp->lock is held. */
3117 static void tg3_nvram_unlock(struct tg3 *tp)
3118 {
3119         if (tg3_flag(tp, NVRAM)) {
3120                 if (tp->nvram_lock_cnt > 0)
3121                         tp->nvram_lock_cnt--;
3122                 if (tp->nvram_lock_cnt == 0)
3123                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3124         }
3125 }
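
     /* Editor's sketch of the arbitration scheme above: the grant poll
      * gives up after roughly 8000 * 20us = 160ms, and tp->nvram_lock_cnt
      * turns the hardware semaphore into a recursive lock, so nested
      * users are safe:
      *
      *     tg3_nvram_lock(tp);        // cnt 0 -> 1, sets SWARB_REQ_SET1
      *         tg3_nvram_lock(tp);    // cnt 1 -> 2, no hardware access
      *         tg3_nvram_unlock(tp);  // cnt 2 -> 1
      *     tg3_nvram_unlock(tp);      // cnt 1 -> 0, sets SWARB_REQ_CLR1
      */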
3126
3127 /* tp->lock is held. */
3128 static void tg3_enable_nvram_access(struct tg3 *tp)
3129 {
3130         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3131                 u32 nvaccess = tr32(NVRAM_ACCESS);
3132
3133                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
3134         }
3135 }
3136
3137 /* tp->lock is held. */
3138 static void tg3_disable_nvram_access(struct tg3 *tp)
3139 {
3140         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
3141                 u32 nvaccess = tr32(NVRAM_ACCESS);
3142
3143                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
3144         }
3145 }
3146
3147 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3148                                         u32 offset, u32 *val)
3149 {
3150         u32 tmp;
3151         int i;
3152
3153         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
3154                 return -EINVAL;
3155
3156         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
3157                                         EEPROM_ADDR_DEVID_MASK |
3158                                         EEPROM_ADDR_READ);
3159         tw32(GRC_EEPROM_ADDR,
3160              tmp |
3161              (0 << EEPROM_ADDR_DEVID_SHIFT) |
3162              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
3163               EEPROM_ADDR_ADDR_MASK) |
3164              EEPROM_ADDR_READ | EEPROM_ADDR_START);
3165
3166         for (i = 0; i < 1000; i++) {
3167                 tmp = tr32(GRC_EEPROM_ADDR);
3168
3169                 if (tmp & EEPROM_ADDR_COMPLETE)
3170                         break;
3171                 msleep(1);
3172         }
3173         if (!(tmp & EEPROM_ADDR_COMPLETE))
3174                 return -EBUSY;
3175
3176         tmp = tr32(GRC_EEPROM_DATA);
3177
3178         /*
3179          * The data will always be opposite the native endian
3180          * format.  Perform a blind byteswap to compensate.
3181          */
3182         *val = swab32(tmp);
3183
3184         return 0;
3185 }
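
     /* Editor's note -- a sketch of the blind byteswap above, using a
      * made-up value: if the EEPROM holds the bytestream aa bb cc dd,
      * GRC_EEPROM_DATA reads back opposite the host's native order on
      * either endianness, so swab32(tmp) always recovers the native u32;
      * tg3_nvram_read_be32() can then apply cpu_to_be32() to produce a
      * stable bytestream on both LE and BE hosts.
      */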
3186
3187 #define NVRAM_CMD_TIMEOUT 10000
3188
3189 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3190 {
3191         int i;
3192
3193         tw32(NVRAM_CMD, nvram_cmd);
3194         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3195                 udelay(10);
3196                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3197                         udelay(10);
3198                         break;
3199                 }
3200         }
3201
3202         if (i == NVRAM_CMD_TIMEOUT)
3203                 return -EBUSY;
3204
3205         return 0;
3206 }
3207
3208 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
3209 {
3210         if (tg3_flag(tp, NVRAM) &&
3211             tg3_flag(tp, NVRAM_BUFFERED) &&
3212             tg3_flag(tp, FLASH) &&
3213             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3214             (tp->nvram_jedecnum == JEDEC_ATMEL))
3215
3216                 addr = ((addr / tp->nvram_pagesize) <<
3217                         ATMEL_AT45DB0X1B_PAGE_POS) +
3218                        (addr % tp->nvram_pagesize);
3219
3220         return addr;
3221 }
3222
3223 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
3224 {
3225         if (tg3_flag(tp, NVRAM) &&
3226             tg3_flag(tp, NVRAM_BUFFERED) &&
3227             tg3_flag(tp, FLASH) &&
3228             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
3229             (tp->nvram_jedecnum == JEDEC_ATMEL))
3230
3231                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
3232                         tp->nvram_pagesize) +
3233                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
3234
3235         return addr;
3236 }
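
     /* Editor's note -- worked example of the two Atmel translations
      * above (a sketch assuming the AT45DB0x1B's 264-byte page size and
      * ATMEL_AT45DB0X1B_PAGE_POS == 9):
      *
      *     logical 1000:  page = 1000 / 264 = 3, offset = 1000 % 264 = 208
      *     physical       = (3 << 9) + 208 = 1744
      *
      *     and back:      (1744 >> 9) * 264 + (1744 & 511)
      *                  = 3 * 264 + 208 = 1000
      */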
3237
3238 /* NOTE: Data read in from NVRAM is byteswapped according to
3239  * the byteswapping settings for all other register accesses.
3240  * tg3 devices are BE devices, so on a BE machine, the data
3241  * returned will be exactly as it is seen in NVRAM.  On a LE
3242  * machine, the 32-bit value will be byteswapped.
3243  */
3244 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
3245 {
3246         int ret;
3247
3248         if (!tg3_flag(tp, NVRAM))
3249                 return tg3_nvram_read_using_eeprom(tp, offset, val);
3250
3251         offset = tg3_nvram_phys_addr(tp, offset);
3252
3253         if (offset > NVRAM_ADDR_MSK)
3254                 return -EINVAL;
3255
3256         ret = tg3_nvram_lock(tp);
3257         if (ret)
3258                 return ret;
3259
3260         tg3_enable_nvram_access(tp);
3261
3262         tw32(NVRAM_ADDR, offset);
3263         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
3264                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
3265
3266         if (ret == 0)
3267                 *val = tr32(NVRAM_RDDATA);
3268
3269         tg3_disable_nvram_access(tp);
3270
3271         tg3_nvram_unlock(tp);
3272
3273         return ret;
3274 }
3275
3276 /* Ensures NVRAM data is in bytestream format. */
3277 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
3278 {
3279         u32 v;
3280         int res = tg3_nvram_read(tp, offset, &v);
3281         if (!res)
3282                 *val = cpu_to_be32(v);
3283         return res;
3284 }
3285
3286 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
3287                                     u32 offset, u32 len, u8 *buf)
3288 {
3289         int i, j, rc = 0;
3290         u32 val;
3291
3292         for (i = 0; i < len; i += 4) {
3293                 u32 addr;
3294                 __be32 data;
3295
3296                 addr = offset + i;
3297
3298                 memcpy(&data, buf + i, 4);
3299
3300                 /*
3301                  * The SEEPROM interface expects the data to always be opposite
3302                  * the native endian format.  We accomplish this by reversing
3303                  * all the operations that would have been performed on the
3304                  * data from a call to tg3_nvram_read_be32().
3305                  */
3306                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3307
3308                 val = tr32(GRC_EEPROM_ADDR);
3309                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3310
3311                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3312                         EEPROM_ADDR_READ);
3313                 tw32(GRC_EEPROM_ADDR, val |
3314                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3315                         (addr & EEPROM_ADDR_ADDR_MASK) |
3316                         EEPROM_ADDR_START |
3317                         EEPROM_ADDR_WRITE);
3318
3319                 for (j = 0; j < 1000; j++) {
3320                         val = tr32(GRC_EEPROM_ADDR);
3321
3322                         if (val & EEPROM_ADDR_COMPLETE)
3323                                 break;
3324                         msleep(1);
3325                 }
3326                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3327                         rc = -EBUSY;
3328                         break;
3329                 }
3330         }
3331
3332         return rc;
3333 }
3334
3335 /* offset and length are dword aligned */
3336 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3337                 u8 *buf)
3338 {
3339         int ret = 0;
3340         u32 pagesize = tp->nvram_pagesize;
3341         u32 pagemask = pagesize - 1;
3342         u32 nvram_cmd;
3343         u8 *tmp;
3344
3345         tmp = kmalloc(pagesize, GFP_KERNEL);
3346         if (tmp == NULL)
3347                 return -ENOMEM;
3348
3349         while (len) {
3350                 int j;
3351                 u32 phy_addr, page_off, size;
3352
3353                 phy_addr = offset & ~pagemask;
3354
3355                 for (j = 0; j < pagesize; j += 4) {
3356                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3357                                                   (__be32 *) (tmp + j));
3358                         if (ret)
3359                                 break;
3360                 }
3361                 if (ret)
3362                         break;
3363
3364                 page_off = offset & pagemask;
3365                 size = pagesize;
3366                 if (len < size)
3367                         size = len;
3368
3369                 len -= size;
3370
3371                 memcpy(tmp + page_off, buf, size);
3372
3373                 offset = offset + (pagesize - page_off);
3374
3375                 tg3_enable_nvram_access(tp);
3376
3377                 /*
3378                  * Before we can erase the flash page, we need
3379                  * to issue a special "write enable" command.
3380                  */
3381                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3382
3383                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3384                         break;
3385
3386                 /* Erase the target page */
3387                 tw32(NVRAM_ADDR, phy_addr);
3388
3389                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3390                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3391
3392                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3393                         break;
3394
3395                 /* Issue another write enable to start the write. */
3396                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3397
3398                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
3399                         break;
3400
3401                 for (j = 0; j < pagesize; j += 4) {
3402                         __be32 data;
3403
3404                         data = *((__be32 *) (tmp + j));
3405
3406                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3407
3408                         tw32(NVRAM_ADDR, phy_addr + j);
3409
3410                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3411                                 NVRAM_CMD_WR;
3412
3413                         if (j == 0)
3414                                 nvram_cmd |= NVRAM_CMD_FIRST;
3415                         else if (j == (pagesize - 4))
3416                                 nvram_cmd |= NVRAM_CMD_LAST;
3417
3418                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3419                         if (ret)
3420                                 break;
3421                 }
3422                 if (ret)
3423                         break;
3424         }
3425
3426         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3427         tg3_nvram_exec_cmd(tp, nvram_cmd);
3428
3429         kfree(tmp);
3430
3431         return ret;
3432 }
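
     /* Editor's summary of the unbuffered write loop above (one
      * iteration per flash page touched):
      *
      *     1. read the whole page into tmp[] via tg3_nvram_read_be32()
      *     2. memcpy() the caller's bytes over the affected span
      *     3. WREN, page ERASE at phy_addr, then WREN again
      *     4. stream the page back one dword at a time, tagging the
      *        first word NVRAM_CMD_FIRST and the last NVRAM_CMD_LAST
      *
      * i.e. a read-modify-erase-write cycle for parts that cannot
      * overwrite in place; the trailing WRDI drops write enable.
      */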
3433
3434 /* offset and length are dword aligned */
3435 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3436                 u8 *buf)
3437 {
3438         int i, ret = 0;
3439
3440         for (i = 0; i < len; i += 4, offset += 4) {
3441                 u32 page_off, phy_addr, nvram_cmd;
3442                 __be32 data;
3443
3444                 memcpy(&data, buf + i, 4);
3445                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3446
3447                 page_off = offset % tp->nvram_pagesize;
3448
3449                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3450
3451                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3452
3453                 if (page_off == 0 || i == 0)
3454                         nvram_cmd |= NVRAM_CMD_FIRST;
3455                 if (page_off == (tp->nvram_pagesize - 4))
3456                         nvram_cmd |= NVRAM_CMD_LAST;
3457
3458                 if (i == (len - 4))
3459                         nvram_cmd |= NVRAM_CMD_LAST;
3460
3461                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3462                     !tg3_flag(tp, FLASH) ||
3463                     !tg3_flag(tp, 57765_PLUS))
3464                         tw32(NVRAM_ADDR, phy_addr);
3465
3466                 if (tg3_asic_rev(tp) != ASIC_REV_5752 &&
3467                     !tg3_flag(tp, 5755_PLUS) &&
3468                     (tp->nvram_jedecnum == JEDEC_ST) &&
3469                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3470                         u32 cmd;
3471
3472                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3473                         ret = tg3_nvram_exec_cmd(tp, cmd);
3474                         if (ret)
3475                                 break;
3476                 }
3477                 if (!tg3_flag(tp, FLASH)) {
3478                         /* We always do complete word writes to eeprom. */
3479                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3480                 }
3481
3482                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3483                 if (ret)
3484                         break;
3485         }
3486         return ret;
3487 }
3488
3489 /* offset and length are dword aligned */
3490 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3491 {
3492         int ret;
3493
3494         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3495                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3496                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3497                 udelay(40);
3498         }
3499
3500         if (!tg3_flag(tp, NVRAM)) {
3501                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3502         } else {
3503                 u32 grc_mode;
3504
3505                 ret = tg3_nvram_lock(tp);
3506                 if (ret)
3507                         return ret;
3508
3509                 tg3_enable_nvram_access(tp);
3510                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3511                         tw32(NVRAM_WRITE1, 0x406);
3512
3513                 grc_mode = tr32(GRC_MODE);
3514                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3515
3516                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3517                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3518                                 buf);
3519                 } else {
3520                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3521                                 buf);
3522                 }
3523
3524                 grc_mode = tr32(GRC_MODE);
3525                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3526
3527                 tg3_disable_nvram_access(tp);
3528                 tg3_nvram_unlock(tp);
3529         }
3530
3531         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3532                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3533                 udelay(40);
3534         }
3535
3536         return ret;
3537 }
3538
3539 #define RX_CPU_SCRATCH_BASE     0x30000
3540 #define RX_CPU_SCRATCH_SIZE     0x04000
3541 #define TX_CPU_SCRATCH_BASE     0x34000
3542 #define TX_CPU_SCRATCH_SIZE     0x04000
3543
3544 /* tp->lock is held. */
3545 static int tg3_pause_cpu(struct tg3 *tp, u32 cpu_base)
3546 {
3547         int i;
3548         const int iters = 10000;
3549
3550         for (i = 0; i < iters; i++) {
3551                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3552                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3553                 if (tr32(cpu_base + CPU_MODE) & CPU_MODE_HALT)
3554                         break;
3555                 if (pci_channel_offline(tp->pdev))
3556                         return -EBUSY;
3557         }
3558
3559         return (i == iters) ? -EBUSY : 0;
3560 }
3561
3562 /* tp->lock is held. */
3563 static int tg3_rxcpu_pause(struct tg3 *tp)
3564 {
3565         int rc = tg3_pause_cpu(tp, RX_CPU_BASE);
3566
3567         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3568         tw32_f(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3569         udelay(10);
3570
3571         return rc;
3572 }
3573
3574 /* tp->lock is held. */
3575 static int tg3_txcpu_pause(struct tg3 *tp)
3576 {
3577         return tg3_pause_cpu(tp, TX_CPU_BASE);
3578 }
3579
3580 /* tp->lock is held. */
3581 static void tg3_resume_cpu(struct tg3 *tp, u32 cpu_base)
3582 {
3583         tw32(cpu_base + CPU_STATE, 0xffffffff);
3584         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3585 }
3586
3587 /* tp->lock is held. */
3588 static void tg3_rxcpu_resume(struct tg3 *tp)
3589 {
3590         tg3_resume_cpu(tp, RX_CPU_BASE);
3591 }
3592
3593 /* tp->lock is held. */
3594 static int tg3_halt_cpu(struct tg3 *tp, u32 cpu_base)
3595 {
3596         int rc;
3597
3598         BUG_ON(cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3599
3600         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
3601                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3602
3603                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3604                 return 0;
3605         }
3606         if (cpu_base == RX_CPU_BASE) {
3607                 rc = tg3_rxcpu_pause(tp);
3608         } else {
3609                 /*
3610                  * There is only an Rx CPU for the 5750 derivative in the
3611                  * BCM4785.
3612                  */
3613                 if (tg3_flag(tp, IS_SSB_CORE))
3614                         return 0;
3615
3616                 rc = tg3_txcpu_pause(tp);
3617         }
3618
3619         if (rc) {
3620                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3621                            __func__, cpu_base == RX_CPU_BASE ? "RX" : "TX");
3622                 return -ENODEV;
3623         }
3624
3625         /* Clear firmware's nvram arbitration. */
3626         if (tg3_flag(tp, NVRAM))
3627                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3628         return 0;
3629 }
3630
3631 static int tg3_fw_data_len(struct tg3 *tp,
3632                            const struct tg3_firmware_hdr *fw_hdr)
3633 {
3634         int fw_len;
3635
3636         /* Non-fragmented firmware has one firmware header followed by a
3637          * contiguous chunk of data to be written. The length field in that
3638          * header is not the length of the data to be written but the
3639          * complete length of the bss. The data length is derived from
3640          * tp->fw->size minus the headers.
3641          *
3642          * Fragmented firmware has a main header followed by multiple
3643          * fragments. Each fragment is identical to non-fragmented firmware,
3644          * with a firmware header followed by a contiguous chunk of data. In
3645          * the main header, the length field is unused and set to 0xffffffff.
3646          * In each fragment header the length is the entire size of that
3647          * fragment, i.e. fragment data plus header length. The data length
3648          * is therefore the length field in the header minus TG3_FW_HDR_LEN.
3649          */
3650         if (tp->fw_len == 0xffffffff)
3651                 fw_len = be32_to_cpu(fw_hdr->len);
3652         else
3653                 fw_len = tp->fw->size;
3654
3655         return (fw_len - TG3_FW_HDR_LEN) / sizeof(u32);
3656 }
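
     /* Editor's sketch of the two layouts described above (header
      * fields per struct tg3_firmware_hdr; TG3_FW_HDR_LEN bytes each):
      *
      *     non-fragmented: [hdr][data .........................]
      *                     hdr.len = bss length; data length is
      *                     tp->fw->size - TG3_FW_HDR_LEN
      *
      *     fragmented:     [main hdr, len = 0xffffffff]
      *                     [frag hdr][frag data][frag hdr][frag data]...
      *                     each frag hdr.len = TG3_FW_HDR_LEN + data size
      */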
3657
3658 /* tp->lock is held. */
3659 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3660                                  u32 cpu_scratch_base, int cpu_scratch_size,
3661                                  const struct tg3_firmware_hdr *fw_hdr)
3662 {
3663         int err, i;
3664         void (*write_op)(struct tg3 *, u32, u32);
3665         int total_len = tp->fw->size;
3666
3667         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3668                 netdev_err(tp->dev,
3669                            "%s: trying to load TX cpu firmware on a 5705-class chip\n",
3670                            __func__);
3671                 return -EINVAL;
3672         }
3673
3674         if (tg3_flag(tp, 5705_PLUS) && tg3_asic_rev(tp) != ASIC_REV_57766)
3675                 write_op = tg3_write_mem;
3676         else
3677                 write_op = tg3_write_indirect_reg32;
3678
3679         if (tg3_asic_rev(tp) != ASIC_REV_57766) {
3680                 /* It is possible that the bootcode is still loading at this
3681                  * point. Acquire the nvram lock before halting the cpu.
3682                  */
3683                 int lock_err = tg3_nvram_lock(tp);
3684                 err = tg3_halt_cpu(tp, cpu_base);
3685                 if (!lock_err)
3686                         tg3_nvram_unlock(tp);
3687                 if (err)
3688                         goto out;
3689
3690                 for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3691                         write_op(tp, cpu_scratch_base + i, 0);
3692                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3693                 tw32(cpu_base + CPU_MODE,
3694                      tr32(cpu_base + CPU_MODE) | CPU_MODE_HALT);
3695         } else {
3696                 /* Subtract the additional main header for fragmented firmware
3697                  * and advance to the first fragment.
3698                  */
3699                 total_len -= TG3_FW_HDR_LEN;
3700                 fw_hdr++;
3701         }
3702
3703         do {
3704                 u32 *fw_data = (u32 *)(fw_hdr + 1);
3705                 for (i = 0; i < tg3_fw_data_len(tp, fw_hdr); i++)
3706                         write_op(tp, cpu_scratch_base +
3707                                      (be32_to_cpu(fw_hdr->base_addr) & 0xffff) +
3708                                      (i * sizeof(u32)),
3709                                  be32_to_cpu(fw_data[i]));
3710
3711                 total_len -= be32_to_cpu(fw_hdr->len);
3712
3713                 /* Advance to next fragment */
3714                 fw_hdr = (struct tg3_firmware_hdr *)
3715                          ((void *)fw_hdr + be32_to_cpu(fw_hdr->len));
3716         } while (total_len > 0);
3717
3718         err = 0;
3719
3720 out:
3721         return err;
3722 }
3723
3724 /* tp->lock is held. */
3725 static int tg3_pause_cpu_and_set_pc(struct tg3 *tp, u32 cpu_base, u32 pc)
3726 {
3727         int i;
3728         const int iters = 5;
3729
3730         tw32(cpu_base + CPU_STATE, 0xffffffff);
3731         tw32_f(cpu_base + CPU_PC, pc);
3732
3733         for (i = 0; i < iters; i++) {
3734                 if (tr32(cpu_base + CPU_PC) == pc)
3735                         break;
3736                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3737                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3738                 tw32_f(cpu_base + CPU_PC, pc);
3739                 udelay(1000);
3740         }
3741
3742         return (i == iters) ? -EBUSY : 0;
3743 }
3744
3745 /* tp->lock is held. */
3746 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3747 {
3748         const struct tg3_firmware_hdr *fw_hdr;
3749         int err;
3750
3751         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3752
3753         /* The firmware blob starts with version numbers, followed by
3754          * start address and length. The length field holds the complete
3755          * length (end_address_of_bss - start_address_of_text); the
3756          * remainder is the blob to be loaded contiguously from the
3757          * start address. */
3758
3759         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3760                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3761                                     fw_hdr);
3762         if (err)
3763                 return err;
3764
3765         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3766                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3767                                     fw_hdr);
3768         if (err)
3769                 return err;
3770
3771         /* Now startup only the RX cpu. */
3772         err = tg3_pause_cpu_and_set_pc(tp, RX_CPU_BASE,
3773                                        be32_to_cpu(fw_hdr->base_addr));
3774         if (err) {
3775                 netdev_err(tp->dev, "%s fails to set RX CPU PC, is %08x "
3776                            "should be %08x\n", __func__,
3777                            tr32(RX_CPU_BASE + CPU_PC),
3778                                 be32_to_cpu(fw_hdr->base_addr));
3779                 return -ENODEV;
3780         }
3781
3782         tg3_rxcpu_resume(tp);
3783
3784         return 0;
3785 }
3786
3787 static int tg3_validate_rxcpu_state(struct tg3 *tp)
3788 {
3789         const int iters = 1000;
3790         int i;
3791         u32 val;
3792
3793         /* Wait for the boot code to complete initialization and enter its
3794          * service loop. It is then safe to download service patches.
3795          */
3796         for (i = 0; i < iters; i++) {
3797                 if (tr32(RX_CPU_HWBKPT) == TG3_SBROM_IN_SERVICE_LOOP)
3798                         break;
3799
3800                 udelay(10);
3801         }
3802
3803         if (i == iters) {
3804                 netdev_err(tp->dev, "Boot code not ready for service patches\n");
3805                 return -EBUSY;
3806         }
3807
3808         val = tg3_read_indirect_reg32(tp, TG3_57766_FW_HANDSHAKE);
3809         if (val & 0xff) {
3810                 netdev_warn(tp->dev,
3811                             "Other patches exist. Not downloading EEE patch\n");
3812                 return -EEXIST;
3813         }
3814
3815         return 0;
3816 }
3817
3818 /* tp->lock is held. */
3819 static void tg3_load_57766_firmware(struct tg3 *tp)
3820 {
3821         struct tg3_firmware_hdr *fw_hdr;
3822
3823         if (!tg3_flag(tp, NO_NVRAM))
3824                 return;
3825
3826         if (tg3_validate_rxcpu_state(tp))
3827                 return;
3828
3829         if (!tp->fw)
3830                 return;
3831
3832         /* This firmware blob has a different format than older firmware
3833          * releases, as described below. The main difference is that the
3834          * data is fragmented and written to non-contiguous locations.
3835          *
3836          * The blob begins with a firmware header identical to other
3837          * firmware, consisting of version, base addr and length. The length
3838          * here is unused and set to 0xffffffff.
3839          *
3840          * This is followed by a series of firmware fragments, each
3841          * individually identical to previous firmware, i.e. a firmware
3842          * header followed by the data for that fragment. The version
3843          * field of the individual fragment headers is unused.
3844          */
3845
3846         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3847         if (be32_to_cpu(fw_hdr->base_addr) != TG3_57766_FW_BASE_ADDR)
3848                 return;
3849
3850         if (tg3_rxcpu_pause(tp))
3851                 return;
3852
3853         /* tg3_load_firmware_cpu() will always succeed for the 57766 */
3854         tg3_load_firmware_cpu(tp, 0, TG3_57766_FW_BASE_ADDR, 0, fw_hdr);
3855
3856         tg3_rxcpu_resume(tp);
3857 }
3858
3859 /* tp->lock is held. */
3860 static int tg3_load_tso_firmware(struct tg3 *tp)
3861 {
3862         const struct tg3_firmware_hdr *fw_hdr;
3863         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3864         int err;
3865
3866         if (!tg3_flag(tp, FW_TSO))
3867                 return 0;
3868
3869         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
3870
3871         /* The firmware blob starts with version numbers, followed by
3872          * start address and length. The length field holds the complete
3873          * length (end_address_of_bss - start_address_of_text); the
3874          * remainder is the blob to be loaded contiguously from the
3875          * start address. */
3876
3877         cpu_scratch_size = tp->fw_len;
3878
3879         if (tg3_asic_rev(tp) == ASIC_REV_5705) {
3880                 cpu_base = RX_CPU_BASE;
3881                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3882         } else {
3883                 cpu_base = TX_CPU_BASE;
3884                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3885                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3886         }
3887
3888         err = tg3_load_firmware_cpu(tp, cpu_base,
3889                                     cpu_scratch_base, cpu_scratch_size,
3890                                     fw_hdr);
3891         if (err)
3892                 return err;
3893
3894         /* Now startup the cpu. */
3895         err = tg3_pause_cpu_and_set_pc(tp, cpu_base,
3896                                        be32_to_cpu(fw_hdr->base_addr));
3897         if (err) {
3898                 netdev_err(tp->dev,
3899                            "%s fails to set CPU PC, is %08x should be %08x\n",
3900                            __func__, tr32(cpu_base + CPU_PC),
3901                            be32_to_cpu(fw_hdr->base_addr));
3902                 return -ENODEV;
3903         }
3904
3905         tg3_resume_cpu(tp, cpu_base);
3906         return 0;
3907 }
3908
3909
3910 /* tp->lock is held. */
3911 static void __tg3_set_mac_addr(struct tg3 *tp, bool skip_mac_1)
3912 {
3913         u32 addr_high, addr_low;
3914         int i;
3915
3916         addr_high = ((tp->dev->dev_addr[0] << 8) |
3917                      tp->dev->dev_addr[1]);
3918         addr_low = ((tp->dev->dev_addr[2] << 24) |
3919                     (tp->dev->dev_addr[3] << 16) |
3920                     (tp->dev->dev_addr[4] <<  8) |
3921                     (tp->dev->dev_addr[5] <<  0));
3922         for (i = 0; i < 4; i++) {
3923                 if (i == 1 && skip_mac_1)
3924                         continue;
3925                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3926                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3927         }
3928
3929         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
3930             tg3_asic_rev(tp) == ASIC_REV_5704) {
3931                 for (i = 0; i < 12; i++) {
3932                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3933                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3934                 }
3935         }
3936
3937         addr_high = (tp->dev->dev_addr[0] +
3938                      tp->dev->dev_addr[1] +
3939                      tp->dev->dev_addr[2] +
3940                      tp->dev->dev_addr[3] +
3941                      tp->dev->dev_addr[4] +
3942                      tp->dev->dev_addr[5]) &
3943                 TX_BACKOFF_SEED_MASK;
3944         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3945 }
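
     /* Editor's note -- example of the register packing above for the
      * made-up, locally administered address 02:1a:2b:3c:4d:5e:
      *
      *     addr_high = (0x02 << 8) | 0x1a = 0x0000021a
      *     addr_low  = (0x2b << 24) | (0x3c << 16) | (0x4d << 8) | 0x5e
      *               = 0x2b3c4d5e
      *
      * The backoff seed is just the byte sum masked with
      * TX_BACKOFF_SEED_MASK, so ports with different MAC addresses seed
      * different transmit backoff sequences.
      */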
3946
3947 static void tg3_enable_register_access(struct tg3 *tp)
3948 {
3949         /*
3950          * Make sure register accesses (indirect or otherwise) will function
3951          * correctly.
3952          */
3953         pci_write_config_dword(tp->pdev,
3954                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3955 }
3956
3957 static int tg3_power_up(struct tg3 *tp)
3958 {
3959         int err;
3960
3961         tg3_enable_register_access(tp);
3962
3963         err = pci_set_power_state(tp->pdev, PCI_D0);
3964         if (!err) {
3965                 /* Switch out of Vaux if it is a NIC */
3966                 tg3_pwrsrc_switch_to_vmain(tp);
3967         } else {
3968                 netdev_err(tp->dev, "Transition to D0 failed\n");
3969         }
3970
3971         return err;
3972 }
3973
3974 static int tg3_setup_phy(struct tg3 *, bool);
3975
3976 static int tg3_power_down_prepare(struct tg3 *tp)
3977 {
3978         u32 misc_host_ctrl;
3979         bool device_should_wake, do_low_power;
3980
3981         tg3_enable_register_access(tp);
3982
3983         /* Restore the CLKREQ setting. */
3984         if (tg3_flag(tp, CLKREQ_BUG))
3985                 pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
3986                                          PCI_EXP_LNKCTL_CLKREQ_EN);
3987
3988         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3989         tw32(TG3PCI_MISC_HOST_CTRL,
3990              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3991
3992         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3993                              tg3_flag(tp, WOL_ENABLE);
3994
3995         if (tg3_flag(tp, USE_PHYLIB)) {
3996                 do_low_power = false;
3997                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3998                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3999                         struct phy_device *phydev;
4000                         u32 phyid, advertising;
4001
4002                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
4003
4004                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4005
4006                         tp->link_config.speed = phydev->speed;
4007                         tp->link_config.duplex = phydev->duplex;
4008                         tp->link_config.autoneg = phydev->autoneg;
4009                         tp->link_config.advertising = phydev->advertising;
4010
4011                         advertising = ADVERTISED_TP |
4012                                       ADVERTISED_Pause |
4013                                       ADVERTISED_Autoneg |
4014                                       ADVERTISED_10baseT_Half;
4015
4016                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
4017                                 if (tg3_flag(tp, WOL_SPEED_100MB))
4018                                         advertising |=
4019                                                 ADVERTISED_100baseT_Half |
4020                                                 ADVERTISED_100baseT_Full |
4021                                                 ADVERTISED_10baseT_Full;
4022                                 else
4023                                         advertising |= ADVERTISED_10baseT_Full;
4024                         }
4025
4026                         phydev->advertising = advertising;
4027
4028                         phy_start_aneg(phydev);
4029
4030                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
4031                         if (phyid != PHY_ID_BCMAC131) {
4032                                 phyid &= PHY_BCM_OUI_MASK;
4033                                 if (phyid == PHY_BCM_OUI_1 ||
4034                                     phyid == PHY_BCM_OUI_2 ||
4035                                     phyid == PHY_BCM_OUI_3)
4036                                         do_low_power = true;
4037                         }
4038                 }
4039         } else {
4040                 do_low_power = true;
4041
4042                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
4043                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
4044
4045                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
4046                         tg3_setup_phy(tp, false);
4047         }
4048
4049         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
4050                 u32 val;
4051
4052                 val = tr32(GRC_VCPU_EXT_CTRL);
4053                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
4054         } else if (!tg3_flag(tp, ENABLE_ASF)) {
4055                 int i;
4056                 u32 val;
4057
4058                 for (i = 0; i < 200; i++) {
4059                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
4060                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4061                                 break;
4062                         msleep(1);
4063                 }
4064         }
4065         if (tg3_flag(tp, WOL_CAP))
4066                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
4067                                                      WOL_DRV_STATE_SHUTDOWN |
4068                                                      WOL_DRV_WOL |
4069                                                      WOL_SET_MAGIC_PKT);
4070
4071         if (device_should_wake) {
4072                 u32 mac_mode;
4073
4074                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
4075                         if (do_low_power &&
4076                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
4077                                 tg3_phy_auxctl_write(tp,
4078                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
4079                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
4080                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
4081                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
4082                                 udelay(40);
4083                         }
4084
4085                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4086                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
4087                         else if (tp->phy_flags &
4088                                  TG3_PHYFLG_KEEP_LINK_ON_PWRDN) {
4089                                 if (tp->link_config.active_speed == SPEED_1000)
4090                                         mac_mode = MAC_MODE_PORT_MODE_GMII;
4091                                 else
4092                                         mac_mode = MAC_MODE_PORT_MODE_MII;
4093                         } else
4094                                 mac_mode = MAC_MODE_PORT_MODE_MII;
4095
4096                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
4097                         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4098                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
4099                                              SPEED_100 : SPEED_10;
4100                                 if (tg3_5700_link_polarity(tp, speed))
4101                                         mac_mode |= MAC_MODE_LINK_POLARITY;
4102                                 else
4103                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
4104                         }
4105                 } else {
4106                         mac_mode = MAC_MODE_PORT_MODE_TBI;
4107                 }
4108
4109                 if (!tg3_flag(tp, 5750_PLUS))
4110                         tw32(MAC_LED_CTRL, tp->led_ctrl);
4111
4112                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
4113                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
4114                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
4115                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
4116
4117                 if (tg3_flag(tp, ENABLE_APE))
4118                         mac_mode |= MAC_MODE_APE_TX_EN |
4119                                     MAC_MODE_APE_RX_EN |
4120                                     MAC_MODE_TDE_ENABLE;
4121
4122                 tw32_f(MAC_MODE, mac_mode);
4123                 udelay(100);
4124
4125                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
4126                 udelay(10);
4127         }
4128
4129         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
4130             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4131              tg3_asic_rev(tp) == ASIC_REV_5701)) {
4132                 u32 base_val;
4133
4134                 base_val = tp->pci_clock_ctrl;
4135                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
4136                              CLOCK_CTRL_TXCLK_DISABLE);
4137
4138                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
4139                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
4140         } else if (tg3_flag(tp, 5780_CLASS) ||
4141                    tg3_flag(tp, CPMU_PRESENT) ||
4142                    tg3_asic_rev(tp) == ASIC_REV_5906) {
4143                 /* do nothing */
4144         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
4145                 u32 newbits1, newbits2;
4146
4147                 if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4148                     tg3_asic_rev(tp) == ASIC_REV_5701) {
4149                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
4150                                     CLOCK_CTRL_TXCLK_DISABLE |
4151                                     CLOCK_CTRL_ALTCLK);
4152                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4153                 } else if (tg3_flag(tp, 5705_PLUS)) {
4154                         newbits1 = CLOCK_CTRL_625_CORE;
4155                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
4156                 } else {
4157                         newbits1 = CLOCK_CTRL_ALTCLK;
4158                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
4159                 }
4160
4161                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
4162                             40);
4163
4164                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
4165                             40);
4166
4167                 if (!tg3_flag(tp, 5705_PLUS)) {
4168                         u32 newbits3;
4169
4170                         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4171                             tg3_asic_rev(tp) == ASIC_REV_5701) {
4172                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
4173                                             CLOCK_CTRL_TXCLK_DISABLE |
4174                                             CLOCK_CTRL_44MHZ_CORE);
4175                         } else {
4176                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
4177                         }
4178
4179                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
4180                                     tp->pci_clock_ctrl | newbits3, 40);
4181                 }
4182         }
4183
4184         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
4185                 tg3_power_down_phy(tp, do_low_power);
4186
4187         tg3_frob_aux_power(tp, true);
4188
4189         /* Workaround for unstable PLL clock */
4190         if ((!tg3_flag(tp, IS_SSB_CORE)) &&
4191             ((tg3_chip_rev(tp) == CHIPREV_5750_AX) ||
4192              (tg3_chip_rev(tp) == CHIPREV_5750_BX))) {
4193                 u32 val = tr32(0x7d00);
4194
4195                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
4196                 tw32(0x7d00, val);
4197                 if (!tg3_flag(tp, ENABLE_ASF)) {
4198                         int err;
4199
4200                         err = tg3_nvram_lock(tp);
4201                         tg3_halt_cpu(tp, RX_CPU_BASE);
4202                         if (!err)
4203                                 tg3_nvram_unlock(tp);
4204                 }
4205         }
4206
4207         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4208
4209         return 0;
4210 }
4211
4212 static void tg3_power_down(struct tg3 *tp)
4213 {
4214         tg3_power_down_prepare(tp);
4215
4216         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
4217         pci_set_power_state(tp->pdev, PCI_D3hot);
4218 }
4219
4220 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
4221 {
4222         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
4223         case MII_TG3_AUX_STAT_10HALF:
4224                 *speed = SPEED_10;
4225                 *duplex = DUPLEX_HALF;
4226                 break;
4227
4228         case MII_TG3_AUX_STAT_10FULL:
4229                 *speed = SPEED_10;
4230                 *duplex = DUPLEX_FULL;
4231                 break;
4232
4233         case MII_TG3_AUX_STAT_100HALF:
4234                 *speed = SPEED_100;
4235                 *duplex = DUPLEX_HALF;
4236                 break;
4237
4238         case MII_TG3_AUX_STAT_100FULL:
4239                 *speed = SPEED_100;
4240                 *duplex = DUPLEX_FULL;
4241                 break;
4242
4243         case MII_TG3_AUX_STAT_1000HALF:
4244                 *speed = SPEED_1000;
4245                 *duplex = DUPLEX_HALF;
4246                 break;
4247
4248         case MII_TG3_AUX_STAT_1000FULL:
4249                 *speed = SPEED_1000;
4250                 *duplex = DUPLEX_FULL;
4251                 break;
4252
4253         default:
4254                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4255                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
4256                                  SPEED_10;
4257                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
4258                                   DUPLEX_HALF;
4259                         break;
4260                 }
4261                 *speed = SPEED_UNKNOWN;
4262                 *duplex = DUPLEX_UNKNOWN;
4263                 break;
4264         }
4265 }
4266
4267 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
4268 {
4269         int err = 0;
4270         u32 val, new_adv;
4271
4272         new_adv = ADVERTISE_CSMA;
4273         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
4274         new_adv |= mii_advertise_flowctrl(flowctrl);
4275
4276         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
4277         if (err)
4278                 goto done;
4279
4280         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4281                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
4282
4283                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4284                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)
4285                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4286
4287                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
4288                 if (err)
4289                         goto done;
4290         }
4291
4292         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4293                 goto done;
4294
4295         tw32(TG3_CPMU_EEE_MODE,
4296              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
4297
4298         err = tg3_phy_toggle_auxctl_smdsp(tp, true);
4299         if (!err) {
4300                 u32 err2;
4301
4302                 val = 0;
4303                 /* Advertise 100-BaseTX EEE ability */
4304                 if (advertise & ADVERTISED_100baseT_Full)
4305                         val |= MDIO_AN_EEE_ADV_100TX;
4306                 /* Advertise 1000-BaseT EEE ability */
4307                 if (advertise & ADVERTISED_1000baseT_Full)
4308                         val |= MDIO_AN_EEE_ADV_1000T;
4309                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
4310                 if (err)
4311                         val = 0;
4312
4313                 switch (tg3_asic_rev(tp)) {
4314                 case ASIC_REV_5717:
4315                 case ASIC_REV_57765:
4316                 case ASIC_REV_57766:
4317                 case ASIC_REV_5719:
4318                         /* If we advertised any EEE abilities above... */
4319                         if (val)
4320                                 val = MII_TG3_DSP_TAP26_ALNOKO |
4321                                       MII_TG3_DSP_TAP26_RMRXSTO |
4322                                       MII_TG3_DSP_TAP26_OPCSINPT;
4323                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
4324                         /* Fall through */
4325                 case ASIC_REV_5720:
4326                 case ASIC_REV_5762:
4327                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
4328                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
4329                                                  MII_TG3_DSP_CH34TP2_HIBW01);
4330                 }
4331
4332                 err2 = tg3_phy_toggle_auxctl_smdsp(tp, false);
4333                 if (!err)
4334                         err = err2;
4335         }
4336
4337 done:
4338         return err;
4339 }
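
     /* Editor's note -- example of the advertisement encoding above (a
      * sketch; bit names per include/uapi/linux/mii.h).  With
      * advertise = ADVERTISED_100baseT_Full | ADVERTISED_10baseT_Half
      * and flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX:
      *
      *     new_adv = ADVERTISE_CSMA | ADVERTISE_100FULL |
      *               ADVERTISE_10HALF | ADVERTISE_PAUSE_CAP
      *
      * (mii_advertise_flowctrl() maps symmetric tx+rx pause to
      * ADVERTISE_PAUSE_CAP alone.)
      */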
4340
4341 static void tg3_phy_copper_begin(struct tg3 *tp)
4342 {
4343         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
4344             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4345                 u32 adv, fc;
4346
4347                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4348                     !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4349                         adv = ADVERTISED_10baseT_Half |
4350                               ADVERTISED_10baseT_Full;
4351                         if (tg3_flag(tp, WOL_SPEED_100MB))
4352                                 adv |= ADVERTISED_100baseT_Half |
4353                                        ADVERTISED_100baseT_Full;
4354                         if (tp->phy_flags & TG3_PHYFLG_1G_ON_VAUX_OK)
4355                                 adv |= ADVERTISED_1000baseT_Half |
4356                                        ADVERTISED_1000baseT_Full;
4357
4358                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
4359                 } else {
4360                         adv = tp->link_config.advertising;
4361                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
4362                                 adv &= ~(ADVERTISED_1000baseT_Half |
4363                                          ADVERTISED_1000baseT_Full);
4364
4365                         fc = tp->link_config.flowctrl;
4366                 }
4367
4368                 tg3_phy_autoneg_cfg(tp, adv, fc);
4369
4370                 if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
4371                     (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN)) {
4372                         /* Normally during power down we want to autonegotiate
4373                          * the lowest possible speed for WOL. However, to avoid
4374                          * link flap, we leave it untouched.
4375                          */
4376                         return;
4377                 }
4378
4379                 tg3_writephy(tp, MII_BMCR,
4380                              BMCR_ANENABLE | BMCR_ANRESTART);
4381         } else {
4382                 int i;
4383                 u32 bmcr, orig_bmcr;
4384
4385                 tp->link_config.active_speed = tp->link_config.speed;
4386                 tp->link_config.active_duplex = tp->link_config.duplex;
4387
4388                 if (tg3_asic_rev(tp) == ASIC_REV_5714) {
4389                         /* With autoneg disabled, the 5715 (ASIC rev 5714)
4390                          * only links up when the advertisement register
4391                          * has the configured speed enabled.
4392                          */
4393                         tg3_writephy(tp, MII_ADVERTISE, ADVERTISE_ALL);
4394                 }
4395
4396                 bmcr = 0;
4397                 switch (tp->link_config.speed) {
4398                 default:
4399                 case SPEED_10:
4400                         break;
4401
4402                 case SPEED_100:
4403                         bmcr |= BMCR_SPEED100;
4404                         break;
4405
4406                 case SPEED_1000:
4407                         bmcr |= BMCR_SPEED1000;
4408                         break;
4409                 }
4410
4411                 if (tp->link_config.duplex == DUPLEX_FULL)
4412                         bmcr |= BMCR_FULLDPLX;
4413
4414                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
4415                     (bmcr != orig_bmcr)) {
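                             /* Force the link down by putting the PHY in
                              * loopback, then poll (up to ~15 ms) for the link
                              * to drop before programming the new speed/duplex
                              * settings.
                              */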
4416                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
4417                         for (i = 0; i < 1500; i++) {
4418                                 u32 tmp;
4419
4420                                 udelay(10);
4421                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
4422                                     tg3_readphy(tp, MII_BMSR, &tmp))
4423                                         continue;
4424                                 if (!(tmp & BMSR_LSTATUS)) {
4425                                         udelay(40);
4426                                         break;
4427                                 }
4428                         }
4429                         tg3_writephy(tp, MII_BMCR, bmcr);
4430                         udelay(40);
4431                 }
4432         }
4433 }
4434
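     /* Reconstruct tp->link_config from the PHY's current register state
      * (BMCR, ADVERTISE, CTRL1000) instead of the driver defaults, so that
      * software state matches whatever the PHY was last programmed with.
      */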
4435 static int tg3_phy_pull_config(struct tg3 *tp)
4436 {
4437         int err;
4438         u32 val;
4439
4440         err = tg3_readphy(tp, MII_BMCR, &val);
4441         if (err)
4442                 goto done;
4443
4444         if (!(val & BMCR_ANENABLE)) {
4445                 tp->link_config.autoneg = AUTONEG_DISABLE;
4446                 tp->link_config.advertising = 0;
4447                 tg3_flag_clear(tp, PAUSE_AUTONEG);
4448
4449                 err = -EIO;
4450
4451                 switch (val & (BMCR_SPEED1000 | BMCR_SPEED100)) {
4452                 case 0:
4453                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4454                                 goto done;
4455
4456                         tp->link_config.speed = SPEED_10;
4457                         break;
4458                 case BMCR_SPEED100:
4459                         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
4460                                 goto done;
4461
4462                         tp->link_config.speed = SPEED_100;
4463                         break;
4464                 case BMCR_SPEED1000:
4465                         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4466                                 tp->link_config.speed = SPEED_1000;
4467                                 break;
4468                         }
4469                         /* Fall through */
4470                 default:
4471                         goto done;
4472                 }
4473
4474                 if (val & BMCR_FULLDPLX)
4475                         tp->link_config.duplex = DUPLEX_FULL;
4476                 else
4477                         tp->link_config.duplex = DUPLEX_HALF;
4478
4479                 tp->link_config.flowctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
4480
4481                 err = 0;
4482                 goto done;
4483         }
4484
4485         tp->link_config.autoneg = AUTONEG_ENABLE;
4486         tp->link_config.advertising = ADVERTISED_Autoneg;
4487         tg3_flag_set(tp, PAUSE_AUTONEG);
4488
4489         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4490                 u32 adv;
4491
4492                 err = tg3_readphy(tp, MII_ADVERTISE, &val);
4493                 if (err)
4494                         goto done;
4495
4496                 adv = mii_adv_to_ethtool_adv_t(val & ADVERTISE_ALL);
4497                 tp->link_config.advertising |= adv | ADVERTISED_TP;
4498
4499                 tp->link_config.flowctrl = tg3_decode_flowctrl_1000T(val);
4500         } else {
4501                 tp->link_config.advertising |= ADVERTISED_FIBRE;
4502         }
4503
4504         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4505                 u32 adv;
4506
4507                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
4508                         err = tg3_readphy(tp, MII_CTRL1000, &val);
4509                         if (err)
4510                                 goto done;
4511
4512                         adv = mii_ctrl1000_to_ethtool_adv_t(val);
4513                 } else {
4514                         err = tg3_readphy(tp, MII_ADVERTISE, &val);
4515                         if (err)
4516                                 goto done;
4517
4518                         adv = tg3_decode_flowctrl_1000X(val);
4519                         tp->link_config.flowctrl = adv;
4520
4521                         val &= (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL);
4522                         adv = mii_adv_to_ethtool_adv_x(val);
4523                 }
4524
4525                 tp->link_config.advertising |= adv;
4526         }
4527
4528 done:
4529         return err;
4530 }
4531
4532 static int tg3_init_5401phy_dsp(struct tg3 *tp)
4533 {
4534         int err;
4535
4536         /* Turn off tap power management and set the
4537          * extended packet length bit. */
4538         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
4539
4540         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
4541         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
4542         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
4543         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
4544         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
4545
4546         udelay(40);
4547
4548         return err;
4549 }
4550
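     /* Return true if the PHY's EEE advertisement already matches
      * tp->link_config.advertising, or if this PHY has no EEE capability.
      */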
4551 static bool tg3_phy_eee_config_ok(struct tg3 *tp)
4552 {
4553         u32 val;
4554         u32 tgtadv = 0;
4555         u32 advertising = tp->link_config.advertising;
4556
4557         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
4558                 return true;
4559
4560         if (tg3_phy_cl45_read(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, &val))
4561                 return false;
4562
4563         val &= (MDIO_AN_EEE_ADV_100TX | MDIO_AN_EEE_ADV_1000T);
4564
4566         if (advertising & ADVERTISED_100baseT_Full)
4567                 tgtadv |= MDIO_AN_EEE_ADV_100TX;
4568         if (advertising & ADVERTISED_1000baseT_Full)
4569                 tgtadv |= MDIO_AN_EEE_ADV_1000T;
4570
4571         if (val != tgtadv)
4572                 return false;
4573
4574         return true;
4575 }
4576
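     /* Check that the PHY's MII_ADVERTISE and MII_CTRL1000 registers still
      * hold the advertisement the driver intended to program.
      */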
4577 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
4578 {
4579         u32 advmsk, tgtadv, advertising;
4580
4581         advertising = tp->link_config.advertising;
4582         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
4583
4584         advmsk = ADVERTISE_ALL;
4585         if (tp->link_config.active_duplex == DUPLEX_FULL) {
4586                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
4587                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
4588         }
4589
4590         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4591                 return false;
4592
4593         if ((*lcladv & advmsk) != tgtadv)
4594                 return false;
4595
4596         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4597                 u32 tg3_ctrl;
4598
4599                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4600
4601                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4602                         return false;
4603
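                     /* On 5701 A0/B0 the advertisement is programmed with the
                      * master-mode bits set, so include those bits in the
                      * comparison on those revisions.
                      */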
4604                 if (tgtadv &&
4605                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4606                      tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0)) {
4607                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4608                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4609                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4610                 } else {
4611                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4612                 }
4613
4614                 if (tg3_ctrl != tgtadv)
4615                         return false;
4616         }
4617
4618         return true;
4619 }
4620
4621 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4622 {
4623         u32 lpeth = 0;
4624
4625         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4626                 u32 val;
4627
4628                 if (tg3_readphy(tp, MII_STAT1000, &val))
4629                         return false;
4630
4631                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4632         }
4633
4634         if (tg3_readphy(tp, MII_LPA, rmtadv))
4635                 return false;
4636
4637         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4638         tp->link_config.rmt_adv = lpeth;
4639
4640         return true;
4641 }
4642
4643 static bool tg3_test_and_report_link_chg(struct tg3 *tp, bool curr_link_up)
4644 {
4645         if (curr_link_up != tp->link_up) {
4646                 if (curr_link_up) {
4647                         netif_carrier_on(tp->dev);
4648                 } else {
4649                         netif_carrier_off(tp->dev);
4650                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
4651                                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4652                 }
4653
4654                 tg3_link_report(tp);
4655                 return true;
4656         }
4657
4658         return false;
4659 }
4660
4661 static void tg3_clear_mac_status(struct tg3 *tp)
4662 {
4663         tw32(MAC_EVENT, 0);
4664
4665         tw32_f(MAC_STATUS,
4666                MAC_STATUS_SYNC_CHANGED |
4667                MAC_STATUS_CFG_CHANGED |
4668                MAC_STATUS_MI_COMPLETION |
4669                MAC_STATUS_LNKSTATE_CHANGED);
4670         udelay(40);
4671 }
4672
4673 static int tg3_setup_copper_phy(struct tg3 *tp, bool force_reset)
4674 {
4675         bool current_link_up;
4676         u32 bmsr, val;
4677         u32 lcl_adv, rmt_adv;
4678         u16 current_speed;
4679         u8 current_duplex;
4680         int i, err;
4681
4682         tg3_clear_mac_status(tp);
4683
4684         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4685                 tw32_f(MAC_MI_MODE,
4686                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4687                 udelay(80);
4688         }
4689
4690         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4691
4692         /* Some third-party PHYs need to be reset when the link
4693          * goes down.
4694          */
4695         if ((tg3_asic_rev(tp) == ASIC_REV_5703 ||
4696              tg3_asic_rev(tp) == ASIC_REV_5704 ||
4697              tg3_asic_rev(tp) == ASIC_REV_5705) &&
4698             tp->link_up) {
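                     /* The BMSR link-status bit is latched-low, so read the
                      * register twice; the second read reflects the current
                      * link state.
                      */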
4699                 tg3_readphy(tp, MII_BMSR, &bmsr);
4700                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4701                     !(bmsr & BMSR_LSTATUS))
4702                         force_reset = true;
4703         }
4704         if (force_reset)
4705                 tg3_phy_reset(tp);
4706
4707         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4708                 tg3_readphy(tp, MII_BMSR, &bmsr);
4709                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4710                     !tg3_flag(tp, INIT_COMPLETE))
4711                         bmsr = 0;
4712
4713                 if (!(bmsr & BMSR_LSTATUS)) {
4714                         err = tg3_init_5401phy_dsp(tp);
4715                         if (err)
4716                                 return err;
4717
4718                         tg3_readphy(tp, MII_BMSR, &bmsr);
4719                         for (i = 0; i < 1000; i++) {
4720                                 udelay(10);
4721                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4722                                     (bmsr & BMSR_LSTATUS)) {
4723                                         udelay(40);
4724                                         break;
4725                                 }
4726                         }
4727
4728                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4729                             TG3_PHY_REV_BCM5401_B0 &&
4730                             !(bmsr & BMSR_LSTATUS) &&
4731                             tp->link_config.active_speed == SPEED_1000) {
4732                                 err = tg3_phy_reset(tp);
4733                                 if (!err)
4734                                         err = tg3_init_5401phy_dsp(tp);
4735                                 if (err)
4736                                         return err;
4737                         }
4738                 }
4739         } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
4740                    tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0) {
4741                 /* 5701 {A0,B0} CRC bug workaround */
4742                 tg3_writephy(tp, 0x15, 0x0a75);
4743                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4744                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4745                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4746         }
4747
4748         /* Clear pending interrupts... */
4749         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4750         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4751
4752         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4753                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4754         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4755                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4756
4757         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
4758             tg3_asic_rev(tp) == ASIC_REV_5701) {
4759                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4760                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4761                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4762                 else
4763                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4764         }
4765
4766         current_link_up = false;
4767         current_speed = SPEED_UNKNOWN;
4768         current_duplex = DUPLEX_UNKNOWN;
4769         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4770         tp->link_config.rmt_adv = 0;
4771
4772         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4773                 err = tg3_phy_auxctl_read(tp,
4774                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4775                                           &val);
4776                 if (!err && !(val & (1 << 10))) {
4777                         tg3_phy_auxctl_write(tp,
4778                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4779                                              val | (1 << 10));
4780                         goto relink;
4781                 }
4782         }
4783
4784         bmsr = 0;
4785         for (i = 0; i < 100; i++) {
4786                 tg3_readphy(tp, MII_BMSR, &bmsr);
4787                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4788                     (bmsr & BMSR_LSTATUS))
4789                         break;
4790                 udelay(40);
4791         }
4792
4793         if (bmsr & BMSR_LSTATUS) {
4794                 u32 aux_stat, bmcr;
4795
4796                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4797                 for (i = 0; i < 2000; i++) {
4798                         udelay(10);
4799                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4800                             aux_stat)
4801                                 break;
4802                 }
4803
4804                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4805                                              &current_speed,
4806                                              &current_duplex);
4807
4808                 bmcr = 0;
4809                 for (i = 0; i < 200; i++) {
4810                         tg3_readphy(tp, MII_BMCR, &bmcr);
4811                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4812                                 continue;
4813                         if (bmcr && bmcr != 0x7fff)
4814                                 break;
4815                         udelay(10);
4816                 }
4817
4818                 lcl_adv = 0;
4819                 rmt_adv = 0;
4820
4821                 tp->link_config.active_speed = current_speed;
4822                 tp->link_config.active_duplex = current_duplex;
4823
4824                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4825                         bool eee_config_ok = tg3_phy_eee_config_ok(tp);
4826
4827                         if ((bmcr & BMCR_ANENABLE) &&
4828                             eee_config_ok &&
4829                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4830                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4831                                 current_link_up = true;
4832
4833                         /* Changes to EEE settings take effect only after a
4834                          * PHY reset.  If we skipped the reset because Link
4835                          * Flap Avoidance is enabled, do it now.
4836                          */
4837                         if (!eee_config_ok &&
4838                             (tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
4839                             !force_reset)
4840                                 tg3_phy_reset(tp);
4841                 } else {
4842                         if (!(bmcr & BMCR_ANENABLE) &&
4843                             tp->link_config.speed == current_speed &&
4844                             tp->link_config.duplex == current_duplex) {
4845                                 current_link_up = true;
4846                         }
4847                 }
4848
4849                 if (current_link_up &&
4850                     tp->link_config.active_duplex == DUPLEX_FULL) {
4851                         u32 reg, bit;
4852
4853                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4854                                 reg = MII_TG3_FET_GEN_STAT;
4855                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4856                         } else {
4857                                 reg = MII_TG3_EXT_STAT;
4858                                 bit = MII_TG3_EXT_STAT_MDIX;
4859                         }
4860
4861                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4862                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4863
4864                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4865                 }
4866         }
4867
4868 relink:
4869         if (!current_link_up || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4870                 tg3_phy_copper_begin(tp);
4871
4872                 if (tg3_flag(tp, ROBOSWITCH)) {
4873                         current_link_up = true;
4874                         /* FIXME: when BCM5325 switch is used use 100 MBit/s */
4875                         current_speed = SPEED_1000;
4876                         current_duplex = DUPLEX_FULL;
4877                         tp->link_config.active_speed = current_speed;
4878                         tp->link_config.active_duplex = current_duplex;
4879                 }
4880
4881                 tg3_readphy(tp, MII_BMSR, &bmsr);
4882                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4883                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4884                         current_link_up = true;
4885         }
4886
4887         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4888         if (current_link_up) {
4889                 if (tp->link_config.active_speed == SPEED_100 ||
4890                     tp->link_config.active_speed == SPEED_10)
4891                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4892                 else
4893                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4894         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4895                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4896         else
4897                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4898
4899         /* For the 5750 core in the BCM4785 chip to work properly in
4900          * RGMII mode, the LED Control Register must be set up.
4901          */
4902         if (tg3_flag(tp, RGMII_MODE)) {
4903                 u32 led_ctrl = tr32(MAC_LED_CTRL);
4904                 led_ctrl &= ~(LED_CTRL_1000MBPS_ON | LED_CTRL_100MBPS_ON);
4905
4906                 if (tp->link_config.active_speed == SPEED_10)
4907                         led_ctrl |= LED_CTRL_LNKLED_OVERRIDE;
4908                 else if (tp->link_config.active_speed == SPEED_100)
4909                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4910                                      LED_CTRL_100MBPS_ON);
4911                 else if (tp->link_config.active_speed == SPEED_1000)
4912                         led_ctrl |= (LED_CTRL_LNKLED_OVERRIDE |
4913                                      LED_CTRL_1000MBPS_ON);
4914
4915                 tw32(MAC_LED_CTRL, led_ctrl);
4916                 udelay(40);
4917         }
4918
4919         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4920         if (tp->link_config.active_duplex == DUPLEX_HALF)
4921                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4922
4923         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
4924                 if (current_link_up &&
4925                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4926                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4927                 else
4928                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4929         }
4930
4931         /* ??? Without this setting Netgear GA302T PHY does not
4932          * ??? send/receive packets...
4933          */
4934         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4935             tg3_chip_rev_id(tp) == CHIPREV_ID_5700_ALTIMA) {
4936                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4937                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4938                 udelay(80);
4939         }
4940
4941         tw32_f(MAC_MODE, tp->mac_mode);
4942         udelay(40);
4943
4944         tg3_phy_eee_adjust(tp, current_link_up);
4945
4946         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4947                 /* Polled via timer. */
4948                 tw32_f(MAC_EVENT, 0);
4949         } else {
4950                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4951         }
4952         udelay(40);
4953
4954         if (tg3_asic_rev(tp) == ASIC_REV_5700 &&
4955             current_link_up &&
4956             tp->link_config.active_speed == SPEED_1000 &&
4957             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4958                 udelay(120);
4959                 tw32_f(MAC_STATUS,
4960                      (MAC_STATUS_SYNC_CHANGED |
4961                       MAC_STATUS_CFG_CHANGED));
4962                 udelay(40);
4963                 tg3_write_mem(tp,
4964                               NIC_SRAM_FIRMWARE_MBOX,
4965                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4966         }
4967
4968         /* Prevent send BD corruption. */
4969         if (tg3_flag(tp, CLKREQ_BUG)) {
4970                 if (tp->link_config.active_speed == SPEED_100 ||
4971                     tp->link_config.active_speed == SPEED_10)
4972                         pcie_capability_clear_word(tp->pdev, PCI_EXP_LNKCTL,
4973                                                    PCI_EXP_LNKCTL_CLKREQ_EN);
4974                 else
4975                         pcie_capability_set_word(tp->pdev, PCI_EXP_LNKCTL,
4976                                                  PCI_EXP_LNKCTL_CLKREQ_EN);
4977         }
4978
4979         tg3_test_and_report_link_chg(tp, current_link_up);
4980
4981         return 0;
4982 }
4983
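     /* State for the software 1000BASE-X (IEEE 802.3 clause 37) autoneg
      * state machine below.  The MR_* flag names mirror the management
      * register variables used by the standard.
      */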
4984 struct tg3_fiber_aneginfo {
4985         int state;
4986 #define ANEG_STATE_UNKNOWN              0
4987 #define ANEG_STATE_AN_ENABLE            1
4988 #define ANEG_STATE_RESTART_INIT         2
4989 #define ANEG_STATE_RESTART              3
4990 #define ANEG_STATE_DISABLE_LINK_OK      4
4991 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4992 #define ANEG_STATE_ABILITY_DETECT       6
4993 #define ANEG_STATE_ACK_DETECT_INIT      7
4994 #define ANEG_STATE_ACK_DETECT           8
4995 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4996 #define ANEG_STATE_COMPLETE_ACK         10
4997 #define ANEG_STATE_IDLE_DETECT_INIT     11
4998 #define ANEG_STATE_IDLE_DETECT          12
4999 #define ANEG_STATE_LINK_OK              13
5000 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
5001 #define ANEG_STATE_NEXT_PAGE_WAIT       15
5002
5003         u32 flags;
5004 #define MR_AN_ENABLE            0x00000001
5005 #define MR_RESTART_AN           0x00000002
5006 #define MR_AN_COMPLETE          0x00000004
5007 #define MR_PAGE_RX              0x00000008
5008 #define MR_NP_LOADED            0x00000010
5009 #define MR_TOGGLE_TX            0x00000020
5010 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
5011 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
5012 #define MR_LP_ADV_SYM_PAUSE     0x00000100
5013 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
5014 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
5015 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
5016 #define MR_LP_ADV_NEXT_PAGE     0x00001000
5017 #define MR_TOGGLE_RX            0x00002000
5018 #define MR_NP_RX                0x00004000
5019
5020 #define MR_LINK_OK              0x80000000
5021
5022         unsigned long link_time, cur_time;
5023
5024         u32 ability_match_cfg;
5025         int ability_match_count;
5026
5027         char ability_match, idle_match, ack_match;
5028
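             /* Tx/rx configuration ordered sets.  The ANEG_CFG_* values below
              * appear to be the clause 37 base-page bits with the two bytes of
              * the 16-bit word swapped (e.g. full duplex, standard bit 5,
              * shows up here as 0x2000).
              */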
5029         u32 txconfig, rxconfig;
5030 #define ANEG_CFG_NP             0x00000080
5031 #define ANEG_CFG_ACK            0x00000040
5032 #define ANEG_CFG_RF2            0x00000020
5033 #define ANEG_CFG_RF1            0x00000010
5034 #define ANEG_CFG_PS2            0x00000001
5035 #define ANEG_CFG_PS1            0x00008000
5036 #define ANEG_CFG_HD             0x00004000
5037 #define ANEG_CFG_FD             0x00002000
5038 #define ANEG_CFG_INVAL          0x00001f06
5039
5040 };
5041 #define ANEG_OK         0
5042 #define ANEG_DONE       1
5043 #define ANEG_TIMER_ENAB 2
5044 #define ANEG_FAILED     -1
5045
5046 #define ANEG_STATE_SETTLE_TIME  10000
5047
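     /* Run one step of the software autoneg state machine.  Called roughly
      * once per microsecond from fiber_autoneg() until it returns ANEG_DONE
      * or ANEG_FAILED.
      */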
5048 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
5049                                    struct tg3_fiber_aneginfo *ap)
5050 {
5051         u16 flowctrl;
5052         unsigned long delta;
5053         u32 rx_cfg_reg;
5054         int ret;
5055
5056         if (ap->state == ANEG_STATE_UNKNOWN) {
5057                 ap->rxconfig = 0;
5058                 ap->link_time = 0;
5059                 ap->cur_time = 0;
5060                 ap->ability_match_cfg = 0;
5061                 ap->ability_match_count = 0;
5062                 ap->ability_match = 0;
5063                 ap->idle_match = 0;
5064                 ap->ack_match = 0;
5065         }
5066         ap->cur_time++;
5067
5068         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
5069                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
5070
5071                 if (rx_cfg_reg != ap->ability_match_cfg) {
5072                         ap->ability_match_cfg = rx_cfg_reg;
5073                         ap->ability_match = 0;
5074                         ap->ability_match_count = 0;
5075                 } else {
5076                         if (++ap->ability_match_count > 1) {
5077                                 ap->ability_match = 1;
5078                                 ap->ability_match_cfg = rx_cfg_reg;
5079                         }
5080                 }
5081                 if (rx_cfg_reg & ANEG_CFG_ACK)
5082                         ap->ack_match = 1;
5083                 else
5084                         ap->ack_match = 0;
5085
5086                 ap->idle_match = 0;
5087         } else {
5088                 ap->idle_match = 1;
5089                 ap->ability_match_cfg = 0;
5090                 ap->ability_match_count = 0;
5091                 ap->ability_match = 0;
5092                 ap->ack_match = 0;
5093
5094                 rx_cfg_reg = 0;
5095         }
5096
5097         ap->rxconfig = rx_cfg_reg;
5098         ret = ANEG_OK;
5099
5100         switch (ap->state) {
5101         case ANEG_STATE_UNKNOWN:
5102                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
5103                         ap->state = ANEG_STATE_AN_ENABLE;
5104
5105                 /* fallthru */
5106         case ANEG_STATE_AN_ENABLE:
5107                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
5108                 if (ap->flags & MR_AN_ENABLE) {
5109                         ap->link_time = 0;
5110                         ap->cur_time = 0;
5111                         ap->ability_match_cfg = 0;
5112                         ap->ability_match_count = 0;
5113                         ap->ability_match = 0;
5114                         ap->idle_match = 0;
5115                         ap->ack_match = 0;
5116
5117                         ap->state = ANEG_STATE_RESTART_INIT;
5118                 } else {
5119                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
5120                 }
5121                 break;
5122
5123         case ANEG_STATE_RESTART_INIT:
5124                 ap->link_time = ap->cur_time;
5125                 ap->flags &= ~(MR_NP_LOADED);
5126                 ap->txconfig = 0;
5127                 tw32(MAC_TX_AUTO_NEG, 0);
5128                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5129                 tw32_f(MAC_MODE, tp->mac_mode);
5130                 udelay(40);
5131
5132                 ret = ANEG_TIMER_ENAB;
5133                 ap->state = ANEG_STATE_RESTART;
5134
5135                 /* fallthru */
5136         case ANEG_STATE_RESTART:
5137                 delta = ap->cur_time - ap->link_time;
5138                 if (delta > ANEG_STATE_SETTLE_TIME)
5139                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
5140                 else
5141                         ret = ANEG_TIMER_ENAB;
5142                 break;
5143
5144         case ANEG_STATE_DISABLE_LINK_OK:
5145                 ret = ANEG_DONE;
5146                 break;
5147
5148         case ANEG_STATE_ABILITY_DETECT_INIT:
5149                 ap->flags &= ~(MR_TOGGLE_TX);
5150                 ap->txconfig = ANEG_CFG_FD;
5151                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5152                 if (flowctrl & ADVERTISE_1000XPAUSE)
5153                         ap->txconfig |= ANEG_CFG_PS1;
5154                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5155                         ap->txconfig |= ANEG_CFG_PS2;
5156                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5157                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5158                 tw32_f(MAC_MODE, tp->mac_mode);
5159                 udelay(40);
5160
5161                 ap->state = ANEG_STATE_ABILITY_DETECT;
5162                 break;
5163
5164         case ANEG_STATE_ABILITY_DETECT:
5165                 if (ap->ability_match != 0 && ap->rxconfig != 0)
5166                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
5167                 break;
5168
5169         case ANEG_STATE_ACK_DETECT_INIT:
5170                 ap->txconfig |= ANEG_CFG_ACK;
5171                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
5172                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
5173                 tw32_f(MAC_MODE, tp->mac_mode);
5174                 udelay(40);
5175
5176                 ap->state = ANEG_STATE_ACK_DETECT;
5177
5178                 /* fallthru */
5179         case ANEG_STATE_ACK_DETECT:
5180                 if (ap->ack_match != 0) {
5181                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
5182                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
5183                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
5184                         } else {
5185                                 ap->state = ANEG_STATE_AN_ENABLE;
5186                         }
5187                 } else if (ap->ability_match != 0 &&
5188                            ap->rxconfig == 0) {
5189                         ap->state = ANEG_STATE_AN_ENABLE;
5190                 }
5191                 break;
5192
5193         case ANEG_STATE_COMPLETE_ACK_INIT:
5194                 if (ap->rxconfig & ANEG_CFG_INVAL) {
5195                         ret = ANEG_FAILED;
5196                         break;
5197                 }
5198                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
5199                                MR_LP_ADV_HALF_DUPLEX |
5200                                MR_LP_ADV_SYM_PAUSE |
5201                                MR_LP_ADV_ASYM_PAUSE |
5202                                MR_LP_ADV_REMOTE_FAULT1 |
5203                                MR_LP_ADV_REMOTE_FAULT2 |
5204                                MR_LP_ADV_NEXT_PAGE |
5205                                MR_TOGGLE_RX |
5206                                MR_NP_RX);
5207                 if (ap->rxconfig & ANEG_CFG_FD)
5208                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
5209                 if (ap->rxconfig & ANEG_CFG_HD)
5210                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
5211                 if (ap->rxconfig & ANEG_CFG_PS1)
5212                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
5213                 if (ap->rxconfig & ANEG_CFG_PS2)
5214                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
5215                 if (ap->rxconfig & ANEG_CFG_RF1)
5216                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
5217                 if (ap->rxconfig & ANEG_CFG_RF2)
5218                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
5219                 if (ap->rxconfig & ANEG_CFG_NP)
5220                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
5221
5222                 ap->link_time = ap->cur_time;
5223
5224                 ap->flags ^= (MR_TOGGLE_TX);
5225                 if (ap->rxconfig & 0x0008)
5226                         ap->flags |= MR_TOGGLE_RX;
5227                 if (ap->rxconfig & ANEG_CFG_NP)
5228                         ap->flags |= MR_NP_RX;
5229                 ap->flags |= MR_PAGE_RX;
5230
5231                 ap->state = ANEG_STATE_COMPLETE_ACK;
5232                 ret = ANEG_TIMER_ENAB;
5233                 break;
5234
5235         case ANEG_STATE_COMPLETE_ACK:
5236                 if (ap->ability_match != 0 &&
5237                     ap->rxconfig == 0) {
5238                         ap->state = ANEG_STATE_AN_ENABLE;
5239                         break;
5240                 }
5241                 delta = ap->cur_time - ap->link_time;
5242                 if (delta > ANEG_STATE_SETTLE_TIME) {
5243                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
5244                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5245                         } else {
5246                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
5247                                     !(ap->flags & MR_NP_RX)) {
5248                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
5249                                 } else {
5250                                         ret = ANEG_FAILED;
5251                                 }
5252                         }
5253                 }
5254                 break;
5255
5256         case ANEG_STATE_IDLE_DETECT_INIT:
5257                 ap->link_time = ap->cur_time;
5258                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5259                 tw32_f(MAC_MODE, tp->mac_mode);
5260                 udelay(40);
5261
5262                 ap->state = ANEG_STATE_IDLE_DETECT;
5263                 ret = ANEG_TIMER_ENAB;
5264                 break;
5265
5266         case ANEG_STATE_IDLE_DETECT:
5267                 if (ap->ability_match != 0 &&
5268                     ap->rxconfig == 0) {
5269                         ap->state = ANEG_STATE_AN_ENABLE;
5270                         break;
5271                 }
5272                 delta = ap->cur_time - ap->link_time;
5273                 if (delta > ANEG_STATE_SETTLE_TIME) {
5274                         /* XXX another gem from the Broadcom driver :( */
5275                         ap->state = ANEG_STATE_LINK_OK;
5276                 }
5277                 break;
5278
5279         case ANEG_STATE_LINK_OK:
5280                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
5281                 ret = ANEG_DONE;
5282                 break;
5283
5284         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
5285                 /* ??? unimplemented */
5286                 break;
5287
5288         case ANEG_STATE_NEXT_PAGE_WAIT:
5289                 /* ??? unimplemented */
5290                 break;
5291
5292         default:
5293                 ret = ANEG_FAILED;
5294                 break;
5295         }
5296
5297         return ret;
5298 }
5299
5300 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
5301 {
5302         int res = 0;
5303         struct tg3_fiber_aneginfo aninfo;
5304         int status = ANEG_FAILED;
5305         unsigned int tick;
5306         u32 tmp;
5307
5308         tw32_f(MAC_TX_AUTO_NEG, 0);
5309
5310         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
5311         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
5312         udelay(40);
5313
5314         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
5315         udelay(40);
5316
5317         memset(&aninfo, 0, sizeof(aninfo));
5318         aninfo.flags |= MR_AN_ENABLE;
5319         aninfo.state = ANEG_STATE_UNKNOWN;
5320         aninfo.cur_time = 0;
5321         tick = 0;
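             /* Crank the state machine for at most ~195 ms (195000 iterations
              * at roughly 1 us each).
              */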
5322         while (++tick < 195000) {
5323                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
5324                 if (status == ANEG_DONE || status == ANEG_FAILED)
5325                         break;
5326
5327                 udelay(1);
5328         }
5329
5330         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
5331         tw32_f(MAC_MODE, tp->mac_mode);
5332         udelay(40);
5333
5334         *txflags = aninfo.txconfig;
5335         *rxflags = aninfo.flags;
5336
5337         if (status == ANEG_DONE &&
5338             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
5339                              MR_LP_ADV_FULL_DUPLEX)))
5340                 res = 1;
5341
5342         return res;
5343 }
5344
5345 static void tg3_init_bcm8002(struct tg3 *tp)
5346 {
5347         u32 mac_status = tr32(MAC_STATUS);
5348         int i;
5349
5350         /* Reset when initializing the first time or when we have a link. */
5351         if (tg3_flag(tp, INIT_COMPLETE) &&
5352             !(mac_status & MAC_STATUS_PCS_SYNCED))
5353                 return;
5354
5355         /* Set PLL lock range. */
5356         tg3_writephy(tp, 0x16, 0x8007);
5357
5358         /* SW reset */
5359         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
5360
5361         /* Wait for reset to complete. */
5362         /* XXX schedule_timeout() ... */
5363         for (i = 0; i < 500; i++)
5364                 udelay(10);
5365
5366         /* Config mode; select PMA/Ch 1 regs. */
5367         tg3_writephy(tp, 0x10, 0x8411);
5368
5369         /* Enable auto-lock and comdet, select txclk for tx. */
5370         tg3_writephy(tp, 0x11, 0x0a10);
5371
5372         tg3_writephy(tp, 0x18, 0x00a0);
5373         tg3_writephy(tp, 0x16, 0x41ff);
5374
5375         /* Assert and deassert POR. */
5376         tg3_writephy(tp, 0x13, 0x0400);
5377         udelay(40);
5378         tg3_writephy(tp, 0x13, 0x0000);
5379
5380         tg3_writephy(tp, 0x11, 0x0a50);
5381         udelay(40);
5382         tg3_writephy(tp, 0x11, 0x0a10);
5383
5384         /* Wait for signal to stabilize */
5385         /* XXX schedule_timeout() ... */
5386         for (i = 0; i < 15000; i++)
5387                 udelay(10);
5388
5389         /* Deselect the channel register so we can read the PHYID
5390          * later.
5391          */
5392         tg3_writephy(tp, 0x10, 0x8011);
5393 }
5394
5395 static bool tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
5396 {
5397         u16 flowctrl;
5398         bool current_link_up;
5399         u32 sg_dig_ctrl, sg_dig_status;
5400         u32 serdes_cfg, expected_sg_dig_ctrl;
5401         int workaround, port_a;
5402
5403         serdes_cfg = 0;
5404         expected_sg_dig_ctrl = 0;
5405         workaround = 0;
5406         port_a = 1;
5407         current_link_up = false;
5408
5409         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A0 &&
5410             tg3_chip_rev_id(tp) != CHIPREV_ID_5704_A1) {
5411                 workaround = 1;
5412                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
5413                         port_a = 0;
5414
5415                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
5416                 /* preserve bits 20-23 for voltage regulator */
5417                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
5418         }
5419
5420         sg_dig_ctrl = tr32(SG_DIG_CTRL);
5421
5422         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
5423                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
5424                         if (workaround) {
5425                                 u32 val = serdes_cfg;
5426
5427                                 if (port_a)
5428                                         val |= 0xc010000;
5429                                 else
5430                                         val |= 0x4010000;
5431                                 tw32_f(MAC_SERDES_CFG, val);
5432                         }
5433
5434                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5435                 }
5436                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
5437                         tg3_setup_flow_control(tp, 0, 0);
5438                         current_link_up = true;
5439                 }
5440                 goto out;
5441         }
5442
5443         /* Want auto-negotiation.  */
5444         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
5445
5446         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5447         if (flowctrl & ADVERTISE_1000XPAUSE)
5448                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
5449         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
5450                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
5451
5452         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
5453                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
5454                     tp->serdes_counter &&
5455                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
5456                                     MAC_STATUS_RCVD_CFG)) ==
5457                      MAC_STATUS_PCS_SYNCED)) {
5458                         tp->serdes_counter--;
5459                         current_link_up = true;
5460                         goto out;
5461                 }
5462 restart_autoneg:
5463                 if (workaround)
5464                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
5465                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
5466                 udelay(5);
5467                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
5468
5469                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5470                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5471         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
5472                                  MAC_STATUS_SIGNAL_DET)) {
5473                 sg_dig_status = tr32(SG_DIG_STATUS);
5474                 mac_status = tr32(MAC_STATUS);
5475
5476                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
5477                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
5478                         u32 local_adv = 0, remote_adv = 0;
5479
5480                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
5481                                 local_adv |= ADVERTISE_1000XPAUSE;
5482                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
5483                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5484
5485                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
5486                                 remote_adv |= LPA_1000XPAUSE;
5487                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
5488                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5489
5490                         tp->link_config.rmt_adv =
5491                                            mii_adv_to_ethtool_adv_x(remote_adv);
5492
5493                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5494                         current_link_up = true;
5495                         tp->serdes_counter = 0;
5496                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5497                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
5498                         if (tp->serdes_counter)
5499                                 tp->serdes_counter--;
5500                         else {
5501                                 if (workaround) {
5502                                         u32 val = serdes_cfg;
5503
5504                                         if (port_a)
5505                                                 val |= 0xc010000;
5506                                         else
5507                                                 val |= 0x4010000;
5508
5509                                         tw32_f(MAC_SERDES_CFG, val);
5510                                 }
5511
5512                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
5513                                 udelay(40);
5514
5515                                 /* Parallel detection: link is up only if we
5516                                  * have PCS_SYNC and are not receiving
5517                                  * config code words. */
5518                                 mac_status = tr32(MAC_STATUS);
5519                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
5520                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
5521                                         tg3_setup_flow_control(tp, 0, 0);
5522                                         current_link_up = true;
5523                                         tp->phy_flags |=
5524                                                 TG3_PHYFLG_PARALLEL_DETECT;
5525                                         tp->serdes_counter =
5526                                                 SERDES_PARALLEL_DET_TIMEOUT;
5527                                 } else
5528                                         goto restart_autoneg;
5529                         }
5530                 }
5531         } else {
5532                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
5533                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5534         }
5535
5536 out:
5537         return current_link_up;
5538 }
5539
5540 static bool tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
5541 {
5542         bool current_link_up = false;
5543
5544         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
5545                 goto out;
5546
5547         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5548                 u32 txflags, rxflags;
5549                 int i;
5550
5551                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
5552                         u32 local_adv = 0, remote_adv = 0;
5553
5554                         if (txflags & ANEG_CFG_PS1)
5555                                 local_adv |= ADVERTISE_1000XPAUSE;
5556                         if (txflags & ANEG_CFG_PS2)
5557                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
5558
5559                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
5560                                 remote_adv |= LPA_1000XPAUSE;
5561                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
5562                                 remote_adv |= LPA_1000XPAUSE_ASYM;
5563
5564                         tp->link_config.rmt_adv =
5565                                            mii_adv_to_ethtool_adv_x(remote_adv);
5566
5567                         tg3_setup_flow_control(tp, local_adv, remote_adv);
5568
5569                         current_link_up = true;
5570                 }
5571                 for (i = 0; i < 30; i++) {
5572                         udelay(20);
5573                         tw32_f(MAC_STATUS,
5574                                (MAC_STATUS_SYNC_CHANGED |
5575                                 MAC_STATUS_CFG_CHANGED));
5576                         udelay(40);
5577                         if ((tr32(MAC_STATUS) &
5578                              (MAC_STATUS_SYNC_CHANGED |
5579                               MAC_STATUS_CFG_CHANGED)) == 0)
5580                                 break;
5581                 }
5582
5583                 mac_status = tr32(MAC_STATUS);
5584                 if (!current_link_up &&
5585                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
5586                     !(mac_status & MAC_STATUS_RCVD_CFG))
5587                         current_link_up = true;
5588         } else {
5589                 tg3_setup_flow_control(tp, 0, 0);
5590
5591                 /* Forcing 1000FD link up. */
5592                 current_link_up = true;
5593
5594                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
5595                 udelay(40);
5596
5597                 tw32_f(MAC_MODE, tp->mac_mode);
5598                 udelay(40);
5599         }
5600
5601 out:
5602         return current_link_up;
5603 }
5604
5605 static int tg3_setup_fiber_phy(struct tg3 *tp, bool force_reset)
5606 {
5607         u32 orig_pause_cfg;
5608         u16 orig_active_speed;
5609         u8 orig_active_duplex;
5610         u32 mac_status;
5611         bool current_link_up;
5612         int i;
5613
5614         orig_pause_cfg = tp->link_config.active_flowctrl;
5615         orig_active_speed = tp->link_config.active_speed;
5616         orig_active_duplex = tp->link_config.active_duplex;
5617
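             /* If the link is already up, the PCS is synced, and no new config
              * has been received, there is nothing to renegotiate; just ack
              * the changed-status bits and return.
              */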
5618         if (!tg3_flag(tp, HW_AUTONEG) &&
5619             tp->link_up &&
5620             tg3_flag(tp, INIT_COMPLETE)) {
5621                 mac_status = tr32(MAC_STATUS);
5622                 mac_status &= (MAC_STATUS_PCS_SYNCED |
5623                                MAC_STATUS_SIGNAL_DET |
5624                                MAC_STATUS_CFG_CHANGED |
5625                                MAC_STATUS_RCVD_CFG);
5626                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
5627                                    MAC_STATUS_SIGNAL_DET)) {
5628                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5629                                             MAC_STATUS_CFG_CHANGED));
5630                         return 0;
5631                 }
5632         }
5633
5634         tw32_f(MAC_TX_AUTO_NEG, 0);
5635
5636         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
5637         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5638         tw32_f(MAC_MODE, tp->mac_mode);
5639         udelay(40);
5640
5641         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5642                 tg3_init_bcm8002(tp);
5643
5644         /* Enable link change events even when polling the serdes. */
5645         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5646         udelay(40);
5647
5648         current_link_up = false;
5649         tp->link_config.rmt_adv = 0;
5650         mac_status = tr32(MAC_STATUS);
5651
5652         if (tg3_flag(tp, HW_AUTONEG))
5653                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5654         else
5655                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5656
5657         tp->napi[0].hw_status->status =
5658                 (SD_STATUS_UPDATED |
5659                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5660
5661         for (i = 0; i < 100; i++) {
5662                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5663                                     MAC_STATUS_CFG_CHANGED));
5664                 udelay(5);
5665                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5666                                          MAC_STATUS_CFG_CHANGED |
5667                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5668                         break;
5669         }
5670
5671         mac_status = tr32(MAC_STATUS);
5672         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5673                 current_link_up = false;
5674                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5675                     tp->serdes_counter == 0) {
5676                         tw32_f(MAC_MODE, (tp->mac_mode |
5677                                           MAC_MODE_SEND_CONFIGS));
5678                         udelay(1);
5679                         tw32_f(MAC_MODE, tp->mac_mode);
5680                 }
5681         }
5682
5683         if (current_link_up) {
5684                 tp->link_config.active_speed = SPEED_1000;
5685                 tp->link_config.active_duplex = DUPLEX_FULL;
5686                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5687                                     LED_CTRL_LNKLED_OVERRIDE |
5688                                     LED_CTRL_1000MBPS_ON));
5689         } else {
5690                 tp->link_config.active_speed = SPEED_UNKNOWN;
5691                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5692                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5693                                     LED_CTRL_LNKLED_OVERRIDE |
5694                                     LED_CTRL_TRAFFIC_OVERRIDE));
5695         }
5696
5697         if (!tg3_test_and_report_link_chg(tp, current_link_up)) {
5698                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5699                 if (orig_pause_cfg != now_pause_cfg ||
5700                     orig_active_speed != tp->link_config.active_speed ||
5701                     orig_active_duplex != tp->link_config.active_duplex)
5702                         tg3_link_report(tp);
5703         }
5704
5705         return 0;
5706 }
5707
5708 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, bool force_reset)
5709 {
5710         int err = 0;
5711         u32 bmsr, bmcr;
5712         u16 current_speed = SPEED_UNKNOWN;
5713         u8 current_duplex = DUPLEX_UNKNOWN;
5714         bool current_link_up = false;
5715         u32 local_adv, remote_adv, sgsr;
5716
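             /* The 5719/5720 serdes may be running in SGMII mode; if so, take
              * speed, duplex, and link state straight from the serdes status
              * register instead of the usual MII autoneg path.
              */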
5717         if ((tg3_asic_rev(tp) == ASIC_REV_5719 ||
5718              tg3_asic_rev(tp) == ASIC_REV_5720) &&
5719              !tg3_readphy(tp, SERDES_TG3_1000X_STATUS, &sgsr) &&
5720              (sgsr & SERDES_TG3_SGMII_MODE)) {
5721
5722                 if (force_reset)
5723                         tg3_phy_reset(tp);
5724
5725                 tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
5726
5727                 if (!(sgsr & SERDES_TG3_LINK_UP)) {
5728                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5729                 } else {
5730                         current_link_up = true;
5731                         if (sgsr & SERDES_TG3_SPEED_1000) {
5732                                 current_speed = SPEED_1000;
5733                                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5734                         } else if (sgsr & SERDES_TG3_SPEED_100) {
5735                                 current_speed = SPEED_100;
5736                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5737                         } else {
5738                                 current_speed = SPEED_10;
5739                                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
5740                         }
5741
5742                         if (sgsr & SERDES_TG3_FULL_DUPLEX)
5743                                 current_duplex = DUPLEX_FULL;
5744                         else
5745                                 current_duplex = DUPLEX_HALF;
5746                 }
5747
5748                 tw32_f(MAC_MODE, tp->mac_mode);
5749                 udelay(40);
5750
5751                 tg3_clear_mac_status(tp);
5752
5753                 goto fiber_setup_done;
5754         }
5755
5756         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5757         tw32_f(MAC_MODE, tp->mac_mode);
5758         udelay(40);
5759
5760         tg3_clear_mac_status(tp);
5761
5762         if (force_reset)
5763                 tg3_phy_reset(tp);
5764
5765         tp->link_config.rmt_adv = 0;
5766
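        /* BMSR latches link-state changes, so read it twice: the first read
         * clears any latched link-down indication and the second returns the
         * current link state.
         */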
5767         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5768         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5769         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5770                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5771                         bmsr |= BMSR_LSTATUS;
5772                 else
5773                         bmsr &= ~BMSR_LSTATUS;
5774         }
5775
5776         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5777
5778         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5779             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5780                 /* do nothing, just check for link up at the end */
5781         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5782                 u32 adv, newadv;
5783
5784                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5785                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5786                                  ADVERTISE_1000XPAUSE |
5787                                  ADVERTISE_1000XPSE_ASYM |
5788                                  ADVERTISE_SLCT);
5789
5790                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5791                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5792
5793                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5794                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5795                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5796                         tg3_writephy(tp, MII_BMCR, bmcr);
5797
5798                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5799                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5800                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5801
5802                         return err;
5803                 }
5804         } else {
5805                 u32 new_bmcr;
5806
5807                 bmcr &= ~BMCR_SPEED1000;
5808                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5809
5810                 if (tp->link_config.duplex == DUPLEX_FULL)
5811                         new_bmcr |= BMCR_FULLDPLX;
5812
5813                 if (new_bmcr != bmcr) {
5814                         /* BMCR_SPEED1000 is a reserved bit that needs
5815                          * to be set on write.
5816                          */
5817                         new_bmcr |= BMCR_SPEED1000;
5818
5819                         /* Force a linkdown */
5820                         if (tp->link_up) {
5821                                 u32 adv;
5822
5823                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5824                                 adv &= ~(ADVERTISE_1000XFULL |
5825                                          ADVERTISE_1000XHALF |
5826                                          ADVERTISE_SLCT);
5827                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5828                                 tg3_writephy(tp, MII_BMCR, bmcr |
5829                                                            BMCR_ANRESTART |
5830                                                            BMCR_ANENABLE);
5831                                 udelay(10);
5832                                 tg3_carrier_off(tp);
5833                         }
5834                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5835                         bmcr = new_bmcr;
5836                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5837                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5838                         if (tg3_asic_rev(tp) == ASIC_REV_5714) {
5839                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5840                                         bmsr |= BMSR_LSTATUS;
5841                                 else
5842                                         bmsr &= ~BMSR_LSTATUS;
5843                         }
5844                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5845                 }
5846         }
5847
5848         if (bmsr & BMSR_LSTATUS) {
5849                 current_speed = SPEED_1000;
5850                 current_link_up = true;
5851                 if (bmcr & BMCR_FULLDPLX)
5852                         current_duplex = DUPLEX_FULL;
5853                 else
5854                         current_duplex = DUPLEX_HALF;
5855
5856                 local_adv = 0;
5857                 remote_adv = 0;
5858
5859                 if (bmcr & BMCR_ANENABLE) {
5860                         u32 common;
5861
5862                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5863                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5864                         common = local_adv & remote_adv;
5865                         if (common & (ADVERTISE_1000XHALF |
5866                                       ADVERTISE_1000XFULL)) {
5867                                 if (common & ADVERTISE_1000XFULL)
5868                                         current_duplex = DUPLEX_FULL;
5869                                 else
5870                                         current_duplex = DUPLEX_HALF;
5871
5872                                 tp->link_config.rmt_adv =
5873                                            mii_adv_to_ethtool_adv_x(remote_adv);
5874                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5875                                 /* Link is up via parallel detect */
5876                         } else {
5877                                 current_link_up = false;
5878                         }
5879                 }
5880         }
5881
5882 fiber_setup_done:
5883         if (current_link_up && current_duplex == DUPLEX_FULL)
5884                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5885
5886         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5887         if (tp->link_config.active_duplex == DUPLEX_HALF)
5888                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5889
5890         tw32_f(MAC_MODE, tp->mac_mode);
5891         udelay(40);
5892
5893         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5894
5895         tp->link_config.active_speed = current_speed;
5896         tp->link_config.active_duplex = current_duplex;
5897
5898         tg3_test_and_report_link_chg(tp, current_link_up);
5899         return err;
5900 }
5901
5902 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5903 {
5904         if (tp->serdes_counter) {
5905                 /* Give autoneg time to complete. */
5906                 tp->serdes_counter--;
5907                 return;
5908         }
5909
5910         if (!tp->link_up &&
5911             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5912                 u32 bmcr;
5913
5914                 tg3_readphy(tp, MII_BMCR, &bmcr);
5915                 if (bmcr & BMCR_ANENABLE) {
5916                         u32 phy1, phy2;
5917
5918                         /* Select shadow register 0x1f */
5919                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5920                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5921
5922                         /* Select expansion interrupt status register */
5923                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5924                                          MII_TG3_DSP_EXP1_INT_STAT);
5925                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5926                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5927
5928                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5929                                 /* We have signal detect and not receiving
5930                                  * config code words, link is up by parallel
5931                                  * detection.
5932                                  */
5933
5934                                 bmcr &= ~BMCR_ANENABLE;
5935                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5936                                 tg3_writephy(tp, MII_BMCR, bmcr);
5937                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5938                         }
5939                 }
5940         } else if (tp->link_up &&
5941                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5942                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5943                 u32 phy2;
5944
5945                 /* Select expansion interrupt status register */
5946                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5947                                  MII_TG3_DSP_EXP1_INT_STAT);
5948                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5949                 if (phy2 & 0x20) {
5950                         u32 bmcr;
5951
5952                         /* Config code words received, turn on autoneg. */
5953                         tg3_readphy(tp, MII_BMCR, &bmcr);
5954                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5955
5956                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5957
5958                 }
5959         }
5960 }
5961
5962 static int tg3_setup_phy(struct tg3 *tp, bool force_reset)
5963 {
5964         u32 val;
5965         int err;
5966
5967         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5968                 err = tg3_setup_fiber_phy(tp, force_reset);
5969         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5970                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5971         else
5972                 err = tg3_setup_copper_phy(tp, force_reset);
5973
5974         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
5975                 u32 scale;
5976
5977                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5978                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5979                         scale = 65;
5980                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5981                         scale = 6;
5982                 else
5983                         scale = 12;
5984
5985                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5986                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5987                 tw32(GRC_MISC_CFG, val);
5988         }
5989
5990         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5991               (6 << TX_LENGTHS_IPG_SHIFT);
5992         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
5993             tg3_asic_rev(tp) == ASIC_REV_5762)
5994                 val |= tr32(MAC_TX_LENGTHS) &
5995                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5996                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5997
5998         if (tp->link_config.active_speed == SPEED_1000 &&
5999             tp->link_config.active_duplex == DUPLEX_HALF)
6000                 tw32(MAC_TX_LENGTHS, val |
6001                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
6002         else
6003                 tw32(MAC_TX_LENGTHS, val |
6004                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
6005
6006         if (!tg3_flag(tp, 5705_PLUS)) {
6007                 if (tp->link_up) {
6008                         tw32(HOSTCC_STAT_COAL_TICKS,
6009                              tp->coal.stats_block_coalesce_usecs);
6010                 } else {
6011                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
6012                 }
6013         }
6014
6015         if (tg3_flag(tp, ASPM_WORKAROUND)) {
6016                 val = tr32(PCIE_PWR_MGMT_THRESH);
6017                 if (!tp->link_up)
6018                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
6019                               tp->pwrmgmt_thresh;
6020                 else
6021                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
6022                 tw32(PCIE_PWR_MGMT_THRESH, val);
6023         }
6024
6025         return err;
6026 }
6027
6028 /* tp->lock must be held */
6029 static u64 tg3_refclk_read(struct tg3 *tp)
6030 {
6031         u64 stamp = tr32(TG3_EAV_REF_CLCK_LSB);
6032         return stamp | (u64)tr32(TG3_EAV_REF_CLCK_MSB) << 32;
6033 }
6034
6035 /* tp->lock must be held */
6036 static void tg3_refclk_write(struct tg3 *tp, u64 newval)
6037 {
6038         tw32(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_STOP);
6039         tw32(TG3_EAV_REF_CLCK_LSB, newval & 0xffffffff);
6040         tw32(TG3_EAV_REF_CLCK_MSB, newval >> 32);
6041         tw32_f(TG3_EAV_REF_CLCK_CTL, TG3_EAV_REF_CLCK_CTL_RESUME);
6042 }
6043
6044 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync);
6045 static inline void tg3_full_unlock(struct tg3 *tp);
6046 static int tg3_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info)
6047 {
6048         struct tg3 *tp = netdev_priv(dev);
6049
6050         info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
6051                                 SOF_TIMESTAMPING_RX_SOFTWARE |
6052                                 SOF_TIMESTAMPING_SOFTWARE;
6053
6054         if (tg3_flag(tp, PTP_CAPABLE)) {
6055                 info->so_timestamping |= SOF_TIMESTAMPING_TX_HARDWARE |
6056                                         SOF_TIMESTAMPING_RX_HARDWARE |
6057                                         SOF_TIMESTAMPING_RAW_HARDWARE;
6058         }
6059
6060         if (tp->ptp_clock)
6061                 info->phc_index = ptp_clock_index(tp->ptp_clock);
6062         else
6063                 info->phc_index = -1;
6064
6065         info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
6066
6067         info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
6068                            (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
6069                            (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
6070                            (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
6071         return 0;
6072 }
6073
6074 static int tg3_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
6075 {
6076         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6077         bool neg_adj = false;
6078         u32 correction = 0;
6079
6080         if (ppb < 0) {
6081                 neg_adj = true;
6082                 ppb = -ppb;
6083         }
6084
6085         /* Frequency adjustment is performed using hardware with a 24 bit
6086          * accumulator and a programmable correction value. On each clk, the
6087          * correction value gets added to the accumulator and when it
6088          * overflows, the time counter is incremented/decremented.
6089          *
6090          * So conversion from ppb to correction value is
6091          *              ppb * (1 << 24) / 1000000000
6092          */
6093         correction = div_u64((u64)ppb * (1 << 24), 1000000000ULL) &
6094                      TG3_EAV_REF_CLK_CORRECT_MASK;
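        /* For example, ppb = 100000 (100 ppm) gives
         * correction = (100000 * (1 << 24)) / 1000000000 = 1677, so on each
         * clock the 24-bit accumulator advances an extra 1677/2^24, i.e.
         * roughly 100 ppm faster (or slower when neg_adj is set).
         */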
6095
6096         tg3_full_lock(tp, 0);
6097
6098         if (correction)
6099                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL,
6100                      TG3_EAV_REF_CLK_CORRECT_EN |
6101                      (neg_adj ? TG3_EAV_REF_CLK_CORRECT_NEG : 0) | correction);
6102         else
6103                 tw32(TG3_EAV_REF_CLK_CORRECT_CTL, 0);
6104
6105         tg3_full_unlock(tp);
6106
6107         return 0;
6108 }
6109
6110 static int tg3_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
6111 {
6112         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6113
6114         tg3_full_lock(tp, 0);
6115         tp->ptp_adjust += delta;
6116         tg3_full_unlock(tp);
6117
6118         return 0;
6119 }
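/* Note that adjtime never touches the hardware counter itself; the
 * accumulated tp->ptp_adjust is folded back in by tg3_ptp_gettime() and
 * tg3_hwclock_to_timestamp() when timestamps are read out.
 */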
6120
6121 static int tg3_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
6122 {
6123         u64 ns;
6124         u32 remainder;
6125         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6126
6127         tg3_full_lock(tp, 0);
6128         ns = tg3_refclk_read(tp);
6129         ns += tp->ptp_adjust;
6130         tg3_full_unlock(tp);
6131
6132         ts->tv_sec = div_u64_rem(ns, 1000000000, &remainder);
6133         ts->tv_nsec = remainder;
6134
6135         return 0;
6136 }
6137
6138 static int tg3_ptp_settime(struct ptp_clock_info *ptp,
6139                            const struct timespec *ts)
6140 {
6141         u64 ns;
6142         struct tg3 *tp = container_of(ptp, struct tg3, ptp_info);
6143
6144         ns = timespec_to_ns(ts);
6145
6146         tg3_full_lock(tp, 0);
6147         tg3_refclk_write(tp, ns);
6148         tp->ptp_adjust = 0;
6149         tg3_full_unlock(tp);
6150
6151         return 0;
6152 }
6153
6154 static int tg3_ptp_enable(struct ptp_clock_info *ptp,
6155                           struct ptp_clock_request *rq, int on)
6156 {
6157         return -EOPNOTSUPP;
6158 }
6159
6160 static const struct ptp_clock_info tg3_ptp_caps = {
6161         .owner          = THIS_MODULE,
6162         .name           = "tg3 clock",
6163         .max_adj        = 250000000,
6164         .n_alarm        = 0,
6165         .n_ext_ts       = 0,
6166         .n_per_out      = 0,
6167         .pps            = 0,
6168         .adjfreq        = tg3_ptp_adjfreq,
6169         .adjtime        = tg3_ptp_adjtime,
6170         .gettime        = tg3_ptp_gettime,
6171         .settime        = tg3_ptp_settime,
6172         .enable         = tg3_ptp_enable,
6173 };
6174
6175 static void tg3_hwclock_to_timestamp(struct tg3 *tp, u64 hwclock,
6176                                      struct skb_shared_hwtstamps *timestamp)
6177 {
6178         memset(timestamp, 0, sizeof(struct skb_shared_hwtstamps));
6179         timestamp->hwtstamp  = ns_to_ktime((hwclock & TG3_TSTAMP_MASK) +
6180                                            tp->ptp_adjust);
6181 }
6182
6183 /* tp->lock must be held */
6184 static void tg3_ptp_init(struct tg3 *tp)
6185 {
6186         if (!tg3_flag(tp, PTP_CAPABLE))
6187                 return;
6188
6189         /* Initialize the hardware clock to the system time. */
6190         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()));
6191         tp->ptp_adjust = 0;
6192         tp->ptp_info = tg3_ptp_caps;
6193 }
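/* tp->ptp_info is registered with the PTP core elsewhere in the driver, as
 * the ptp_clock_unregister() in tg3_ptp_fini() implies.  A minimal sketch of
 * that step, assuming it runs at probe time:
 *
 *	tp->ptp_clock = ptp_clock_register(&tp->ptp_info, &tp->pdev->dev);
 *	if (IS_ERR(tp->ptp_clock)) {
 *		tp->ptp_clock = NULL;
 *		dev_warn(&tp->pdev->dev, "PTP clock registration failed\n");
 *	}
 */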
6194
6195 /* tp->lock must be held */
6196 static void tg3_ptp_resume(struct tg3 *tp)
6197 {
6198         if (!tg3_flag(tp, PTP_CAPABLE))
6199                 return;
6200
6201         tg3_refclk_write(tp, ktime_to_ns(ktime_get_real()) + tp->ptp_adjust);
6202         tp->ptp_adjust = 0;
6203 }
6204
6205 static void tg3_ptp_fini(struct tg3 *tp)
6206 {
6207         if (!tg3_flag(tp, PTP_CAPABLE) || !tp->ptp_clock)
6208                 return;
6209
6210         ptp_clock_unregister(tp->ptp_clock);
6211         tp->ptp_clock = NULL;
6212         tp->ptp_adjust = 0;
6213 }
6214
6215 static inline int tg3_irq_sync(struct tg3 *tp)
6216 {
6217         return tp->irq_sync;
6218 }
6219
6220 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
6221 {
6222         int i;
6223
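        /* Bias dst by 'off' so that the register at offset (off + i) lands
         * at dst[(off + i) / 4]; tg3_dump_state() depends on this layout
         * when it prints 'i * 4' as the register offset.
         */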
6224         dst = (u32 *)((u8 *)dst + off);
6225         for (i = 0; i < len; i += sizeof(u32))
6226                 *dst++ = tr32(off + i);
6227 }
6228
6229 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
6230 {
6231         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
6232         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
6233         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
6234         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
6235         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
6236         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
6237         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
6238         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
6239         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
6240         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
6241         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
6242         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
6243         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
6244         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
6245         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
6246         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
6247         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
6248         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
6249         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
6250
6251         if (tg3_flag(tp, SUPPORT_MSIX))
6252                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
6253
6254         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
6255         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
6256         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
6257         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
6258         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
6259         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
6260         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
6261         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
6262
6263         if (!tg3_flag(tp, 5705_PLUS)) {
6264                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
6265                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
6266                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
6267         }
6268
6269         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
6270         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
6271         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
6272         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
6273         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
6274
6275         if (tg3_flag(tp, NVRAM))
6276                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
6277 }
6278
6279 static void tg3_dump_state(struct tg3 *tp)
6280 {
6281         int i;
6282         u32 *regs;
6283
6284         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
6285         if (!regs)
6286                 return;
6287
6288         if (tg3_flag(tp, PCI_EXPRESS)) {
6289                 /* Read up to but not including private PCI registers */
6290                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
6291                         regs[i / sizeof(u32)] = tr32(i);
6292         } else
6293                 tg3_dump_legacy_regs(tp, regs);
6294
6295         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
6296                 if (!regs[i + 0] && !regs[i + 1] &&
6297                     !regs[i + 2] && !regs[i + 3])
6298                         continue;
6299
6300                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
6301                            i * 4,
6302                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
6303         }
6304
6305         kfree(regs);
6306
6307         for (i = 0; i < tp->irq_cnt; i++) {
6308                 struct tg3_napi *tnapi = &tp->napi[i];
6309
6310                 /* SW status block */
6311                 netdev_err(tp->dev,
6312                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
6313                            i,
6314                            tnapi->hw_status->status,
6315                            tnapi->hw_status->status_tag,
6316                            tnapi->hw_status->rx_jumbo_consumer,
6317                            tnapi->hw_status->rx_consumer,
6318                            tnapi->hw_status->rx_mini_consumer,
6319                            tnapi->hw_status->idx[0].rx_producer,
6320                            tnapi->hw_status->idx[0].tx_consumer);
6321
6322                 netdev_err(tp->dev,
6323                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
6324                            i,
6325                            tnapi->last_tag, tnapi->last_irq_tag,
6326                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
6327                            tnapi->rx_rcb_ptr,
6328                            tnapi->prodring.rx_std_prod_idx,
6329                            tnapi->prodring.rx_std_cons_idx,
6330                            tnapi->prodring.rx_jmb_prod_idx,
6331                            tnapi->prodring.rx_jmb_cons_idx);
6332         }
6333 }
6334
6335 /* This is called whenever we suspect that the system chipset is
6336  * reordering the sequence of MMIO to the tx send mailbox. The symptom
6337  * is bogus tx completions. We try to recover by setting the
6338  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
6339  * in the workqueue.
6340  */
6341 static void tg3_tx_recover(struct tg3 *tp)
6342 {
6343         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
6344                tp->write32_tx_mbox == tg3_write_indirect_mbox);
6345
6346         netdev_warn(tp->dev,
6347                     "The system may be re-ordering memory-mapped I/O "
6348                     "cycles to the network device, attempting to recover. "
6349                     "Please report the problem to the driver maintainer "
6350                     "and include system chipset information.\n");
6351
6352         spin_lock(&tp->lock);
6353         tg3_flag_set(tp, TX_RECOVERY_PENDING);
6354         spin_unlock(&tp->lock);
6355 }
6356
6357 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
6358 {
6359         /* Tell compiler to fetch tx indices from memory. */
6360         barrier();
6361         return tnapi->tx_pending -
6362                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
6363 }
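/* Example: with the 512-entry TX ring, tx_prod = 5 and tx_cons = 510 give
 * (5 - 510) & 511 = 7 descriptors still in flight, so the function returns
 * tx_pending - 7.  The mask makes the unsigned wraparound harmless because
 * the ring size is a power of two.
 */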
6364
6365 /* Tigon3 never reports partial packet sends.  So we do not
6366  * need special logic to handle SKBs that have not had all
6367  * of their frags sent yet, like SunGEM does.
6368  */
6369 static void tg3_tx(struct tg3_napi *tnapi)
6370 {
6371         struct tg3 *tp = tnapi->tp;
6372         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
6373         u32 sw_idx = tnapi->tx_cons;
6374         struct netdev_queue *txq;
6375         int index = tnapi - tp->napi;
6376         unsigned int pkts_compl = 0, bytes_compl = 0;
6377
6378         if (tg3_flag(tp, ENABLE_TSS))
6379                 index--;
6380
6381         txq = netdev_get_tx_queue(tp->dev, index);
6382
6383         while (sw_idx != hw_idx) {
6384                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
6385                 struct sk_buff *skb = ri->skb;
6386                 int i, tx_bug = 0;
6387
6388                 if (unlikely(skb == NULL)) {
6389                         tg3_tx_recover(tp);
6390                         return;
6391                 }
6392
6393                 if (tnapi->tx_ring[sw_idx].len_flags & TXD_FLAG_HWTSTAMP) {
6394                         struct skb_shared_hwtstamps timestamp;
6395                         u64 hwclock = tr32(TG3_TX_TSTAMP_LSB);
6396                         hwclock |= (u64)tr32(TG3_TX_TSTAMP_MSB) << 32;
6397
6398                         tg3_hwclock_to_timestamp(tp, hwclock, &timestamp);
6399
6400                         skb_tstamp_tx(skb, &timestamp);
6401                 }
6402
6403                 pci_unmap_single(tp->pdev,
6404                                  dma_unmap_addr(ri, mapping),
6405                                  skb_headlen(skb),
6406                                  PCI_DMA_TODEVICE);
6407
6408                 ri->skb = NULL;
6409
6410                 while (ri->fragmented) {
6411                         ri->fragmented = false;
6412                         sw_idx = NEXT_TX(sw_idx);
6413                         ri = &tnapi->tx_buffers[sw_idx];
6414                 }
6415
6416                 sw_idx = NEXT_TX(sw_idx);
6417
6418                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6419                         ri = &tnapi->tx_buffers[sw_idx];
6420                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
6421                                 tx_bug = 1;
6422
6423                         pci_unmap_page(tp->pdev,
6424                                        dma_unmap_addr(ri, mapping),
6425                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
6426                                        PCI_DMA_TODEVICE);
6427
6428                         while (ri->fragmented) {
6429                                 ri->fragmented = false;
6430                                 sw_idx = NEXT_TX(sw_idx);
6431                                 ri = &tnapi->tx_buffers[sw_idx];
6432                         }
6433
6434                         sw_idx = NEXT_TX(sw_idx);
6435                 }
6436
6437                 pkts_compl++;
6438                 bytes_compl += skb->len;
6439
6440                 dev_kfree_skb_any(skb);
6441
6442                 if (unlikely(tx_bug)) {
6443                         tg3_tx_recover(tp);
6444                         return;
6445                 }
6446         }
6447
6448         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
6449
6450         tnapi->tx_cons = sw_idx;
6451
6452         /* Need to make the tx_cons update visible to tg3_start_xmit()
6453          * before checking for netif_queue_stopped().  Without the
6454          * memory barrier, there is a small possibility that tg3_start_xmit()
6455          * will miss it and cause the queue to be stopped forever.
6456          */
6457         smp_mb();
6458
6459         if (unlikely(netif_tx_queue_stopped(txq) &&
6460                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
6461                 __netif_tx_lock(txq, smp_processor_id());
6462                 if (netif_tx_queue_stopped(txq) &&
6463                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
6464                         netif_tx_wake_queue(txq);
6465                 __netif_tx_unlock(txq);
6466         }
6467 }
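/* Waking the queue only once tg3_tx_avail() exceeds TG3_TX_WAKEUP_THRESH
 * adds hysteresis so the queue does not flap between stopped and running on
 * every reclaimed descriptor; the re-check under __netif_tx_lock closes the
 * race with a concurrent tg3_start_xmit() stopping the queue.
 */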
6468
6469 static void tg3_frag_free(bool is_frag, void *data)
6470 {
6471         if (is_frag)
6472                 put_page(virt_to_head_page(data));
6473         else
6474                 kfree(data);
6475 }
6476
6477 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
6478 {
6479         unsigned int skb_size = SKB_DATA_ALIGN(map_sz + TG3_RX_OFFSET(tp)) +
6480                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
6481
6482         if (!ri->data)
6483                 return;
6484
6485         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
6486                          map_sz, PCI_DMA_FROMDEVICE);
6487         tg3_frag_free(skb_size <= PAGE_SIZE, ri->data);
6488         ri->data = NULL;
6489 }
6490
6491
6492 /* Returns size of skb allocated or < 0 on error.
6493  *
6494  * We only need to fill in the address because the other members
6495  * of the RX descriptor are invariant, see tg3_init_rings.
6496  *
6497  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
6498  * posting buffers we only dirty the first cache line of the RX
6499  * descriptor (containing the address).  Whereas for the RX status
6500  * buffers the cpu only reads the last cacheline of the RX descriptor
6501  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
6502  */
6503 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
6504                              u32 opaque_key, u32 dest_idx_unmasked,
6505                              unsigned int *frag_size)
6506 {
6507         struct tg3_rx_buffer_desc *desc;
6508         struct ring_info *map;
6509         u8 *data;
6510         dma_addr_t mapping;
6511         int skb_size, data_size, dest_idx;
6512
6513         switch (opaque_key) {
6514         case RXD_OPAQUE_RING_STD:
6515                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6516                 desc = &tpr->rx_std[dest_idx];
6517                 map = &tpr->rx_std_buffers[dest_idx];
6518                 data_size = tp->rx_pkt_map_sz;
6519                 break;
6520
6521         case RXD_OPAQUE_RING_JUMBO:
6522                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6523                 desc = &tpr->rx_jmb[dest_idx].std;
6524                 map = &tpr->rx_jmb_buffers[dest_idx];
6525                 data_size = TG3_RX_JMB_MAP_SZ;
6526                 break;
6527
6528         default:
6529                 return -EINVAL;
6530         }
6531
6532         /* Do not overwrite any of the map or rp information
6533          * until we are sure we can commit to a new buffer.
6534          *
6535          * Callers depend upon this behavior and assume that
6536          * we leave everything unchanged if we fail.
6537          */
6538         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
6539                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
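        /* The skb_shared_info tail room is reserved up front because tg3_rx()
         * later passes this buffer to build_skb(), which expects the shared
         * info struct to live at the end of the allocation.
         */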
6540         if (skb_size <= PAGE_SIZE) {
6541                 data = netdev_alloc_frag(skb_size);
6542                 *frag_size = skb_size;
6543         } else {
6544                 data = kmalloc(skb_size, GFP_ATOMIC);
6545                 *frag_size = 0;
6546         }
6547         if (!data)
6548                 return -ENOMEM;
6549
6550         mapping = pci_map_single(tp->pdev,
6551                                  data + TG3_RX_OFFSET(tp),
6552                                  data_size,
6553                                  PCI_DMA_FROMDEVICE);
6554         if (unlikely(pci_dma_mapping_error(tp->pdev, mapping))) {
6555                 tg3_frag_free(skb_size <= PAGE_SIZE, data);
6556                 return -EIO;
6557         }
6558
6559         map->data = data;
6560         dma_unmap_addr_set(map, mapping, mapping);
6561
6562         desc->addr_hi = ((u64)mapping >> 32);
6563         desc->addr_lo = ((u64)mapping & 0xffffffff);
6564
6565         return data_size;
6566 }
6567
6568 /* We only need to move over in the address because the other
6569  * members of the RX descriptor are invariant.  See notes above
6570  * tg3_alloc_rx_data for full details.
6571  */
6572 static void tg3_recycle_rx(struct tg3_napi *tnapi,
6573                            struct tg3_rx_prodring_set *dpr,
6574                            u32 opaque_key, int src_idx,
6575                            u32 dest_idx_unmasked)
6576 {
6577         struct tg3 *tp = tnapi->tp;
6578         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
6579         struct ring_info *src_map, *dest_map;
6580         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
6581         int dest_idx;
6582
6583         switch (opaque_key) {
6584         case RXD_OPAQUE_RING_STD:
6585                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
6586                 dest_desc = &dpr->rx_std[dest_idx];
6587                 dest_map = &dpr->rx_std_buffers[dest_idx];
6588                 src_desc = &spr->rx_std[src_idx];
6589                 src_map = &spr->rx_std_buffers[src_idx];
6590                 break;
6591
6592         case RXD_OPAQUE_RING_JUMBO:
6593                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
6594                 dest_desc = &dpr->rx_jmb[dest_idx].std;
6595                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
6596                 src_desc = &spr->rx_jmb[src_idx].std;
6597                 src_map = &spr->rx_jmb_buffers[src_idx];
6598                 break;
6599
6600         default:
6601                 return;
6602         }
6603
6604         dest_map->data = src_map->data;
6605         dma_unmap_addr_set(dest_map, mapping,
6606                            dma_unmap_addr(src_map, mapping));
6607         dest_desc->addr_hi = src_desc->addr_hi;
6608         dest_desc->addr_lo = src_desc->addr_lo;
6609
6610         /* Ensure that the update to the skb happens after the physical
6611          * addresses have been transferred to the new BD location.
6612          */
6613         smp_wmb();
6614
6615         src_map->data = NULL;
6616 }
6617
6618 /* The RX ring scheme is composed of multiple rings which post fresh
6619  * buffers to the chip, and one special ring the chip uses to report
6620  * status back to the host.
6621  *
6622  * The special ring reports the status of received packets to the
6623  * host.  The chip does not write into the original descriptor the
6624  * RX buffer was obtained from.  The chip simply takes the original
6625  * descriptor as provided by the host, updates the status and length
6626  * field, then writes this into the next status ring entry.
6627  *
6628  * Each ring the host uses to post buffers to the chip is described
6629  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
6630  * it is first placed into the on-chip ram.  When the packet's length
6631  * is known, it walks down the TG3_BDINFO entries to select the ring.
6632  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
6633  * which is within the range of the new packet's length is chosen.
6634  *
6635  * The "separate ring for rx status" scheme may sound odd, but it makes
6636  * sense from a cache coherency perspective.  If only the host writes
6637  * to the buffer post rings, and only the chip writes to the rx status
6638  * rings, then cache lines never move beyond shared-modified state.
6639  * If both the host and chip were to write into the same ring, cache line
6640  * eviction could occur since both entities want it in an exclusive state.
6641  */
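/* Illustratively:
 *
 *	host  --writes-->  std/jumbo producer rings  --DMA-->  chip
 *	chip  --writes-->  rx return (status) ring   -------->  host
 *
 * Each side writes only its own ring, so no cache line is ever dirtied by
 * both the host and the chip.
 */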
6642 static int tg3_rx(struct tg3_napi *tnapi, int budget)
6643 {
6644         struct tg3 *tp = tnapi->tp;
6645         u32 work_mask, rx_std_posted = 0;
6646         u32 std_prod_idx, jmb_prod_idx;
6647         u32 sw_idx = tnapi->rx_rcb_ptr;
6648         u16 hw_idx;
6649         int received;
6650         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
6651
6652         hw_idx = *(tnapi->rx_rcb_prod_idx);
6653         /*
6654          * We need to order the read of hw_idx and the read of
6655          * the opaque cookie.
6656          */
6657         rmb();
6658         work_mask = 0;
6659         received = 0;
6660         std_prod_idx = tpr->rx_std_prod_idx;
6661         jmb_prod_idx = tpr->rx_jmb_prod_idx;
6662         while (sw_idx != hw_idx && budget > 0) {
6663                 struct ring_info *ri;
6664                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
6665                 unsigned int len;
6666                 struct sk_buff *skb;
6667                 dma_addr_t dma_addr;
6668                 u32 opaque_key, desc_idx, *post_ptr;
6669                 u8 *data;
6670                 u64 tstamp = 0;
6671
6672                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
6673                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
6674                 if (opaque_key == RXD_OPAQUE_RING_STD) {
6675                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
6676                         dma_addr = dma_unmap_addr(ri, mapping);
6677                         data = ri->data;
6678                         post_ptr = &std_prod_idx;
6679                         rx_std_posted++;
6680                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
6681                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
6682                         dma_addr = dma_unmap_addr(ri, mapping);
6683                         data = ri->data;
6684                         post_ptr = &jmb_prod_idx;
6685                 } else
6686                         goto next_pkt_nopost;
6687
6688                 work_mask |= opaque_key;
6689
6690                 if (desc->err_vlan & RXD_ERR_MASK) {
6691                 drop_it:
6692                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6693                                        desc_idx, *post_ptr);
6694                 drop_it_no_recycle:
6695                         /* Other statistics kept track of by card. */
6696                         tp->rx_dropped++;
6697                         goto next_pkt;
6698                 }
6699
6700                 prefetch(data + TG3_RX_OFFSET(tp));
6701                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
6702                       ETH_FCS_LEN;
6703
6704                 if ((desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6705                      RXD_FLAG_PTPSTAT_PTPV1 ||
6706                     (desc->type_flags & RXD_FLAG_PTPSTAT_MASK) ==
6707                      RXD_FLAG_PTPSTAT_PTPV2) {
6708                         tstamp = tr32(TG3_RX_TSTAMP_LSB);
6709                         tstamp |= (u64)tr32(TG3_RX_TSTAMP_MSB) << 32;
6710                 }
6711
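                /* Copy-break: packets larger than TG3_RX_COPY_THRESH() keep
                 * their DMA buffer (a replacement is posted in its place),
                 * while small packets are copied into a fresh skb so the
                 * original buffer can be recycled immediately.
                 */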
6712                 if (len > TG3_RX_COPY_THRESH(tp)) {
6713                         int skb_size;
6714                         unsigned int frag_size;
6715
6716                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
6717                                                     *post_ptr, &frag_size);
6718                         if (skb_size < 0)
6719                                 goto drop_it;
6720
6721                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
6722                                          PCI_DMA_FROMDEVICE);
6723
6724                         /* Ensure that the update to the data happens
6725                          * after the usage of the old DMA mapping.
6726                          */
6727                         smp_wmb();
6728
6729                         ri->data = NULL;
6730
6731                         skb = build_skb(data, frag_size);
6732                         if (!skb) {
6733                                 tg3_frag_free(frag_size != 0, data);
6734                                 goto drop_it_no_recycle;
6735                         }
6736                         skb_reserve(skb, TG3_RX_OFFSET(tp));
6737                 } else {
6738                         tg3_recycle_rx(tnapi, tpr, opaque_key,
6739                                        desc_idx, *post_ptr);
6740
6741                         skb = netdev_alloc_skb(tp->dev,
6742                                                len + TG3_RAW_IP_ALIGN);
6743                         if (skb == NULL)
6744                                 goto drop_it_no_recycle;
6745
6746                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
6747                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6748                         memcpy(skb->data,
6749                                data + TG3_RX_OFFSET(tp),
6750                                len);
6751                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
6752                 }
6753
6754                 skb_put(skb, len);
6755                 if (tstamp)
6756                         tg3_hwclock_to_timestamp(tp, tstamp,
6757                                                  skb_hwtstamps(skb));
6758
6759                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
6760                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
6761                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
6762                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
6763                         skb->ip_summed = CHECKSUM_UNNECESSARY;
6764                 else
6765                         skb_checksum_none_assert(skb);
6766
6767                 skb->protocol = eth_type_trans(skb, tp->dev);
6768
6769                 if (len > (tp->dev->mtu + ETH_HLEN) &&
6770                     skb->protocol != htons(ETH_P_8021Q) &&
6771                     skb->protocol != htons(ETH_P_8021AD)) {
6772                         dev_kfree_skb_any(skb);
6773                         goto drop_it_no_recycle;
6774                 }
6775
6776                 if (desc->type_flags & RXD_FLAG_VLAN &&
6777                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
6778                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
6779                                                desc->err_vlan & RXD_VLAN_MASK);
6780
6781                 napi_gro_receive(&tnapi->napi, skb);
6782
6783                 received++;
6784                 budget--;
6785
6786 next_pkt:
6787                 (*post_ptr)++;
6788
6789                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
6790                         tpr->rx_std_prod_idx = std_prod_idx &
6791                                                tp->rx_std_ring_mask;
6792                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6793                                      tpr->rx_std_prod_idx);
6794                         work_mask &= ~RXD_OPAQUE_RING_STD;
6795                         rx_std_posted = 0;
6796                 }
6797 next_pkt_nopost:
6798                 sw_idx++;
6799                 sw_idx &= tp->rx_ret_ring_mask;
6800
6801                 /* Refresh hw_idx to see if there is new work */
6802                 if (sw_idx == hw_idx) {
6803                         hw_idx = *(tnapi->rx_rcb_prod_idx);
6804                         rmb();
6805                 }
6806         }
6807
6808         /* ACK the status ring. */
6809         tnapi->rx_rcb_ptr = sw_idx;
6810         tw32_rx_mbox(tnapi->consmbox, sw_idx);
6811
6812         /* Refill RX ring(s). */
6813         if (!tg3_flag(tp, ENABLE_RSS)) {
6814                 /* Sync BD data before updating mailbox */
6815                 wmb();
6816
6817                 if (work_mask & RXD_OPAQUE_RING_STD) {
6818                         tpr->rx_std_prod_idx = std_prod_idx &
6819                                                tp->rx_std_ring_mask;
6820                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6821                                      tpr->rx_std_prod_idx);
6822                 }
6823                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
6824                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
6825                                                tp->rx_jmb_ring_mask;
6826                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6827                                      tpr->rx_jmb_prod_idx);
6828                 }
6829                 mmiowb();
6830         } else if (work_mask) {
6831                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
6832                  * updated before the producer indices can be updated.
6833                  */
6834                 smp_wmb();
6835
6836                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
6837                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
6838
6839                 if (tnapi != &tp->napi[1]) {
6840                         tp->rx_refill = true;
6841                         napi_schedule(&tp->napi[1].napi);
6842                 }
6843         }
6844
6845         return received;
6846 }
6847
6848 static void tg3_poll_link(struct tg3 *tp)
6849 {
6850         /* handle link change and other phy events */
6851         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
6852                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
6853
6854                 if (sblk->status & SD_STATUS_LINK_CHG) {
6855                         sblk->status = SD_STATUS_UPDATED |
6856                                        (sblk->status & ~SD_STATUS_LINK_CHG);
6857                         spin_lock(&tp->lock);
6858                         if (tg3_flag(tp, USE_PHYLIB)) {
6859                                 tw32_f(MAC_STATUS,
6860                                      (MAC_STATUS_SYNC_CHANGED |
6861                                       MAC_STATUS_CFG_CHANGED |
6862                                       MAC_STATUS_MI_COMPLETION |
6863                                       MAC_STATUS_LNKSTATE_CHANGED));
6864                                 udelay(40);
6865                         } else
6866                                 tg3_setup_phy(tp, false);
6867                         spin_unlock(&tp->lock);
6868                 }
6869         }
6870 }
6871
6872 static int tg3_rx_prodring_xfer(struct tg3 *tp,
6873                                 struct tg3_rx_prodring_set *dpr,
6874                                 struct tg3_rx_prodring_set *spr)
6875 {
6876         u32 si, di, cpycnt, src_prod_idx;
6877         int i, err = 0;
6878
6879         while (1) {
6880                 src_prod_idx = spr->rx_std_prod_idx;
6881
6882                 /* Make sure updates to the rx_std_buffers[] entries and the
6883                  * standard producer index are seen in the correct order.
6884                  */
6885                 smp_rmb();
6886
6887                 if (spr->rx_std_cons_idx == src_prod_idx)
6888                         break;
6889
6890                 if (spr->rx_std_cons_idx < src_prod_idx)
6891                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6892                 else
6893                         cpycnt = tp->rx_std_ring_mask + 1 -
6894                                  spr->rx_std_cons_idx;
6895
6896                 cpycnt = min(cpycnt,
6897                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6898
6899                 si = spr->rx_std_cons_idx;
6900                 di = dpr->rx_std_prod_idx;
6901
6902                 for (i = di; i < di + cpycnt; i++) {
6903                         if (dpr->rx_std_buffers[i].data) {
6904                                 cpycnt = i - di;
6905                                 err = -ENOSPC;
6906                                 break;
6907                         }
6908                 }
6909
6910                 if (!cpycnt)
6911                         break;
6912
6913                 /* Ensure that updates to the rx_std_buffers ring and the
6914                  * shadowed hardware producer ring from tg3_recycle_skb() are
6915                  * ordered correctly WRT the skb check above.
6916                  */
6917                 smp_rmb();
6918
6919                 memcpy(&dpr->rx_std_buffers[di],
6920                        &spr->rx_std_buffers[si],
6921                        cpycnt * sizeof(struct ring_info));
6922
6923                 for (i = 0; i < cpycnt; i++, di++, si++) {
6924                         struct tg3_rx_buffer_desc *sbd, *dbd;
6925                         sbd = &spr->rx_std[si];
6926                         dbd = &dpr->rx_std[di];
6927                         dbd->addr_hi = sbd->addr_hi;
6928                         dbd->addr_lo = sbd->addr_lo;
6929                 }
6930
6931                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6932                                        tp->rx_std_ring_mask;
6933                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6934                                        tp->rx_std_ring_mask;
6935         }
6936
6937         while (1) {
6938                 src_prod_idx = spr->rx_jmb_prod_idx;
6939
6940                 /* Make sure updates to the rx_jmb_buffers[] entries and
6941                  * the jumbo producer index are seen in the correct order.
6942                  */
6943                 smp_rmb();
6944
6945                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6946                         break;
6947
6948                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6949                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6950                 else
6951                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6952                                  spr->rx_jmb_cons_idx;
6953
6954                 cpycnt = min(cpycnt,
6955                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6956
6957                 si = spr->rx_jmb_cons_idx;
6958                 di = dpr->rx_jmb_prod_idx;
6959
6960                 for (i = di; i < di + cpycnt; i++) {
6961                         if (dpr->rx_jmb_buffers[i].data) {
6962                                 cpycnt = i - di;
6963                                 err = -ENOSPC;
6964                                 break;
6965                         }
6966                 }
6967
6968                 if (!cpycnt)
6969                         break;
6970
6971                 /* Ensure that updates to the rx_jmb_buffers ring and the
6972                  * shadowed hardware producer ring from tg3_recycle_skb() are
6973                  * ordered correctly WRT the skb check above.
6974                  */
6975                 smp_rmb();
6976
6977                 memcpy(&dpr->rx_jmb_buffers[di],
6978                        &spr->rx_jmb_buffers[si],
6979                        cpycnt * sizeof(struct ring_info));
6980
6981                 for (i = 0; i < cpycnt; i++, di++, si++) {
6982                         struct tg3_rx_buffer_desc *sbd, *dbd;
6983                         sbd = &spr->rx_jmb[si].std;
6984                         dbd = &dpr->rx_jmb[di].std;
6985                         dbd->addr_hi = sbd->addr_hi;
6986                         dbd->addr_lo = sbd->addr_lo;
6987                 }
6988
6989                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6990                                        tp->rx_jmb_ring_mask;
6991                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6992                                        tp->rx_jmb_ring_mask;
6993         }
6994
6995         return err;
6996 }
6997
6998 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6999 {
7000         struct tg3 *tp = tnapi->tp;
7001
7002         /* run TX completion thread */
7003         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
7004                 tg3_tx(tnapi);
7005                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7006                         return work_done;
7007         }
7008
7009         if (!tnapi->rx_rcb_prod_idx)
7010                 return work_done;
7011
7012         /* run RX thread, within the bounds set by NAPI.
7013          * All RX "locking" is done by ensuring outside
7014          * code synchronizes with tg3->napi.poll()
7015          */
7016         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
7017                 work_done += tg3_rx(tnapi, budget - work_done);
7018
7019         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
7020                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
7021                 int i, err = 0;
7022                 u32 std_prod_idx = dpr->rx_std_prod_idx;
7023                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
7024
7025                 tp->rx_refill = false;
7026                 for (i = 1; i <= tp->rxq_cnt; i++)
7027                         err |= tg3_rx_prodring_xfer(tp, dpr,
7028                                                     &tp->napi[i].prodring);
7029
7030                 wmb();
7031
7032                 if (std_prod_idx != dpr->rx_std_prod_idx)
7033                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
7034                                      dpr->rx_std_prod_idx);
7035
7036                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
7037                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
7038                                      dpr->rx_jmb_prod_idx);
7039
7040                 mmiowb();
7041
7042                 if (err)
7043                         tw32_f(HOSTCC_MODE, tp->coal_now);
7044         }
7045
7046         return work_done;
7047 }
7048
7049 static inline void tg3_reset_task_schedule(struct tg3 *tp)
7050 {
7051         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
7052                 schedule_work(&tp->reset_task);
7053 }
7054
7055 static inline void tg3_reset_task_cancel(struct tg3 *tp)
7056 {
7057         cancel_work_sync(&tp->reset_task);
7058         tg3_flag_clear(tp, RESET_TASK_PENDING);
7059         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
7060 }
7061
7062 static int tg3_poll_msix(struct napi_struct *napi, int budget)
7063 {
7064         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7065         struct tg3 *tp = tnapi->tp;
7066         int work_done = 0;
7067         struct tg3_hw_status *sblk = tnapi->hw_status;
7068
7069         while (1) {
7070                 work_done = tg3_poll_work(tnapi, work_done, budget);
7071
7072                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7073                         goto tx_recovery;
7074
7075                 if (unlikely(work_done >= budget))
7076                         break;
7077
7078                 /* tp->last_tag is used in tg3_int_reenable() below
7079                  * to tell the hw how much work has been processed,
7080                  * so we must read it before checking for more work.
7081                  */
7082                 tnapi->last_tag = sblk->status_tag;
7083                 tnapi->last_irq_tag = tnapi->last_tag;
7084                 rmb();
7085
7086                 /* check for RX/TX work to do */
7087                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
7088                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
7089
7090                         /* This test is not race-free, but it reduces
7091                          * the number of interrupts by looping again.
7092                          */
7093                         if (tnapi == &tp->napi[1] && tp->rx_refill)
7094                                 continue;
7095
7096                         napi_complete(napi);
7097                         /* Reenable interrupts. */
7098                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
7099
7100                         /* This test is synchronized by napi_schedule()
7101                          * and napi_complete() to close the race condition.
7102                          */
7103                         if (unlikely(tnapi == &tp->napi[1] && tp->rx_refill)) {
7104                                 tw32(HOSTCC_MODE, tp->coalesce_mode |
7105                                                   HOSTCC_MODE_ENABLE |
7106                                                   tnapi->coal_now);
7107                         }
7108                         mmiowb();
7109                         break;
7110                 }
7111         }
7112
7113         return work_done;
7114
7115 tx_recovery:
7116         /* work_done is guaranteed to be less than budget. */
7117         napi_complete(napi);
7118         tg3_reset_task_schedule(tp);
7119         return work_done;
7120 }
7121
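/* Check the flow-attention, MSI-status and DMA-status registers and
 * schedule a chip reset if a real error (anything beyond the benign
 * mbuf low-watermark and MSI-request bits) is latched.  The
 * ERROR_PROCESSED flag keeps this from firing again before the reset
 * task has run.
 */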
7122 static void tg3_process_error(struct tg3 *tp)
7123 {
7124         u32 val;
7125         bool real_error = false;
7126
7127         if (tg3_flag(tp, ERROR_PROCESSED))
7128                 return;
7129
7130         /* Check Flow Attention register */
7131         val = tr32(HOSTCC_FLOW_ATTN);
7132         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
7133                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
7134                 real_error = true;
7135         }
7136
7137         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
7138                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
7139                 real_error = true;
7140         }
7141
7142         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
7143                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
7144                 real_error = true;
7145         }
7146
7147         if (!real_error)
7148                 return;
7149
7150         tg3_dump_state(tp);
7151
7152         tg3_flag_set(tp, ERROR_PROCESSED);
7153         tg3_reset_task_schedule(tp);
7154 }
7155
7156 static int tg3_poll(struct napi_struct *napi, int budget)
7157 {
7158         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
7159         struct tg3 *tp = tnapi->tp;
7160         int work_done = 0;
7161         struct tg3_hw_status *sblk = tnapi->hw_status;
7162
7163         while (1) {
7164                 if (sblk->status & SD_STATUS_ERROR)
7165                         tg3_process_error(tp);
7166
7167                 tg3_poll_link(tp);
7168
7169                 work_done = tg3_poll_work(tnapi, work_done, budget);
7170
7171                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
7172                         goto tx_recovery;
7173
7174                 if (unlikely(work_done >= budget))
7175                         break;
7176
7177                 if (tg3_flag(tp, TAGGED_STATUS)) {
7178                         /* tnapi->last_tag is used in tg3_int_reenable() below
7179                          * to tell the hw how much work has been processed,
7180                          * so we must read it before checking for more work.
7181                          */
7182                         tnapi->last_tag = sblk->status_tag;
7183                         tnapi->last_irq_tag = tnapi->last_tag;
7184                         rmb();
7185                 } else
7186                         sblk->status &= ~SD_STATUS_UPDATED;
7187
7188                 if (likely(!tg3_has_work(tnapi))) {
7189                         napi_complete(napi);
7190                         tg3_int_reenable(tnapi);
7191                         break;
7192                 }
7193         }
7194
7195         return work_done;
7196
7197 tx_recovery:
7198         /* work_done is guaranteed to be less than budget. */
7199         napi_complete(napi);
7200         tg3_reset_task_schedule(tp);
7201         return work_done;
7202 }
7203
7204 static void tg3_napi_disable(struct tg3 *tp)
7205 {
7206         int i;
7207
7208         for (i = tp->irq_cnt - 1; i >= 0; i--)
7209                 napi_disable(&tp->napi[i].napi);
7210 }
7211
7212 static void tg3_napi_enable(struct tg3 *tp)
7213 {
7214         int i;
7215
7216         for (i = 0; i < tp->irq_cnt; i++)
7217                 napi_enable(&tp->napi[i].napi);
7218 }
7219
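/* Vector 0 polls with tg3_poll(), which also handles link and error
 * status; any additional MSI-X vectors use the leaner tg3_poll_msix().
 */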
7220 static void tg3_napi_init(struct tg3 *tp)
7221 {
7222         int i;
7223
7224         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
7225         for (i = 1; i < tp->irq_cnt; i++)
7226                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
7227 }
7228
7229 static void tg3_napi_fini(struct tg3 *tp)
7230 {
7231         int i;
7232
7233         for (i = 0; i < tp->irq_cnt; i++)
7234                 netif_napi_del(&tp->napi[i].napi);
7235 }
7236
7237 static inline void tg3_netif_stop(struct tg3 *tp)
7238 {
7239         tp->dev->trans_start = jiffies; /* prevent tx timeout */
7240         tg3_napi_disable(tp);
7241         netif_carrier_off(tp->dev);
7242         netif_tx_disable(tp->dev);
7243 }
7244
7245 /* tp->lock must be held */
7246 static inline void tg3_netif_start(struct tg3 *tp)
7247 {
7248         tg3_ptp_resume(tp);
7249
7250         /* NOTE: unconditional netif_tx_wake_all_queues is only
7251          * appropriate so long as all callers are assured to
7252          * have free tx slots (such as after tg3_init_hw)
7253          */
7254         netif_tx_wake_all_queues(tp->dev);
7255
7256         if (tp->link_up)
7257                 netif_carrier_on(tp->dev);
7258
7259         tg3_napi_enable(tp);
7260         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
7261         tg3_enable_ints(tp);
7262 }
7263
7264 static void tg3_irq_quiesce(struct tg3 *tp)
7265 {
7266         int i;
7267
7268         BUG_ON(tp->irq_sync);
7269
7270         tp->irq_sync = 1;
7271         smp_mb();
7272
7273         for (i = 0; i < tp->irq_cnt; i++)
7274                 synchronize_irq(tp->napi[i].irq_vec);
7275 }
7276
7277 /* Fully shut down all tg3 driver activity elsewhere in the system.
7278  * If irq_sync is non-zero, the IRQ handlers must be quiesced as
7279  * well.  Most of the time this is only necessary when shutting
7280  * down the device.
7281  */
7282 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
7283 {
7284         spin_lock_bh(&tp->lock);
7285         if (irq_sync)
7286                 tg3_irq_quiesce(tp);
7287 }
7288
7289 static inline void tg3_full_unlock(struct tg3 *tp)
7290 {
7291         spin_unlock_bh(&tp->lock);
7292 }
7293
7294 /* One-shot MSI handler - the chip automatically disables the
7295  * interrupt after sending the MSI, so the driver doesn't have to.
7296  */
7297 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
7298 {
7299         struct tg3_napi *tnapi = dev_id;
7300         struct tg3 *tp = tnapi->tp;
7301
7302         prefetch(tnapi->hw_status);
7303         if (tnapi->rx_rcb)
7304                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7305
7306         if (likely(!tg3_irq_sync(tp)))
7307                 napi_schedule(&tnapi->napi);
7308
7309         return IRQ_HANDLED;
7310 }
7311
7312 /* MSI ISR - No need to check for interrupt sharing and no need to
7313  * flush status block and interrupt mailbox. PCI ordering rules
7314  * guarantee that MSI will arrive after the status block.
7315  */
7316 static irqreturn_t tg3_msi(int irq, void *dev_id)
7317 {
7318         struct tg3_napi *tnapi = dev_id;
7319         struct tg3 *tp = tnapi->tp;
7320
7321         prefetch(tnapi->hw_status);
7322         if (tnapi->rx_rcb)
7323                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7324         /*
7325          * Writing any value to intr-mbox-0 clears PCI INTA# and
7326          * chip-internal interrupt pending events.
7327          * Writing non-zero to intr-mbox-0 additionally tells the
7328          * NIC to stop sending us irqs, engaging "in-intr-handler"
7329          * event coalescing.
7330          */
7331         tw32_mailbox(tnapi->int_mbox, 0x00000001);
7332         if (likely(!tg3_irq_sync(tp)))
7333                 napi_schedule(&tnapi->napi);
7334
7335         return IRQ_RETVAL(1);
7336 }
7337
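/* Legacy INTx handler for chips without tagged status.  The line may
 * be shared, so verify the interrupt is ours before claiming it.
 */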
7338 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
7339 {
7340         struct tg3_napi *tnapi = dev_id;
7341         struct tg3 *tp = tnapi->tp;
7342         struct tg3_hw_status *sblk = tnapi->hw_status;
7343         unsigned int handled = 1;
7344
7345         /* In INTx mode, the interrupt can arrive at the CPU before
7346          * the status block write that preceded it.
7347          * Reading the PCI State register will confirm whether the
7348          * interrupt is ours and will flush the status block.
7349          */
7350         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
7351                 if (tg3_flag(tp, CHIP_RESETTING) ||
7352                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7353                         handled = 0;
7354                         goto out;
7355                 }
7356         }
7357
7358         /*
7359          * Writing any value to intr-mbox-0 clears PCI INTA# and
7360          * chip-internal interrupt pending events.
7361          * Writing non-zero to intr-mbox-0 additionally tells the
7362          * NIC to stop sending us irqs, engaging "in-intr-handler"
7363          * event coalescing.
7364          *
7365          * Flush the mailbox to de-assert the IRQ immediately to prevent
7366          * spurious interrupts.  The flush impacts performance but
7367          * excessive spurious interrupts can be worse in some cases.
7368          */
7369         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7370         if (tg3_irq_sync(tp))
7371                 goto out;
7372         sblk->status &= ~SD_STATUS_UPDATED;
7373         if (likely(tg3_has_work(tnapi))) {
7374                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7375                 napi_schedule(&tnapi->napi);
7376         } else {
7377                 /* No work, shared interrupt perhaps?  Re-enable
7378                  * interrupts, and flush that PCI write.
7379                  */
7380                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
7381                                0x00000000);
7382         }
7383 out:
7384         return IRQ_RETVAL(handled);
7385 }
7386
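/* Legacy INTx handler for chips using tagged status.  The status tag
 * doubles as a progress cursor, which also lets the handler flag
 * screaming shared interrupts as unhandled.
 */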
7387 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
7388 {
7389         struct tg3_napi *tnapi = dev_id;
7390         struct tg3 *tp = tnapi->tp;
7391         struct tg3_hw_status *sblk = tnapi->hw_status;
7392         unsigned int handled = 1;
7393
7394         /* In INTx mode, the interrupt can arrive at the CPU before
7395          * the status block write that preceded it.
7396          * Reading the PCI State register will confirm whether the
7397          * interrupt is ours and will flush the status block.
7398          */
7399         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
7400                 if (tg3_flag(tp, CHIP_RESETTING) ||
7401                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7402                         handled = 0;
7403                         goto out;
7404                 }
7405         }
7406
7407         /*
7408          * Writing any value to intr-mbox-0 clears PCI INTA# and
7409          * chip-internal interrupt pending events.
7410          * Writing non-zero to intr-mbox-0 additionally tells the
7411          * NIC to stop sending us irqs, engaging "in-intr-handler"
7412          * event coalescing.
7413          *
7414          * Flush the mailbox to de-assert the IRQ immediately to prevent
7415          * spurious interrupts.  The flush impacts performance but
7416          * excessive spurious interrupts can be worse in some cases.
7417          */
7418         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
7419
7420         /*
7421          * In a shared interrupt configuration, sometimes other devices'
7422          * interrupts will scream.  We record the current status tag here
7423          * so that the above check can report that the screaming interrupts
7424          * are unhandled.  Eventually they will be silenced.
7425          */
7426         tnapi->last_irq_tag = sblk->status_tag;
7427
7428         if (tg3_irq_sync(tp))
7429                 goto out;
7430
7431         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
7432
7433         napi_schedule(&tnapi->napi);
7434
7435 out:
7436         return IRQ_RETVAL(handled);
7437 }
7438
7439 /* ISR for interrupt test */
7440 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
7441 {
7442         struct tg3_napi *tnapi = dev_id;
7443         struct tg3 *tp = tnapi->tp;
7444         struct tg3_hw_status *sblk = tnapi->hw_status;
7445
7446         if ((sblk->status & SD_STATUS_UPDATED) ||
7447             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
7448                 tg3_disable_ints(tp);
7449                 return IRQ_RETVAL(1);
7450         }
7451         return IRQ_RETVAL(0);
7452 }
7453
7454 #ifdef CONFIG_NET_POLL_CONTROLLER
7455 static void tg3_poll_controller(struct net_device *dev)
7456 {
7457         int i;
7458         struct tg3 *tp = netdev_priv(dev);
7459
7460         if (tg3_irq_sync(tp))
7461                 return;
7462
7463         for (i = 0; i < tp->irq_cnt; i++)
7464                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
7465 }
7466 #endif
7467
7468 static void tg3_tx_timeout(struct net_device *dev)
7469 {
7470         struct tg3 *tp = netdev_priv(dev);
7471
7472         if (netif_msg_tx_err(tp)) {
7473                 netdev_err(dev, "transmit timed out, resetting\n");
7474                 tg3_dump_state(tp);
7475         }
7476
7477         tg3_reset_task_schedule(tp);
7478 }
7479
7480 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
7481 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
7482 {
7483         u32 base = (u32) mapping & 0xffffffff;
7484
7485         return base + len + 8 < base;
7486 }
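/* Worked example: mapping = 0xfffffff0, len = 0x20 gives
 * base + len + 8 = 0x18 after the u32 wrap, which is < base, so the
 * buffer straddles a 4GB boundary and the test returns true.  The
 * "+ 8" adds slack for buffers ending within 8 bytes of a boundary.
 */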
7487
7488 /* Test for TSO DMA buffers that cross into regions which are within MSS bytes
7489  * of any 4GB boundaries: 4G, 8G, etc
7490  */
7491 static inline int tg3_4g_tso_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7492                                            u32 len, u32 mss)
7493 {
7494         if (tg3_asic_rev(tp) == ASIC_REV_5762 && mss) {
7495                 u32 base = (u32) mapping & 0xffffffff;
7496
7497                 return ((base + len + (mss & 0x3fff)) < base);
7498         }
7499         return 0;
7500 }
7501
7502 /* Test for DMA addresses > 40-bit */
7503 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
7504                                           int len)
7505 {
7506 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
7507         if (tg3_flag(tp, 40BIT_DMA_BUG))
7508                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
7509         return 0;
7510 #else
7511         return 0;
7512 #endif
7513 }
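/* DMA_BIT_MASK(40) is 2^40 - 1, so the test fires once the end of the
 * buffer would pass the 40-bit address limit.  On configurations not
 * matching the #if above, such addresses are taken to be impossible
 * and the test compiles away.
 */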
7514
7515 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
7516                                  dma_addr_t mapping, u32 len, u32 flags,
7517                                  u32 mss, u32 vlan)
7518 {
7519         txbd->addr_hi = ((u64) mapping >> 32);
7520         txbd->addr_lo = ((u64) mapping & 0xffffffff);
7521         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
7522         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
7523 }
7524
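/* Post one mapped buffer as tx BDs, splitting it into multiple BDs
 * when it exceeds tp->dma_limit.  Returns true ("hwbug") when the
 * buffer trips a DMA erratum and the caller must fall back to
 * tigon3_dma_hwbug_workaround().
 */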
7525 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
7526                             dma_addr_t map, u32 len, u32 flags,
7527                             u32 mss, u32 vlan)
7528 {
7529         struct tg3 *tp = tnapi->tp;
7530         bool hwbug = false;
7531
7532         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
7533                 hwbug = true;
7534
7535         if (tg3_4g_overflow_test(map, len))
7536                 hwbug = true;
7537
7538         if (tg3_4g_tso_overflow_test(tp, map, len, mss))
7539                 hwbug = true;
7540
7541         if (tg3_40bit_overflow_test(tp, map, len))
7542                 hwbug = true;
7543
7544         if (tp->dma_limit) {
7545                 u32 prvidx = *entry;
7546                 u32 tmp_flag = flags & ~TXD_FLAG_END;
7547                 while (len > tp->dma_limit && *budget) {
7548                         u32 frag_len = tp->dma_limit;
7549                         len -= tp->dma_limit;
7550
7551                         /* Avoid the 8-byte DMA problem: keep the remainder > 8 bytes */
7552                         if (len <= 8) {
7553                                 len += tp->dma_limit / 2;
7554                                 frag_len = tp->dma_limit / 2;
7555                         }
7556
7557                         tnapi->tx_buffers[*entry].fragmented = true;
7558
7559                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7560                                       frag_len, tmp_flag, mss, vlan);
7561                         *budget -= 1;
7562                         prvidx = *entry;
7563                         *entry = NEXT_TX(*entry);
7564
7565                         map += frag_len;
7566                 }
7567
7568                 if (len) {
7569                         if (*budget) {
7570                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7571                                               len, flags, mss, vlan);
7572                                 *budget -= 1;
7573                                 *entry = NEXT_TX(*entry);
7574                         } else {
7575                                 hwbug = true;
7576                                 tnapi->tx_buffers[prvidx].fragmented = false;
7577                         }
7578                 }
7579         } else {
7580                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
7581                               len, flags, mss, vlan);
7582                 *entry = NEXT_TX(*entry);
7583         }
7584
7585         return hwbug;
7586 }
7587
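/* Undo the DMA mappings of the skb posted at tx slot @entry: the
 * linear head first, then fragments 0..@last (pass @last == -1 for a
 * head-only skb), skipping any extra slots used when a mapping was
 * split into multiple BDs.
 */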
7588 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
7589 {
7590         int i;
7591         struct sk_buff *skb;
7592         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
7593
7594         skb = txb->skb;
7595         txb->skb = NULL;
7596
7597         pci_unmap_single(tnapi->tp->pdev,
7598                          dma_unmap_addr(txb, mapping),
7599                          skb_headlen(skb),
7600                          PCI_DMA_TODEVICE);
7601
7602         while (txb->fragmented) {
7603                 txb->fragmented = false;
7604                 entry = NEXT_TX(entry);
7605                 txb = &tnapi->tx_buffers[entry];
7606         }
7607
7608         for (i = 0; i <= last; i++) {
7609                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7610
7611                 entry = NEXT_TX(entry);
7612                 txb = &tnapi->tx_buffers[entry];
7613
7614                 pci_unmap_page(tnapi->tp->pdev,
7615                                dma_unmap_addr(txb, mapping),
7616                                skb_frag_size(frag), PCI_DMA_TODEVICE);
7617
7618                 while (txb->fragmented) {
7619                         txb->fragmented = false;
7620                         entry = NEXT_TX(entry);
7621                         txb = &tnapi->tx_buffers[entry];
7622                 }
7623         }
7624 }
7625
7626 /* Workaround 4GB and 40-bit hardware DMA bugs. */
7627 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
7628                                        struct sk_buff **pskb,
7629                                        u32 *entry, u32 *budget,
7630                                        u32 base_flags, u32 mss, u32 vlan)
7631 {
7632         struct tg3 *tp = tnapi->tp;
7633         struct sk_buff *new_skb, *skb = *pskb;
7634         dma_addr_t new_addr = 0;
7635         int ret = 0;
7636
7637         if (tg3_asic_rev(tp) != ASIC_REV_5701)
7638                 new_skb = skb_copy(skb, GFP_ATOMIC);
7639         else {
7640                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
7641
7642                 new_skb = skb_copy_expand(skb,
7643                                           skb_headroom(skb) + more_headroom,
7644                                           skb_tailroom(skb), GFP_ATOMIC);
7645         }
7646
7647         if (!new_skb) {
7648                 ret = -1;
7649         } else {
7650                 /* New SKB is guaranteed to be linear. */
7651                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
7652                                           PCI_DMA_TODEVICE);
7653                 /* Make sure the mapping succeeded */
7654                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
7655                         dev_kfree_skb_any(new_skb);
7656                         ret = -1;
7657                 } else {
7658                         u32 save_entry = *entry;
7659
7660                         base_flags |= TXD_FLAG_END;
7661
7662                         tnapi->tx_buffers[*entry].skb = new_skb;
7663                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
7664                                            mapping, new_addr);
7665
7666                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
7667                                             new_skb->len, base_flags,
7668                                             mss, vlan)) {
7669                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
7670                                 dev_kfree_skb_any(new_skb);
7671                                 ret = -1;
7672                         }
7673                 }
7674         }
7675
7676         dev_kfree_skb_any(skb);
7677         *pskb = new_skb;
7678         return ret;
7679 }
7680
7681 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
7682
7683 /* Use GSO to work around a rare TSO bug that may be triggered when the
7684  * TSO header is greater than 80 bytes.
7685  */
7686 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7687 {
7688         struct sk_buff *segs, *nskb;
7689         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
7690
7691         /* Estimate the number of fragments in the worst case */
7692         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
7693                 netif_stop_queue(tp->dev);
7694
7695                 /* netif_tx_stop_queue() must be done before checking
7696                  * tx index in tg3_tx_avail() below, because in
7697                  * tg3_tx(), we update tx index before checking for
7698                  * netif_tx_queue_stopped().
7699                  */
7700                 smp_mb();
7701                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
7702                         return NETDEV_TX_BUSY;
7703
7704                 netif_wake_queue(tp->dev);
7705         }
7706
7707         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
7708         if (IS_ERR(segs))
7709                 goto tg3_tso_bug_end;
7710
7711         do {
7712                 nskb = segs;
7713                 segs = segs->next;
7714                 nskb->next = NULL;
7715                 tg3_start_xmit(nskb, tp->dev);
7716         } while (segs);
7717
7718 tg3_tso_bug_end:
7719         dev_kfree_skb_any(skb);
7720
7721         return NETDEV_TX_OK;
7722 }
7723
7724 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
7725  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
7726  */
7727 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
7728 {
7729         struct tg3 *tp = netdev_priv(dev);
7730         u32 len, entry, base_flags, mss, vlan = 0;
7731         u32 budget;
7732         int i = -1, would_hit_hwbug;
7733         dma_addr_t mapping;
7734         struct tg3_napi *tnapi;
7735         struct netdev_queue *txq;
7736         unsigned int last;
7737
7738         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
7739         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
7740         if (tg3_flag(tp, ENABLE_TSS))
7741                 tnapi++;
7742
7743         budget = tg3_tx_avail(tnapi);
7744
7745         /* We are running in a BH-disabled context with netif_tx_lock
7746          * and TX reclaim runs via tp->napi.poll inside of a software
7747          * interrupt.  Furthermore, IRQ processing runs lockless so we have
7748          * no IRQ context deadlocks to worry about either.  Rejoice!
7749          */
7750         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
7751                 if (!netif_tx_queue_stopped(txq)) {
7752                         netif_tx_stop_queue(txq);
7753
7754                         /* This is a hard error, log it. */
7755                         netdev_err(dev,
7756                                    "BUG! Tx Ring full when queue awake!\n");
7757                 }
7758                 return NETDEV_TX_BUSY;
7759         }
7760
7761         entry = tnapi->tx_prod;
7762         base_flags = 0;
7763
7764         mss = skb_shinfo(skb)->gso_size;
7765         if (mss) {
7766                 struct iphdr *iph;
7767                 u32 tcp_opt_len, hdr_len;
7768
7769                 if (skb_header_cloned(skb) &&
7770                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
7771                         goto drop;
7772
7773                 iph = ip_hdr(skb);
7774                 tcp_opt_len = tcp_optlen(skb);
7775
7776                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
7777
7778                 /* HW/FW cannot correctly segment packets that have been
7779                  * VLAN encapsulated.
7780                  */
7781                 if (skb->protocol == htons(ETH_P_8021Q) ||
7782                     skb->protocol == htons(ETH_P_8021AD))
7783                         return tg3_tso_bug(tp, skb);
7784
7785                 if (!skb_is_gso_v6(skb)) {
7786                         iph->check = 0;
7787                         iph->tot_len = htons(mss + hdr_len);
7788                 }
7789
7790                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
7791                     tg3_flag(tp, TSO_BUG))
7792                         return tg3_tso_bug(tp, skb);
7793
7794                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
7795                                TXD_FLAG_CPU_POST_DMA);
7796
7797                 if (tg3_flag(tp, HW_TSO_1) ||
7798                     tg3_flag(tp, HW_TSO_2) ||
7799                     tg3_flag(tp, HW_TSO_3)) {
7800                         tcp_hdr(skb)->check = 0;
7801                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
7802                 } else
7803                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
7804                                                                  iph->daddr, 0,
7805                                                                  IPPROTO_TCP,
7806                                                                  0);
7807
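                /* Hardware TSO packs the (IP + TCP) header length into
                 * spare mss/flag bits; the bit layout differs between
                 * TSO generations, hence the per-generation encodings
                 * below.
                 */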
7808                 if (tg3_flag(tp, HW_TSO_3)) {
7809                         mss |= (hdr_len & 0xc) << 12;
7810                         if (hdr_len & 0x10)
7811                                 base_flags |= 0x00000010;
7812                         base_flags |= (hdr_len & 0x3e0) << 5;
7813                 } else if (tg3_flag(tp, HW_TSO_2))
7814                         mss |= hdr_len << 9;
7815                 else if (tg3_flag(tp, HW_TSO_1) ||
7816                          tg3_asic_rev(tp) == ASIC_REV_5705) {
7817                         if (tcp_opt_len || iph->ihl > 5) {
7818                                 int tsflags;
7819
7820                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7821                                 mss |= (tsflags << 11);
7822                         }
7823                 } else {
7824                         if (tcp_opt_len || iph->ihl > 5) {
7825                                 int tsflags;
7826
7827                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
7828                                 base_flags |= tsflags << 12;
7829                         }
7830                 }
7831         } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
7832                 /* HW/FW cannot correctly checksum packets that have been
7833                  * VLAN encapsulated.
7834                  */
7835                 if (skb->protocol == htons(ETH_P_8021Q) ||
7836                     skb->protocol == htons(ETH_P_8021AD)) {
7837                         if (skb_checksum_help(skb))
7838                                 goto drop;
7839                 } else  {
7840                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
7841                 }
7842         }
7843
7844         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
7845             !mss && skb->len > VLAN_ETH_FRAME_LEN)
7846                 base_flags |= TXD_FLAG_JMB_PKT;
7847
7848         if (vlan_tx_tag_present(skb)) {
7849                 base_flags |= TXD_FLAG_VLAN;
7850                 vlan = vlan_tx_tag_get(skb);
7851         }
7852
7853         if ((unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) &&
7854             tg3_flag(tp, TX_TSTAMP_EN)) {
7855                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
7856                 base_flags |= TXD_FLAG_HWTSTAMP;
7857         }
7858
7859         len = skb_headlen(skb);
7860
7861         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
7862         if (pci_dma_mapping_error(tp->pdev, mapping))
7863                 goto drop;
7864
7865
7866         tnapi->tx_buffers[entry].skb = skb;
7867         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
7868
7869         would_hit_hwbug = 0;
7870
7871         if (tg3_flag(tp, 5701_DMA_BUG))
7872                 would_hit_hwbug = 1;
7873
7874         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
7875                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
7876                             mss, vlan)) {
7877                 would_hit_hwbug = 1;
7878         } else if (skb_shinfo(skb)->nr_frags > 0) {
7879                 u32 tmp_mss = mss;
7880
7881                 if (!tg3_flag(tp, HW_TSO_1) &&
7882                     !tg3_flag(tp, HW_TSO_2) &&
7883                     !tg3_flag(tp, HW_TSO_3))
7884                         tmp_mss = 0;
7885
7886                 /* Now loop through additional data
7887                  * fragments, and queue them.
7888                  */
7889                 last = skb_shinfo(skb)->nr_frags - 1;
7890                 for (i = 0; i <= last; i++) {
7891                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
7892
7893                         len = skb_frag_size(frag);
7894                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
7895                                                    len, DMA_TO_DEVICE);
7896
7897                         tnapi->tx_buffers[entry].skb = NULL;
7898                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
7899                                            mapping);
7900                         if (dma_mapping_error(&tp->pdev->dev, mapping))
7901                                 goto dma_error;
7902
7903                         if (!budget ||
7904                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
7905                                             len, base_flags |
7906                                             ((i == last) ? TXD_FLAG_END : 0),
7907                                             tmp_mss, vlan)) {
7908                                 would_hit_hwbug = 1;
7909                                 break;
7910                         }
7911                 }
7912         }
7913
7914         if (would_hit_hwbug) {
7915                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
7916
7917                 /* If the workaround fails due to memory/mapping
7918                  * failure, silently drop this packet.
7919                  */
7920                 entry = tnapi->tx_prod;
7921                 budget = tg3_tx_avail(tnapi);
7922                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
7923                                                 base_flags, mss, vlan))
7924                         goto drop_nofree;
7925         }
7926
7927         skb_tx_timestamp(skb);
7928         netdev_tx_sent_queue(txq, skb->len);
7929
7930         /* Sync BD data before updating mailbox */
7931         wmb();
7932
7933         /* Packets are ready, update Tx producer idx local and on card. */
7934         tw32_tx_mbox(tnapi->prodmbox, entry);
7935
7936         tnapi->tx_prod = entry;
7937         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
7938                 netif_tx_stop_queue(txq);
7939
7940                 /* netif_tx_stop_queue() must be done before checking
7941                  * tx index in tg3_tx_avail() below, because in
7942                  * tg3_tx(), we update tx index before checking for
7943                  * netif_tx_queue_stopped().
7944                  */
7945                 smp_mb();
7946                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
7947                         netif_tx_wake_queue(txq);
7948         }
7949
7950         mmiowb();
7951         return NETDEV_TX_OK;
7952
7953 dma_error:
7954         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7955         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7956 drop:
7957         dev_kfree_skb_any(skb);
7958 drop_nofree:
7959         tp->tx_dropped++;
7960         return NETDEV_TX_OK;
7961 }
7962
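/* Enable or disable internal MAC loopback.  The port mode (MII vs.
 * GMII) is chosen to match the speeds the attached PHY supports.
 */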
7963 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7964 {
7965         if (enable) {
7966                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7967                                   MAC_MODE_PORT_MODE_MASK);
7968
7969                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7970
7971                 if (!tg3_flag(tp, 5705_PLUS))
7972                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7973
7974                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7975                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7976                 else
7977                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7978         } else {
7979                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7980
7981                 if (tg3_flag(tp, 5705_PLUS) ||
7982                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7983                     tg3_asic_rev(tp) == ASIC_REV_5700)
7984                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7985         }
7986
7987         tw32(MAC_MODE, tp->mac_mode);
7988         udelay(40);
7989 }
7990
7991 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7992 {
7993         u32 val, bmcr, mac_mode, ptest = 0;
7994
7995         tg3_phy_toggle_apd(tp, false);
7996         tg3_phy_toggle_automdix(tp, false);
7997
7998         if (extlpbk && tg3_phy_set_extloopbk(tp))
7999                 return -EIO;
8000
8001         bmcr = BMCR_FULLDPLX;
8002         switch (speed) {
8003         case SPEED_10:
8004                 break;
8005         case SPEED_100:
8006                 bmcr |= BMCR_SPEED100;
8007                 break;
8008         case SPEED_1000:
8009         default:
8010                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
8011                         speed = SPEED_100;
8012                         bmcr |= BMCR_SPEED100;
8013                 } else {
8014                         speed = SPEED_1000;
8015                         bmcr |= BMCR_SPEED1000;
8016                 }
8017         }
8018
8019         if (extlpbk) {
8020                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
8021                         tg3_readphy(tp, MII_CTRL1000, &val);
8022                         val |= CTL1000_AS_MASTER |
8023                                CTL1000_ENABLE_MASTER;
8024                         tg3_writephy(tp, MII_CTRL1000, val);
8025                 } else {
8026                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
8027                                 MII_TG3_FET_PTEST_TRIM_2;
8028                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
8029                 }
8030         } else
8031                 bmcr |= BMCR_LOOPBACK;
8032
8033         tg3_writephy(tp, MII_BMCR, bmcr);
8034
8035         /* The write needs to be flushed for the FETs */
8036         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
8037                 tg3_readphy(tp, MII_BMCR, &bmcr);
8038
8039         udelay(40);
8040
8041         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
8042             tg3_asic_rev(tp) == ASIC_REV_5785) {
8043                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
8044                              MII_TG3_FET_PTEST_FRC_TX_LINK |
8045                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
8046
8047                 /* The write needs to be flushed for the AC131 */
8048                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
8049         }
8050
8051         /* Reset to prevent losing 1st rx packet intermittently */
8052         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
8053             tg3_flag(tp, 5780_CLASS)) {
8054                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
8055                 udelay(10);
8056                 tw32_f(MAC_RX_MODE, tp->rx_mode);
8057         }
8058
8059         mac_mode = tp->mac_mode &
8060                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
8061         if (speed == SPEED_1000)
8062                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
8063         else
8064                 mac_mode |= MAC_MODE_PORT_MODE_MII;
8065
8066         if (tg3_asic_rev(tp) == ASIC_REV_5700) {
8067                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
8068
8069                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
8070                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
8071                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
8072                         mac_mode |= MAC_MODE_LINK_POLARITY;
8073
8074                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
8075                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
8076         }
8077
8078         tw32(MAC_MODE, mac_mode);
8079         udelay(40);
8080
8081         return 0;
8082 }
8083
8084 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
8085 {
8086         struct tg3 *tp = netdev_priv(dev);
8087
8088         if (features & NETIF_F_LOOPBACK) {
8089                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
8090                         return;
8091
8092                 spin_lock_bh(&tp->lock);
8093                 tg3_mac_loopback(tp, true);
8094                 netif_carrier_on(tp->dev);
8095                 spin_unlock_bh(&tp->lock);
8096                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
8097         } else {
8098                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
8099                         return;
8100
8101                 spin_lock_bh(&tp->lock);
8102                 tg3_mac_loopback(tp, false);
8103                 /* Force link status check */
8104                 tg3_setup_phy(tp, true);
8105                 spin_unlock_bh(&tp->lock);
8106                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
8107         }
8108 }
8109
8110 static netdev_features_t tg3_fix_features(struct net_device *dev,
8111         netdev_features_t features)
8112 {
8113         struct tg3 *tp = netdev_priv(dev);
8114
8115         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
8116                 features &= ~NETIF_F_ALL_TSO;
8117
8118         return features;
8119 }
8120
8121 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
8122 {
8123         netdev_features_t changed = dev->features ^ features;
8124
8125         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
8126                 tg3_set_loopback(dev, features);
8127
8128         return 0;
8129 }
8130
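/* Free all rx buffers still owned by @tpr.  Per-vector rings only
 * hold buffers in their cons..prod window; the real hw prodring on
 * vector 0 is swept in its entirety.
 */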
8131 static void tg3_rx_prodring_free(struct tg3 *tp,
8132                                  struct tg3_rx_prodring_set *tpr)
8133 {
8134         int i;
8135
8136         if (tpr != &tp->napi[0].prodring) {
8137                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
8138                      i = (i + 1) & tp->rx_std_ring_mask)
8139                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8140                                         tp->rx_pkt_map_sz);
8141
8142                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
8143                         for (i = tpr->rx_jmb_cons_idx;
8144                              i != tpr->rx_jmb_prod_idx;
8145                              i = (i + 1) & tp->rx_jmb_ring_mask) {
8146                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8147                                                 TG3_RX_JMB_MAP_SZ);
8148                         }
8149                 }
8150
8151                 return;
8152         }
8153
8154         for (i = 0; i <= tp->rx_std_ring_mask; i++)
8155                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
8156                                 tp->rx_pkt_map_sz);
8157
8158         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8159                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
8160                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
8161                                         TG3_RX_JMB_MAP_SZ);
8162         }
8163 }
8164
8165 /* Initialize rx rings for packet processing.
8166  *
8167  * The chip has been shut down and the driver detached from
8168  * the networking stack, so no interrupts or new tx packets will
8169  * end up in the driver.  tp->{tx,}lock are held and thus
8170  * we may not sleep.
8171  */
8172 static int tg3_rx_prodring_alloc(struct tg3 *tp,
8173                                  struct tg3_rx_prodring_set *tpr)
8174 {
8175         u32 i, rx_pkt_dma_sz;
8176
8177         tpr->rx_std_cons_idx = 0;
8178         tpr->rx_std_prod_idx = 0;
8179         tpr->rx_jmb_cons_idx = 0;
8180         tpr->rx_jmb_prod_idx = 0;
8181
8182         if (tpr != &tp->napi[0].prodring) {
8183                 memset(&tpr->rx_std_buffers[0], 0,
8184                        TG3_RX_STD_BUFF_RING_SIZE(tp));
8185                 if (tpr->rx_jmb_buffers)
8186                         memset(&tpr->rx_jmb_buffers[0], 0,
8187                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
8188                 goto done;
8189         }
8190
8191         /* Zero out all descriptors. */
8192         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
8193
8194         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
8195         if (tg3_flag(tp, 5780_CLASS) &&
8196             tp->dev->mtu > ETH_DATA_LEN)
8197                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
8198         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
8199
8200         /* Initialize invariants of the rings; we only set this
8201          * stuff once.  This works because the card does not
8202          * write into the rx buffer posting rings.
8203          */
8204         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
8205                 struct tg3_rx_buffer_desc *rxd;
8206
8207                 rxd = &tpr->rx_std[i];
8208                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
8209                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
8210                 rxd->opaque = (RXD_OPAQUE_RING_STD |
8211                                (i << RXD_OPAQUE_INDEX_SHIFT));
8212         }
8213
8214         /* Now allocate fresh SKBs for each rx ring. */
8215         for (i = 0; i < tp->rx_pending; i++) {
8216                 unsigned int frag_size;
8217
8218                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i,
8219                                       &frag_size) < 0) {
8220                         netdev_warn(tp->dev,
8221                                     "Using a smaller RX standard ring. Only "
8222                                     "%d out of %d buffers were allocated "
8223                                     "successfully\n", i, tp->rx_pending);
8224                         if (i == 0)
8225                                 goto initfail;
8226                         tp->rx_pending = i;
8227                         break;
8228                 }
8229         }
8230
8231         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8232                 goto done;
8233
8234         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
8235
8236         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
8237                 goto done;
8238
8239         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
8240                 struct tg3_rx_buffer_desc *rxd;
8241
8242                 rxd = &tpr->rx_jmb[i].std;
8243                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
8244                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
8245                                   RXD_FLAG_JUMBO;
8246                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
8247                        (i << RXD_OPAQUE_INDEX_SHIFT));
8248         }
8249
8250         for (i = 0; i < tp->rx_jumbo_pending; i++) {
8251                 unsigned int frag_size;
8252
8253                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i,
8254                                       &frag_size) < 0) {
8255                         netdev_warn(tp->dev,
8256                                     "Using a smaller RX jumbo ring. Only %d "
8257                                     "out of %d buffers were allocated "
8258                                     "successfully\n", i, tp->rx_jumbo_pending);
8259                         if (i == 0)
8260                                 goto initfail;
8261                         tp->rx_jumbo_pending = i;
8262                         break;
8263                 }
8264         }
8265
8266 done:
8267         return 0;
8268
8269 initfail:
8270         tg3_rx_prodring_free(tp, tpr);
8271         return -ENOMEM;
8272 }
8273
8274 static void tg3_rx_prodring_fini(struct tg3 *tp,
8275                                  struct tg3_rx_prodring_set *tpr)
8276 {
8277         kfree(tpr->rx_std_buffers);
8278         tpr->rx_std_buffers = NULL;
8279         kfree(tpr->rx_jmb_buffers);
8280         tpr->rx_jmb_buffers = NULL;
8281         if (tpr->rx_std) {
8282                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
8283                                   tpr->rx_std, tpr->rx_std_mapping);
8284                 tpr->rx_std = NULL;
8285         }
8286         if (tpr->rx_jmb) {
8287                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
8288                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
8289                 tpr->rx_jmb = NULL;
8290         }
8291 }
8292
8293 static int tg3_rx_prodring_init(struct tg3 *tp,
8294                                 struct tg3_rx_prodring_set *tpr)
8295 {
8296         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
8297                                       GFP_KERNEL);
8298         if (!tpr->rx_std_buffers)
8299                 return -ENOMEM;
8300
8301         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
8302                                          TG3_RX_STD_RING_BYTES(tp),
8303                                          &tpr->rx_std_mapping,
8304                                          GFP_KERNEL);
8305         if (!tpr->rx_std)
8306                 goto err_out;
8307
8308         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
8309                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
8310                                               GFP_KERNEL);
8311                 if (!tpr->rx_jmb_buffers)
8312                         goto err_out;
8313
8314                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
8315                                                  TG3_RX_JMB_RING_BYTES(tp),
8316                                                  &tpr->rx_jmb_mapping,
8317                                                  GFP_KERNEL);
8318                 if (!tpr->rx_jmb)
8319                         goto err_out;
8320         }
8321
8322         return 0;
8323
8324 err_out:
8325         tg3_rx_prodring_fini(tp, tpr);
8326         return -ENOMEM;
8327 }
8328
8329 /* Free up pending packets in all rx/tx rings.
8330  *
8331  * The chip has been shut down and the driver detached from
8332  * the networking, so no interrupts or new tx packets will
8333  * the networking stack, so no interrupts or new tx packets will
8334  * in an interrupt context and thus may sleep.
8335  */
8336 static void tg3_free_rings(struct tg3 *tp)
8337 {
8338         int i, j;
8339
8340         for (j = 0; j < tp->irq_cnt; j++) {
8341                 struct tg3_napi *tnapi = &tp->napi[j];
8342
8343                 tg3_rx_prodring_free(tp, &tnapi->prodring);
8344
8345                 if (!tnapi->tx_buffers)
8346                         continue;
8347
8348                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
8349                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
8350
8351                         if (!skb)
8352                                 continue;
8353
8354                         tg3_tx_skb_unmap(tnapi, i,
8355                                          skb_shinfo(skb)->nr_frags - 1);
8356
8357                         dev_kfree_skb_any(skb);
8358                 }
8359                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
8360         }
8361 }
8362
8363 /* Initialize tx/rx rings for packet processing.
8364  *
8365  * The chip has been shut down and the driver detached from
8366  * the networking stack, so no interrupts or new tx packets will
8367  * end up in the driver.  tp->{tx,}lock are held and thus
8368  * we may not sleep.
8369  */
8370 static int tg3_init_rings(struct tg3 *tp)
8371 {
8372         int i;
8373
8374         /* Free up all the SKBs. */
8375         tg3_free_rings(tp);
8376
8377         for (i = 0; i < tp->irq_cnt; i++) {
8378                 struct tg3_napi *tnapi = &tp->napi[i];
8379
8380                 tnapi->last_tag = 0;
8381                 tnapi->last_irq_tag = 0;
8382                 tnapi->hw_status->status = 0;
8383                 tnapi->hw_status->status_tag = 0;
8384                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8385
8386                 tnapi->tx_prod = 0;
8387                 tnapi->tx_cons = 0;
8388                 if (tnapi->tx_ring)
8389                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
8390
8391                 tnapi->rx_rcb_ptr = 0;
8392                 if (tnapi->rx_rcb)
8393                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8394
8395                 if (tnapi->prodring.rx_std &&
8396                     tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8397                         tg3_free_rings(tp);
8398                         return -ENOMEM;
8399                 }
8400         }
8401
8402         return 0;
8403 }
8404
8405 static void tg3_mem_tx_release(struct tg3 *tp)
8406 {
8407         int i;
8408
8409         for (i = 0; i < tp->irq_max; i++) {
8410                 struct tg3_napi *tnapi = &tp->napi[i];
8411
8412                 if (tnapi->tx_ring) {
8413                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
8414                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
8415                         tnapi->tx_ring = NULL;
8416                 }
8417
8418                 kfree(tnapi->tx_buffers);
8419                 tnapi->tx_buffers = NULL;
8420         }
8421 }
8422
8423 static int tg3_mem_tx_acquire(struct tg3 *tp)
8424 {
8425         int i;
8426         struct tg3_napi *tnapi = &tp->napi[0];
8427
8428         /* If multivector TSS is enabled, vector 0 does not handle
8429          * tx interrupts.  Don't allocate any resources for it.
8430          */
8431         if (tg3_flag(tp, ENABLE_TSS))
8432                 tnapi++;
8433
8434         for (i = 0; i < tp->txq_cnt; i++, tnapi++) {
8435                 tnapi->tx_buffers = kzalloc(sizeof(struct tg3_tx_ring_info) *
8436                                             TG3_TX_RING_SIZE, GFP_KERNEL);
8437                 if (!tnapi->tx_buffers)
8438                         goto err_out;
8439
8440                 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
8441                                                     TG3_TX_RING_BYTES,
8442                                                     &tnapi->tx_desc_mapping,
8443                                                     GFP_KERNEL);
8444                 if (!tnapi->tx_ring)
8445                         goto err_out;
8446         }
8447
8448         return 0;
8449
8450 err_out:
8451         tg3_mem_tx_release(tp);
8452         return -ENOMEM;
8453 }
8454
8455 static void tg3_mem_rx_release(struct tg3 *tp)
8456 {
8457         int i;
8458
8459         for (i = 0; i < tp->irq_max; i++) {
8460                 struct tg3_napi *tnapi = &tp->napi[i];
8461
8462                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
8463
8464                 if (!tnapi->rx_rcb)
8465                         continue;
8466
8467                 dma_free_coherent(&tp->pdev->dev,
8468                                   TG3_RX_RCB_RING_BYTES(tp),
8469                                   tnapi->rx_rcb,
8470                                   tnapi->rx_rcb_mapping);
8471                 tnapi->rx_rcb = NULL;
8472         }
8473 }
8474
8475 static int tg3_mem_rx_acquire(struct tg3 *tp)
8476 {
8477         unsigned int i, limit;
8478
8479         limit = tp->rxq_cnt;
8480
8481         /* If RSS is enabled, we need a (dummy) producer ring
8482          * set on vector zero.  This is the true hw prodring.
8483          */
8484         if (tg3_flag(tp, ENABLE_RSS))
8485                 limit++;
8486
8487         for (i = 0; i < limit; i++) {
8488                 struct tg3_napi *tnapi = &tp->napi[i];
8489
8490                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
8491                         goto err_out;
8492
8493                 /* If multivector RSS is enabled, vector 0
8494                  * does not handle rx or tx interrupts.
8495                  * Don't allocate any resources for it.
8496                  */
8497                 if (!i && tg3_flag(tp, ENABLE_RSS))
8498                         continue;
8499
8500                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
8501                                                    TG3_RX_RCB_RING_BYTES(tp),
8502                                                    &tnapi->rx_rcb_mapping,
8503                                                    GFP_KERNEL | __GFP_ZERO);
8504                 if (!tnapi->rx_rcb)
8505                         goto err_out;
8506         }
8507
8508         return 0;
8509
8510 err_out:
8511         tg3_mem_rx_release(tp);
8512         return -ENOMEM;
8513 }
8514
8515 /*
8516  * Must not be invoked with interrupt sources disabled and
8517  * the hardware shut down.
8518  */
8519 static void tg3_free_consistent(struct tg3 *tp)
8520 {
8521         int i;
8522
8523         for (i = 0; i < tp->irq_cnt; i++) {
8524                 struct tg3_napi *tnapi = &tp->napi[i];
8525
8526                 if (tnapi->hw_status) {
8527                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
8528                                           tnapi->hw_status,
8529                                           tnapi->status_mapping);
8530                         tnapi->hw_status = NULL;
8531                 }
8532         }
8533
8534         tg3_mem_rx_release(tp);
8535         tg3_mem_tx_release(tp);
8536
8537         if (tp->hw_stats) {
8538                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
8539                                   tp->hw_stats, tp->stats_mapping);
8540                 tp->hw_stats = NULL;
8541         }
8542 }
8543
8544 /*
8545  * Must not be invoked with interrupt sources disabled and
8546  * the hardware shut down.  Can sleep.
8547  */
8548 static int tg3_alloc_consistent(struct tg3 *tp)
8549 {
8550         int i;
8551
8552         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
8553                                           sizeof(struct tg3_hw_stats),
8554                                           &tp->stats_mapping,
8555                                           GFP_KERNEL | __GFP_ZERO);
8556         if (!tp->hw_stats)
8557                 goto err_out;
8558
8559         for (i = 0; i < tp->irq_cnt; i++) {
8560                 struct tg3_napi *tnapi = &tp->napi[i];
8561                 struct tg3_hw_status *sblk;
8562
8563                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
8564                                                       TG3_HW_STATUS_SIZE,
8565                                                       &tnapi->status_mapping,
8566                                                       GFP_KERNEL | __GFP_ZERO);
8567                 if (!tnapi->hw_status)
8568                         goto err_out;
8569
8570                 sblk = tnapi->hw_status;
8571
8572                 if (tg3_flag(tp, ENABLE_RSS)) {
8573                         u16 *prodptr = NULL;
8574
8575                         /*
8576                          * When RSS is enabled, the status block format changes
8577                          * slightly.  The "rx_jumbo_consumer", "reserved",
8578                          * and "rx_mini_consumer" members get mapped to the
8579                          * other three rx return ring producer indexes.
8580                          */
8581                         switch (i) {
8582                         case 1:
8583                                 prodptr = &sblk->idx[0].rx_producer;
8584                                 break;
8585                         case 2:
8586                                 prodptr = &sblk->rx_jumbo_consumer;
8587                                 break;
8588                         case 3:
8589                                 prodptr = &sblk->reserved;
8590                                 break;
8591                         case 4:
8592                                 prodptr = &sblk->rx_mini_consumer;
8593                                 break;
8594                         }
8595                         tnapi->rx_rcb_prod_idx = prodptr;
8596                 } else {
8597                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
8598                 }
8599         }
8600
8601         if (tg3_mem_tx_acquire(tp) || tg3_mem_rx_acquire(tp))
8602                 goto err_out;
8603
8604         return 0;
8605
8606 err_out:
8607         tg3_free_consistent(tp);
8608         return -ENOMEM;
8609 }
8610
8611 #define MAX_WAIT_CNT 1000
8612
8613 /* To stop a block, clear the enable bit and poll till it
8614  * clears.  tp->lock is held.
8615  */
8616 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, bool silent)
8617 {
8618         unsigned int i;
8619         u32 val;
8620
8621         if (tg3_flag(tp, 5705_PLUS)) {
8622                 switch (ofs) {
8623                 case RCVLSC_MODE:
8624                 case DMAC_MODE:
8625                 case MBFREE_MODE:
8626                 case BUFMGR_MODE:
8627                 case MEMARB_MODE:
8628                         /* These bits can't be enabled/disabled on the
8629                          * 5705/5750, so just report success.
8630                          */
8631                         return 0;
8632
8633                 default:
8634                         break;
8635                 }
8636         }
8637
8638         val = tr32(ofs);
8639         val &= ~enable_bit;
8640         tw32_f(ofs, val);
8641
8642         for (i = 0; i < MAX_WAIT_CNT; i++) {
8643                 if (pci_channel_offline(tp->pdev)) {
8644                         dev_err(&tp->pdev->dev,
8645                                 "tg3_stop_block device offline, "
8646                                 "ofs=%lx enable_bit=%x\n",
8647                                 ofs, enable_bit);
8648                         return -ENODEV;
8649                 }
8650
8651                 udelay(100);
8652                 val = tr32(ofs);
8653                 if ((val & enable_bit) == 0)
8654                         break;
8655         }
8656
8657         if (i == MAX_WAIT_CNT && !silent) {
8658                 dev_err(&tp->pdev->dev,
8659                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
8660                         ofs, enable_bit);
8661                 return -ENODEV;
8662         }
8663
8664         return 0;
8665 }
8666
8667 /* tp->lock is held. */
8668 static int tg3_abort_hw(struct tg3 *tp, bool silent)
8669 {
8670         int i, err;
8671
8672         tg3_disable_ints(tp);
8673
8674         if (pci_channel_offline(tp->pdev)) {
8675                 tp->rx_mode &= ~(RX_MODE_ENABLE | TX_MODE_ENABLE);
8676                 tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8677                 err = -ENODEV;
8678                 goto err_no_dev;
8679         }
8680
8681         tp->rx_mode &= ~RX_MODE_ENABLE;
8682         tw32_f(MAC_RX_MODE, tp->rx_mode);
8683         udelay(10);
8684
8685         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
8686         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
8687         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
8688         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
8689         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
8690         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
8691
8692         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
8693         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
8694         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
8695         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
8696         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
8697         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
8698         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
8699
8700         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
8701         tw32_f(MAC_MODE, tp->mac_mode);
8702         udelay(40);
8703
8704         tp->tx_mode &= ~TX_MODE_ENABLE;
8705         tw32_f(MAC_TX_MODE, tp->tx_mode);
8706
8707         for (i = 0; i < MAX_WAIT_CNT; i++) {
8708                 udelay(100);
8709                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
8710                         break;
8711         }
8712         if (i >= MAX_WAIT_CNT) {
8713                 dev_err(&tp->pdev->dev,
8714                         "%s timed out, TX_MODE_ENABLE will not clear "
8715                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
8716                 err |= -ENODEV;
8717         }
8718
8719         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
8720         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
8721         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
8722
8723         tw32(FTQ_RESET, 0xffffffff);
8724         tw32(FTQ_RESET, 0x00000000);
8725
8726         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
8727         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
8728
8729 err_no_dev:
8730         for (i = 0; i < tp->irq_cnt; i++) {
8731                 struct tg3_napi *tnapi = &tp->napi[i];
8732                 if (tnapi->hw_status)
8733                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8734         }
8735
8736         return err;
8737 }
8738
8739 /* Save PCI command register before chip reset */
8740 static void tg3_save_pci_state(struct tg3 *tp)
8741 {
8742         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
8743 }
8744
8745 /* Restore PCI state after chip reset */
8746 static void tg3_restore_pci_state(struct tg3 *tp)
8747 {
8748         u32 val;
8749
8750         /* Re-enable indirect register accesses. */
8751         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
8752                                tp->misc_host_ctrl);
8753
8754         /* Set MAX PCI retry to zero. */
8755         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
8756         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
8757             tg3_flag(tp, PCIX_MODE))
8758                 val |= PCISTATE_RETRY_SAME_DMA;
8759         /* Allow reads and writes to the APE register and memory space. */
8760         if (tg3_flag(tp, ENABLE_APE))
8761                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8762                        PCISTATE_ALLOW_APE_SHMEM_WR |
8763                        PCISTATE_ALLOW_APE_PSPACE_WR;
8764         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
8765
8766         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
8767
8768         if (!tg3_flag(tp, PCI_EXPRESS)) {
8769                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
8770                                       tp->pci_cacheline_sz);
8771                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
8772                                       tp->pci_lat_timer);
8773         }
8774
8775         /* Make sure PCI-X relaxed ordering bit is clear. */
8776         if (tg3_flag(tp, PCIX_MODE)) {
8777                 u16 pcix_cmd;
8778
8779                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8780                                      &pcix_cmd);
8781                 pcix_cmd &= ~PCI_X_CMD_ERO;
8782                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
8783                                       pcix_cmd);
8784         }
8785
8786         if (tg3_flag(tp, 5780_CLASS)) {
8787
8788                 /* A chip reset on the 5780 clears the MSI enable bit,
8789                  * so we need to restore it.
8790                  */
8791                 if (tg3_flag(tp, USING_MSI)) {
8792                         u16 ctrl;
8793
8794                         pci_read_config_word(tp->pdev,
8795                                              tp->msi_cap + PCI_MSI_FLAGS,
8796                                              &ctrl);
8797                         pci_write_config_word(tp->pdev,
8798                                               tp->msi_cap + PCI_MSI_FLAGS,
8799                                               ctrl | PCI_MSI_FLAGS_ENABLE);
8800                         val = tr32(MSGINT_MODE);
8801                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
8802                 }
8803         }
8804 }
8805
8806 /* tp->lock is held. */
8807 static int tg3_chip_reset(struct tg3 *tp)
8808 {
8809         u32 val;
8810         void (*write_op)(struct tg3 *, u32, u32);
8811         int i, err;
8812
8813         tg3_nvram_lock(tp);
8814
8815         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
8816
8817         /* No matching tg3_nvram_unlock() after this because
8818          * chip reset below will undo the nvram lock.
8819          */
8820         tp->nvram_lock_cnt = 0;
8821
8822         /* GRC_MISC_CFG core clock reset will clear the memory
8823          * enable bit in PCI register 4 and the MSI enable bit
8824          * on some chips, so we save relevant registers here.
8825          */
8826         tg3_save_pci_state(tp);
8827
8828         if (tg3_asic_rev(tp) == ASIC_REV_5752 ||
8829             tg3_flag(tp, 5755_PLUS))
8830                 tw32(GRC_FASTBOOT_PC, 0);
8831
8832         /*
8833          * We must avoid the readl() that normally takes place.
8834          * It locks up machines, causes machine checks, and other
8835          * fun things.  So, temporarily disable the 5701
8836          * hardware workaround while we do the reset.
8837          */
8838         write_op = tp->write32;
8839         if (write_op == tg3_write_flush_reg32)
8840                 tp->write32 = tg3_write32;
8841
8842         /* Prevent the irq handler from reading or writing PCI registers
8843          * during chip reset when the memory enable bit in the PCI command
8844          * register may be cleared.  The chip does not generate interrupt
8845          * at this time, but the irq handler may still be called due to irq
8846          * sharing or irqpoll.
8847          */
8848         tg3_flag_set(tp, CHIP_RESETTING);
8849         for (i = 0; i < tp->irq_cnt; i++) {
8850                 struct tg3_napi *tnapi = &tp->napi[i];
8851                 if (tnapi->hw_status) {
8852                         tnapi->hw_status->status = 0;
8853                         tnapi->hw_status->status_tag = 0;
8854                 }
8855                 tnapi->last_tag = 0;
8856                 tnapi->last_irq_tag = 0;
8857         }
8858         smp_mb();
8859
8860         for (i = 0; i < tp->irq_cnt; i++)
8861                 synchronize_irq(tp->napi[i].irq_vec);
8862
8863         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
8864                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8865                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8866         }
8867
8868         /* do the reset */
8869         val = GRC_MISC_CFG_CORECLK_RESET;
8870
8871         if (tg3_flag(tp, PCI_EXPRESS)) {
8872                 /* Force PCIe 1.0a mode */
8873                 if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
8874                     !tg3_flag(tp, 57765_PLUS) &&
8875                     tr32(TG3_PCIE_PHY_TSTCTL) ==
8876                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
8877                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
8878
8879                 if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0) {
8880                         tw32(GRC_MISC_CFG, (1 << 29));
8881                         val |= (1 << 29);
8882                 }
8883         }
8884
8885         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
8886                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
8887                 tw32(GRC_VCPU_EXT_CTRL,
8888                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
8889         }
8890
8891         /* Manage gphy power for all CPMU-absent PCIe devices. */
8892         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
8893                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
8894
8895         tw32(GRC_MISC_CFG, val);
8896
8897         /* restore 5701 hardware bug workaround write method */
8898         tp->write32 = write_op;
8899
8900         /* Unfortunately, we have to delay before the PCI read back.
8901          * Some 575X chips will not even respond to a PCI cfg access
8902          * when the reset command is given to the chip.
8903          *
8904          * How do these hardware designers expect things to work
8905          * properly if the PCI write is posted for a long period
8906          * of time?  It is always necessary to have some method by
8907          * which a register read back can occur to push the write
8908          * out which does the reset.
8909          *
8910          * For most tg3 variants the trick below was working.
8911          * Ho hum...
8912          */
8913         udelay(120);
8914
8915         /* Flush PCI posted writes.  The normal MMIO registers
8916          * are inaccessible at this time, so this is the only
8917          * way to do this reliably (actually, this is no longer
8918          * the case, see above).  I tried to use indirect
8919          * register read/write but this upset some 5701 variants.
8920          */
8921         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
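
        /* For reference, a minimal sketch of the usual posted-write flush
         * idiom (not this driver's code -- the whole point here is that
         * MMIO reads are unsafe during reset):
         *
         *      writel(val, base + SOME_REG);   // write may be posted
         *      (void) readl(base + SOME_REG);  // read-back pushes it out
         *
         * Since that readl() can wedge the chip mid-reset (see above), a
         * PCI config space read is used here instead.
         */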
8922
8923         udelay(120);
8924
8925         if (tg3_flag(tp, PCI_EXPRESS) && pci_is_pcie(tp->pdev)) {
8926                 u16 val16;
8927
8928                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0) {
8929                         int j;
8930                         u32 cfg_val;
8931
8932                         /* Wait for link training to complete.  */
8933                         for (j = 0; j < 5000; j++)
8934                                 udelay(100);
8935
8936                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
8937                         pci_write_config_dword(tp->pdev, 0xc4,
8938                                                cfg_val | (1 << 15));
8939                 }
8940
8941                 /* Clear the "no snoop" and "relaxed ordering" bits. */
8942                 val16 = PCI_EXP_DEVCTL_RELAX_EN | PCI_EXP_DEVCTL_NOSNOOP_EN;
8943                 /*
8944                  * Older PCIe devices only support the 128 byte
8945                  * MPS setting.  Enforce the restriction.
8946                  */
8947                 if (!tg3_flag(tp, CPMU_PRESENT))
8948                         val16 |= PCI_EXP_DEVCTL_PAYLOAD;
8949                 pcie_capability_clear_word(tp->pdev, PCI_EXP_DEVCTL, val16);
8950
8951                 /* Clear error status */
8952                 pcie_capability_write_word(tp->pdev, PCI_EXP_DEVSTA,
8953                                       PCI_EXP_DEVSTA_CED |
8954                                       PCI_EXP_DEVSTA_NFED |
8955                                       PCI_EXP_DEVSTA_FED |
8956                                       PCI_EXP_DEVSTA_URD);
8957         }
8958
8959         tg3_restore_pci_state(tp);
8960
8961         tg3_flag_clear(tp, CHIP_RESETTING);
8962         tg3_flag_clear(tp, ERROR_PROCESSED);
8963
8964         val = 0;
8965         if (tg3_flag(tp, 5780_CLASS))
8966                 val = tr32(MEMARB_MODE);
8967         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
8968
8969         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A3) {
8970                 tg3_stop_fw(tp);
8971                 tw32(0x5000, 0x400);
8972         }
8973
8974         if (tg3_flag(tp, IS_SSB_CORE)) {
8975                 /*
8976                  * BCM4785: In order to avoid repercussions from using
8977                  * potentially defective internal ROM, stop the Rx RISC CPU,
8978          * which is not required for normal operation.
8979                  */
8980                 tg3_stop_fw(tp);
8981                 tg3_halt_cpu(tp, RX_CPU_BASE);
8982         }
8983
8984         err = tg3_poll_fw(tp);
8985         if (err)
8986                 return err;
8987
8988         tw32(GRC_MODE, tp->grc_mode);
8989
8990         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0) {
8991                 val = tr32(0xc4);
8992
8993                 tw32(0xc4, val | (1 << 15));
8994         }
8995
8996         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
8997             tg3_asic_rev(tp) == ASIC_REV_5705) {
8998                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
8999                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A0)
9000                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
9001                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9002         }
9003
9004         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9005                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
9006                 val = tp->mac_mode;
9007         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
9008                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
9009                 val = tp->mac_mode;
9010         } else
9011                 val = 0;
9012
9013         tw32_f(MAC_MODE, val);
9014         udelay(40);
9015
9016         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
9017
9018         tg3_mdio_start(tp);
9019
9020         if (tg3_flag(tp, PCI_EXPRESS) &&
9021             tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
9022             tg3_asic_rev(tp) != ASIC_REV_5785 &&
9023             !tg3_flag(tp, 57765_PLUS)) {
9024                 val = tr32(0x7c00);
9025
9026                 tw32(0x7c00, val | (1 << 25));
9027         }
9028
9029         if (tg3_asic_rev(tp) == ASIC_REV_5720) {
9030                 val = tr32(TG3_CPMU_CLCK_ORIDE);
9031                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
9032         }
9033
9034         /* Reprobe ASF enable state.  */
9035         tg3_flag_clear(tp, ENABLE_ASF);
9036         tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
9037                            TG3_PHYFLG_KEEP_LINK_ON_PWRDN);
9038
9039         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
9040         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
9041         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
9042                 u32 nic_cfg;
9043
9044                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
9045                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
9046                         tg3_flag_set(tp, ENABLE_ASF);
9047                         tp->last_event_jiffies = jiffies;
9048                         if (tg3_flag(tp, 5750_PLUS))
9049                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
9050
9051                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &nic_cfg);
9052                         if (nic_cfg & NIC_SRAM_1G_ON_VAUX_OK)
9053                                 tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
9054                         if (nic_cfg & NIC_SRAM_LNK_FLAP_AVOID)
9055                                 tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
9056                 }
9057         }
9058
9059         return 0;
9060 }
9061
9062 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
9063 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
9064
9065 /* tp->lock is held. */
9066 static int tg3_halt(struct tg3 *tp, int kind, bool silent)
9067 {
9068         int err;
9069
9070         tg3_stop_fw(tp);
9071
9072         tg3_write_sig_pre_reset(tp, kind);
9073
9074         tg3_abort_hw(tp, silent);
9075         err = tg3_chip_reset(tp);
9076
9077         __tg3_set_mac_addr(tp, false);
9078
9079         tg3_write_sig_legacy(tp, kind);
9080         tg3_write_sig_post_reset(tp, kind);
9081
9082         if (tp->hw_stats) {
9083                 /* Save the stats across chip resets... */
9084                 tg3_get_nstats(tp, &tp->net_stats_prev);
9085                 tg3_get_estats(tp, &tp->estats_prev);
9086
9087                 /* And make sure the next sample is new data */
9088                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
9089         }
9090
9091         if (err)
9092                 return err;
9093
9094         return 0;
9095 }
9096
9097 static int tg3_set_mac_addr(struct net_device *dev, void *p)
9098 {
9099         struct tg3 *tp = netdev_priv(dev);
9100         struct sockaddr *addr = p;
9101         int err = 0;
9102         bool skip_mac_1 = false;
9103
9104         if (!is_valid_ether_addr(addr->sa_data))
9105                 return -EADDRNOTAVAIL;
9106
9107         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
9108
9109         if (!netif_running(dev))
9110                 return 0;
9111
9112         if (tg3_flag(tp, ENABLE_ASF)) {
9113                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
9114
9115                 addr0_high = tr32(MAC_ADDR_0_HIGH);
9116                 addr0_low = tr32(MAC_ADDR_0_LOW);
9117                 addr1_high = tr32(MAC_ADDR_1_HIGH);
9118                 addr1_low = tr32(MAC_ADDR_1_LOW);
9119
9120                 /* Skip MAC addr 1 if ASF is using it. */
9121                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
9122                     !(addr1_high == 0 && addr1_low == 0))
9123                         skip_mac_1 = true;
9124         }
9125         spin_lock_bh(&tp->lock);
9126         __tg3_set_mac_addr(tp, skip_mac_1);
9127         spin_unlock_bh(&tp->lock);
9128
9129         return err;
9130 }
9131
9132 /* tp->lock is held. */
9133 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
9134                            dma_addr_t mapping, u32 maxlen_flags,
9135                            u32 nic_addr)
9136 {
9137         tg3_write_mem(tp,
9138                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
9139                       ((u64) mapping >> 32));
9140         tg3_write_mem(tp,
9141                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
9142                       ((u64) mapping & 0xffffffff));
9143         tg3_write_mem(tp,
9144                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
9145                        maxlen_flags);
9146
9147         if (!tg3_flag(tp, 5705_PLUS))
9148                 tg3_write_mem(tp,
9149                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
9150                               nic_addr);
9151 }
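
/* Usage sketch (mirroring calls made in tg3_rings_reset() below): a TX
 * ring's control block is programmed as
 *
 *      tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
 *                     TG3_TX_RING_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT,
 *                     NIC_SRAM_TX_BUFFER_DESC);
 *
 * while an unused ring is disabled by writing BDINFO_FLAGS_DISABLED to
 * its TG3_BDINFO_MAXLEN_FLAGS word alone.
 */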
9152
9153
9154 static void tg3_coal_tx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9155 {
9156         int i = 0;
9157
9158         if (!tg3_flag(tp, ENABLE_TSS)) {
9159                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
9160                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
9161                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
9162         } else {
9163                 tw32(HOSTCC_TXCOL_TICKS, 0);
9164                 tw32(HOSTCC_TXMAX_FRAMES, 0);
9165                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
9166
9167                 for (; i < tp->txq_cnt; i++) {
9168                         u32 reg;
9169
9170                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
9171                         tw32(reg, ec->tx_coalesce_usecs);
9172                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
9173                         tw32(reg, ec->tx_max_coalesced_frames);
9174                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
9175                         tw32(reg, ec->tx_max_coalesced_frames_irq);
9176                 }
9177         }
9178
9179         for (; i < tp->irq_max - 1; i++) {
9180                 tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
9181                 tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
9182                 tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9183         }
9184 }
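
/* Note on the address arithmetic above and in tg3_coal_rx_init() below:
 * the per-vector host coalescing registers are spaced 0x18 bytes apart,
 * so the register for the (i + 1)-th vector sits at
 * HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18 (and likewise for the RX set).
 */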
9185
9186 static void tg3_coal_rx_init(struct tg3 *tp, struct ethtool_coalesce *ec)
9187 {
9188         int i = 0;
9189         u32 limit = tp->rxq_cnt;
9190
9191         if (!tg3_flag(tp, ENABLE_RSS)) {
9192                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
9193                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
9194                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
9195                 limit--;
9196         } else {
9197                 tw32(HOSTCC_RXCOL_TICKS, 0);
9198                 tw32(HOSTCC_RXMAX_FRAMES, 0);
9199                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
9200         }
9201
9202         for (; i < limit; i++) {
9203                 u32 reg;
9204
9205                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
9206                 tw32(reg, ec->rx_coalesce_usecs);
9207                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
9208                 tw32(reg, ec->rx_max_coalesced_frames);
9209                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
9210                 tw32(reg, ec->rx_max_coalesced_frames_irq);
9211         }
9212
9213         for (; i < tp->irq_max - 1; i++) {
9214                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
9215                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
9216                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
9217         }
9218 }
9219
9220 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
9221 {
9222         tg3_coal_tx_init(tp, ec);
9223         tg3_coal_rx_init(tp, ec);
9224
9225         if (!tg3_flag(tp, 5705_PLUS)) {
9226                 u32 val = ec->stats_block_coalesce_usecs;
9227
9228                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
9229                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
9230
9231                 if (!tp->link_up)
9232                         val = 0;
9233
9234                 tw32(HOSTCC_STAT_COAL_TICKS, val);
9235         }
9236 }
9237
9238 /* tp->lock is held. */
9239 static void tg3_rings_reset(struct tg3 *tp)
9240 {
9241         int i;
9242         u32 stblk, txrcb, rxrcb, limit;
9243         struct tg3_napi *tnapi = &tp->napi[0];
9244
9245         /* Disable all transmit rings but the first. */
9246         if (!tg3_flag(tp, 5705_PLUS))
9247                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
9248         else if (tg3_flag(tp, 5717_PLUS))
9249                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
9250         else if (tg3_flag(tp, 57765_CLASS) ||
9251                  tg3_asic_rev(tp) == ASIC_REV_5762)
9252                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
9253         else
9254                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9255
9256         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
9257              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
9258                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
9259                               BDINFO_FLAGS_DISABLED);
9260
9261
9262         /* Disable all receive return rings but the first. */
9263         if (tg3_flag(tp, 5717_PLUS))
9264                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
9265         else if (!tg3_flag(tp, 5705_PLUS))
9266                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
9267         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9268                  tg3_asic_rev(tp) == ASIC_REV_5762 ||
9269                  tg3_flag(tp, 57765_CLASS))
9270                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
9271         else
9272                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9273
9274         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
9275              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
9276                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
9277                               BDINFO_FLAGS_DISABLED);
9278
9279         /* Disable interrupts */
9280         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
9281         tp->napi[0].chk_msi_cnt = 0;
9282         tp->napi[0].last_rx_cons = 0;
9283         tp->napi[0].last_tx_cons = 0;
9284
9285         /* Zero mailbox registers. */
9286         if (tg3_flag(tp, SUPPORT_MSIX)) {
9287                 for (i = 1; i < tp->irq_max; i++) {
9288                         tp->napi[i].tx_prod = 0;
9289                         tp->napi[i].tx_cons = 0;
9290                         if (tg3_flag(tp, ENABLE_TSS))
9291                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
9292                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
9293                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
9294                         tp->napi[i].chk_msi_cnt = 0;
9295                         tp->napi[i].last_rx_cons = 0;
9296                         tp->napi[i].last_tx_cons = 0;
9297                 }
9298                 if (!tg3_flag(tp, ENABLE_TSS))
9299                         tw32_mailbox(tp->napi[0].prodmbox, 0);
9300         } else {
9301                 tp->napi[0].tx_prod = 0;
9302                 tp->napi[0].tx_cons = 0;
9303                 tw32_mailbox(tp->napi[0].prodmbox, 0);
9304                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
9305         }
9306
9307         /* Make sure the NIC-based send BD rings are disabled. */
9308         if (!tg3_flag(tp, 5705_PLUS)) {
9309                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
9310                 for (i = 0; i < 16; i++)
9311                         tw32_tx_mbox(mbox + i * 8, 0);
9312         }
9313
9314         txrcb = NIC_SRAM_SEND_RCB;
9315         rxrcb = NIC_SRAM_RCV_RET_RCB;
9316
9317         /* Clear status block in ram. */
9318         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9319
9320         /* Set status block DMA address */
9321         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
9322              ((u64) tnapi->status_mapping >> 32));
9323         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
9324              ((u64) tnapi->status_mapping & 0xffffffff));
9325
9326         if (tnapi->tx_ring) {
9327                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9328                                (TG3_TX_RING_SIZE <<
9329                                 BDINFO_FLAGS_MAXLEN_SHIFT),
9330                                NIC_SRAM_TX_BUFFER_DESC);
9331                 txrcb += TG3_BDINFO_SIZE;
9332         }
9333
9334         if (tnapi->rx_rcb) {
9335                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9336                                (tp->rx_ret_ring_mask + 1) <<
9337                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
9338                 rxrcb += TG3_BDINFO_SIZE;
9339         }
9340
9341         stblk = HOSTCC_STATBLCK_RING1;
9342
9343         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
9344                 u64 mapping = (u64)tnapi->status_mapping;
9345                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
9346                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
9347
9348                 /* Clear status block in ram. */
9349                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
9350
9351                 if (tnapi->tx_ring) {
9352                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
9353                                        (TG3_TX_RING_SIZE <<
9354                                         BDINFO_FLAGS_MAXLEN_SHIFT),
9355                                        NIC_SRAM_TX_BUFFER_DESC);
9356                         txrcb += TG3_BDINFO_SIZE;
9357                 }
9358
9359                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
9360                                ((tp->rx_ret_ring_mask + 1) <<
9361                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
9362
9363                 stblk += 8;
9364                 rxrcb += TG3_BDINFO_SIZE;
9365         }
9366 }
9367
9368 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
9369 {
9370         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
9371
9372         if (!tg3_flag(tp, 5750_PLUS) ||
9373             tg3_flag(tp, 5780_CLASS) ||
9374             tg3_asic_rev(tp) == ASIC_REV_5750 ||
9375             tg3_asic_rev(tp) == ASIC_REV_5752 ||
9376             tg3_flag(tp, 57765_PLUS))
9377                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
9378         else if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
9379                  tg3_asic_rev(tp) == ASIC_REV_5787)
9380                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
9381         else
9382                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
9383
9384         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
9385         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
9386
9387         val = min(nic_rep_thresh, host_rep_thresh);
9388         tw32(RCVBDI_STD_THRESH, val);
9389
9390         if (tg3_flag(tp, 57765_PLUS))
9391                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
9392
9393         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
9394                 return;
9395
9396         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
9397
9398         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
9399
9400         val = min(bdcache_maxcnt / 2, host_rep_thresh);
9401         tw32(RCVBDI_JUMBO_THRESH, val);
9402
9403         if (tg3_flag(tp, 57765_PLUS))
9404                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
9405 }
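
/* Worked example with illustrative numbers: if rx_pending = 200 and the
 * BD cache allows nic_rep_thresh = 32, then host_rep_thresh =
 * max(200 / 8, 1) = 25 and RCVBDI_STD_THRESH is programmed with
 * min(32, 25) = 25.
 */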
9406
9407 static inline u32 calc_crc(unsigned char *buf, int len)
9408 {
9409         u32 reg;
9410         u32 tmp;
9411         int j, k;
9412
9413         reg = 0xffffffff;
9414
9415         for (j = 0; j < len; j++) {
9416                 reg ^= buf[j];
9417
9418                 for (k = 0; k < 8; k++) {
9419                         tmp = reg & 0x01;
9420
9421                         reg >>= 1;
9422
9423                         if (tmp)
9424                                 reg ^= 0xedb88320;
9425                 }
9426         }
9427
9428         return ~reg;
9429 }
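
/* calc_crc() is the standard bit-reflected CRC-32 (polynomial
 * 0xedb88320) used by Ethernet.  Usage sketch, matching the multicast
 * filter code in __tg3_set_rx_mode() below:
 *
 *      u32 crc = calc_crc(ha->addr, ETH_ALEN);
 *      u32 bit = ~crc & 0x7f;          // 7-bit hash index
 *      u32 regidx = (bit & 0x60) >> 5; // one of MAC_HASH_REG_0..3
 *
 *      bit &= 0x1f;                    // bit within that register
 */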
9430
9431 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
9432 {
9433         /* accept or reject all multicast frames */
9434         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
9435         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
9436         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
9437         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
9438 }
9439
9440 static void __tg3_set_rx_mode(struct net_device *dev)
9441 {
9442         struct tg3 *tp = netdev_priv(dev);
9443         u32 rx_mode;
9444
9445         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
9446                                   RX_MODE_KEEP_VLAN_TAG);
9447
9448 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
9449         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
9450          * flag clear.
9451          */
9452         if (!tg3_flag(tp, ENABLE_ASF))
9453                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
9454 #endif
9455
9456         if (dev->flags & IFF_PROMISC) {
9457                 /* Promiscuous mode. */
9458                 rx_mode |= RX_MODE_PROMISC;
9459         } else if (dev->flags & IFF_ALLMULTI) {
9460                 /* Accept all multicast. */
9461                 tg3_set_multi(tp, 1);
9462         } else if (netdev_mc_empty(dev)) {
9463                 /* Reject all multicast. */
9464                 tg3_set_multi(tp, 0);
9465         } else {
9466                 /* Accept one or more multicast addresses. */
9467                 struct netdev_hw_addr *ha;
9468                 u32 mc_filter[4] = { 0, };
9469                 u32 regidx;
9470                 u32 bit;
9471                 u32 crc;
9472
9473                 netdev_for_each_mc_addr(ha, dev) {
9474                         crc = calc_crc(ha->addr, ETH_ALEN);
9475                         bit = ~crc & 0x7f;
9476                         regidx = (bit & 0x60) >> 5;
9477                         bit &= 0x1f;
9478                         mc_filter[regidx] |= (1 << bit);
9479                 }
9480
9481                 tw32(MAC_HASH_REG_0, mc_filter[0]);
9482                 tw32(MAC_HASH_REG_1, mc_filter[1]);
9483                 tw32(MAC_HASH_REG_2, mc_filter[2]);
9484                 tw32(MAC_HASH_REG_3, mc_filter[3]);
9485         }
9486
9487         if (rx_mode != tp->rx_mode) {
9488                 tp->rx_mode = rx_mode;
9489                 tw32_f(MAC_RX_MODE, rx_mode);
9490                 udelay(10);
9491         }
9492 }
9493
9494 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp, u32 qcnt)
9495 {
9496         int i;
9497
9498         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
9499                 tp->rss_ind_tbl[i] = ethtool_rxfh_indir_default(i, qcnt);
9500 }
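
/* ethtool_rxfh_indir_default(i, qcnt) is simply i % qcnt, so the default
 * indirection table spreads flows round-robin across the RX queues:
 * with qcnt = 4 it reads 0, 1, 2, 3, 0, 1, 2, 3, ...
 */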
9501
9502 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
9503 {
9504         int i;
9505
9506         if (!tg3_flag(tp, SUPPORT_MSIX))
9507                 return;
9508
9509         if (tp->rxq_cnt == 1) {
9510                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
9511                 return;
9512         }
9513
9514         /* Validate table against current IRQ count */
9515         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
9516                 if (tp->rss_ind_tbl[i] >= tp->rxq_cnt)
9517                         break;
9518         }
9519
9520         if (i != TG3_RSS_INDIR_TBL_SIZE)
9521                 tg3_rss_init_dflt_indir_tbl(tp, tp->rxq_cnt);
9522 }
9523
9524 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
9525 {
9526         int i = 0;
9527         u32 reg = MAC_RSS_INDIR_TBL_0;
9528
9529         while (i < TG3_RSS_INDIR_TBL_SIZE) {
9530                 u32 val = tp->rss_ind_tbl[i];
9531                 i++;
9532                 for (; i % 8; i++) {
9533                         val <<= 4;
9534                         val |= tp->rss_ind_tbl[i];
9535                 }
9536                 tw32(reg, val);
9537                 reg += 4;
9538         }
9539 }
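
/* Packing sketch for the loop above: each 32-bit indirection table
 * register holds eight 4-bit entries, first entry in the most
 * significant nibble, so rss_ind_tbl[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 * is written out as tw32(MAC_RSS_INDIR_TBL_0, 0x01230123).
 */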
9540
9541 static inline u32 tg3_lso_rd_dma_workaround_bit(struct tg3 *tp)
9542 {
9543         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9544                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5719;
9545         else
9546                 return TG3_LSO_RD_DMA_TX_LENGTH_WA_5720;
9547 }
9548
9549 /* tp->lock is held. */
9550 static int tg3_reset_hw(struct tg3 *tp, bool reset_phy)
9551 {
9552         u32 val, rdmac_mode;
9553         int i, err, limit;
9554         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
9555
9556         tg3_disable_ints(tp);
9557
9558         tg3_stop_fw(tp);
9559
9560         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
9561
9562         if (tg3_flag(tp, INIT_COMPLETE))
9563                 tg3_abort_hw(tp, true);
9564
9565         /* Enable MAC control of LPI */
9566         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
9567                 val = TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
9568                       TG3_CPMU_EEE_LNKIDL_UART_IDL;
9569                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9570                         val |= TG3_CPMU_EEE_LNKIDL_APE_TX_MT;
9571
9572                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL, val);
9573
9574                 tw32_f(TG3_CPMU_EEE_CTRL,
9575                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
9576
9577                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
9578                       TG3_CPMU_EEEMD_LPI_IN_TX |
9579                       TG3_CPMU_EEEMD_LPI_IN_RX |
9580                       TG3_CPMU_EEEMD_EEE_ENABLE;
9581
9582                 if (tg3_asic_rev(tp) != ASIC_REV_5717)
9583                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
9584
9585                 if (tg3_flag(tp, ENABLE_APE))
9586                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
9587
9588                 tw32_f(TG3_CPMU_EEE_MODE, val);
9589
9590                 tw32_f(TG3_CPMU_EEE_DBTMR1,
9591                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
9592                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
9593
9594                 tw32_f(TG3_CPMU_EEE_DBTMR2,
9595                        TG3_CPMU_DBTMR2_APE_TX_2047US |
9596                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
9597         }
9598
9599         if ((tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
9600             !(tp->phy_flags & TG3_PHYFLG_USER_CONFIGURED)) {
9601                 tg3_phy_pull_config(tp);
9602                 tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
9603         }
9604
9605         if (reset_phy)
9606                 tg3_phy_reset(tp);
9607
9608         err = tg3_chip_reset(tp);
9609         if (err)
9610                 return err;
9611
9612         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
9613
9614         if (tg3_chip_rev(tp) == CHIPREV_5784_AX) {
9615                 val = tr32(TG3_CPMU_CTRL);
9616                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
9617                 tw32(TG3_CPMU_CTRL, val);
9618
9619                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9620                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9621                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9622                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9623
9624                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
9625                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
9626                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
9627                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
9628
9629                 val = tr32(TG3_CPMU_HST_ACC);
9630                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
9631                 val |= CPMU_HST_ACC_MACCLK_6_25;
9632                 tw32(TG3_CPMU_HST_ACC, val);
9633         }
9634
9635         if (tg3_asic_rev(tp) == ASIC_REV_57780) {
9636                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
9637                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
9638                        PCIE_PWR_MGMT_L1_THRESH_4MS;
9639                 tw32(PCIE_PWR_MGMT_THRESH, val);
9640
9641                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
9642                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
9643
9644                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
9645
9646                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
9647                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
9648         }
9649
9650         if (tg3_flag(tp, L1PLLPD_EN)) {
9651                 u32 grc_mode = tr32(GRC_MODE);
9652
9653                 /* Access the lower 1K of PL PCIE block registers. */
9654                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9655                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9656
9657                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
9658                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
9659                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
9660
9661                 tw32(GRC_MODE, grc_mode);
9662         }
9663
9664         if (tg3_flag(tp, 57765_CLASS)) {
9665                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0) {
9666                         u32 grc_mode = tr32(GRC_MODE);
9667
9668                         /* Access the lower 1K of PL PCIE block registers. */
9669                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9670                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
9671
9672                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9673                                    TG3_PCIE_PL_LO_PHYCTL5);
9674                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
9675                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
9676
9677                         tw32(GRC_MODE, grc_mode);
9678                 }
9679
9680                 if (tg3_chip_rev(tp) != CHIPREV_57765_AX) {
9681                         u32 grc_mode;
9682
9683                         /* Fix transmit hangs */
9684                         val = tr32(TG3_CPMU_PADRNG_CTL);
9685                         val |= TG3_CPMU_PADRNG_CTL_RDIV2;
9686                         tw32(TG3_CPMU_PADRNG_CTL, val);
9687
9688                         grc_mode = tr32(GRC_MODE);
9689
9690                         /* Access the lower 1K of DL PCIE block registers. */
9691                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
9692                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
9693
9694                         val = tr32(TG3_PCIE_TLDLPL_PORT +
9695                                    TG3_PCIE_DL_LO_FTSMAX);
9696                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
9697                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
9698                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
9699
9700                         tw32(GRC_MODE, grc_mode);
9701                 }
9702
9703                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
9704                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
9705                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
9706                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
9707         }
9708
9709         /* This works around an issue with Athlon chipsets on
9710          * B3 tigon3 silicon.  This bit has no effect on any
9711          * other revision.  But do not set this on PCI Express
9712          * chips and don't even touch the clocks if the CPMU is present.
9713          */
9714         if (!tg3_flag(tp, CPMU_PRESENT)) {
9715                 if (!tg3_flag(tp, PCI_EXPRESS))
9716                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
9717                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
9718         }
9719
9720         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0 &&
9721             tg3_flag(tp, PCIX_MODE)) {
9722                 val = tr32(TG3PCI_PCISTATE);
9723                 val |= PCISTATE_RETRY_SAME_DMA;
9724                 tw32(TG3PCI_PCISTATE, val);
9725         }
9726
9727         if (tg3_flag(tp, ENABLE_APE)) {
9728                 /* Allow reads and writes to the
9729                  * APE register and memory space.
9730                  */
9731                 val = tr32(TG3PCI_PCISTATE);
9732                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
9733                        PCISTATE_ALLOW_APE_SHMEM_WR |
9734                        PCISTATE_ALLOW_APE_PSPACE_WR;
9735                 tw32(TG3PCI_PCISTATE, val);
9736         }
9737
9738         if (tg3_chip_rev(tp) == CHIPREV_5704_BX) {
9739                 /* Enable some hw fixes.  */
9740                 val = tr32(TG3PCI_MSI_DATA);
9741                 val |= (1 << 26) | (1 << 28) | (1 << 29);
9742                 tw32(TG3PCI_MSI_DATA, val);
9743         }
9744
9745         /* Descriptor ring init may make accesses to the
9746          * NIC SRAM area to set up the TX descriptors, so we
9747          * can only do this after the hardware has been
9748          * successfully reset.
9749          */
9750         err = tg3_init_rings(tp);
9751         if (err)
9752                 return err;
9753
9754         if (tg3_flag(tp, 57765_PLUS)) {
9755                 val = tr32(TG3PCI_DMA_RW_CTRL) &
9756                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
9757                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_57765_A0)
9758                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
9759                 if (!tg3_flag(tp, 57765_CLASS) &&
9760                     tg3_asic_rev(tp) != ASIC_REV_5717 &&
9761                     tg3_asic_rev(tp) != ASIC_REV_5762)
9762                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
9763                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
9764         } else if (tg3_asic_rev(tp) != ASIC_REV_5784 &&
9765                    tg3_asic_rev(tp) != ASIC_REV_5761) {
9766                 /* This value is determined during the probe time DMA
9767                  * engine test, tg3_test_dma.
9768                  */
9769                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
9770         }
9771
9772         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
9773                           GRC_MODE_4X_NIC_SEND_RINGS |
9774                           GRC_MODE_NO_TX_PHDR_CSUM |
9775                           GRC_MODE_NO_RX_PHDR_CSUM);
9776         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
9777
9778         /* Pseudo-header checksum is done by hardware logic and not
9779          * the offload processors, so make the chip do the pseudo-
9780          * header checksums on receive.  For transmit it is more
9781          * convenient to do the pseudo-header checksum in software
9782          * as Linux does that on transmit for us in all cases.
9783          */
9784         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
9785
9786         val = GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP;
9787         if (tp->rxptpctl)
9788                 tw32(TG3_RX_PTP_CTL,
9789                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
9790
9791         if (tg3_flag(tp, PTP_CAPABLE))
9792                 val |= GRC_MODE_TIME_SYNC_ENABLE;
9793
9794         tw32(GRC_MODE, tp->grc_mode | val);
9795
9796         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
9797         val = tr32(GRC_MISC_CFG);
9798         val &= ~0xff;
9799         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
9800         tw32(GRC_MISC_CFG, val);
9801
9802         /* Initialize MBUF/DESC pool. */
9803         if (tg3_flag(tp, 5750_PLUS)) {
9804                 /* Do nothing.  */
9805         } else if (tg3_asic_rev(tp) != ASIC_REV_5705) {
9806                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
9807                 if (tg3_asic_rev(tp) == ASIC_REV_5704)
9808                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
9809                 else
9810                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
9811                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
9812                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
9813         } else if (tg3_flag(tp, TSO_CAPABLE)) {
9814                 int fw_len;
9815
9816                 fw_len = tp->fw_len;
9817                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
9818                 tw32(BUFMGR_MB_POOL_ADDR,
9819                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
9820                 tw32(BUFMGR_MB_POOL_SIZE,
9821                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
9822         }
9823
9824         if (tp->dev->mtu <= ETH_DATA_LEN) {
9825                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9826                      tp->bufmgr_config.mbuf_read_dma_low_water);
9827                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9828                      tp->bufmgr_config.mbuf_mac_rx_low_water);
9829                 tw32(BUFMGR_MB_HIGH_WATER,
9830                      tp->bufmgr_config.mbuf_high_water);
9831         } else {
9832                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
9833                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
9834                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
9835                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
9836                 tw32(BUFMGR_MB_HIGH_WATER,
9837                      tp->bufmgr_config.mbuf_high_water_jumbo);
9838         }
9839         tw32(BUFMGR_DMA_LOW_WATER,
9840              tp->bufmgr_config.dma_low_water);
9841         tw32(BUFMGR_DMA_HIGH_WATER,
9842              tp->bufmgr_config.dma_high_water);
9843
9844         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
9845         if (tg3_asic_rev(tp) == ASIC_REV_5719)
9846                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
9847         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
9848             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9849             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0)
9850                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
9851         tw32(BUFMGR_MODE, val);
9852         for (i = 0; i < 2000; i++) {
9853                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
9854                         break;
9855                 udelay(10);
9856         }
9857         if (i >= 2000) {
9858                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
9859                 return -ENODEV;
9860         }
9861
9862         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5906_A1)
9863                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
9864
9865         tg3_setup_rxbd_thresholds(tp);
9866
9867         /* Initialize TG3_BDINFO's at:
9868          *  RCVDBDI_STD_BD:     standard eth size rx ring
9869          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
9870          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
9871          *
9872          * like so:
9873          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
9874          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
9875          *                              ring attribute flags
9876          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
9877          *
9878          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
9879          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
9880          *
9881          * The size of each ring is fixed in the firmware, but the location is
9882          * configurable.
9883          */
9884         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9885              ((u64) tpr->rx_std_mapping >> 32));
9886         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9887              ((u64) tpr->rx_std_mapping & 0xffffffff));
9888         if (!tg3_flag(tp, 5717_PLUS))
9889                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
9890                      NIC_SRAM_RX_BUFFER_DESC);
9891
9892         /* Disable the mini ring */
9893         if (!tg3_flag(tp, 5705_PLUS))
9894                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
9895                      BDINFO_FLAGS_DISABLED);
9896
9897         /* Program the jumbo buffer descriptor ring control
9898          * blocks on those devices that have them.
9899          */
9900         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
9901             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
9902
9903                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
9904                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
9905                              ((u64) tpr->rx_jmb_mapping >> 32));
9906                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
9907                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
9908                         val = TG3_RX_JMB_RING_SIZE(tp) <<
9909                               BDINFO_FLAGS_MAXLEN_SHIFT;
9910                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9911                              val | BDINFO_FLAGS_USE_EXT_RECV);
9912                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
9913                             tg3_flag(tp, 57765_CLASS) ||
9914                             tg3_asic_rev(tp) == ASIC_REV_5762)
9915                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
9916                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
9917                 } else {
9918                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
9919                              BDINFO_FLAGS_DISABLED);
9920                 }
9921
9922                 if (tg3_flag(tp, 57765_PLUS)) {
9923                         val = TG3_RX_STD_RING_SIZE(tp);
9924                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
9925                         val |= (TG3_RX_STD_DMA_SZ << 2);
9926                 } else
9927                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
9928         } else
9929                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
9930
9931         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
9932
9933         tpr->rx_std_prod_idx = tp->rx_pending;
9934         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
9935
9936         tpr->rx_jmb_prod_idx =
9937                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
9938         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
9939
9940         tg3_rings_reset(tp);
9941
9942         /* Initialize MAC address and backoff seed. */
9943         __tg3_set_mac_addr(tp, false);
9944
9945         /* MTU + ethernet header + FCS + optional VLAN tag */
9946         tw32(MAC_RX_MTU_SIZE,
9947              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
9948
9949         /* The slot time is changed by tg3_setup_phy if we
9950          * run at gigabit with half duplex.
9951          */
9952         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
9953               (6 << TX_LENGTHS_IPG_SHIFT) |
9954               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
9955
9956         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
9957             tg3_asic_rev(tp) == ASIC_REV_5762)
9958                 val |= tr32(MAC_TX_LENGTHS) &
9959                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
9960                         TX_LENGTHS_CNT_DWN_VAL_MSK);
9961
9962         tw32(MAC_TX_LENGTHS, val);
9963
9964         /* Receive rules. */
9965         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
9966         tw32(RCVLPC_CONFIG, 0x0181);
9967
9968         /* Calculate the RDMAC_MODE setting early; we need it to
9969          * determine the RCVLPC_STATS_ENABLE mask.
9970          */
9971         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
9972                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
9973                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
9974                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
9975                       RDMAC_MODE_LNGREAD_ENAB);
9976
9977         if (tg3_asic_rev(tp) == ASIC_REV_5717)
9978                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
9979
9980         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
9981             tg3_asic_rev(tp) == ASIC_REV_5785 ||
9982             tg3_asic_rev(tp) == ASIC_REV_57780)
9983                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
9984                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
9985                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
9986
9987         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
9988             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
9989                 if (tg3_flag(tp, TSO_CAPABLE) &&
9990                     tg3_asic_rev(tp) == ASIC_REV_5705) {
9991                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
9992                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9993                            !tg3_flag(tp, IS_5788)) {
9994                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
9995                 }
9996         }
9997
9998         if (tg3_flag(tp, PCI_EXPRESS))
9999                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
10000
10001         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10002                 tp->dma_limit = 0;
10003                 if (tp->dev->mtu <= ETH_DATA_LEN) {
10004                         rdmac_mode |= RDMAC_MODE_JMB_2K_MMRR;
10005                         tp->dma_limit = TG3_TX_BD_DMA_MAX_2K;
10006                 }
10007         }
10008
10009         if (tg3_flag(tp, HW_TSO_1) ||
10010             tg3_flag(tp, HW_TSO_2) ||
10011             tg3_flag(tp, HW_TSO_3))
10012                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
10013
10014         if (tg3_flag(tp, 57765_PLUS) ||
10015             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10016             tg3_asic_rev(tp) == ASIC_REV_57780)
10017                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
10018
10019         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10020             tg3_asic_rev(tp) == ASIC_REV_5762)
10021                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
10022
10023         if (tg3_asic_rev(tp) == ASIC_REV_5761 ||
10024             tg3_asic_rev(tp) == ASIC_REV_5784 ||
10025             tg3_asic_rev(tp) == ASIC_REV_5785 ||
10026             tg3_asic_rev(tp) == ASIC_REV_57780 ||
10027             tg3_flag(tp, 57765_PLUS)) {
10028                 u32 tgtreg;
10029
10030                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10031                         tgtreg = TG3_RDMA_RSRVCTRL_REG2;
10032                 else
10033                         tgtreg = TG3_RDMA_RSRVCTRL_REG;
10034
10035                 val = tr32(tgtreg);
10036                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
10037                     tg3_asic_rev(tp) == ASIC_REV_5762) {
10038                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
10039                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
10040                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
10041                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
10042                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
10043                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
10044                 }
10045                 tw32(tgtreg, val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
10046         }
10047
10048         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10049             tg3_asic_rev(tp) == ASIC_REV_5720 ||
10050             tg3_asic_rev(tp) == ASIC_REV_5762) {
10051                 u32 tgtreg;
10052
10053                 if (tg3_asic_rev(tp) == ASIC_REV_5762)
10054                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL2;
10055                 else
10056                         tgtreg = TG3_LSO_RD_DMA_CRPTEN_CTRL;
10057
10058                 val = tr32(tgtreg);
10059                 tw32(tgtreg, val |
10060                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
10061                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
10062         }
10063
10064         /* Receive/send statistics. */
10065         if (tg3_flag(tp, 5750_PLUS)) {
10066                 val = tr32(RCVLPC_STATS_ENABLE);
10067                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
10068                 tw32(RCVLPC_STATS_ENABLE, val);
10069         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
10070                    tg3_flag(tp, TSO_CAPABLE)) {
10071                 val = tr32(RCVLPC_STATS_ENABLE);
10072                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
10073                 tw32(RCVLPC_STATS_ENABLE, val);
10074         } else {
10075                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
10076         }
10077         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
10078         tw32(SNDDATAI_STATSENAB, 0xffffff);
10079         tw32(SNDDATAI_STATSCTRL,
10080              (SNDDATAI_SCTRL_ENABLE |
10081               SNDDATAI_SCTRL_FASTUPD));
10082
10083         /* Setup host coalescing engine. */
10084         tw32(HOSTCC_MODE, 0);
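        /* Wait up to ~20 ms for the coalescing engine to clear its
         * enable bit before programming the new coalescing parameters.
         */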
10085         for (i = 0; i < 2000; i++) {
10086                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
10087                         break;
10088                 udelay(10);
10089         }
10090
10091         __tg3_set_coalesce(tp, &tp->coal);
10092
10093         if (!tg3_flag(tp, 5705_PLUS)) {
10094                 /* Status/statistics block address.  See tg3_timer,
10095                  * the tg3_periodic_fetch_stats call there, and
10096                  * tg3_get_stats to see how this works for 5705/5750 chips.
10097                  */
10098                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
10099                      ((u64) tp->stats_mapping >> 32));
10100                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
10101                      ((u64) tp->stats_mapping & 0xffffffff));
10102                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
10103
10104                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
10105
10106                 /* Clear statistics and status block memory areas */
10107                 for (i = NIC_SRAM_STATS_BLK;
10108                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
10109                      i += sizeof(u32)) {
10110                         tg3_write_mem(tp, i, 0);
10111                         udelay(40);
10112                 }
10113         }
10114
10115         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
10116
10117         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
10118         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
10119         if (!tg3_flag(tp, 5705_PLUS))
10120                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
10121
10122         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
10123                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
10124                 /* reset to prevent losing 1st rx packet intermittently */
10125                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10126                 udelay(10);
10127         }
10128
10129         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
10130                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
10131                         MAC_MODE_FHDE_ENABLE;
10132         if (tg3_flag(tp, ENABLE_APE))
10133                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
10134         if (!tg3_flag(tp, 5705_PLUS) &&
10135             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10136             tg3_asic_rev(tp) != ASIC_REV_5700)
10137                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
10138         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
10139         udelay(40);
10140
10141         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
10142          * If TG3_FLAG_IS_NIC is zero, we should read the
10143          * register to preserve the GPIO settings for LOMs. The GPIOs,
10144          * whether used as inputs or outputs, are set by boot code after
10145          * reset.
10146          */
10147         if (!tg3_flag(tp, IS_NIC)) {
10148                 u32 gpio_mask;
10149
10150                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
10151                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
10152                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
10153
10154                 if (tg3_asic_rev(tp) == ASIC_REV_5752)
10155                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
10156                                      GRC_LCLCTRL_GPIO_OUTPUT3;
10157
10158                 if (tg3_asic_rev(tp) == ASIC_REV_5755)
10159                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
10160
10161                 tp->grc_local_ctrl &= ~gpio_mask;
10162                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
10163
10164                 /* GPIO1 must be driven high for eeprom write protect */
10165                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
10166                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
10167                                                GRC_LCLCTRL_GPIO_OUTPUT1);
10168         }
10169         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10170         udelay(100);
10171
10172         if (tg3_flag(tp, USING_MSIX)) {
10173                 val = tr32(MSGINT_MODE);
10174                 val |= MSGINT_MODE_ENABLE;
10175                 if (tp->irq_cnt > 1)
10176                         val |= MSGINT_MODE_MULTIVEC_EN;
10177                 if (!tg3_flag(tp, 1SHOT_MSI))
10178                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
10179                 tw32(MSGINT_MODE, val);
10180         }
10181
10182         if (!tg3_flag(tp, 5705_PLUS)) {
10183                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
10184                 udelay(40);
10185         }
10186
10187         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
10188                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
10189                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
10190                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
10191                WDMAC_MODE_LNGREAD_ENAB);
10192
10193         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
10194             tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
10195                 if (tg3_flag(tp, TSO_CAPABLE) &&
10196                     (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 ||
10197                      tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A2)) {
10198                         /* nothing */
10199                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
10200                            !tg3_flag(tp, IS_5788)) {
10201                         val |= WDMAC_MODE_RX_ACCEL;
10202                 }
10203         }
10204
10205         /* Enable host coalescing bug fix */
10206         if (tg3_flag(tp, 5755_PLUS))
10207                 val |= WDMAC_MODE_STATUS_TAG_FIX;
10208
10209         if (tg3_asic_rev(tp) == ASIC_REV_5785)
10210                 val |= WDMAC_MODE_BURST_ALL_DATA;
10211
10212         tw32_f(WDMAC_MODE, val);
10213         udelay(40);
10214
10215         if (tg3_flag(tp, PCIX_MODE)) {
10216                 u16 pcix_cmd;
10217
10218                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10219                                      &pcix_cmd);
10220                 if (tg3_asic_rev(tp) == ASIC_REV_5703) {
10221                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
10222                         pcix_cmd |= PCI_X_CMD_READ_2K;
10223                 } else if (tg3_asic_rev(tp) == ASIC_REV_5704) {
10224                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
10225                         pcix_cmd |= PCI_X_CMD_READ_2K;
10226                 }
10227                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
10228                                       pcix_cmd);
10229         }
10230
10231         tw32_f(RDMAC_MODE, rdmac_mode);
10232         udelay(40);
10233
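        /* 5719/5720: if any read DMA channel reports a length above the
         * MTU, set the LSO read DMA workaround bit and note the bug so
         * tg3_periodic_fetch_stats() can clear it again later.
         */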
10234         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
10235             tg3_asic_rev(tp) == ASIC_REV_5720) {
10236                 for (i = 0; i < TG3_NUM_RDMA_CHANNELS; i++) {
10237                         if (tr32(TG3_RDMA_LENGTH + (i << 2)) > TG3_MAX_MTU(tp))
10238                                 break;
10239                 }
10240                 if (i < TG3_NUM_RDMA_CHANNELS) {
10241                         val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10242                         val |= tg3_lso_rd_dma_workaround_bit(tp);
10243                         tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10244                         tg3_flag_set(tp, 5719_5720_RDMA_BUG);
10245                 }
10246         }
10247
10248         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
10249         if (!tg3_flag(tp, 5705_PLUS))
10250                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
10251
10252         if (tg3_asic_rev(tp) == ASIC_REV_5761)
10253                 tw32(SNDDATAC_MODE,
10254                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
10255         else
10256                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
10257
10258         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
10259         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
10260         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
10261         if (tg3_flag(tp, LRG_PROD_RING_CAP))
10262                 val |= RCVDBDI_MODE_LRG_RING_SZ;
10263         tw32(RCVDBDI_MODE, val);
10264         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
10265         if (tg3_flag(tp, HW_TSO_1) ||
10266             tg3_flag(tp, HW_TSO_2) ||
10267             tg3_flag(tp, HW_TSO_3))
10268                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
10269         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
10270         if (tg3_flag(tp, ENABLE_TSS))
10271                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
10272         tw32(SNDBDI_MODE, val);
10273         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
10274
10275         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
10276                 err = tg3_load_5701_a0_firmware_fix(tp);
10277                 if (err)
10278                         return err;
10279         }
10280
10281         if (tg3_asic_rev(tp) == ASIC_REV_57766) {
10282                 /* Ignore any errors from the firmware download. If the
10283                  * download fails, the device will operate with EEE disabled.
10284                  */
10285                 tg3_load_57766_firmware(tp);
10286         }
10287
10288         if (tg3_flag(tp, TSO_CAPABLE)) {
10289                 err = tg3_load_tso_firmware(tp);
10290                 if (err)
10291                         return err;
10292         }
10293
10294         tp->tx_mode = TX_MODE_ENABLE;
10295
10296         if (tg3_flag(tp, 5755_PLUS) ||
10297             tg3_asic_rev(tp) == ASIC_REV_5906)
10298                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
10299
10300         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
10301             tg3_asic_rev(tp) == ASIC_REV_5762) {
10302                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
10303                 tp->tx_mode &= ~val;
10304                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
10305         }
10306
10307         tw32_f(MAC_TX_MODE, tp->tx_mode);
10308         udelay(100);
10309
10310         if (tg3_flag(tp, ENABLE_RSS)) {
10311                 tg3_rss_write_indir_tbl(tp);
10312
10313                 /* Setup the "secret" hash key. */
10314                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
10315                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
10316                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
10317                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
10318                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
10319                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
10320                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
10321                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
10322                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
10323                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
10324         }
10325
10326         tp->rx_mode = RX_MODE_ENABLE;
10327         if (tg3_flag(tp, 5755_PLUS))
10328                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
10329
10330         if (tg3_flag(tp, ENABLE_RSS))
10331                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
10332                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
10333                                RX_MODE_RSS_IPV6_HASH_EN |
10334                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
10335                                RX_MODE_RSS_IPV4_HASH_EN |
10336                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
10337
10338         tw32_f(MAC_RX_MODE, tp->rx_mode);
10339         udelay(10);
10340
10341         tw32(MAC_LED_CTRL, tp->led_ctrl);
10342
10343         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
10344         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10345                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
10346                 udelay(10);
10347         }
10348         tw32_f(MAC_RX_MODE, tp->rx_mode);
10349         udelay(10);
10350
10351         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
10352                 if ((tg3_asic_rev(tp) == ASIC_REV_5704) &&
10353                     !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
10354                         /* Set the drive transmission level to 1.2V, but
10355                          * only if the signal pre-emphasis bit is not set. */
10356                         val = tr32(MAC_SERDES_CFG);
10357                         val &= 0xfffff000;
10358                         val |= 0x880;
10359                         tw32(MAC_SERDES_CFG, val);
10360                 }
10361                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1)
10362                         tw32(MAC_SERDES_CFG, 0x616000);
10363         }
10364
10365         /* Prevent chip from dropping frames when flow control
10366          * is enabled.
10367          */
10368         if (tg3_flag(tp, 57765_CLASS))
10369                 val = 1;
10370         else
10371                 val = 2;
10372         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
10373
10374         if (tg3_asic_rev(tp) == ASIC_REV_5704 &&
10375             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
10376                 /* Use hardware link auto-negotiation */
10377                 tg3_flag_set(tp, HW_AUTONEG);
10378         }
10379
10380         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10381             tg3_asic_rev(tp) == ASIC_REV_5714) {
10382                 u32 tmp;
10383
10384                 tmp = tr32(SERDES_RX_CTRL);
10385                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
10386                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
10387                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
10388                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
10389         }
10390
10391         if (!tg3_flag(tp, USE_PHYLIB)) {
10392                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10393                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
10394
10395                 err = tg3_setup_phy(tp, false);
10396                 if (err)
10397                         return err;
10398
10399                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10400                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
10401                         u32 tmp;
10402
10403                         /* Clear CRC stats. */
10404                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
10405                                 tg3_writephy(tp, MII_TG3_TEST1,
10406                                              tmp | MII_TG3_TEST1_CRC_EN);
10407                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
10408                         }
10409                 }
10410         }
10411
10412         __tg3_set_rx_mode(tp->dev);
10413
10414         /* Initialize receive rules. */
10415         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
10416         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
10417         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
10418         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
10419
10420         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
10421                 limit = 8;
10422         else
10423                 limit = 16;
10424         if (tg3_flag(tp, ENABLE_ASF))
10425                 limit -= 4;
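        /* Clear the unused receive rules from the top down.  Each case
         * below intentionally falls through to the next lower rule.
         */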
10426         switch (limit) {
10427         case 16:
10428                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
10429         case 15:
10430                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
10431         case 14:
10432                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
10433         case 13:
10434                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
10435         case 12:
10436                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
10437         case 11:
10438                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
10439         case 10:
10440                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
10441         case 9:
10442                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
10443         case 8:
10444                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
10445         case 7:
10446                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
10447         case 6:
10448                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
10449         case 5:
10450                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
10451         case 4:
10452                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
10453         case 3:
10454                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
10455         case 2:
10456         case 1:
10457
10458         default:
10459                 break;
10460         }
10461
10462         if (tg3_flag(tp, ENABLE_APE))
10463                 /* Write our heartbeat update interval to APE. */
10464                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
10465                                 APE_HOST_HEARTBEAT_INT_DISABLE);
10466
10467         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
10468
10469         return 0;
10470 }
10471
10472 /* Called at device open time to get the chip ready for
10473  * packet processing.  Invoked with tp->lock held.
10474  */
10475 static int tg3_init_hw(struct tg3 *tp, bool reset_phy)
10476 {
10477         /* Chip may have been just powered on. If so, the boot code may still
10478          * be running initialization. Wait for it to finish to avoid races in
10479          * accessing the hardware.
10480          */
10481         tg3_enable_register_access(tp);
10482         tg3_poll_fw(tp);
10483
10484         tg3_switch_clocks(tp);
10485
10486         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
10487
10488         return tg3_reset_hw(tp, reset_phy);
10489 }
10490
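/* Read the sensor data records out of the APE scratchpad, zeroing any
 * record that lacks the signature magic or the ACTIVE flag.
 */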
10491 static void tg3_sd_scan_scratchpad(struct tg3 *tp, struct tg3_ocir *ocir)
10492 {
10493         int i;
10494
10495         for (i = 0; i < TG3_SD_NUM_RECS; i++, ocir++) {
10496                 u32 off = i * TG3_OCIR_LEN, len = TG3_OCIR_LEN;
10497
10498                 tg3_ape_scratchpad_read(tp, (u32 *) ocir, off, len);
10499                 off += len;
10500
10501                 if (ocir->signature != TG3_OCIR_SIG_MAGIC ||
10502                     !(ocir->version_flags & TG3_OCIR_FLAG_ACTIVE))
10503                         memset(ocir, 0, TG3_OCIR_LEN);
10504         }
10505 }
10506
10507 /* sysfs attributes for hwmon */
10508 static ssize_t tg3_show_temp(struct device *dev,
10509                              struct device_attribute *devattr, char *buf)
10510 {
10511         struct pci_dev *pdev = to_pci_dev(dev);
10512         struct net_device *netdev = pci_get_drvdata(pdev);
10513         struct tg3 *tp = netdev_priv(netdev);
10514         struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
10515         u32 temperature;
10516
10517         spin_lock_bh(&tp->lock);
10518         tg3_ape_scratchpad_read(tp, &temperature, attr->index,
10519                                 sizeof(temperature));
10520         spin_unlock_bh(&tp->lock);
10521         return sprintf(buf, "%u\n", temperature);
10522 }
10523
10524
10525 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tg3_show_temp, NULL,
10526                           TG3_TEMP_SENSOR_OFFSET);
10527 static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, tg3_show_temp, NULL,
10528                           TG3_TEMP_CAUTION_OFFSET);
10529 static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, tg3_show_temp, NULL,
10530                           TG3_TEMP_MAX_OFFSET);
10531
10532 static struct attribute *tg3_attributes[] = {
10533         &sensor_dev_attr_temp1_input.dev_attr.attr,
10534         &sensor_dev_attr_temp1_crit.dev_attr.attr,
10535         &sensor_dev_attr_temp1_max.dev_attr.attr,
10536         NULL
10537 };
10538
10539 static const struct attribute_group tg3_group = {
10540         .attrs = tg3_attributes,
10541 };
10542
10543 static void tg3_hwmon_close(struct tg3 *tp)
10544 {
10545         if (tp->hwmon_dev) {
10546                 hwmon_device_unregister(tp->hwmon_dev);
10547                 tp->hwmon_dev = NULL;
10548                 sysfs_remove_group(&tp->pdev->dev.kobj, &tg3_group);
10549         }
10550 }
10551
10552 static void tg3_hwmon_open(struct tg3 *tp)
10553 {
10554         int i, err;
10555         u32 size = 0;
10556         struct pci_dev *pdev = tp->pdev;
10557         struct tg3_ocir ocirs[TG3_SD_NUM_RECS];
10558
10559         tg3_sd_scan_scratchpad(tp, ocirs);
10560
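        /* Add up the reported record sizes; if the APE exposed no
         * sensor data at all, skip hwmon registration entirely.
         */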
10561         for (i = 0; i < TG3_SD_NUM_RECS; i++) {
10562                 if (!ocirs[i].src_data_length)
10563                         continue;
10564
10565                 size += ocirs[i].src_hdr_length;
10566                 size += ocirs[i].src_data_length;
10567         }
10568
10569         if (!size)
10570                 return;
10571
10572         /* Register hwmon sysfs hooks */
10573         err = sysfs_create_group(&pdev->dev.kobj, &tg3_group);
10574         if (err) {
10575                 dev_err(&pdev->dev, "Cannot create sysfs group, aborting\n");
10576                 return;
10577         }
10578
10579         tp->hwmon_dev = hwmon_device_register(&pdev->dev);
10580         if (IS_ERR(tp->hwmon_dev)) {
10581                 tp->hwmon_dev = NULL;
10582                 dev_err(&pdev->dev, "Cannot register hwmon device, aborting\n");
10583                 sysfs_remove_group(&pdev->dev.kobj, &tg3_group);
10584         }
10585 }
10586
10587
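/* Accumulate a 32-bit hardware counter into a 64-bit low/high stat,
 * bumping the high word whenever the low word addition wraps.
 */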
10588 #define TG3_STAT_ADD32(PSTAT, REG) \
10589 do {    u32 __val = tr32(REG); \
10590         (PSTAT)->low += __val; \
10591         if ((PSTAT)->low < __val) \
10592                 (PSTAT)->high += 1; \
10593 } while (0)
10594
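/* Called from the once-per-second slice of tg3_timer to fold the MAC's
 * 32-bit statistics counters into the 64-bit software copies.
 */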
10595 static void tg3_periodic_fetch_stats(struct tg3 *tp)
10596 {
10597         struct tg3_hw_stats *sp = tp->hw_stats;
10598
10599         if (!tp->link_up)
10600                 return;
10601
10602         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
10603         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
10604         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
10605         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
10606         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
10607         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
10608         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
10609         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
10610         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
10611         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
10612         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
10613         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
10614         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
10615         if (unlikely(tg3_flag(tp, 5719_5720_RDMA_BUG) &&
10616                      (sp->tx_ucast_packets.low + sp->tx_mcast_packets.low +
10617                       sp->tx_bcast_packets.low) > TG3_NUM_RDMA_CHANNELS)) {
10618                 u32 val;
10619
10620                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
10621                 val &= ~tg3_lso_rd_dma_workaround_bit(tp);
10622                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val);
10623                 tg3_flag_clear(tp, 5719_5720_RDMA_BUG);
10624         }
10625
10626         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
10627         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
10628         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
10629         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
10630         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
10631         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
10632         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
10633         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
10634         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
10635         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
10636         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
10637         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
10638         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
10639         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
10640
10641         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
10642         if (tg3_asic_rev(tp) != ASIC_REV_5717 &&
10643             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0 &&
10644             tg3_chip_rev_id(tp) != CHIPREV_ID_5720_A0) {
10645                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
10646         } else {
10647                 u32 val = tr32(HOSTCC_FLOW_ATTN);
10648                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
10649                 if (val) {
10650                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
10651                         sp->rx_discards.low += val;
10652                         if (sp->rx_discards.low < val)
10653                                 sp->rx_discards.high += 1;
10654                 }
10655                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
10656         }
10657         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
10658 }
10659
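/* Workaround for missed MSIs: if a NAPI context has had work pending
 * across two consecutive timer ticks with no movement in its consumer
 * indices, invoke the MSI handler by hand.
 */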
10660 static void tg3_chk_missed_msi(struct tg3 *tp)
10661 {
10662         u32 i;
10663
10664         for (i = 0; i < tp->irq_cnt; i++) {
10665                 struct tg3_napi *tnapi = &tp->napi[i];
10666
10667                 if (tg3_has_work(tnapi)) {
10668                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
10669                             tnapi->last_tx_cons == tnapi->tx_cons) {
10670                                 if (tnapi->chk_msi_cnt < 1) {
10671                                         tnapi->chk_msi_cnt++;
10672                                         return;
10673                                 }
10674                                 tg3_msi(0, tnapi);
10675                         }
10676                 }
10677                 tnapi->chk_msi_cnt = 0;
10678                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
10679                 tnapi->last_tx_cons = tnapi->tx_cons;
10680         }
10681 }
10682
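/* Periodic housekeeping timer: checks for missed MSIs, kicks the
 * race-prone non-tagged status protocol, and handles the slower one-
 * and two-second work (stats fetch, link polling, ASF heartbeat).
 */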
10683 static void tg3_timer(unsigned long __opaque)
10684 {
10685         struct tg3 *tp = (struct tg3 *) __opaque;
10686
10687         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
10688                 goto restart_timer;
10689
10690         spin_lock(&tp->lock);
10691
10692         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
10693             tg3_flag(tp, 57765_CLASS))
10694                 tg3_chk_missed_msi(tp);
10695
10696         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
10697                 /* BCM4785: Flush posted writes from GbE to host memory. */
10698                 tr32(HOSTCC_MODE);
10699         }
10700
10701         if (!tg3_flag(tp, TAGGED_STATUS)) {
10702                 /* All of this garbage is because, when using non-tagged
10703                  * IRQ status, the mailbox/status_block protocol the chip
10704                  * uses with the CPU is race prone.
10705                  */
10706                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
10707                         tw32(GRC_LOCAL_CTRL,
10708                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
10709                 } else {
10710                         tw32(HOSTCC_MODE, tp->coalesce_mode |
10711                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
10712                 }
10713
10714                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
10715                         spin_unlock(&tp->lock);
10716                         tg3_reset_task_schedule(tp);
10717                         goto restart_timer;
10718                 }
10719         }
10720
10721         /* This part only runs once per second. */
10722         if (!--tp->timer_counter) {
10723                 if (tg3_flag(tp, 5705_PLUS))
10724                         tg3_periodic_fetch_stats(tp);
10725
10726                 if (tp->setlpicnt && !--tp->setlpicnt)
10727                         tg3_phy_eee_enable(tp);
10728
10729                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
10730                         u32 mac_stat;
10731                         int phy_event;
10732
10733                         mac_stat = tr32(MAC_STATUS);
10734
10735                         phy_event = 0;
10736                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
10737                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
10738                                         phy_event = 1;
10739                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
10740                                 phy_event = 1;
10741
10742                         if (phy_event)
10743                                 tg3_setup_phy(tp, false);
10744                 } else if (tg3_flag(tp, POLL_SERDES)) {
10745                         u32 mac_stat = tr32(MAC_STATUS);
10746                         int need_setup = 0;
10747
10748                         if (tp->link_up &&
10749                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
10750                                 need_setup = 1;
10751                         }
10752                         if (!tp->link_up &&
10753                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
10754                                          MAC_STATUS_SIGNAL_DET))) {
10755                                 need_setup = 1;
10756                         }
10757                         if (need_setup) {
10758                                 if (!tp->serdes_counter) {
10759                                         tw32_f(MAC_MODE,
10760                                              (tp->mac_mode &
10761                                               ~MAC_MODE_PORT_MODE_MASK));
10762                                         udelay(40);
10763                                         tw32_f(MAC_MODE, tp->mac_mode);
10764                                         udelay(40);
10765                                 }
10766                                 tg3_setup_phy(tp, false);
10767                         }
10768                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
10769                            tg3_flag(tp, 5780_CLASS)) {
10770                         tg3_serdes_parallel_detect(tp);
10771                 }
10772
10773                 tp->timer_counter = tp->timer_multiplier;
10774         }
10775
10776         /* Heartbeat is only sent once every 2 seconds.
10777          *
10778          * The heartbeat is to tell the ASF firmware that the host
10779          * driver is still alive.  In the event that the OS crashes,
10780          * ASF needs to reset the hardware to free up the FIFO space
10781          * that may be filled with rx packets destined for the host.
10782          * If the FIFO is full, ASF will no longer function properly.
10783          *
10784          * Unintended resets have been reported on real time kernels
10785          * where the timer doesn't run on time.  Netpoll will also have
10786          * the same problem.
10787          *
10788          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
10789          * to check the ring condition when the heartbeat is expiring
10790          * before doing the reset.  This will prevent most unintended
10791          * resets.
10792          */
10793         if (!--tp->asf_counter) {
10794                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
10795                         tg3_wait_for_event_ack(tp);
10796
10797                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
10798                                       FWCMD_NICDRV_ALIVE3);
10799                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
10800                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
10801                                       TG3_FW_UPDATE_TIMEOUT_SEC);
10802
10803                         tg3_generate_fw_event(tp);
10804                 }
10805                 tp->asf_counter = tp->asf_multiplier;
10806         }
10807
10808         spin_unlock(&tp->lock);
10809
10810 restart_timer:
10811         tp->timer.expires = jiffies + tp->timer_offset;
10812         add_timer(&tp->timer);
10813 }
10814
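/* Tagged-status chips get by with a 1 Hz timer; everything else, plus
 * the 5717/57765 missed-MSI check above, needs the 10 Hz cadence.
 */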
10815 static void tg3_timer_init(struct tg3 *tp)
10816 {
10817         if (tg3_flag(tp, TAGGED_STATUS) &&
10818             tg3_asic_rev(tp) != ASIC_REV_5717 &&
10819             !tg3_flag(tp, 57765_CLASS))
10820                 tp->timer_offset = HZ;
10821         else
10822                 tp->timer_offset = HZ / 10;
10823
10824         BUG_ON(tp->timer_offset > HZ);
10825
10826         tp->timer_multiplier = (HZ / tp->timer_offset);
10827         tp->asf_multiplier = (HZ / tp->timer_offset) *
10828                              TG3_FW_UPDATE_FREQ_SEC;
10829
10830         init_timer(&tp->timer);
10831         tp->timer.data = (unsigned long) tp;
10832         tp->timer.function = tg3_timer;
10833 }
10834
10835 static void tg3_timer_start(struct tg3 *tp)
10836 {
10837         tp->asf_counter   = tp->asf_multiplier;
10838         tp->timer_counter = tp->timer_multiplier;
10839
10840         tp->timer.expires = jiffies + tp->timer_offset;
10841         add_timer(&tp->timer);
10842 }
10843
10844 static void tg3_timer_stop(struct tg3 *tp)
10845 {
10846         del_timer_sync(&tp->timer);
10847 }
10848
10849 /* Restart hardware after configuration changes, self-test, etc.
10850  * Invoked with tp->lock held.
10851  */
10852 static int tg3_restart_hw(struct tg3 *tp, bool reset_phy)
10853         __releases(tp->lock)
10854         __acquires(tp->lock)
10855 {
10856         int err;
10857
10858         err = tg3_init_hw(tp, reset_phy);
10859         if (err) {
10860                 netdev_err(tp->dev,
10861                            "Failed to re-initialize device, aborting\n");
10862                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10863                 tg3_full_unlock(tp);
10864                 tg3_timer_stop(tp);
10865                 tp->irq_sync = 0;
10866                 tg3_napi_enable(tp);
10867                 dev_close(tp->dev);
10868                 tg3_full_lock(tp, 0);
10869         }
10870         return err;
10871 }
10872
10873 static void tg3_reset_task(struct work_struct *work)
10874 {
10875         struct tg3 *tp = container_of(work, struct tg3, reset_task);
10876         int err;
10877
10878         tg3_full_lock(tp, 0);
10879
10880         if (!netif_running(tp->dev)) {
10881                 tg3_flag_clear(tp, RESET_TASK_PENDING);
10882                 tg3_full_unlock(tp);
10883                 return;
10884         }
10885
10886         tg3_full_unlock(tp);
10887
10888         tg3_phy_stop(tp);
10889
10890         tg3_netif_stop(tp);
10891
10892         tg3_full_lock(tp, 1);
10893
10894         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
10895                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
10896                 tp->write32_rx_mbox = tg3_write_flush_reg32;
10897                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
10898                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
10899         }
10900
10901         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
10902         err = tg3_init_hw(tp, true);
10903         if (err)
10904                 goto out;
10905
10906         tg3_netif_start(tp);
10907
10908 out:
10909         tg3_full_unlock(tp);
10910
10911         if (!err)
10912                 tg3_phy_start(tp);
10913
10914         tg3_flag_clear(tp, RESET_TASK_PENDING);
10915 }
10916
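/* Install the handler for one vector: plain or one-shot MSI handlers
 * when MSI/MSI-X is enabled, otherwise a shared INTx handler in its
 * tagged- or non-tagged-status flavor.
 */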
10917 static int tg3_request_irq(struct tg3 *tp, int irq_num)
10918 {
10919         irq_handler_t fn;
10920         unsigned long flags;
10921         char *name;
10922         struct tg3_napi *tnapi = &tp->napi[irq_num];
10923
10924         if (tp->irq_cnt == 1)
10925                 name = tp->dev->name;
10926         else {
10927                 name = &tnapi->irq_lbl[0];
10928                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
10929                 name[IFNAMSIZ-1] = 0;
10930         }
10931
10932         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
10933                 fn = tg3_msi;
10934                 if (tg3_flag(tp, 1SHOT_MSI))
10935                         fn = tg3_msi_1shot;
10936                 flags = 0;
10937         } else {
10938                 fn = tg3_interrupt;
10939                 if (tg3_flag(tp, TAGGED_STATUS))
10940                         fn = tg3_interrupt_tagged;
10941                 flags = IRQF_SHARED;
10942         }
10943
10944         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
10945 }
10946
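/* End-to-end interrupt delivery test: install a test ISR, force a
 * coalescing-engine "now" event, and poll the interrupt mailbox for
 * roughly 50 ms before declaring failure.
 */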
10947 static int tg3_test_interrupt(struct tg3 *tp)
10948 {
10949         struct tg3_napi *tnapi = &tp->napi[0];
10950         struct net_device *dev = tp->dev;
10951         int err, i, intr_ok = 0;
10952         u32 val;
10953
10954         if (!netif_running(dev))
10955                 return -ENODEV;
10956
10957         tg3_disable_ints(tp);
10958
10959         free_irq(tnapi->irq_vec, tnapi);
10960
10961         /*
10962          * Turn off MSI one-shot mode.  Otherwise this test has no
10963          * way to observe whether the interrupt was delivered.
10964          */
10965         if (tg3_flag(tp, 57765_PLUS)) {
10966                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
10967                 tw32(MSGINT_MODE, val);
10968         }
10969
10970         err = request_irq(tnapi->irq_vec, tg3_test_isr,
10971                           IRQF_SHARED, dev->name, tnapi);
10972         if (err)
10973                 return err;
10974
10975         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
10976         tg3_enable_ints(tp);
10977
10978         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10979                tnapi->coal_now);
10980
10981         for (i = 0; i < 5; i++) {
10982                 u32 int_mbox, misc_host_ctrl;
10983
10984                 int_mbox = tr32_mailbox(tnapi->int_mbox);
10985                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
10986
10987                 if ((int_mbox != 0) ||
10988                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
10989                         intr_ok = 1;
10990                         break;
10991                 }
10992
10993                 if (tg3_flag(tp, 57765_PLUS) &&
10994                     tnapi->hw_status->status_tag != tnapi->last_tag)
10995                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
10996
10997                 msleep(10);
10998         }
10999
11000         tg3_disable_ints(tp);
11001
11002         free_irq(tnapi->irq_vec, tnapi);
11003
11004         err = tg3_request_irq(tp, 0);
11005
11006         if (err)
11007                 return err;
11008
11009         if (intr_ok) {
11010                 /* Reenable MSI one shot mode. */
11011                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
11012                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
11013                         tw32(MSGINT_MODE, val);
11014                 }
11015                 return 0;
11016         }
11017
11018         return -EIO;
11019 }
11020
11021 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
11022  * INTx mode is successfully restored.
11023  */
11024 static int tg3_test_msi(struct tg3 *tp)
11025 {
11026         int err;
11027         u16 pci_cmd;
11028
11029         if (!tg3_flag(tp, USING_MSI))
11030                 return 0;
11031
11032         /* Turn off SERR reporting in case MSI terminates with Master
11033          * Abort.
11034          */
11035         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
11036         pci_write_config_word(tp->pdev, PCI_COMMAND,
11037                               pci_cmd & ~PCI_COMMAND_SERR);
11038
11039         err = tg3_test_interrupt(tp);
11040
11041         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
11042
11043         if (!err)
11044                 return 0;
11045
11046         /* other failures */
11047         if (err != -EIO)
11048                 return err;
11049
11050         /* MSI test failed, go back to INTx mode */
11051         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
11052                     "to INTx mode. Please report this failure to the PCI "
11053                     "maintainer and include system chipset information\n");
11054
11055         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11056
11057         pci_disable_msi(tp->pdev);
11058
11059         tg3_flag_clear(tp, USING_MSI);
11060         tp->napi[0].irq_vec = tp->pdev->irq;
11061
11062         err = tg3_request_irq(tp, 0);
11063         if (err)
11064                 return err;
11065
11066         /* Need to reset the chip because the MSI cycle may have terminated
11067          * with Master Abort.
11068          */
11069         tg3_full_lock(tp, 1);
11070
11071         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11072         err = tg3_init_hw(tp, true);
11073
11074         tg3_full_unlock(tp);
11075
11076         if (err)
11077                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
11078
11079         return err;
11080 }
11081
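/* Pull the firmware image in from userspace and sanity check the
 * length advertised in its header against the blob we were given.
 */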
11082 static int tg3_request_firmware(struct tg3 *tp)
11083 {
11084         const struct tg3_firmware_hdr *fw_hdr;
11085
11086         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
11087                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
11088                            tp->fw_needed);
11089                 return -ENOENT;
11090         }
11091
11092         fw_hdr = (struct tg3_firmware_hdr *)tp->fw->data;
11093
11094         /* Firmware blob starts with version numbers, followed by
11095          * start address and _full_ length including BSS sections
11096          * (which must be longer than the actual data, of course).
11097          */
11098
11099         tp->fw_len = be32_to_cpu(fw_hdr->len);  /* includes bss */
11100         if (tp->fw_len < (tp->fw->size - TG3_FW_HDR_LEN)) {
11101                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
11102                            tp->fw_len, tp->fw_needed);
11103                 release_firmware(tp->fw);
11104                 tp->fw = NULL;
11105                 return -EINVAL;
11106         }
11107
11108         /* We no longer need firmware; we have it. */
11109         tp->fw_needed = NULL;
11110         return 0;
11111 }
11112
11113 static u32 tg3_irq_count(struct tg3 *tp)
11114 {
11115         u32 irq_cnt = max(tp->rxq_cnt, tp->txq_cnt);
11116
11117         if (irq_cnt > 1) {
11118                 /* We want as many rx rings enabled as there are cpus.
11119                  * In multiqueue MSI-X mode, the first MSI-X vector
11120                  * only deals with link interrupts, etc, so we add
11121                  * one to the number of vectors we are requesting.
11122                  */
11123                 irq_cnt = min_t(unsigned, irq_cnt + 1, tp->irq_max);
11124         }
11125
11126         return irq_cnt;
11127 }
11128
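/* Size and request the MSI-X vector set.  If the host grants fewer
 * vectors than requested, retry with the granted count and shrink the
 * rx/tx queue counts to match.
 */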
11129 static bool tg3_enable_msix(struct tg3 *tp)
11130 {
11131         int i, rc;
11132         struct msix_entry msix_ent[TG3_IRQ_MAX_VECS];
11133
11134         tp->txq_cnt = tp->txq_req;
11135         tp->rxq_cnt = tp->rxq_req;
11136         if (!tp->rxq_cnt)
11137                 tp->rxq_cnt = netif_get_num_default_rss_queues();
11138         if (tp->rxq_cnt > tp->rxq_max)
11139                 tp->rxq_cnt = tp->rxq_max;
11140
11141         /* Disable multiple TX rings by default.  Simple round-robin hardware
11142          * scheduling of the TX rings can cause starvation of rings with
11143          * small packets when other rings have TSO or jumbo packets.
11144          */
11145         if (!tp->txq_req)
11146                 tp->txq_cnt = 1;
11147
11148         tp->irq_cnt = tg3_irq_count(tp);
11149
11150         for (i = 0; i < tp->irq_max; i++) {
11151                 msix_ent[i].entry  = i;
11152                 msix_ent[i].vector = 0;
11153         }
11154
11155         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
11156         if (rc < 0) {
11157                 return false;
11158         } else if (rc != 0) {
11159                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
11160                         return false;
11161                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
11162                               tp->irq_cnt, rc);
11163                 tp->irq_cnt = rc;
11164                 tp->rxq_cnt = max(rc - 1, 1);
11165                 if (tp->txq_cnt)
11166                         tp->txq_cnt = min(tp->rxq_cnt, tp->txq_max);
11167         }
11168
11169         for (i = 0; i < tp->irq_max; i++)
11170                 tp->napi[i].irq_vec = msix_ent[i].vector;
11171
11172         if (netif_set_real_num_rx_queues(tp->dev, tp->rxq_cnt)) {
11173                 pci_disable_msix(tp->pdev);
11174                 return false;
11175         }
11176
11177         if (tp->irq_cnt == 1)
11178                 return true;
11179
11180         tg3_flag_set(tp, ENABLE_RSS);
11181
11182         if (tp->txq_cnt > 1)
11183                 tg3_flag_set(tp, ENABLE_TSS);
11184
11185         netif_set_real_num_tx_queues(tp->dev, tp->txq_cnt);
11186
11187         return true;
11188 }
11189
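/* Pick the interrupt mode in order of preference: MSI-X (multivector),
 * then MSI, then legacy INTx.  Chips that support MSI are expected to
 * also support tagged status.
 */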
11190 static void tg3_ints_init(struct tg3 *tp)
11191 {
11192         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
11193             !tg3_flag(tp, TAGGED_STATUS)) {
11194                 /* All MSI supporting chips should support tagged
11195                  * status; warn and fall back to INTx if not.
11196                  */
11197                 netdev_warn(tp->dev,
11198                             "MSI without TAGGED_STATUS? Not using MSI\n");
11199                 goto defcfg;
11200         }
11201
11202         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
11203                 tg3_flag_set(tp, USING_MSIX);
11204         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
11205                 tg3_flag_set(tp, USING_MSI);
11206
11207         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
11208                 u32 msi_mode = tr32(MSGINT_MODE);
11209                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
11210                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
11211                 if (!tg3_flag(tp, 1SHOT_MSI))
11212                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
11213                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
11214         }
11215 defcfg:
11216         if (!tg3_flag(tp, USING_MSIX)) {
11217                 tp->irq_cnt = 1;
11218                 tp->napi[0].irq_vec = tp->pdev->irq;
11219         }
11220
11221         if (tp->irq_cnt == 1) {
11222                 tp->txq_cnt = 1;
11223                 tp->rxq_cnt = 1;
11224                 netif_set_real_num_tx_queues(tp->dev, 1);
11225                 netif_set_real_num_rx_queues(tp->dev, 1);
11226         }
11227 }
11228
11229 static void tg3_ints_fini(struct tg3 *tp)
11230 {
11231         if (tg3_flag(tp, USING_MSIX))
11232                 pci_disable_msix(tp->pdev);
11233         else if (tg3_flag(tp, USING_MSI))
11234                 pci_disable_msi(tp->pdev);
11235         tg3_flag_clear(tp, USING_MSI);
11236         tg3_flag_clear(tp, USING_MSIX);
11237         tg3_flag_clear(tp, ENABLE_RSS);
11238         tg3_flag_clear(tp, ENABLE_TSS);
11239 }
11240
11241 static int tg3_start(struct tg3 *tp, bool reset_phy, bool test_irq,
11242                      bool init)
11243 {
11244         struct net_device *dev = tp->dev;
11245         int i, err;
11246
11247         /*
11248          * Setup interrupts first so we know how
11249          * many NAPI resources to allocate
11250          */
11251         tg3_ints_init(tp);
11252
11253         tg3_rss_check_indir_tbl(tp);
11254
11255         /* The placement of this call is tied
11256          * to the setup and use of Host TX descriptors.
11257          */
11258         err = tg3_alloc_consistent(tp);
11259         if (err)
11260                 goto err_out1;
11261
11262         tg3_napi_init(tp);
11263
11264         tg3_napi_enable(tp);
11265
11266         for (i = 0; i < tp->irq_cnt; i++) {
11267                 struct tg3_napi *tnapi = &tp->napi[i];
11268                 err = tg3_request_irq(tp, i);
11269                 if (err) {
11270                         for (i--; i >= 0; i--) {
11271                                 tnapi = &tp->napi[i];
11272                                 free_irq(tnapi->irq_vec, tnapi);
11273                         }
11274                         goto err_out2;
11275                 }
11276         }
11277
11278         tg3_full_lock(tp, 0);
11279
11280         err = tg3_init_hw(tp, reset_phy);
11281         if (err) {
11282                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11283                 tg3_free_rings(tp);
11284         }
11285
11286         tg3_full_unlock(tp);
11287
11288         if (err)
11289                 goto err_out3;
11290
11291         if (test_irq && tg3_flag(tp, USING_MSI)) {
11292                 err = tg3_test_msi(tp);
11293
11294                 if (err) {
11295                         tg3_full_lock(tp, 0);
11296                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11297                         tg3_free_rings(tp);
11298                         tg3_full_unlock(tp);
11299
11300                         goto err_out2;
11301                 }
11302
11303                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
11304                         u32 val = tr32(PCIE_TRANSACTION_CFG);
11305
11306                         tw32(PCIE_TRANSACTION_CFG,
11307                              val | PCIE_TRANS_CFG_1SHOT_MSI);
11308                 }
11309         }
11310
11311         tg3_phy_start(tp);
11312
11313         tg3_hwmon_open(tp);
11314
11315         tg3_full_lock(tp, 0);
11316
11317         tg3_timer_start(tp);
11318         tg3_flag_set(tp, INIT_COMPLETE);
11319         tg3_enable_ints(tp);
11320
11321         if (init)
11322                 tg3_ptp_init(tp);
11323         else
11324                 tg3_ptp_resume(tp);
11325
11326
11327         tg3_full_unlock(tp);
11328
11329         netif_tx_start_all_queues(dev);
11330
11331         /*
11332          * Reset the loopback feature if it was turned on while the device
11333          * was down, to make sure that it's installed properly now.
11334          */
11335         if (dev->features & NETIF_F_LOOPBACK)
11336                 tg3_set_loopback(dev, dev->features);
11337
11338         return 0;
11339
11340 err_out3:
11341         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11342                 struct tg3_napi *tnapi = &tp->napi[i];
11343                 free_irq(tnapi->irq_vec, tnapi);
11344         }
11345
11346 err_out2:
11347         tg3_napi_disable(tp);
11348         tg3_napi_fini(tp);
11349         tg3_free_consistent(tp);
11350
11351 err_out1:
11352         tg3_ints_fini(tp);
11353
11354         return err;
11355 }
11356
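/* Tear the device down in the reverse order of tg3_start(): stop the
 * timer and NAPI, halt the chip, then free IRQs, rings and DMA memory.
 */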
11357 static void tg3_stop(struct tg3 *tp)
11358 {
11359         int i;
11360
11361         tg3_reset_task_cancel(tp);
11362         tg3_netif_stop(tp);
11363
11364         tg3_timer_stop(tp);
11365
11366         tg3_hwmon_close(tp);
11367
11368         tg3_phy_stop(tp);
11369
11370         tg3_full_lock(tp, 1);
11371
11372         tg3_disable_ints(tp);
11373
11374         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
11375         tg3_free_rings(tp);
11376         tg3_flag_clear(tp, INIT_COMPLETE);
11377
11378         tg3_full_unlock(tp);
11379
11380         for (i = tp->irq_cnt - 1; i >= 0; i--) {
11381                 struct tg3_napi *tnapi = &tp->napi[i];
11382                 free_irq(tnapi->irq_vec, tnapi);
11383         }
11384
11385         tg3_ints_fini(tp);
11386
11387         tg3_napi_fini(tp);
11388
11389         tg3_free_consistent(tp);
11390 }
11391
11392 static int tg3_open(struct net_device *dev)
11393 {
11394         struct tg3 *tp = netdev_priv(dev);
11395         int err;
11396
11397         if (tp->fw_needed) {
11398                 err = tg3_request_firmware(tp);
11399                 if (tg3_asic_rev(tp) == ASIC_REV_57766) {
11400                         if (err) {
11401                                 netdev_warn(tp->dev, "EEE capability disabled\n");
11402                                 tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11403                         } else if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP)) {
11404                                 netdev_warn(tp->dev, "EEE capability restored\n");
11405                                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
11406                         }
11407                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0) {
11408                         if (err)
11409                                 return err;
11410                 } else if (err) {
11411                         netdev_warn(tp->dev, "TSO capability disabled\n");
11412                         tg3_flag_clear(tp, TSO_CAPABLE);
11413                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
11414                         netdev_notice(tp->dev, "TSO capability restored\n");
11415                         tg3_flag_set(tp, TSO_CAPABLE);
11416                 }
11417         }
11418
11419         tg3_carrier_off(tp);
11420
11421         err = tg3_power_up(tp);
11422         if (err)
11423                 return err;
11424
11425         tg3_full_lock(tp, 0);
11426
11427         tg3_disable_ints(tp);
11428         tg3_flag_clear(tp, INIT_COMPLETE);
11429
11430         tg3_full_unlock(tp);
11431
11432         err = tg3_start(tp,
11433                         !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN),
11434                         true, true);
11435         if (err) {
11436                 tg3_frob_aux_power(tp, false);
11437                 pci_set_power_state(tp->pdev, PCI_D3hot);
11438         }
11439
11440         if (tg3_flag(tp, PTP_CAPABLE)) {
11441                 tp->ptp_clock = ptp_clock_register(&tp->ptp_info,
11442                                                    &tp->pdev->dev);
11443                 if (IS_ERR(tp->ptp_clock))
11444                         tp->ptp_clock = NULL;
11445         }
11446
11447         return err;
11448 }
11449
11450 static int tg3_close(struct net_device *dev)
11451 {
11452         struct tg3 *tp = netdev_priv(dev);
11453
11454         tg3_ptp_fini(tp);
11455
11456         tg3_stop(tp);
11457
11458         /* Clear stats across close / open calls */
11459         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
11460         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
11461
11462         tg3_power_down(tp);
11463
11464         tg3_carrier_off(tp);
11465
11466         return 0;
11467 }
11468
11469 static inline u64 get_stat64(tg3_stat64_t *val)
11470 {
11471         return ((u64)val->high << 32) | ((u64)val->low);
11472 }
11473
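/* On 5700/5701 copper devices the CRC error count is read from the PHY
 * TEST1 receive-error counter and accumulated in software; all other
 * devices use the MAC's rx_fcs_errors hardware statistic.
 */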
11474 static u64 tg3_calc_crc_errors(struct tg3 *tp)
11475 {
11476         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11477
11478         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11479             (tg3_asic_rev(tp) == ASIC_REV_5700 ||
11480              tg3_asic_rev(tp) == ASIC_REV_5701)) {
11481                 u32 val;
11482
11483                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
11484                         tg3_writephy(tp, MII_TG3_TEST1,
11485                                      val | MII_TG3_TEST1_CRC_EN);
11486                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
11487                 } else
11488                         val = 0;
11489
11490                 tp->phy_crc_errors += val;
11491
11492                 return tp->phy_crc_errors;
11493         }
11494
11495         return get_stat64(&hw_stats->rx_fcs_errors);
11496 }
11497
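/* Add the counter value saved before the last chip reset to the live
 * hardware counter, so ethtool statistics stay monotonic across resets.
 */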
11498 #define ESTAT_ADD(member) \
11499         estats->member =        old_estats->member + \
11500                                 get_stat64(&hw_stats->member)
11501
11502 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
11503 {
11504         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
11505         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11506
11507         ESTAT_ADD(rx_octets);
11508         ESTAT_ADD(rx_fragments);
11509         ESTAT_ADD(rx_ucast_packets);
11510         ESTAT_ADD(rx_mcast_packets);
11511         ESTAT_ADD(rx_bcast_packets);
11512         ESTAT_ADD(rx_fcs_errors);
11513         ESTAT_ADD(rx_align_errors);
11514         ESTAT_ADD(rx_xon_pause_rcvd);
11515         ESTAT_ADD(rx_xoff_pause_rcvd);
11516         ESTAT_ADD(rx_mac_ctrl_rcvd);
11517         ESTAT_ADD(rx_xoff_entered);
11518         ESTAT_ADD(rx_frame_too_long_errors);
11519         ESTAT_ADD(rx_jabbers);
11520         ESTAT_ADD(rx_undersize_packets);
11521         ESTAT_ADD(rx_in_length_errors);
11522         ESTAT_ADD(rx_out_length_errors);
11523         ESTAT_ADD(rx_64_or_less_octet_packets);
11524         ESTAT_ADD(rx_65_to_127_octet_packets);
11525         ESTAT_ADD(rx_128_to_255_octet_packets);
11526         ESTAT_ADD(rx_256_to_511_octet_packets);
11527         ESTAT_ADD(rx_512_to_1023_octet_packets);
11528         ESTAT_ADD(rx_1024_to_1522_octet_packets);
11529         ESTAT_ADD(rx_1523_to_2047_octet_packets);
11530         ESTAT_ADD(rx_2048_to_4095_octet_packets);
11531         ESTAT_ADD(rx_4096_to_8191_octet_packets);
11532         ESTAT_ADD(rx_8192_to_9022_octet_packets);
11533
11534         ESTAT_ADD(tx_octets);
11535         ESTAT_ADD(tx_collisions);
11536         ESTAT_ADD(tx_xon_sent);
11537         ESTAT_ADD(tx_xoff_sent);
11538         ESTAT_ADD(tx_flow_control);
11539         ESTAT_ADD(tx_mac_errors);
11540         ESTAT_ADD(tx_single_collisions);
11541         ESTAT_ADD(tx_mult_collisions);
11542         ESTAT_ADD(tx_deferred);
11543         ESTAT_ADD(tx_excessive_collisions);
11544         ESTAT_ADD(tx_late_collisions);
11545         ESTAT_ADD(tx_collide_2times);
11546         ESTAT_ADD(tx_collide_3times);
11547         ESTAT_ADD(tx_collide_4times);
11548         ESTAT_ADD(tx_collide_5times);
11549         ESTAT_ADD(tx_collide_6times);
11550         ESTAT_ADD(tx_collide_7times);
11551         ESTAT_ADD(tx_collide_8times);
11552         ESTAT_ADD(tx_collide_9times);
11553         ESTAT_ADD(tx_collide_10times);
11554         ESTAT_ADD(tx_collide_11times);
11555         ESTAT_ADD(tx_collide_12times);
11556         ESTAT_ADD(tx_collide_13times);
11557         ESTAT_ADD(tx_collide_14times);
11558         ESTAT_ADD(tx_collide_15times);
11559         ESTAT_ADD(tx_ucast_packets);
11560         ESTAT_ADD(tx_mcast_packets);
11561         ESTAT_ADD(tx_bcast_packets);
11562         ESTAT_ADD(tx_carrier_sense_errors);
11563         ESTAT_ADD(tx_discards);
11564         ESTAT_ADD(tx_errors);
11565
11566         ESTAT_ADD(dma_writeq_full);
11567         ESTAT_ADD(dma_write_prioq_full);
11568         ESTAT_ADD(rxbds_empty);
11569         ESTAT_ADD(rx_discards);
11570         ESTAT_ADD(rx_errors);
11571         ESTAT_ADD(rx_threshold_hit);
11572
11573         ESTAT_ADD(dma_readq_full);
11574         ESTAT_ADD(dma_read_prioq_full);
11575         ESTAT_ADD(tx_comp_queue_full);
11576
11577         ESTAT_ADD(ring_set_send_prod_index);
11578         ESTAT_ADD(ring_status_update);
11579         ESTAT_ADD(nic_irqs);
11580         ESTAT_ADD(nic_avoided_irqs);
11581         ESTAT_ADD(nic_tx_threshold_hit);
11582
11583         ESTAT_ADD(mbuf_lwm_thresh_hit);
11584 }
11585
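/* Fill the standard rtnl_link_stats64 counters from the hardware
 * statistics block, again folding in the pre-reset totals kept in
 * net_stats_prev.
 */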
11586 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
11587 {
11588         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
11589         struct tg3_hw_stats *hw_stats = tp->hw_stats;
11590
11591         stats->rx_packets = old_stats->rx_packets +
11592                 get_stat64(&hw_stats->rx_ucast_packets) +
11593                 get_stat64(&hw_stats->rx_mcast_packets) +
11594                 get_stat64(&hw_stats->rx_bcast_packets);
11595
11596         stats->tx_packets = old_stats->tx_packets +
11597                 get_stat64(&hw_stats->tx_ucast_packets) +
11598                 get_stat64(&hw_stats->tx_mcast_packets) +
11599                 get_stat64(&hw_stats->tx_bcast_packets);
11600
11601         stats->rx_bytes = old_stats->rx_bytes +
11602                 get_stat64(&hw_stats->rx_octets);
11603         stats->tx_bytes = old_stats->tx_bytes +
11604                 get_stat64(&hw_stats->tx_octets);
11605
11606         stats->rx_errors = old_stats->rx_errors +
11607                 get_stat64(&hw_stats->rx_errors);
11608         stats->tx_errors = old_stats->tx_errors +
11609                 get_stat64(&hw_stats->tx_errors) +
11610                 get_stat64(&hw_stats->tx_mac_errors) +
11611                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
11612                 get_stat64(&hw_stats->tx_discards);
11613
11614         stats->multicast = old_stats->multicast +
11615                 get_stat64(&hw_stats->rx_mcast_packets);
11616         stats->collisions = old_stats->collisions +
11617                 get_stat64(&hw_stats->tx_collisions);
11618
11619         stats->rx_length_errors = old_stats->rx_length_errors +
11620                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
11621                 get_stat64(&hw_stats->rx_undersize_packets);
11622
11623         stats->rx_over_errors = old_stats->rx_over_errors +
11624                 get_stat64(&hw_stats->rxbds_empty);
11625         stats->rx_frame_errors = old_stats->rx_frame_errors +
11626                 get_stat64(&hw_stats->rx_align_errors);
11627         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
11628                 get_stat64(&hw_stats->tx_discards);
11629         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
11630                 get_stat64(&hw_stats->tx_carrier_sense_errors);
11631
11632         stats->rx_crc_errors = old_stats->rx_crc_errors +
11633                 tg3_calc_crc_errors(tp);
11634
11635         stats->rx_missed_errors = old_stats->rx_missed_errors +
11636                 get_stat64(&hw_stats->rx_discards);
11637
11638         stats->rx_dropped = tp->rx_dropped;
11639         stats->tx_dropped = tp->tx_dropped;
11640 }
11641
11642 static int tg3_get_regs_len(struct net_device *dev)
11643 {
11644         return TG3_REG_BLK_SIZE;
11645 }
11646
11647 static void tg3_get_regs(struct net_device *dev,
11648                 struct ethtool_regs *regs, void *_p)
11649 {
11650         struct tg3 *tp = netdev_priv(dev);
11651
11652         regs->version = 0;
11653
11654         memset(_p, 0, TG3_REG_BLK_SIZE);
11655
11656         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11657                 return;
11658
11659         tg3_full_lock(tp, 0);
11660
11661         tg3_dump_legacy_regs(tp, (u32 *)_p);
11662
11663         tg3_full_unlock(tp);
11664 }
11665
11666 static int tg3_get_eeprom_len(struct net_device *dev)
11667 {
11668         struct tg3 *tp = netdev_priv(dev);
11669
11670         return tp->nvram_size;
11671 }
11672
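/* NVRAM is accessed 32 bits at a time, so a read is split into an
 * unaligned head, a run of whole 4-byte words, and an unaligned tail.
 */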
11673 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11674 {
11675         struct tg3 *tp = netdev_priv(dev);
11676         int ret;
11677         u8  *pd;
11678         u32 i, offset, len, b_offset, b_count;
11679         __be32 val;
11680
11681         if (tg3_flag(tp, NO_NVRAM))
11682                 return -EINVAL;
11683
11684         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11685                 return -EAGAIN;
11686
11687         offset = eeprom->offset;
11688         len = eeprom->len;
11689         eeprom->len = 0;
11690
11691         eeprom->magic = TG3_EEPROM_MAGIC;
11692
11693         if (offset & 3) {
11694                 /* adjustments to start on required 4 byte boundary */
11695                 b_offset = offset & 3;
11696                 b_count = 4 - b_offset;
11697                 if (b_count > len) {
11698                         /* i.e. offset=1 len=2 */
11699                         b_count = len;
11700                 }
11701                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
11702                 if (ret)
11703                         return ret;
11704                 memcpy(data, ((char *)&val) + b_offset, b_count);
11705                 len -= b_count;
11706                 offset += b_count;
11707                 eeprom->len += b_count;
11708         }
11709
11710         /* read bytes up to the last 4 byte boundary */
11711         pd = &data[eeprom->len];
11712         for (i = 0; i < (len - (len & 3)); i += 4) {
11713                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
11714                 if (ret) {
11715                         eeprom->len += i;
11716                         return ret;
11717                 }
11718                 memcpy(pd + i, &val, 4);
11719         }
11720         eeprom->len += i;
11721
11722         if (len & 3) {
11723                 /* read last bytes not ending on 4 byte boundary */
11724                 pd = &data[eeprom->len];
11725                 b_count = len & 3;
11726                 b_offset = offset + len - b_count;
11727                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
11728                 if (ret)
11729                         return ret;
11730                 memcpy(pd, &val, b_count);
11731                 eeprom->len += b_count;
11732         }
11733         return 0;
11734 }
11735
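/* Writes must also be 32-bit aligned: the partial head and tail words
 * are read back first and merged into a bounce buffer so that
 * tg3_nvram_write_block() only ever sees whole, aligned words.
 */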
11736 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
11737 {
11738         struct tg3 *tp = netdev_priv(dev);
11739         int ret;
11740         u32 offset, len, b_offset, odd_len;
11741         u8 *buf;
11742         __be32 start, end;
11743
11744         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11745                 return -EAGAIN;
11746
11747         if (tg3_flag(tp, NO_NVRAM) ||
11748             eeprom->magic != TG3_EEPROM_MAGIC)
11749                 return -EINVAL;
11750
11751         offset = eeprom->offset;
11752         len = eeprom->len;
11753
11754         if ((b_offset = (offset & 3))) {
11755                 /* adjustments to start on required 4 byte boundary */
11756                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
11757                 if (ret)
11758                         return ret;
11759                 len += b_offset;
11760                 offset &= ~3;
11761                 if (len < 4)
11762                         len = 4;
11763         }
11764
11765         odd_len = 0;
11766         if (len & 3) {
11767                 /* adjustments to end on required 4 byte boundary */
11768                 odd_len = 1;
11769                 len = (len + 3) & ~3;
11770                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
11771                 if (ret)
11772                         return ret;
11773         }
11774
11775         buf = data;
11776         if (b_offset || odd_len) {
11777                 buf = kmalloc(len, GFP_KERNEL);
11778                 if (!buf)
11779                         return -ENOMEM;
11780                 if (b_offset)
11781                         memcpy(buf, &start, 4);
11782                 if (odd_len)
11783                         memcpy(buf+len-4, &end, 4);
11784                 memcpy(buf + b_offset, data, eeprom->len);
11785         }
11786
11787         ret = tg3_nvram_write_block(tp, offset, len, buf);
11788
11789         if (buf != data)
11790                 kfree(buf);
11791
11792         return ret;
11793 }
11794
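/* Report link settings.  With phylib the query is delegated to the PHY
 * driver; otherwise the supported/advertised masks are derived from the
 * SerDes and 10/100-only PHY flags kept in tp->phy_flags.
 */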
11795 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11796 {
11797         struct tg3 *tp = netdev_priv(dev);
11798
11799         if (tg3_flag(tp, USE_PHYLIB)) {
11800                 struct phy_device *phydev;
11801                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11802                         return -EAGAIN;
11803                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11804                 return phy_ethtool_gset(phydev, cmd);
11805         }
11806
11807         cmd->supported = (SUPPORTED_Autoneg);
11808
11809         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11810                 cmd->supported |= (SUPPORTED_1000baseT_Half |
11811                                    SUPPORTED_1000baseT_Full);
11812
11813         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11814                 cmd->supported |= (SUPPORTED_100baseT_Half |
11815                                   SUPPORTED_100baseT_Full |
11816                                   SUPPORTED_10baseT_Half |
11817                                   SUPPORTED_10baseT_Full |
11818                                   SUPPORTED_TP);
11819                 cmd->port = PORT_TP;
11820         } else {
11821                 cmd->supported |= SUPPORTED_FIBRE;
11822                 cmd->port = PORT_FIBRE;
11823         }
11824
11825         cmd->advertising = tp->link_config.advertising;
11826         if (tg3_flag(tp, PAUSE_AUTONEG)) {
11827                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
11828                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11829                                 cmd->advertising |= ADVERTISED_Pause;
11830                         } else {
11831                                 cmd->advertising |= ADVERTISED_Pause |
11832                                                     ADVERTISED_Asym_Pause;
11833                         }
11834                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
11835                         cmd->advertising |= ADVERTISED_Asym_Pause;
11836                 }
11837         }
11838         if (netif_running(dev) && tp->link_up) {
11839                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
11840                 cmd->duplex = tp->link_config.active_duplex;
11841                 cmd->lp_advertising = tp->link_config.rmt_adv;
11842                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
11843                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
11844                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
11845                         else
11846                                 cmd->eth_tp_mdix = ETH_TP_MDI;
11847                 }
11848         } else {
11849                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
11850                 cmd->duplex = DUPLEX_UNKNOWN;
11851                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
11852         }
11853         cmd->phy_address = tp->phy_addr;
11854         cmd->transceiver = XCVR_INTERNAL;
11855         cmd->autoneg = tp->link_config.autoneg;
11856         cmd->maxtxpkt = 0;
11857         cmd->maxrxpkt = 0;
11858         return 0;
11859 }
11860
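/* Validate and apply link settings.  In autoneg mode the advertised
 * mask is restricted to what the hardware supports; in forced mode
 * SerDes parts accept only 1000/full, copper only 10 or 100.
 */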
11861 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
11862 {
11863         struct tg3 *tp = netdev_priv(dev);
11864         u32 speed = ethtool_cmd_speed(cmd);
11865
11866         if (tg3_flag(tp, USE_PHYLIB)) {
11867                 struct phy_device *phydev;
11868                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
11869                         return -EAGAIN;
11870                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
11871                 return phy_ethtool_sset(phydev, cmd);
11872         }
11873
11874         if (cmd->autoneg != AUTONEG_ENABLE &&
11875             cmd->autoneg != AUTONEG_DISABLE)
11876                 return -EINVAL;
11877
11878         if (cmd->autoneg == AUTONEG_DISABLE &&
11879             cmd->duplex != DUPLEX_FULL &&
11880             cmd->duplex != DUPLEX_HALF)
11881                 return -EINVAL;
11882
11883         if (cmd->autoneg == AUTONEG_ENABLE) {
11884                 u32 mask = ADVERTISED_Autoneg |
11885                            ADVERTISED_Pause |
11886                            ADVERTISED_Asym_Pause;
11887
11888                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
11889                         mask |= ADVERTISED_1000baseT_Half |
11890                                 ADVERTISED_1000baseT_Full;
11891
11892                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
11893                         mask |= ADVERTISED_100baseT_Half |
11894                                 ADVERTISED_100baseT_Full |
11895                                 ADVERTISED_10baseT_Half |
11896                                 ADVERTISED_10baseT_Full |
11897                                 ADVERTISED_TP;
11898                 else
11899                         mask |= ADVERTISED_FIBRE;
11900
11901                 if (cmd->advertising & ~mask)
11902                         return -EINVAL;
11903
11904                 mask &= (ADVERTISED_1000baseT_Half |
11905                          ADVERTISED_1000baseT_Full |
11906                          ADVERTISED_100baseT_Half |
11907                          ADVERTISED_100baseT_Full |
11908                          ADVERTISED_10baseT_Half |
11909                          ADVERTISED_10baseT_Full);
11910
11911                 cmd->advertising &= mask;
11912         } else {
11913                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
11914                         if (speed != SPEED_1000)
11915                                 return -EINVAL;
11916
11917                         if (cmd->duplex != DUPLEX_FULL)
11918                                 return -EINVAL;
11919                 } else {
11920                         if (speed != SPEED_100 &&
11921                             speed != SPEED_10)
11922                                 return -EINVAL;
11923                 }
11924         }
11925
11926         tg3_full_lock(tp, 0);
11927
11928         tp->link_config.autoneg = cmd->autoneg;
11929         if (cmd->autoneg == AUTONEG_ENABLE) {
11930                 tp->link_config.advertising = (cmd->advertising |
11931                                               ADVERTISED_Autoneg);
11932                 tp->link_config.speed = SPEED_UNKNOWN;
11933                 tp->link_config.duplex = DUPLEX_UNKNOWN;
11934         } else {
11935                 tp->link_config.advertising = 0;
11936                 tp->link_config.speed = speed;
11937                 tp->link_config.duplex = cmd->duplex;
11938         }
11939
11940         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
11941
11942         tg3_warn_mgmt_link_flap(tp);
11943
11944         if (netif_running(dev))
11945                 tg3_setup_phy(tp, true);
11946
11947         tg3_full_unlock(tp);
11948
11949         return 0;
11950 }
11951
11952 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
11953 {
11954         struct tg3 *tp = netdev_priv(dev);
11955
11956         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
11957         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
11958         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
11959         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
11960 }
11961
11962 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11963 {
11964         struct tg3 *tp = netdev_priv(dev);
11965
11966         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
11967                 wol->supported = WAKE_MAGIC;
11968         else
11969                 wol->supported = 0;
11970         wol->wolopts = 0;
11971         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
11972                 wol->wolopts = WAKE_MAGIC;
11973         memset(&wol->sopass, 0, sizeof(wol->sopass));
11974 }
11975
11976 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
11977 {
11978         struct tg3 *tp = netdev_priv(dev);
11979         struct device *dp = &tp->pdev->dev;
11980
11981         if (wol->wolopts & ~WAKE_MAGIC)
11982                 return -EINVAL;
11983         if ((wol->wolopts & WAKE_MAGIC) &&
11984             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
11985                 return -EINVAL;
11986
11987         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
11988
11989         spin_lock_bh(&tp->lock);
11990         if (device_may_wakeup(dp))
11991                 tg3_flag_set(tp, WOL_ENABLE);
11992         else
11993                 tg3_flag_clear(tp, WOL_ENABLE);
11994         spin_unlock_bh(&tp->lock);
11995
11996         return 0;
11997 }
11998
11999 static u32 tg3_get_msglevel(struct net_device *dev)
12000 {
12001         struct tg3 *tp = netdev_priv(dev);
12002         return tp->msg_enable;
12003 }
12004
12005 static void tg3_set_msglevel(struct net_device *dev, u32 value)
12006 {
12007         struct tg3 *tp = netdev_priv(dev);
12008         tp->msg_enable = value;
12009 }
12010
12011 static int tg3_nway_reset(struct net_device *dev)
12012 {
12013         struct tg3 *tp = netdev_priv(dev);
12014         int r;
12015
12016         if (!netif_running(dev))
12017                 return -EAGAIN;
12018
12019         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12020                 return -EINVAL;
12021
12022         tg3_warn_mgmt_link_flap(tp);
12023
12024         if (tg3_flag(tp, USE_PHYLIB)) {
12025                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12026                         return -EAGAIN;
12027                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
12028         } else {
12029                 u32 bmcr;
12030
12031                 spin_lock_bh(&tp->lock);
12032                 r = -EINVAL;
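                /* Apparently a dummy read to flush the latched BMCR
                 * value; only the second read's result is checked.
                 */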
12033                 tg3_readphy(tp, MII_BMCR, &bmcr);
12034                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
12035                     ((bmcr & BMCR_ANENABLE) ||
12036                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
12037                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
12038                                                    BMCR_ANENABLE);
12039                         r = 0;
12040                 }
12041                 spin_unlock_bh(&tp->lock);
12042         }
12043
12044         return r;
12045 }
12046
12047 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12048 {
12049         struct tg3 *tp = netdev_priv(dev);
12050
12051         ering->rx_max_pending = tp->rx_std_ring_mask;
12052         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12053                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
12054         else
12055                 ering->rx_jumbo_max_pending = 0;
12056
12057         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
12058
12059         ering->rx_pending = tp->rx_pending;
12060         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12061                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
12062         else
12063                 ering->rx_jumbo_pending = 0;
12064
12065         ering->tx_pending = tp->napi[0].tx_pending;
12066 }
12067
12068 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
12069 {
12070         struct tg3 *tp = netdev_priv(dev);
12071         int i, irq_sync = 0, err = 0;
12072
12073         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
12074             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
12075             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
12076             (ering->tx_pending <= MAX_SKB_FRAGS) ||
12077             (tg3_flag(tp, TSO_BUG) &&
12078              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
12079                 return -EINVAL;
12080
12081         if (netif_running(dev)) {
12082                 tg3_phy_stop(tp);
12083                 tg3_netif_stop(tp);
12084                 irq_sync = 1;
12085         }
12086
12087         tg3_full_lock(tp, irq_sync);
12088
12089         tp->rx_pending = ering->rx_pending;
12090
12091         if (tg3_flag(tp, MAX_RXPEND_64) &&
12092             tp->rx_pending > 63)
12093                 tp->rx_pending = 63;
12094
12095         if (tg3_flag(tp, JUMBO_RING_ENABLE))
12096                 tp->rx_jumbo_pending = ering->rx_jumbo_pending;
12097
12098         for (i = 0; i < tp->irq_max; i++)
12099                 tp->napi[i].tx_pending = ering->tx_pending;
12100
12101         if (netif_running(dev)) {
12102                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12103                 err = tg3_restart_hw(tp, false);
12104                 if (!err)
12105                         tg3_netif_start(tp);
12106         }
12107
12108         tg3_full_unlock(tp);
12109
12110         if (irq_sync && !err)
12111                 tg3_phy_start(tp);
12112
12113         return err;
12114 }
12115
12116 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12117 {
12118         struct tg3 *tp = netdev_priv(dev);
12119
12120         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
12121
12122         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
12123                 epause->rx_pause = 1;
12124         else
12125                 epause->rx_pause = 0;
12126
12127         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
12128                 epause->tx_pause = 1;
12129         else
12130                 epause->tx_pause = 0;
12131 }
12132
12133 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
12134 {
12135         struct tg3 *tp = netdev_priv(dev);
12136         int err = 0;
12137
12138         if (tp->link_config.autoneg == AUTONEG_ENABLE)
12139                 tg3_warn_mgmt_link_flap(tp);
12140
12141         if (tg3_flag(tp, USE_PHYLIB)) {
12142                 u32 newadv;
12143                 struct phy_device *phydev;
12144
12145                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12146
12147                 if (!(phydev->supported & SUPPORTED_Pause) ||
12148                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
12149                      (epause->rx_pause != epause->tx_pause)))
12150                         return -EINVAL;
12151
12152                 tp->link_config.flowctrl = 0;
12153                 if (epause->rx_pause) {
12154                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12155
12156                         if (epause->tx_pause) {
12157                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
12158                                 newadv = ADVERTISED_Pause;
12159                         } else
12160                                 newadv = ADVERTISED_Pause |
12161                                          ADVERTISED_Asym_Pause;
12162                 } else if (epause->tx_pause) {
12163                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12164                         newadv = ADVERTISED_Asym_Pause;
12165                 } else
12166                         newadv = 0;
12167
12168                 if (epause->autoneg)
12169                         tg3_flag_set(tp, PAUSE_AUTONEG);
12170                 else
12171                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12172
12173                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
12174                         u32 oldadv = phydev->advertising &
12175                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
12176                         if (oldadv != newadv) {
12177                                 phydev->advertising &=
12178                                         ~(ADVERTISED_Pause |
12179                                           ADVERTISED_Asym_Pause);
12180                                 phydev->advertising |= newadv;
12181                                 if (phydev->autoneg) {
12182                                         /*
12183                                          * Always renegotiate the link to
12184                                          * inform our link partner of our
12185                                          * flow control settings, even if the
12186                                          * flow control is forced.  Let
12187                                          * tg3_adjust_link() do the final
12188                                          * flow control setup.
12189                                          */
12190                                         return phy_start_aneg(phydev);
12191                                 }
12192                         }
12193
12194                         if (!epause->autoneg)
12195                                 tg3_setup_flow_control(tp, 0, 0);
12196                 } else {
12197                         tp->link_config.advertising &=
12198                                         ~(ADVERTISED_Pause |
12199                                           ADVERTISED_Asym_Pause);
12200                         tp->link_config.advertising |= newadv;
12201                 }
12202         } else {
12203                 int irq_sync = 0;
12204
12205                 if (netif_running(dev)) {
12206                         tg3_netif_stop(tp);
12207                         irq_sync = 1;
12208                 }
12209
12210                 tg3_full_lock(tp, irq_sync);
12211
12212                 if (epause->autoneg)
12213                         tg3_flag_set(tp, PAUSE_AUTONEG);
12214                 else
12215                         tg3_flag_clear(tp, PAUSE_AUTONEG);
12216                 if (epause->rx_pause)
12217                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
12218                 else
12219                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
12220                 if (epause->tx_pause)
12221                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
12222                 else
12223                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
12224
12225                 if (netif_running(dev)) {
12226                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12227                         err = tg3_restart_hw(tp, false);
12228                         if (!err)
12229                                 tg3_netif_start(tp);
12230                 }
12231
12232                 tg3_full_unlock(tp);
12233         }
12234
12235         tp->phy_flags |= TG3_PHYFLG_USER_CONFIGURED;
12236
12237         return err;
12238 }
12239
12240 static int tg3_get_sset_count(struct net_device *dev, int sset)
12241 {
12242         switch (sset) {
12243         case ETH_SS_TEST:
12244                 return TG3_NUM_TEST;
12245         case ETH_SS_STATS:
12246                 return TG3_NUM_STATS;
12247         default:
12248                 return -EOPNOTSUPP;
12249         }
12250 }
12251
12252 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
12253                          u32 *rules __always_unused)
12254 {
12255         struct tg3 *tp = netdev_priv(dev);
12256
12257         if (!tg3_flag(tp, SUPPORT_MSIX))
12258                 return -EOPNOTSUPP;
12259
12260         switch (info->cmd) {
12261         case ETHTOOL_GRXRINGS:
12262                 if (netif_running(tp->dev))
12263                         info->data = tp->rxq_cnt;
12264                 else {
12265                         info->data = num_online_cpus();
12266                         if (info->data > TG3_RSS_MAX_NUM_QS)
12267                                 info->data = TG3_RSS_MAX_NUM_QS;
12268                 }
12269
12270                 /* The first interrupt vector only
12271                  * handles link interrupts.
12272                  */
12273                 info->data -= 1;
12274                 return 0;
12275
12276         default:
12277                 return -EOPNOTSUPP;
12278         }
12279 }
12280
12281 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
12282 {
12283         u32 size = 0;
12284         struct tg3 *tp = netdev_priv(dev);
12285
12286         if (tg3_flag(tp, SUPPORT_MSIX))
12287                 size = TG3_RSS_INDIR_TBL_SIZE;
12288
12289         return size;
12290 }
12291
12292 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
12293 {
12294         struct tg3 *tp = netdev_priv(dev);
12295         int i;
12296
12297         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12298                 indir[i] = tp->rss_ind_tbl[i];
12299
12300         return 0;
12301 }
12302
12303 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
12304 {
12305         struct tg3 *tp = netdev_priv(dev);
12306         size_t i;
12307
12308         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
12309                 tp->rss_ind_tbl[i] = indir[i];
12310
12311         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
12312                 return 0;
12313
12314         /* It is legal to write the indirection
12315          * table while the device is running.
12316          */
12317         tg3_full_lock(tp, 0);
12318         tg3_rss_write_indir_tbl(tp);
12319         tg3_full_unlock(tp);
12320
12321         return 0;
12322 }
12323
12324 static void tg3_get_channels(struct net_device *dev,
12325                              struct ethtool_channels *channel)
12326 {
12327         struct tg3 *tp = netdev_priv(dev);
12328         u32 deflt_qs = netif_get_num_default_rss_queues();
12329
12330         channel->max_rx = tp->rxq_max;
12331         channel->max_tx = tp->txq_max;
12332
12333         if (netif_running(dev)) {
12334                 channel->rx_count = tp->rxq_cnt;
12335                 channel->tx_count = tp->txq_cnt;
12336         } else {
12337                 if (tp->rxq_req)
12338                         channel->rx_count = tp->rxq_req;
12339                 else
12340                         channel->rx_count = min(deflt_qs, tp->rxq_max);
12341
12342                 if (tp->txq_req)
12343                         channel->tx_count = tp->txq_req;
12344                 else
12345                         channel->tx_count = min(deflt_qs, tp->txq_max);
12346         }
12347 }
12348
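/* Changing the channel counts on a running interface takes effect by
 * fully stopping and restarting the device.
 */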
12349 static int tg3_set_channels(struct net_device *dev,
12350                             struct ethtool_channels *channel)
12351 {
12352         struct tg3 *tp = netdev_priv(dev);
12353
12354         if (!tg3_flag(tp, SUPPORT_MSIX))
12355                 return -EOPNOTSUPP;
12356
12357         if (channel->rx_count > tp->rxq_max ||
12358             channel->tx_count > tp->txq_max)
12359                 return -EINVAL;
12360
12361         tp->rxq_req = channel->rx_count;
12362         tp->txq_req = channel->tx_count;
12363
12364         if (!netif_running(dev))
12365                 return 0;
12366
12367         tg3_stop(tp);
12368
12369         tg3_carrier_off(tp);
12370
12371         tg3_start(tp, true, false, false);
12372
12373         return 0;
12374 }
12375
12376 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
12377 {
12378         switch (stringset) {
12379         case ETH_SS_STATS:
12380                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
12381                 break;
12382         case ETH_SS_TEST:
12383                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
12384                 break;
12385         default:
12386                 WARN_ON(1);     /* unknown stringset; should never happen */
12387                 break;
12388         }
12389 }
12390
12391 static int tg3_set_phys_id(struct net_device *dev,
12392                             enum ethtool_phys_id_state state)
12393 {
12394         struct tg3 *tp = netdev_priv(dev);
12395
12396         if (!netif_running(tp->dev))
12397                 return -EAGAIN;
12398
12399         switch (state) {
12400         case ETHTOOL_ID_ACTIVE:
12401                 return 1;       /* cycle on/off once per second */
12402
12403         case ETHTOOL_ID_ON:
12404                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12405                      LED_CTRL_1000MBPS_ON |
12406                      LED_CTRL_100MBPS_ON |
12407                      LED_CTRL_10MBPS_ON |
12408                      LED_CTRL_TRAFFIC_OVERRIDE |
12409                      LED_CTRL_TRAFFIC_BLINK |
12410                      LED_CTRL_TRAFFIC_LED);
12411                 break;
12412
12413         case ETHTOOL_ID_OFF:
12414                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
12415                      LED_CTRL_TRAFFIC_OVERRIDE);
12416                 break;
12417
12418         case ETHTOOL_ID_INACTIVE:
12419                 tw32(MAC_LED_CTRL, tp->led_ctrl);
12420                 break;
12421         }
12422
12423         return 0;
12424 }
12425
12426 static void tg3_get_ethtool_stats(struct net_device *dev,
12427                                    struct ethtool_stats *estats, u64 *tmp_stats)
12428 {
12429         struct tg3 *tp = netdev_priv(dev);
12430
12431         if (tp->hw_stats)
12432                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
12433         else
12434                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
12435 }
12436
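/* Locate and read the VPD block: either via an extended-VPD entry in
 * the NVRAM directory, at the fixed legacy NVRAM offset, or, for
 * devices without a recognized NVRAM image, through PCI config-space
 * VPD reads.
 */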
12437 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
12438 {
12439         int i;
12440         __be32 *buf;
12441         u32 offset = 0, len = 0;
12442         u32 magic, val;
12443
12444         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
12445                 return NULL;
12446
12447         if (magic == TG3_EEPROM_MAGIC) {
12448                 for (offset = TG3_NVM_DIR_START;
12449                      offset < TG3_NVM_DIR_END;
12450                      offset += TG3_NVM_DIRENT_SIZE) {
12451                         if (tg3_nvram_read(tp, offset, &val))
12452                                 return NULL;
12453
12454                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
12455                             TG3_NVM_DIRTYPE_EXTVPD)
12456                                 break;
12457                 }
12458
12459                 if (offset != TG3_NVM_DIR_END) {
12460                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
12461                         if (tg3_nvram_read(tp, offset + 4, &offset))
12462                                 return NULL;
12463
12464                         offset = tg3_nvram_logical_addr(tp, offset);
12465                 }
12466         }
12467
12468         if (!offset || !len) {
12469                 offset = TG3_NVM_VPD_OFF;
12470                 len = TG3_NVM_VPD_LEN;
12471         }
12472
12473         buf = kmalloc(len, GFP_KERNEL);
12474         if (buf == NULL)
12475                 return NULL;
12476
12477         if (magic == TG3_EEPROM_MAGIC) {
12478                 for (i = 0; i < len; i += 4) {
12479                         /* The data is in little-endian format in NVRAM.
12480                          * Use the big-endian read routines to preserve
12481                          * the byte order as it exists in NVRAM.
12482                          */
12483                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
12484                                 goto error;
12485                 }
12486         } else {
12487                 u8 *ptr;
12488                 ssize_t cnt;
12489                 unsigned int pos = 0;
12490
12491                 ptr = (u8 *)&buf[0];
12492                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
12493                         cnt = pci_read_vpd(tp->pdev, pos,
12494                                            len - pos, ptr);
12495                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
12496                                 cnt = 0;
12497                         else if (cnt < 0)
12498                                 goto error;
12499                 }
12500                 if (pos != len)
12501                         goto error;
12502         }
12503
12504         *vpdlen = len;
12505
12506         return buf;
12507
12508 error:
12509         kfree(buf);
12510         return NULL;
12511 }
12512
12513 #define NVRAM_TEST_SIZE 0x100
12514 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
12515 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
12516 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
12517 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
12518 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
12519 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
12520 #define NVRAM_SELFBOOT_HW_SIZE 0x20
12521 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
12522
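/* ethtool offline NVRAM test: read the image and verify the checksum
 * appropriate to its format (legacy, selfboot format 1, or selfboot
 * HW with per-byte parity), then check the VPD read-only checksum.
 */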
12523 static int tg3_test_nvram(struct tg3 *tp)
12524 {
12525         u32 csum, magic, len;
12526         __be32 *buf;
12527         int i, j, k, err = 0, size;
12528
12529         if (tg3_flag(tp, NO_NVRAM))
12530                 return 0;
12531
12532         if (tg3_nvram_read(tp, 0, &magic) != 0)
12533                 return -EIO;
12534
12535         if (magic == TG3_EEPROM_MAGIC)
12536                 size = NVRAM_TEST_SIZE;
12537         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
12538                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
12539                     TG3_EEPROM_SB_FORMAT_1) {
12540                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
12541                         case TG3_EEPROM_SB_REVISION_0:
12542                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
12543                                 break;
12544                         case TG3_EEPROM_SB_REVISION_2:
12545                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
12546                                 break;
12547                         case TG3_EEPROM_SB_REVISION_3:
12548                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
12549                                 break;
12550                         case TG3_EEPROM_SB_REVISION_4:
12551                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
12552                                 break;
12553                         case TG3_EEPROM_SB_REVISION_5:
12554                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
12555                                 break;
12556                         case TG3_EEPROM_SB_REVISION_6:
12557                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
12558                                 break;
12559                         default:
12560                                 return -EIO;
12561                         }
12562                 } else
12563                         return 0;
12564         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
12565                 size = NVRAM_SELFBOOT_HW_SIZE;
12566         else
12567                 return -EIO;
12568
12569         buf = kmalloc(size, GFP_KERNEL);
12570         if (buf == NULL)
12571                 return -ENOMEM;
12572
12573         err = -EIO;
12574         for (i = 0, j = 0; i < size; i += 4, j++) {
12575                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
12576                 if (err)
12577                         break;
12578         }
12579         if (i < size)
12580                 goto out;
12581
12582         /* Selfboot format */
12583         magic = be32_to_cpu(buf[0]);
12584         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
12585             TG3_EEPROM_MAGIC_FW) {
12586                 u8 *buf8 = (u8 *) buf, csum8 = 0;
12587
12588                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
12589                     TG3_EEPROM_SB_REVISION_2) {
12590                         /* For rev 2, the csum doesn't include the MBA. */
12591                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
12592                                 csum8 += buf8[i];
12593                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
12594                                 csum8 += buf8[i];
12595                 } else {
12596                         for (i = 0; i < size; i++)
12597                                 csum8 += buf8[i];
12598                 }
12599
12600                 if (csum8 == 0) {
12601                         err = 0;
12602                         goto out;
12603                 }
12604
12605                 err = -EIO;
12606                 goto out;
12607         }
12608
12609         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
12610             TG3_EEPROM_MAGIC_HW) {
12611                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
12612                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
12613                 u8 *buf8 = (u8 *) buf;
12614
12615                 /* Separate the parity bits and the data bytes.  */
12616                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
12617                         if ((i == 0) || (i == 8)) {
12618                                 int l;
12619                                 u8 msk;
12620
12621                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
12622                                         parity[k++] = buf8[i] & msk;
12623                                 i++;
12624                         } else if (i == 16) {
12625                                 int l;
12626                                 u8 msk;
12627
12628                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
12629                                         parity[k++] = buf8[i] & msk;
12630                                 i++;
12631
12632                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
12633                                         parity[k++] = buf8[i] & msk;
12634                                 i++;
12635                         }
12636                         data[j++] = buf8[i];
12637                 }
12638
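                /* Verify odd parity: each data byte plus its parity
                 * bit must contain an odd number of set bits.
                 */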
12639                 err = -EIO;
12640                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
12641                         u8 hw8 = hweight8(data[i]);
12642
12643                         if ((hw8 & 0x1) && parity[i])
12644                                 goto out;
12645                         else if (!(hw8 & 0x1) && !parity[i])
12646                                 goto out;
12647                 }
12648                 err = 0;
12649                 goto out;
12650         }
12651
12652         err = -EIO;
12653
12654         /* Bootstrap checksum at offset 0x10 */
12655         csum = calc_crc((unsigned char *) buf, 0x10);
12656         if (csum != le32_to_cpu(buf[0x10/4]))
12657                 goto out;
12658
12659         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
12660         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
12661         if (csum != le32_to_cpu(buf[0xfc/4]))
12662                 goto out;
12663
12664         kfree(buf);
12665
12666         buf = tg3_vpd_readblock(tp, &len);
12667         if (!buf)
12668                 return -ENOMEM;
12669
12670         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
12671         if (i > 0) {
12672                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
12673                 if (j < 0)
12674                         goto out;
12675
12676                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
12677                         goto out;
12678
12679                 i += PCI_VPD_LRDT_TAG_SIZE;
12680                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
12681                                               PCI_VPD_RO_KEYWORD_CHKSUM);
12682                 if (j > 0) {
12683                         u8 csum8 = 0;
12684
12685                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
12686
12687                         for (i = 0; i <= j; i++)
12688                                 csum8 += ((u8 *)buf)[i];
12689
12690                         if (csum8)
12691                                 goto out;
12692                 }
12693         }
12694
12695         err = 0;
12696
12697 out:
12698         kfree(buf);
12699         return err;
12700 }
12701
12702 #define TG3_SERDES_TIMEOUT_SEC  2
12703 #define TG3_COPPER_TIMEOUT_SEC  6
12704
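/* ethtool offline link test: poll link state once a second, for up to
 * 2 seconds on SerDes devices and 6 seconds on copper.
 */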
12705 static int tg3_test_link(struct tg3 *tp)
12706 {
12707         int i, max;
12708
12709         if (!netif_running(tp->dev))
12710                 return -ENODEV;
12711
12712         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
12713                 max = TG3_SERDES_TIMEOUT_SEC;
12714         else
12715                 max = TG3_COPPER_TIMEOUT_SEC;
12716
12717         for (i = 0; i < max; i++) {
12718                 if (tp->link_up)
12719                         return 0;
12720
12721                 if (msleep_interruptible(1000))
12722                         break;
12723         }
12724
12725         return -EIO;
12726 }
12727
12728 /* Only test the commonly used registers */
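/* Each table entry lists a register offset, chip-applicability flags,
 * the mask of bits with fixed read values (read_mask), and the mask of
 * bits expected to be writable (write_mask).
 */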
12729 static int tg3_test_registers(struct tg3 *tp)
12730 {
12731         int i, is_5705, is_5750;
12732         u32 offset, read_mask, write_mask, val, save_val, read_val;
12733         static struct {
12734                 u16 offset;
12735                 u16 flags;
12736 #define TG3_FL_5705     0x1
12737 #define TG3_FL_NOT_5705 0x2
12738 #define TG3_FL_NOT_5788 0x4
12739 #define TG3_FL_NOT_5750 0x8
12740                 u32 read_mask;
12741                 u32 write_mask;
12742         } reg_tbl[] = {
12743                 /* MAC Control Registers */
12744                 { MAC_MODE, TG3_FL_NOT_5705,
12745                         0x00000000, 0x00ef6f8c },
12746                 { MAC_MODE, TG3_FL_5705,
12747                         0x00000000, 0x01ef6b8c },
12748                 { MAC_STATUS, TG3_FL_NOT_5705,
12749                         0x03800107, 0x00000000 },
12750                 { MAC_STATUS, TG3_FL_5705,
12751                         0x03800100, 0x00000000 },
12752                 { MAC_ADDR_0_HIGH, 0x0000,
12753                         0x00000000, 0x0000ffff },
12754                 { MAC_ADDR_0_LOW, 0x0000,
12755                         0x00000000, 0xffffffff },
12756                 { MAC_RX_MTU_SIZE, 0x0000,
12757                         0x00000000, 0x0000ffff },
12758                 { MAC_TX_MODE, 0x0000,
12759                         0x00000000, 0x00000070 },
12760                 { MAC_TX_LENGTHS, 0x0000,
12761                         0x00000000, 0x00003fff },
12762                 { MAC_RX_MODE, TG3_FL_NOT_5705,
12763                         0x00000000, 0x000007fc },
12764                 { MAC_RX_MODE, TG3_FL_5705,
12765                         0x00000000, 0x000007dc },
12766                 { MAC_HASH_REG_0, 0x0000,
12767                         0x00000000, 0xffffffff },
12768                 { MAC_HASH_REG_1, 0x0000,
12769                         0x00000000, 0xffffffff },
12770                 { MAC_HASH_REG_2, 0x0000,
12771                         0x00000000, 0xffffffff },
12772                 { MAC_HASH_REG_3, 0x0000,
12773                         0x00000000, 0xffffffff },
12774
12775                 /* Receive Data and Receive BD Initiator Control Registers. */
12776                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
12777                         0x00000000, 0xffffffff },
12778                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
12779                         0x00000000, 0xffffffff },
12780                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
12781                         0x00000000, 0x00000003 },
12782                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
12783                         0x00000000, 0xffffffff },
12784                 { RCVDBDI_STD_BD+0, 0x0000,
12785                         0x00000000, 0xffffffff },
12786                 { RCVDBDI_STD_BD+4, 0x0000,
12787                         0x00000000, 0xffffffff },
12788                 { RCVDBDI_STD_BD+8, 0x0000,
12789                         0x00000000, 0xffff0002 },
12790                 { RCVDBDI_STD_BD+0xc, 0x0000,
12791                         0x00000000, 0xffffffff },
12792
12793                 /* Receive BD Initiator Control Registers. */
12794                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
12795                         0x00000000, 0xffffffff },
12796                 { RCVBDI_STD_THRESH, TG3_FL_5705,
12797                         0x00000000, 0x000003ff },
12798                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
12799                         0x00000000, 0xffffffff },
12800
12801                 /* Host Coalescing Control Registers. */
12802                 { HOSTCC_MODE, TG3_FL_NOT_5705,
12803                         0x00000000, 0x00000004 },
12804                 { HOSTCC_MODE, TG3_FL_5705,
12805                         0x00000000, 0x000000f6 },
12806                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
12807                         0x00000000, 0xffffffff },
12808                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
12809                         0x00000000, 0x000003ff },
12810                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
12811                         0x00000000, 0xffffffff },
12812                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
12813                         0x00000000, 0x000003ff },
12814                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
12815                         0x00000000, 0xffffffff },
12816                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12817                         0x00000000, 0x000000ff },
12818                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
12819                         0x00000000, 0xffffffff },
12820                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
12821                         0x00000000, 0x000000ff },
12822                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
12823                         0x00000000, 0xffffffff },
12824                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
12825                         0x00000000, 0xffffffff },
12826                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12827                         0x00000000, 0xffffffff },
12828                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12829                         0x00000000, 0x000000ff },
12830                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
12831                         0x00000000, 0xffffffff },
12832                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
12833                         0x00000000, 0x000000ff },
12834                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
12835                         0x00000000, 0xffffffff },
12836                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
12837                         0x00000000, 0xffffffff },
12838                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
12839                         0x00000000, 0xffffffff },
12840                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
12841                         0x00000000, 0xffffffff },
12842                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
12843                         0x00000000, 0xffffffff },
12844                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
12845                         0xffffffff, 0x00000000 },
12846                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
12847                         0xffffffff, 0x00000000 },
12848
12849                 /* Buffer Manager Control Registers. */
12850                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
12851                         0x00000000, 0x007fff80 },
12852                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
12853                         0x00000000, 0x007fffff },
12854                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
12855                         0x00000000, 0x0000003f },
12856                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
12857                         0x00000000, 0x000001ff },
12858                 { BUFMGR_MB_HIGH_WATER, 0x0000,
12859                         0x00000000, 0x000001ff },
12860                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
12861                         0xffffffff, 0x00000000 },
12862                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
12863                         0xffffffff, 0x00000000 },
12864
12865                 /* Mailbox Registers */
12866                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
12867                         0x00000000, 0x000001ff },
12868                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
12869                         0x00000000, 0x000001ff },
12870                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
12871                         0x00000000, 0x000007ff },
12872                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
12873                         0x00000000, 0x000001ff },
12874
12875                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
12876         };
12877
12878         is_5705 = is_5750 = 0;
12879         if (tg3_flag(tp, 5705_PLUS)) {
12880                 is_5705 = 1;
12881                 if (tg3_flag(tp, 5750_PLUS))
12882                         is_5750 = 1;
12883         }
12884
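        /* For each entry, read_mask selects read-only bits whose value must
         * survive any write, and write_mask selects read/write bits that
         * must latch both all-zeros and all-ones; offset 0xffff terminates
         * the table.
         */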
12885         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
12886                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
12887                         continue;
12888
12889                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
12890                         continue;
12891
12892                 if (tg3_flag(tp, IS_5788) &&
12893                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
12894                         continue;
12895
12896                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
12897                         continue;
12898
12899                 offset = (u32) reg_tbl[i].offset;
12900                 read_mask = reg_tbl[i].read_mask;
12901                 write_mask = reg_tbl[i].write_mask;
12902
12903                 /* Save the original register content */
12904                 save_val = tr32(offset);
12905
12906                 /* Determine the read-only value. */
12907                 read_val = save_val & read_mask;
12908
12909                 /* Write zero to the register, then make sure the read-only bits
12910                  * are not changed and the read/write bits are all zeros.
12911                  */
12912                 tw32(offset, 0);
12913
12914                 val = tr32(offset);
12915
12916                 /* Test the read-only and read/write bits. */
12917                 if (((val & read_mask) != read_val) || (val & write_mask))
12918                         goto out;
12919
12920                 /* Write ones to all the bits defined by RdMask and WrMask, then
12921                  * make sure the read-only bits are not changed and the
12922                  * read/write bits are all ones.
12923                  */
12924                 tw32(offset, read_mask | write_mask);
12925
12926                 val = tr32(offset);
12927
12928                 /* Test the read-only bits. */
12929                 if ((val & read_mask) != read_val)
12930                         goto out;
12931
12932                 /* Test the read/write bits. */
12933                 if ((val & write_mask) != write_mask)
12934                         goto out;
12935
12936                 tw32(offset, save_val);
12937         }
12938
12939         return 0;
12940
12941 out:
12942         if (netif_msg_hw(tp))
12943                 netdev_err(tp->dev,
12944                            "Register test failed at offset %x\n", offset);
12945         tw32(offset, save_val);
12946         return -EIO;
12947 }
12948
12949 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
12950 {
12951         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
12952         int i;
12953         u32 j;
12954
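        /* All-zeros and all-ones catch stuck-at faults; the mixed
         * 0xaa55a55a pattern also drives adjacent bits to opposite values,
         * which helps catch coupling faults.
         */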
12955         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
12956                 for (j = 0; j < len; j += 4) {
12957                         u32 val;
12958
12959                         tg3_write_mem(tp, offset + j, test_pattern[i]);
12960                         tg3_read_mem(tp, offset + j, &val);
12961                         if (val != test_pattern[i])
12962                                 return -EIO;
12963                 }
12964         }
12965         return 0;
12966 }
12967
12968 static int tg3_test_memory(struct tg3 *tp)
12969 {
12970         static struct mem_entry {
12971                 u32 offset;
12972                 u32 len;
12973         } mem_tbl_570x[] = {
12974                 { 0x00000000, 0x00b50},
12975                 { 0x00002000, 0x1c000},
12976                 { 0xffffffff, 0x00000}
12977         }, mem_tbl_5705[] = {
12978                 { 0x00000100, 0x0000c},
12979                 { 0x00000200, 0x00008},
12980                 { 0x00004000, 0x00800},
12981                 { 0x00006000, 0x01000},
12982                 { 0x00008000, 0x02000},
12983                 { 0x00010000, 0x0e000},
12984                 { 0xffffffff, 0x00000}
12985         }, mem_tbl_5755[] = {
12986                 { 0x00000200, 0x00008},
12987                 { 0x00004000, 0x00800},
12988                 { 0x00006000, 0x00800},
12989                 { 0x00008000, 0x02000},
12990                 { 0x00010000, 0x0c000},
12991                 { 0xffffffff, 0x00000}
12992         }, mem_tbl_5906[] = {
12993                 { 0x00000200, 0x00008},
12994                 { 0x00004000, 0x00400},
12995                 { 0x00006000, 0x00400},
12996                 { 0x00008000, 0x01000},
12997                 { 0x00010000, 0x01000},
12998                 { 0xffffffff, 0x00000}
12999         }, mem_tbl_5717[] = {
13000                 { 0x00000200, 0x00008},
13001                 { 0x00010000, 0x0a000},
13002                 { 0x00020000, 0x13c00},
13003                 { 0xffffffff, 0x00000}
13004         }, mem_tbl_57765[] = {
13005                 { 0x00000200, 0x00008},
13006                 { 0x00004000, 0x00800},
13007                 { 0x00006000, 0x09800},
13008                 { 0x00010000, 0x0a000},
13009                 { 0xffffffff, 0x00000}
13010         };
13011         struct mem_entry *mem_tbl;
13012         int err = 0;
13013         int i;
13014
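        /* Pick the internal-SRAM map for this ASIC generation; each
         * { offset, len } region is probed in turn, and the list is
         * terminated by an offset of 0xffffffff.
         */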
13015         if (tg3_flag(tp, 5717_PLUS))
13016                 mem_tbl = mem_tbl_5717;
13017         else if (tg3_flag(tp, 57765_CLASS) ||
13018                  tg3_asic_rev(tp) == ASIC_REV_5762)
13019                 mem_tbl = mem_tbl_57765;
13020         else if (tg3_flag(tp, 5755_PLUS))
13021                 mem_tbl = mem_tbl_5755;
13022         else if (tg3_asic_rev(tp) == ASIC_REV_5906)
13023                 mem_tbl = mem_tbl_5906;
13024         else if (tg3_flag(tp, 5705_PLUS))
13025                 mem_tbl = mem_tbl_5705;
13026         else
13027                 mem_tbl = mem_tbl_570x;
13028
13029         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
13030                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
13031                 if (err)
13032                         break;
13033         }
13034
13035         return err;
13036 }
13037
13038 #define TG3_TSO_MSS             500
13039
13040 #define TG3_TSO_IP_HDR_LEN      20
13041 #define TG3_TSO_TCP_HDR_LEN     20
13042 #define TG3_TSO_TCP_OPT_LEN     12
13043
13044 static const u8 tg3_tso_header[] = {
13045 0x08, 0x00,
13046 0x45, 0x00, 0x00, 0x00,
13047 0x00, 0x00, 0x40, 0x00,
13048 0x40, 0x06, 0x00, 0x00,
13049 0x0a, 0x00, 0x00, 0x01,
13050 0x0a, 0x00, 0x00, 0x02,
13051 0x0d, 0x00, 0xe0, 0x00,
13052 0x00, 0x00, 0x01, 0x00,
13053 0x00, 0x00, 0x02, 0x00,
13054 0x80, 0x10, 0x10, 0x00,
13055 0x14, 0x09, 0x00, 0x00,
13056 0x01, 0x01, 0x08, 0x0a,
13057 0x11, 0x11, 0x11, 0x11,
13058 0x11, 0x11, 0x11, 0x11,
13059 };
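/* The canned header above is an EtherType (0x0800) followed by a 20-byte
 * IPv4 header (10.0.0.1 -> 10.0.0.2, proto TCP) and a 32-byte TCP header
 * (20 bytes plus 12 bytes of timestamp options); tot_len and the TCP
 * checksum are patched at run time.
 */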
13060
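/* Build one frame addressed to ourselves, post it on the send ring, and
 * poll the status block until the TX consumer and RX producer indices
 * show it has looped back; the payload is then verified byte by byte
 * against the pattern written at transmit time.
 */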
13061 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
13062 {
13063         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
13064         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
13065         u32 budget;
13066         struct sk_buff *skb;
13067         u8 *tx_data, *rx_data;
13068         dma_addr_t map;
13069         int num_pkts, tx_len, rx_len, i, err;
13070         struct tg3_rx_buffer_desc *desc;
13071         struct tg3_napi *tnapi, *rnapi;
13072         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
13073
13074         tnapi = &tp->napi[0];
13075         rnapi = &tp->napi[0];
13076         if (tp->irq_cnt > 1) {
13077                 if (tg3_flag(tp, ENABLE_RSS))
13078                         rnapi = &tp->napi[1];
13079                 if (tg3_flag(tp, ENABLE_TSS))
13080                         tnapi = &tp->napi[1];
13081         }
13082         coal_now = tnapi->coal_now | rnapi->coal_now;
13083
13084         err = -EIO;
13085
13086         tx_len = pktsz;
13087         skb = netdev_alloc_skb(tp->dev, tx_len);
13088         if (!skb)
13089                 return -ENOMEM;
13090
13091         tx_data = skb_put(skb, tx_len);
13092         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
13093         memset(tx_data + ETH_ALEN, 0x0, 8);
13094
13095         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
13096
13097         if (tso_loopback) {
13098                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
13099
13100                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
13101                               TG3_TSO_TCP_OPT_LEN;
13102
13103                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
13104                        sizeof(tg3_tso_header));
13105                 mss = TG3_TSO_MSS;
13106
13107                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
13108                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
13109
13110                 /* Set the total length field in the IP header */
13111                 iph->tot_len = htons((u16)(mss + hdr_len));
13112
13113                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
13114                               TXD_FLAG_CPU_POST_DMA);
13115
13116                 if (tg3_flag(tp, HW_TSO_1) ||
13117                     tg3_flag(tp, HW_TSO_2) ||
13118                     tg3_flag(tp, HW_TSO_3)) {
13119                         struct tcphdr *th;
13120                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
13121                         th = (struct tcphdr *)&tx_data[val];
13122                         th->check = 0;
13123                 } else
13124                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
13125
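                /* Each hardware TSO generation encodes the header length
                 * differently: HW_TSO_3 splits it between base_flags and
                 * the upper mss bits, HW_TSO_2 packs it into mss bits 9
                 * and up, and the older engines only need the TCP option
                 * length.
                 */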
13126                 if (tg3_flag(tp, HW_TSO_3)) {
13127                         mss |= (hdr_len & 0xc) << 12;
13128                         if (hdr_len & 0x10)
13129                                 base_flags |= 0x00000010;
13130                         base_flags |= (hdr_len & 0x3e0) << 5;
13131                 } else if (tg3_flag(tp, HW_TSO_2))
13132                         mss |= hdr_len << 9;
13133                 else if (tg3_flag(tp, HW_TSO_1) ||
13134                          tg3_asic_rev(tp) == ASIC_REV_5705) {
13135                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
13136                 } else {
13137                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
13138                 }
13139
13140                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
13141         } else {
13142                 num_pkts = 1;
13143                 data_off = ETH_HLEN;
13144
13145                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
13146                     tx_len > VLAN_ETH_FRAME_LEN)
13147                         base_flags |= TXD_FLAG_JMB_PKT;
13148         }
13149
13150         for (i = data_off; i < tx_len; i++)
13151                 tx_data[i] = (u8) (i & 0xff);
13152
13153         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
13154         if (pci_dma_mapping_error(tp->pdev, map)) {
13155                 dev_kfree_skb(skb);
13156                 return -EIO;
13157         }
13158
13159         val = tnapi->tx_prod;
13160         tnapi->tx_buffers[val].skb = skb;
13161         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
13162
13163         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13164                rnapi->coal_now);
13165
13166         udelay(10);
13167
13168         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
13169
13170         budget = tg3_tx_avail(tnapi);
13171         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
13172                             base_flags | TXD_FLAG_END, mss, 0)) {
13173                 tnapi->tx_buffers[val].skb = NULL;
13174                 dev_kfree_skb(skb);
13175                 return -EIO;
13176         }
13177
13178         tnapi->tx_prod++;
13179
13180         /* Sync BD data before updating mailbox */
13181         wmb();
13182
13183         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
13184         tr32_mailbox(tnapi->prodmbox);
13185
13186         udelay(10);
13187
13188         /* 350 usec to allow enough time on some 10/100 Mbps devices.  */
13189         for (i = 0; i < 35; i++) {
13190                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
13191                        coal_now);
13192
13193                 udelay(10);
13194
13195                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
13196                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
13197                 if ((tx_idx == tnapi->tx_prod) &&
13198                     (rx_idx == (rx_start_idx + num_pkts)))
13199                         break;
13200         }
13201
13202         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
13203         dev_kfree_skb(skb);
13204
13205         if (tx_idx != tnapi->tx_prod)
13206                 goto out;
13207
13208         if (rx_idx != rx_start_idx + num_pkts)
13209                 goto out;
13210
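        /* Walk every RX descriptor the chip produced.  'val' carries the
         * expected pattern byte across packets, since a TSO frame comes
         * back as multiple segments continuing the same sequence.
         */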
13211         val = data_off;
13212         while (rx_idx != rx_start_idx) {
13213                 desc = &rnapi->rx_rcb[rx_start_idx++];
13214                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
13215                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
13216
13217                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
13218                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
13219                         goto out;
13220
13221                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
13222                          - ETH_FCS_LEN;
13223
13224                 if (!tso_loopback) {
13225                         if (rx_len != tx_len)
13226                                 goto out;
13227
13228                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
13229                                 if (opaque_key != RXD_OPAQUE_RING_STD)
13230                                         goto out;
13231                         } else {
13232                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
13233                                         goto out;
13234                         }
13235                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
13236                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
13237                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
13238                         goto out;
13239                 }
13240
13241                 if (opaque_key == RXD_OPAQUE_RING_STD) {
13242                         rx_data = tpr->rx_std_buffers[desc_idx].data;
13243                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
13244                                              mapping);
13245                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
13246                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
13247                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
13248                                              mapping);
13249                 } else
13250                         goto out;
13251
13252                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
13253                                             PCI_DMA_FROMDEVICE);
13254
13255                 rx_data += TG3_RX_OFFSET(tp);
13256                 for (i = data_off; i < rx_len; i++, val++) {
13257                         if (*(rx_data + i) != (u8) (val & 0xff))
13258                                 goto out;
13259                 }
13260         }
13261
13262         err = 0;
13263
13264         /* tg3_free_rings will unmap and free the rx_data */
13265 out:
13266         return err;
13267 }
13268
13269 #define TG3_STD_LOOPBACK_FAILED         1
13270 #define TG3_JMB_LOOPBACK_FAILED         2
13271 #define TG3_TSO_LOOPBACK_FAILED         4
13272 #define TG3_LOOPBACK_FAILED \
13273         (TG3_STD_LOOPBACK_FAILED | \
13274          TG3_JMB_LOOPBACK_FAILED | \
13275          TG3_TSO_LOOPBACK_FAILED)
13276
13277 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
13278 {
13279         int err = -EIO;
13280         u32 eee_cap;
13281         u32 jmb_pkt_sz = 9000;
13282
13283         if (tp->dma_limit)
13284                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
13285
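        /* EEE can drop the link into a low-power idle state that interferes
         * with loopback, so the capability is masked off for the duration
         * of the test and restored at 'done'.
         */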
13286         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
13287         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
13288
13289         if (!netif_running(tp->dev)) {
13290                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13291                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13292                 if (do_extlpbk)
13293                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13294                 goto done;
13295         }
13296
13297         err = tg3_reset_hw(tp, true);
13298         if (err) {
13299                 data[TG3_MAC_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13300                 data[TG3_PHY_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13301                 if (do_extlpbk)
13302                         data[TG3_EXT_LOOPB_TEST] = TG3_LOOPBACK_FAILED;
13303                 goto done;
13304         }
13305
13306         if (tg3_flag(tp, ENABLE_RSS)) {
13307                 int i;
13308
13309                 /* Reroute all rx packets to the 1st queue */
13310                 for (i = MAC_RSS_INDIR_TBL_0;
13311                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
13312                         tw32(i, 0x0);
13313         }
13314
13315         /* HW erratum - MAC loopback fails in some cases on 5780.
13316          * Normal traffic and PHY loopback are not affected by this
13317          * erratum.  Also, the MAC loopback test is deprecated for
13318          * all newer ASIC revisions.
13319          */
13320         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
13321             !tg3_flag(tp, CPMU_PRESENT)) {
13322                 tg3_mac_loopback(tp, true);
13323
13324                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13325                         data[TG3_MAC_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13326
13327                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13328                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13329                         data[TG3_MAC_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13330
13331                 tg3_mac_loopback(tp, false);
13332         }
13333
13334         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
13335             !tg3_flag(tp, USE_PHYLIB)) {
13336                 int i;
13337
13338                 tg3_phy_lpbk_set(tp, 0, false);
13339
13340                 /* Wait for link */
13341                 for (i = 0; i < 100; i++) {
13342                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
13343                                 break;
13344                         mdelay(1);
13345                 }
13346
13347                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13348                         data[TG3_PHY_LOOPB_TEST] |= TG3_STD_LOOPBACK_FAILED;
13349                 if (tg3_flag(tp, TSO_CAPABLE) &&
13350                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13351                         data[TG3_PHY_LOOPB_TEST] |= TG3_TSO_LOOPBACK_FAILED;
13352                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13353                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13354                         data[TG3_PHY_LOOPB_TEST] |= TG3_JMB_LOOPBACK_FAILED;
13355
13356                 if (do_extlpbk) {
13357                         tg3_phy_lpbk_set(tp, 0, true);
13358
13359                         /* All link indications report up, but the hardware
13360                          * isn't really ready for about 20 msec.  Double it
13361                          * to be sure.
13362                          */
13363                         mdelay(40);
13364
13365                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
13366                                 data[TG3_EXT_LOOPB_TEST] |=
13367                                                         TG3_STD_LOOPBACK_FAILED;
13368                         if (tg3_flag(tp, TSO_CAPABLE) &&
13369                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
13370                                 data[TG3_EXT_LOOPB_TEST] |=
13371                                                         TG3_TSO_LOOPBACK_FAILED;
13372                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
13373                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
13374                                 data[TG3_EXT_LOOPB_TEST] |=
13375                                                         TG3_JMB_LOOPBACK_FAILED;
13376                 }
13377
13378                 /* Re-enable gphy autopowerdown. */
13379                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
13380                         tg3_phy_toggle_apd(tp, true);
13381         }
13382
13383         err = (data[TG3_MAC_LOOPB_TEST] | data[TG3_PHY_LOOPB_TEST] |
13384                data[TG3_EXT_LOOPB_TEST]) ? -EIO : 0;
13385
13386 done:
13387         tp->phy_flags |= eee_cap;
13388
13389         return err;
13390 }
13391
13392 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
13393                           u64 *data)
13394 {
13395         struct tg3 *tp = netdev_priv(dev);
13396         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
13397
13398         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
13399             tg3_power_up(tp)) {
13400                 etest->flags |= ETH_TEST_FL_FAILED;
13401                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
13402                 return;
13403         }
13404
13405         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
13406
13407         if (tg3_test_nvram(tp) != 0) {
13408                 etest->flags |= ETH_TEST_FL_FAILED;
13409                 data[TG3_NVRAM_TEST] = 1;
13410         }
13411         if (!doextlpbk && tg3_test_link(tp)) {
13412                 etest->flags |= ETH_TEST_FL_FAILED;
13413                 data[TG3_LINK_TEST] = 1;
13414         }
13415         if (etest->flags & ETH_TEST_FL_OFFLINE) {
13416                 int err, err2 = 0, irq_sync = 0;
13417
13418                 if (netif_running(dev)) {
13419                         tg3_phy_stop(tp);
13420                         tg3_netif_stop(tp);
13421                         irq_sync = 1;
13422                 }
13423
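                /* Offline tests reset and reprogram the chip, so quiesce it
                 * first: halt the on-chip CPUs under the NVRAM lock (only
                 * pre-5705 parts have a separate TX CPU).
                 */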
13424                 tg3_full_lock(tp, irq_sync);
13425                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
13426                 err = tg3_nvram_lock(tp);
13427                 tg3_halt_cpu(tp, RX_CPU_BASE);
13428                 if (!tg3_flag(tp, 5705_PLUS))
13429                         tg3_halt_cpu(tp, TX_CPU_BASE);
13430                 if (!err)
13431                         tg3_nvram_unlock(tp);
13432
13433                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
13434                         tg3_phy_reset(tp);
13435
13436                 if (tg3_test_registers(tp) != 0) {
13437                         etest->flags |= ETH_TEST_FL_FAILED;
13438                         data[TG3_REGISTER_TEST] = 1;
13439                 }
13440
13441                 if (tg3_test_memory(tp) != 0) {
13442                         etest->flags |= ETH_TEST_FL_FAILED;
13443                         data[TG3_MEMORY_TEST] = 1;
13444                 }
13445
13446                 if (doextlpbk)
13447                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
13448
13449                 if (tg3_test_loopback(tp, data, doextlpbk))
13450                         etest->flags |= ETH_TEST_FL_FAILED;
13451
13452                 tg3_full_unlock(tp);
13453
13454                 if (tg3_test_interrupt(tp) != 0) {
13455                         etest->flags |= ETH_TEST_FL_FAILED;
13456                         data[TG3_INTERRUPT_TEST] = 1;
13457                 }
13458
13459                 tg3_full_lock(tp, 0);
13460
13461                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13462                 if (netif_running(dev)) {
13463                         tg3_flag_set(tp, INIT_COMPLETE);
13464                         err2 = tg3_restart_hw(tp, true);
13465                         if (!err2)
13466                                 tg3_netif_start(tp);
13467                 }
13468
13469                 tg3_full_unlock(tp);
13470
13471                 if (irq_sync && !err2)
13472                         tg3_phy_start(tp);
13473         }
13474         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
13475                 tg3_power_down(tp);
13476
13477 }
13478
13479 static int tg3_hwtstamp_ioctl(struct net_device *dev,
13480                               struct ifreq *ifr, int cmd)
13481 {
13482         struct tg3 *tp = netdev_priv(dev);
13483         struct hwtstamp_config stmpconf;
13484
13485         if (!tg3_flag(tp, PTP_CAPABLE))
13486                 return -EINVAL;
13487
13488         if (copy_from_user(&stmpconf, ifr->ifr_data, sizeof(stmpconf)))
13489                 return -EFAULT;
13490
13491         if (stmpconf.flags)
13492                 return -EINVAL;
13493
13494         switch (stmpconf.tx_type) {
13495         case HWTSTAMP_TX_ON:
13496                 tg3_flag_set(tp, TX_TSTAMP_EN);
13497                 break;
13498         case HWTSTAMP_TX_OFF:
13499                 tg3_flag_clear(tp, TX_TSTAMP_EN);
13500                 break;
13501         default:
13502                 return -ERANGE;
13503         }
13504
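        /* Map the requested RX filter onto the chip's RX_PTP_CTL
         * version-enable and event-select bits; combinations the hardware
         * cannot timestamp exactly are rejected with -ERANGE rather than
         * silently widened.
         */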
13505         switch (stmpconf.rx_filter) {
13506         case HWTSTAMP_FILTER_NONE:
13507                 tp->rxptpctl = 0;
13508                 break;
13509         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
13510                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13511                                TG3_RX_PTP_CTL_ALL_V1_EVENTS;
13512                 break;
13513         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
13514                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13515                                TG3_RX_PTP_CTL_SYNC_EVNT;
13516                 break;
13517         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
13518                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V1_EN |
13519                                TG3_RX_PTP_CTL_DELAY_REQ;
13520                 break;
13521         case HWTSTAMP_FILTER_PTP_V2_EVENT:
13522                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13523                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13524                 break;
13525         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
13526                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13527                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13528                 break;
13529         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
13530                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13531                                TG3_RX_PTP_CTL_ALL_V2_EVENTS;
13532                 break;
13533         case HWTSTAMP_FILTER_PTP_V2_SYNC:
13534                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13535                                TG3_RX_PTP_CTL_SYNC_EVNT;
13536                 break;
13537         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
13538                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13539                                TG3_RX_PTP_CTL_SYNC_EVNT;
13540                 break;
13541         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
13542                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13543                                TG3_RX_PTP_CTL_SYNC_EVNT;
13544                 break;
13545         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
13546                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_EN |
13547                                TG3_RX_PTP_CTL_DELAY_REQ;
13548                 break;
13549         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
13550                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L2_EN |
13551                                TG3_RX_PTP_CTL_DELAY_REQ;
13552                 break;
13553         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
13554                 tp->rxptpctl = TG3_RX_PTP_CTL_RX_PTP_V2_L4_EN |
13555                                TG3_RX_PTP_CTL_DELAY_REQ;
13556                 break;
13557         default:
13558                 return -ERANGE;
13559         }
13560
13561         if (netif_running(dev) && tp->rxptpctl)
13562                 tw32(TG3_RX_PTP_CTL,
13563                      tp->rxptpctl | TG3_RX_PTP_CTL_HWTS_INTERLOCK);
13564
13565         return copy_to_user(ifr->ifr_data, &stmpconf, sizeof(stmpconf)) ?
13566                 -EFAULT : 0;
13567 }
13568
13569 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
13570 {
13571         struct mii_ioctl_data *data = if_mii(ifr);
13572         struct tg3 *tp = netdev_priv(dev);
13573         int err;
13574
13575         if (tg3_flag(tp, USE_PHYLIB)) {
13576                 struct phy_device *phydev;
13577                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
13578                         return -EAGAIN;
13579                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
13580                 return phy_mii_ioctl(phydev, ifr, cmd);
13581         }
13582
13583         switch (cmd) {
13584         case SIOCGMIIPHY:
13585                 data->phy_id = tp->phy_addr;
13586
13587                 /* fall through */
13588         case SIOCGMIIREG: {
13589                 u32 mii_regval;
13590
13591                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13592                         break;                  /* We have no PHY */
13593
13594                 if (!netif_running(dev))
13595                         return -EAGAIN;
13596
13597                 spin_lock_bh(&tp->lock);
13598                 err = __tg3_readphy(tp, data->phy_id & 0x1f,
13599                                     data->reg_num & 0x1f, &mii_regval);
13600                 spin_unlock_bh(&tp->lock);
13601
13602                 data->val_out = mii_regval;
13603
13604                 return err;
13605         }
13606
13607         case SIOCSMIIREG:
13608                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
13609                         break;                  /* We have no PHY */
13610
13611                 if (!netif_running(dev))
13612                         return -EAGAIN;
13613
13614                 spin_lock_bh(&tp->lock);
13615                 err = __tg3_writephy(tp, data->phy_id & 0x1f,
13616                                      data->reg_num & 0x1f, data->val_in);
13617                 spin_unlock_bh(&tp->lock);
13618
13619                 return err;
13620
13621         case SIOCSHWTSTAMP:
13622                 return tg3_hwtstamp_ioctl(dev, ifr, cmd);
13623
13624         default:
13625                 /* do nothing */
13626                 break;
13627         }
13628         return -EOPNOTSUPP;
13629 }
13630
13631 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13632 {
13633         struct tg3 *tp = netdev_priv(dev);
13634
13635         memcpy(ec, &tp->coal, sizeof(*ec));
13636         return 0;
13637 }
13638
13639 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
13640 {
13641         struct tg3 *tp = netdev_priv(dev);
13642         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
13643         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
13644
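        /* The per-IRQ tick and statistics coalescing registers only exist
         * on pre-5705 parts; on newer chips these limits stay zero, so any
         * nonzero request for them is rejected below.
         */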
13645         if (!tg3_flag(tp, 5705_PLUS)) {
13646                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
13647                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
13648                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
13649                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
13650         }
13651
13652         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
13653             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
13654             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
13655             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
13656             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
13657             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
13658             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
13659             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
13660             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
13661             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
13662                 return -EINVAL;
13663
13664         /* No rx interrupts will be generated if both are zero */
13665         if ((ec->rx_coalesce_usecs == 0) &&
13666             (ec->rx_max_coalesced_frames == 0))
13667                 return -EINVAL;
13668
13669         /* No tx interrupts will be generated if both are zero */
13670         if ((ec->tx_coalesce_usecs == 0) &&
13671             (ec->tx_max_coalesced_frames == 0))
13672                 return -EINVAL;
13673
13674         /* Only copy relevant parameters, ignore all others. */
13675         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
13676         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
13677         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
13678         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
13679         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
13680         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
13681         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
13682         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
13683         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
13684
13685         if (netif_running(dev)) {
13686                 tg3_full_lock(tp, 0);
13687                 __tg3_set_coalesce(tp, &tp->coal);
13688                 tg3_full_unlock(tp);
13689         }
13690         return 0;
13691 }
13692
13693 static const struct ethtool_ops tg3_ethtool_ops = {
13694         .get_settings           = tg3_get_settings,
13695         .set_settings           = tg3_set_settings,
13696         .get_drvinfo            = tg3_get_drvinfo,
13697         .get_regs_len           = tg3_get_regs_len,
13698         .get_regs               = tg3_get_regs,
13699         .get_wol                = tg3_get_wol,
13700         .set_wol                = tg3_set_wol,
13701         .get_msglevel           = tg3_get_msglevel,
13702         .set_msglevel           = tg3_set_msglevel,
13703         .nway_reset             = tg3_nway_reset,
13704         .get_link               = ethtool_op_get_link,
13705         .get_eeprom_len         = tg3_get_eeprom_len,
13706         .get_eeprom             = tg3_get_eeprom,
13707         .set_eeprom             = tg3_set_eeprom,
13708         .get_ringparam          = tg3_get_ringparam,
13709         .set_ringparam          = tg3_set_ringparam,
13710         .get_pauseparam         = tg3_get_pauseparam,
13711         .set_pauseparam         = tg3_set_pauseparam,
13712         .self_test              = tg3_self_test,
13713         .get_strings            = tg3_get_strings,
13714         .set_phys_id            = tg3_set_phys_id,
13715         .get_ethtool_stats      = tg3_get_ethtool_stats,
13716         .get_coalesce           = tg3_get_coalesce,
13717         .set_coalesce           = tg3_set_coalesce,
13718         .get_sset_count         = tg3_get_sset_count,
13719         .get_rxnfc              = tg3_get_rxnfc,
13720         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
13721         .get_rxfh_indir         = tg3_get_rxfh_indir,
13722         .set_rxfh_indir         = tg3_set_rxfh_indir,
13723         .get_channels           = tg3_get_channels,
13724         .set_channels           = tg3_set_channels,
13725         .get_ts_info            = tg3_get_ts_info,
13726 };
13727
13728 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
13729                                                 struct rtnl_link_stats64 *stats)
13730 {
13731         struct tg3 *tp = netdev_priv(dev);
13732
13733         spin_lock_bh(&tp->lock);
13734         if (!tp->hw_stats) {
13735                 spin_unlock_bh(&tp->lock);
13736                 return &tp->net_stats_prev;
13737         }
13738
13739         tg3_get_nstats(tp, stats);
13740         spin_unlock_bh(&tp->lock);
13741
13742         return stats;
13743 }
13744
13745 static void tg3_set_rx_mode(struct net_device *dev)
13746 {
13747         struct tg3 *tp = netdev_priv(dev);
13748
13749         if (!netif_running(dev))
13750                 return;
13751
13752         tg3_full_lock(tp, 0);
13753         __tg3_set_rx_mode(dev);
13754         tg3_full_unlock(tp);
13755 }
13756
13757 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
13758                                int new_mtu)
13759 {
13760         dev->mtu = new_mtu;
13761
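        /* 5780-class chips cannot run TSO and jumbo frames at the same
         * time, so TSO capability is toggled in step with the MTU; other
         * chips just switch the jumbo RX ring on or off.
         */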
13762         if (new_mtu > ETH_DATA_LEN) {
13763                 if (tg3_flag(tp, 5780_CLASS)) {
13764                         netdev_update_features(dev);
13765                         tg3_flag_clear(tp, TSO_CAPABLE);
13766                 } else {
13767                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
13768                 }
13769         } else {
13770                 if (tg3_flag(tp, 5780_CLASS)) {
13771                         tg3_flag_set(tp, TSO_CAPABLE);
13772                         netdev_update_features(dev);
13773                 }
13774                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
13775         }
13776 }
13777
13778 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
13779 {
13780         struct tg3 *tp = netdev_priv(dev);
13781         int err;
13782         bool reset_phy = false;
13783
13784         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
13785                 return -EINVAL;
13786
13787         if (!netif_running(dev)) {
13788                 /* We'll just catch it later when the
13789                  * device is brought up.
13790                  */
13791                 tg3_set_mtu(dev, tp, new_mtu);
13792                 return 0;
13793         }
13794
13795         tg3_phy_stop(tp);
13796
13797         tg3_netif_stop(tp);
13798
13799         tg3_set_mtu(dev, tp, new_mtu);
13800
13801         tg3_full_lock(tp, 1);
13802
13803         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
13804
13805         /* Reset PHY, otherwise the read DMA engine will be in a mode that
13806          * breaks all requests to 256 bytes.
13807          */
13808         if (tg3_asic_rev(tp) == ASIC_REV_57766)
13809                 reset_phy = true;
13810
13811         err = tg3_restart_hw(tp, reset_phy);
13812
13813         if (!err)
13814                 tg3_netif_start(tp);
13815
13816         tg3_full_unlock(tp);
13817
13818         if (!err)
13819                 tg3_phy_start(tp);
13820
13821         return err;
13822 }
13823
13824 static const struct net_device_ops tg3_netdev_ops = {
13825         .ndo_open               = tg3_open,
13826         .ndo_stop               = tg3_close,
13827         .ndo_start_xmit         = tg3_start_xmit,
13828         .ndo_get_stats64        = tg3_get_stats64,
13829         .ndo_validate_addr      = eth_validate_addr,
13830         .ndo_set_rx_mode        = tg3_set_rx_mode,
13831         .ndo_set_mac_address    = tg3_set_mac_addr,
13832         .ndo_do_ioctl           = tg3_ioctl,
13833         .ndo_tx_timeout         = tg3_tx_timeout,
13834         .ndo_change_mtu         = tg3_change_mtu,
13835         .ndo_fix_features       = tg3_fix_features,
13836         .ndo_set_features       = tg3_set_features,
13837 #ifdef CONFIG_NET_POLL_CONTROLLER
13838         .ndo_poll_controller    = tg3_poll_controller,
13839 #endif
13840 };
13841
13842 static void tg3_get_eeprom_size(struct tg3 *tp)
13843 {
13844         u32 cursize, val, magic;
13845
13846         tp->nvram_size = EEPROM_CHIP_SIZE;
13847
13848         if (tg3_nvram_read(tp, 0, &magic) != 0)
13849                 return;
13850
13851         if ((magic != TG3_EEPROM_MAGIC) &&
13852             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
13853             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
13854                 return;
13855
13856         /*
13857          * Size the chip by reading offsets at increasing powers of two.
13858          * When we encounter our validation signature, we know the addressing
13859          * has wrapped around, and thus have our chip size.
13860          */
13861         cursize = 0x10;
13862
13863         while (cursize < tp->nvram_size) {
13864                 if (tg3_nvram_read(tp, cursize, &val) != 0)
13865                         return;
13866
13867                 if (val == magic)
13868                         break;
13869
13870                 cursize <<= 1;
13871         }
13872
13873         tp->nvram_size = cursize;
13874 }
13875
13876 static void tg3_get_nvram_size(struct tg3 *tp)
13877 {
13878         u32 val;
13879
13880         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
13881                 return;
13882
13883         /* Selfboot format */
13884         if (val != TG3_EEPROM_MAGIC) {
13885                 tg3_get_eeprom_size(tp);
13886                 return;
13887         }
13888
13889         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
13890                 if (val != 0) {
13891                         /* This is confusing.  We want to operate on the
13892                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
13893                          * call will read from NVRAM and byteswap the data
13894                          * according to the byteswapping settings for all
13895                          * other register accesses.  This ensures the data we
13896                          * want will always reside in the lower 16-bits.
13897                          * However, the data in NVRAM is in LE format, which
13898                          * means the data from the NVRAM read will always be
13899                          * opposite the endianness of the CPU.  The 16-bit
13900                          * byteswap then brings the data to CPU endianness.
13901                          */
13902                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
13903                         return;
13904                 }
13905         }
13906         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
13907 }
13908
13909 static void tg3_get_nvram_info(struct tg3 *tp)
13910 {
13911         u32 nvcfg1;
13912
13913         nvcfg1 = tr32(NVRAM_CFG1);
13914         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
13915                 tg3_flag_set(tp, FLASH);
13916         } else {
13917                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
13918                 tw32(NVRAM_CFG1, nvcfg1);
13919         }
13920
13921         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
13922             tg3_flag(tp, 5780_CLASS)) {
13923                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
13924                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
13925                         tp->nvram_jedecnum = JEDEC_ATMEL;
13926                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13927                         tg3_flag_set(tp, NVRAM_BUFFERED);
13928                         break;
13929                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
13930                         tp->nvram_jedecnum = JEDEC_ATMEL;
13931                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
13932                         break;
13933                 case FLASH_VENDOR_ATMEL_EEPROM:
13934                         tp->nvram_jedecnum = JEDEC_ATMEL;
13935                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
13936                         tg3_flag_set(tp, NVRAM_BUFFERED);
13937                         break;
13938                 case FLASH_VENDOR_ST:
13939                         tp->nvram_jedecnum = JEDEC_ST;
13940                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
13941                         tg3_flag_set(tp, NVRAM_BUFFERED);
13942                         break;
13943                 case FLASH_VENDOR_SAIFUN:
13944                         tp->nvram_jedecnum = JEDEC_SAIFUN;
13945                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
13946                         break;
13947                 case FLASH_VENDOR_SST_SMALL:
13948                 case FLASH_VENDOR_SST_LARGE:
13949                         tp->nvram_jedecnum = JEDEC_SST;
13950                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
13951                         break;
13952                 }
13953         } else {
13954                 tp->nvram_jedecnum = JEDEC_ATMEL;
13955                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
13956                 tg3_flag_set(tp, NVRAM_BUFFERED);
13957         }
13958 }
13959
13960 static void tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
13961 {
13962         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
13963         case FLASH_5752PAGE_SIZE_256:
13964                 tp->nvram_pagesize = 256;
13965                 break;
13966         case FLASH_5752PAGE_SIZE_512:
13967                 tp->nvram_pagesize = 512;
13968                 break;
13969         case FLASH_5752PAGE_SIZE_1K:
13970                 tp->nvram_pagesize = 1024;
13971                 break;
13972         case FLASH_5752PAGE_SIZE_2K:
13973                 tp->nvram_pagesize = 2048;
13974                 break;
13975         case FLASH_5752PAGE_SIZE_4K:
13976                 tp->nvram_pagesize = 4096;
13977                 break;
13978         case FLASH_5752PAGE_SIZE_264:
13979                 tp->nvram_pagesize = 264;
13980                 break;
13981         case FLASH_5752PAGE_SIZE_528:
13982                 tp->nvram_pagesize = 528;
13983                 break;
13984         }
13985 }
13986
13987 static void tg3_get_5752_nvram_info(struct tg3 *tp)
13988 {
13989         u32 nvcfg1;
13990
13991         nvcfg1 = tr32(NVRAM_CFG1);
13992
13993         /* NVRAM protection for TPM */
13994         if (nvcfg1 & (1 << 27))
13995                 tg3_flag_set(tp, PROTECTED_NVRAM);
13996
13997         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
13998         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
13999         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
14000                 tp->nvram_jedecnum = JEDEC_ATMEL;
14001                 tg3_flag_set(tp, NVRAM_BUFFERED);
14002                 break;
14003         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
14004                 tp->nvram_jedecnum = JEDEC_ATMEL;
14005                 tg3_flag_set(tp, NVRAM_BUFFERED);
14006                 tg3_flag_set(tp, FLASH);
14007                 break;
14008         case FLASH_5752VENDOR_ST_M45PE10:
14009         case FLASH_5752VENDOR_ST_M45PE20:
14010         case FLASH_5752VENDOR_ST_M45PE40:
14011                 tp->nvram_jedecnum = JEDEC_ST;
14012                 tg3_flag_set(tp, NVRAM_BUFFERED);
14013                 tg3_flag_set(tp, FLASH);
14014                 break;
14015         }
14016
14017         if (tg3_flag(tp, FLASH)) {
14018                 tg3_nvram_get_pagesize(tp, nvcfg1);
14019         } else {
14020                 /* For eeprom, set pagesize to maximum eeprom size */
14021                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
14022
14023                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
14024                 tw32(NVRAM_CFG1, nvcfg1);
14025         }
14026 }
14027
static void tg3_get_5755_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
	case FLASH_5755VENDOR_ATMEL_FLASH_5:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
		    nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
			tp->nvram_size = (protect ? 0x3e200 :
					  TG3_NVRAM_SIZE_512KB);
		else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ? 0x1f200 :
					  TG3_NVRAM_SIZE_128KB);
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_128KB);
		else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_64KB :
					  TG3_NVRAM_SIZE_256KB);
		else
			tp->nvram_size = (protect ?
					  TG3_NVRAM_SIZE_128KB :
					  TG3_NVRAM_SIZE_512KB);
		break;
	}
}

static void tg3_get_5787_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		break;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_5755VENDOR_ATMEL_FLASH_1:
	case FLASH_5755VENDOR_ATMEL_FLASH_2:
	case FLASH_5755VENDOR_ATMEL_FLASH_3:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 264;
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}
}

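/* 5761-class NVRAM detection.  With the TPM protect bit set, the usable
 * size comes from the NVRAM_ADDR_LOCKOUT register; otherwise it is derived
 * from the strapped flash device type.
 */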
static void tg3_get_5761_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, protect = 0;

	nvcfg1 = tr32(NVRAM_CFG1);

	/* NVRAM protection for TPM */
	if (nvcfg1 & (1 << 27)) {
		tg3_flag_set(tp, PROTECTED_NVRAM);
		protect = 1;
	}

	nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
	switch (nvcfg1) {
	case FLASH_5761VENDOR_ATMEL_ADB021D:
	case FLASH_5761VENDOR_ATMEL_ADB041D:
	case FLASH_5761VENDOR_ATMEL_ADB081D:
	case FLASH_5761VENDOR_ATMEL_ADB161D:
	case FLASH_5761VENDOR_ATMEL_MDB021D:
	case FLASH_5761VENDOR_ATMEL_MDB041D:
	case FLASH_5761VENDOR_ATMEL_MDB081D:
	case FLASH_5761VENDOR_ATMEL_MDB161D:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
		tp->nvram_pagesize = 256;
		break;
	case FLASH_5761VENDOR_ST_A_M45PE20:
	case FLASH_5761VENDOR_ST_A_M45PE40:
	case FLASH_5761VENDOR_ST_A_M45PE80:
	case FLASH_5761VENDOR_ST_A_M45PE16:
	case FLASH_5761VENDOR_ST_M_M45PE20:
	case FLASH_5761VENDOR_ST_M_M45PE40:
	case FLASH_5761VENDOR_ST_M_M45PE80:
	case FLASH_5761VENDOR_ST_M_M45PE16:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);
		tp->nvram_pagesize = 256;
		break;
	}

	if (protect) {
		tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
	} else {
		switch (nvcfg1) {
		case FLASH_5761VENDOR_ATMEL_ADB161D:
		case FLASH_5761VENDOR_ATMEL_MDB161D:
		case FLASH_5761VENDOR_ST_A_M45PE16:
		case FLASH_5761VENDOR_ST_M_M45PE16:
			tp->nvram_size = TG3_NVRAM_SIZE_2MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB081D:
		case FLASH_5761VENDOR_ATMEL_MDB081D:
		case FLASH_5761VENDOR_ST_A_M45PE80:
		case FLASH_5761VENDOR_ST_M_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB041D:
		case FLASH_5761VENDOR_ATMEL_MDB041D:
		case FLASH_5761VENDOR_ST_A_M45PE40:
		case FLASH_5761VENDOR_ST_M_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5761VENDOR_ATMEL_ADB021D:
		case FLASH_5761VENDOR_ATMEL_MDB021D:
		case FLASH_5761VENDOR_ST_A_M45PE20:
		case FLASH_5761VENDOR_ST_M_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		}
	}
}

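/* The 5906 always uses a buffered Atmel EEPROM, so there is no CFG1
 * strapping to decode.
 */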
static void tg3_get_5906_nvram_info(struct tg3 *tp)
{
	tp->nvram_jedecnum = JEDEC_ATMEL;
	tg3_flag_set(tp, NVRAM_BUFFERED);
	tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
}

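/* 57780-class NVRAM detection.  An unrecognized pinstrap marks the device
 * as having no NVRAM at all, and address translation is only kept for the
 * 264/528-byte page sizes used by the Atmel AT45DB parts.
 */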
static void tg3_get_57780_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
	case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
	case FLASH_57780VENDOR_ATMEL_AT45DB011D:
	case FLASH_57780VENDOR_ATMEL_AT45DB011B:
	case FLASH_57780VENDOR_ATMEL_AT45DB021D:
	case FLASH_57780VENDOR_ATMEL_AT45DB021B:
	case FLASH_57780VENDOR_ATMEL_AT45DB041D:
	case FLASH_57780VENDOR_ATMEL_AT45DB041B:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
		case FLASH_57780VENDOR_ATMEL_AT45DB011D:
		case FLASH_57780VENDOR_ATMEL_AT45DB011B:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB021D:
		case FLASH_57780VENDOR_ATMEL_AT45DB021B:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_57780VENDOR_ATMEL_AT45DB041D:
		case FLASH_57780VENDOR_ATMEL_AT45DB041B:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	case FLASH_5752VENDOR_ST_M45PE10:
	case FLASH_5752VENDOR_ST_M45PE20:
	case FLASH_5752VENDOR_ST_M45PE40:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5752VENDOR_ST_M45PE10:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5752VENDOR_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

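/* 5717/5719 NVRAM detection.  Pinstraps that can map to more than one part
 * size leave tp->nvram_size at zero here so that the size is probed later
 * with tg3_nvram_get_size().
 */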
static void tg3_get_5717_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1;

	nvcfg1 = tr32(NVRAM_CFG1);

	switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
	case FLASH_5717VENDOR_ATMEL_EEPROM:
	case FLASH_5717VENDOR_MICRO_EEPROM:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		return;
	case FLASH_5717VENDOR_ATMEL_MDB011D:
	case FLASH_5717VENDOR_ATMEL_ADB011B:
	case FLASH_5717VENDOR_ATMEL_ADB011D:
	case FLASH_5717VENDOR_ATMEL_MDB021D:
	case FLASH_5717VENDOR_ATMEL_ADB021B:
	case FLASH_5717VENDOR_ATMEL_ADB021D:
	case FLASH_5717VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ATMEL_MDB021D:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ATMEL_ADB021B:
		case FLASH_5717VENDOR_ATMEL_ADB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5717VENDOR_ST_M_M25PE10:
	case FLASH_5717VENDOR_ST_A_M25PE10:
	case FLASH_5717VENDOR_ST_M_M45PE10:
	case FLASH_5717VENDOR_ST_A_M45PE10:
	case FLASH_5717VENDOR_ST_M_M25PE20:
	case FLASH_5717VENDOR_ST_A_M25PE20:
	case FLASH_5717VENDOR_ST_M_M45PE20:
	case FLASH_5717VENDOR_ST_A_M45PE20:
	case FLASH_5717VENDOR_ST_25USPT:
	case FLASH_5717VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
		case FLASH_5717VENDOR_ST_M_M25PE20:
		case FLASH_5717VENDOR_ST_M_M45PE20:
			/* Detect size with tg3_nvram_get_size() */
			break;
		case FLASH_5717VENDOR_ST_A_M25PE20:
		case FLASH_5717VENDOR_ST_A_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		default:
			tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
}

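/* 5720/5762 NVRAM detection.  On the 5762, supported pinstrap values are
 * first remapped to their 5720 equivalents so the common decode below can
 * be shared; a final read of word 0 then verifies that the NVRAM really
 * holds a firmware image.
 */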
static void tg3_get_5720_nvram_info(struct tg3 *tp)
{
	u32 nvcfg1, nvmpinstrp;

	nvcfg1 = tr32(NVRAM_CFG1);
	nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		if (!(nvcfg1 & NVRAM_CFG1_5762VENDOR_MASK)) {
			tg3_flag_set(tp, NO_NVRAM);
			return;
		}

		switch (nvmpinstrp) {
		case FLASH_5762_EEPROM_HD:
			nvmpinstrp = FLASH_5720_EEPROM_HD;
			break;
		case FLASH_5762_EEPROM_LD:
			nvmpinstrp = FLASH_5720_EEPROM_LD;
			break;
		case FLASH_5720VENDOR_M_ST_M45PE20:
			/* This pinstrap supports multiple sizes, so force it
			 * to read the actual size from location 0xf0.
			 */
			nvmpinstrp = FLASH_5720VENDOR_ST_45USPT;
			break;
		}
	}

	switch (nvmpinstrp) {
	case FLASH_5720_EEPROM_HD:
	case FLASH_5720_EEPROM_LD:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);

		nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
		tw32(NVRAM_CFG1, nvcfg1);
		if (nvmpinstrp == FLASH_5720_EEPROM_HD)
			tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
		else
			tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
		return;
	case FLASH_5720VENDOR_M_ATMEL_DB011D:
	case FLASH_5720VENDOR_A_ATMEL_DB011B:
	case FLASH_5720VENDOR_A_ATMEL_DB011D:
	case FLASH_5720VENDOR_M_ATMEL_DB021D:
	case FLASH_5720VENDOR_A_ATMEL_DB021B:
	case FLASH_5720VENDOR_A_ATMEL_DB021D:
	case FLASH_5720VENDOR_M_ATMEL_DB041D:
	case FLASH_5720VENDOR_A_ATMEL_DB041B:
	case FLASH_5720VENDOR_A_ATMEL_DB041D:
	case FLASH_5720VENDOR_M_ATMEL_DB081D:
	case FLASH_5720VENDOR_A_ATMEL_DB081D:
	case FLASH_5720VENDOR_ATMEL_45USPT:
		tp->nvram_jedecnum = JEDEC_ATMEL;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ATMEL_DB021D:
		case FLASH_5720VENDOR_A_ATMEL_DB021B:
		case FLASH_5720VENDOR_A_ATMEL_DB021D:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB041D:
		case FLASH_5720VENDOR_A_ATMEL_DB041B:
		case FLASH_5720VENDOR_A_ATMEL_DB041D:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ATMEL_DB081D:
		case FLASH_5720VENDOR_A_ATMEL_DB081D:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	case FLASH_5720VENDOR_M_ST_M25PE10:
	case FLASH_5720VENDOR_M_ST_M45PE10:
	case FLASH_5720VENDOR_A_ST_M25PE10:
	case FLASH_5720VENDOR_A_ST_M45PE10:
	case FLASH_5720VENDOR_M_ST_M25PE20:
	case FLASH_5720VENDOR_M_ST_M45PE20:
	case FLASH_5720VENDOR_A_ST_M25PE20:
	case FLASH_5720VENDOR_A_ST_M45PE20:
	case FLASH_5720VENDOR_M_ST_M25PE40:
	case FLASH_5720VENDOR_M_ST_M45PE40:
	case FLASH_5720VENDOR_A_ST_M25PE40:
	case FLASH_5720VENDOR_A_ST_M45PE40:
	case FLASH_5720VENDOR_M_ST_M25PE80:
	case FLASH_5720VENDOR_M_ST_M45PE80:
	case FLASH_5720VENDOR_A_ST_M25PE80:
	case FLASH_5720VENDOR_A_ST_M45PE80:
	case FLASH_5720VENDOR_ST_25USPT:
	case FLASH_5720VENDOR_ST_45USPT:
		tp->nvram_jedecnum = JEDEC_ST;
		tg3_flag_set(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, FLASH);

		switch (nvmpinstrp) {
		case FLASH_5720VENDOR_M_ST_M25PE20:
		case FLASH_5720VENDOR_M_ST_M45PE20:
		case FLASH_5720VENDOR_A_ST_M25PE20:
		case FLASH_5720VENDOR_A_ST_M45PE20:
			tp->nvram_size = TG3_NVRAM_SIZE_256KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE40:
		case FLASH_5720VENDOR_M_ST_M45PE40:
		case FLASH_5720VENDOR_A_ST_M25PE40:
		case FLASH_5720VENDOR_A_ST_M45PE40:
			tp->nvram_size = TG3_NVRAM_SIZE_512KB;
			break;
		case FLASH_5720VENDOR_M_ST_M25PE80:
		case FLASH_5720VENDOR_M_ST_M45PE80:
		case FLASH_5720VENDOR_A_ST_M25PE80:
		case FLASH_5720VENDOR_A_ST_M45PE80:
			tp->nvram_size = TG3_NVRAM_SIZE_1MB;
			break;
		default:
			if (tg3_asic_rev(tp) != ASIC_REV_5762)
				tp->nvram_size = TG3_NVRAM_SIZE_128KB;
			break;
		}
		break;
	default:
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tg3_nvram_get_pagesize(tp, nvcfg1);
	if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
		tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);

	if (tg3_asic_rev(tp) == ASIC_REV_5762) {
		u32 val;

		if (tg3_nvram_read(tp, 0, &val))
			return;

		if (val != TG3_EEPROM_MAGIC &&
		    (val & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW)
			tg3_flag_set(tp, NO_NVRAM);
	}
}

/* Chips other than 5700/5701 use the NVRAM for fetching info. */
static void tg3_nvram_init(struct tg3 *tp)
{
	if (tg3_flag(tp, IS_SSB_CORE)) {
		/* No NVRAM or EEPROM on the SSB Broadcom GigE core. */
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);
		tg3_flag_set(tp, NO_NVRAM);
		return;
	}

	tw32_f(GRC_EEPROM_ADDR,
	     (EEPROM_ADDR_FSM_RESET |
	      (EEPROM_DEFAULT_CLOCK_PERIOD <<
	       EEPROM_ADDR_CLKPERD_SHIFT)));

	msleep(1);

	/* Enable seeprom accesses. */
	tw32_f(GRC_LOCAL_CTRL,
	     tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
	udelay(100);

	if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
	    tg3_asic_rev(tp) != ASIC_REV_5701) {
		tg3_flag_set(tp, NVRAM);

		if (tg3_nvram_lock(tp)) {
			netdev_warn(tp->dev,
				    "Cannot get nvram lock, %s failed\n",
				    __func__);
			return;
		}
		tg3_enable_nvram_access(tp);

		tp->nvram_size = 0;

		if (tg3_asic_rev(tp) == ASIC_REV_5752)
			tg3_get_5752_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5755)
			tg3_get_5755_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5787 ||
			 tg3_asic_rev(tp) == ASIC_REV_5784 ||
			 tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_get_5787_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5761)
			tg3_get_5761_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5906)
			tg3_get_5906_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_57780 ||
			 tg3_flag(tp, 57765_CLASS))
			tg3_get_57780_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
			 tg3_asic_rev(tp) == ASIC_REV_5719)
			tg3_get_5717_nvram_info(tp);
		else if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
			 tg3_asic_rev(tp) == ASIC_REV_5762)
			tg3_get_5720_nvram_info(tp);
		else
			tg3_get_nvram_info(tp);

		if (tp->nvram_size == 0)
			tg3_get_nvram_size(tp);

		tg3_disable_nvram_access(tp);
		tg3_nvram_unlock(tp);

	} else {
		tg3_flag_clear(tp, NVRAM);
		tg3_flag_clear(tp, NVRAM_BUFFERED);

		tg3_get_eeprom_size(tp);
	}
}

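/* Some early boards do not record a usable PHY ID in NVRAM; map their PCI
 * subsystem IDs to the PHY that was actually fitted.
 */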
struct subsys_tbl_ent {
	u16 subsys_vendor, subsys_devid;
	u32 phy_id;
};

static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
	/* Broadcom boards. */
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
	{ TG3PCI_SUBVENDOR_ID_BROADCOM,
	  TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },

	/* 3com boards. */
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_3COM,
	  TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },

	/* DELL boards. */
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
	{ TG3PCI_SUBVENDOR_ID_DELL,
	  TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },

	/* Compaq boards. */
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
	{ TG3PCI_SUBVENDOR_ID_COMPAQ,
	  TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },

	/* IBM boards. */
	{ TG3PCI_SUBVENDOR_ID_IBM,
	  TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
};

static struct subsys_tbl_ent *tg3_lookup_by_subsys(struct tg3 *tp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
		if ((subsys_id_to_phy_id[i].subsys_vendor ==
		     tp->pdev->subsystem_vendor) &&
		    (subsys_id_to_phy_id[i].subsys_devid ==
		     tp->pdev->subsystem_device))
			return &subsys_id_to_phy_id[i];
	}
	return NULL;
}

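/* Pull the board configuration that bootcode has staged in NIC SRAM:
 * PHY ID, LED mode, WoL capability and the various ASF/APE/RGMII feature
 * bits.
 */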
static void tg3_get_eeprom_hw_cfg(struct tg3 *tp)
{
	u32 val;

	tp->phy_id = TG3_PHY_ID_INVALID;
	tp->led_ctrl = LED_CTRL_MODE_PHY_1;

	/* Assume an onboard device that is WOL-capable by default. */
	tg3_flag_set(tp, EEPROM_WRITE_PROT);
	tg3_flag_set(tp, WOL_CAP);

	if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}
		val = tr32(VCPU_CFGSHDW);
		if (val & VCPU_CFGSHDW_ASPM_DBNC)
			tg3_flag_set(tp, ASPM_WORKAROUND);
		if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
		    (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}
		goto done;
	}

	tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
	if (val == NIC_SRAM_DATA_SIG_MAGIC) {
		u32 nic_cfg, led_cfg;
		u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
		int eeprom_phy_serdes = 0;

		tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
		tp->nic_sram_data_cfg = nic_cfg;

		tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
		ver >>= NIC_SRAM_DATA_VER_SHIFT;
		if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
		    tg3_asic_rev(tp) != ASIC_REV_5701 &&
		    tg3_asic_rev(tp) != ASIC_REV_5703 &&
		    (ver > 0) && (ver < 0x100))
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);

		if (tg3_asic_rev(tp) == ASIC_REV_5785)
			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);

		if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
		    NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
			eeprom_phy_serdes = 1;

		tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
		if (nic_phy_id != 0) {
			u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
			u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;

			eeprom_phy_id  = (id1 >> 16) << 10;
			eeprom_phy_id |= (id2 & 0xfc00) << 16;
			eeprom_phy_id |= (id2 & 0x03ff) <<  0;
		} else
			eeprom_phy_id = 0;

		tp->phy_id = eeprom_phy_id;
		if (eeprom_phy_serdes) {
			if (!tg3_flag(tp, 5705_PLUS))
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
			else
				tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
		}

		if (tg3_flag(tp, 5750_PLUS))
			led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
				    SHASTA_EXT_LED_MODE_MASK);
		else
			led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;

		switch (led_cfg) {
		default:
		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;
			break;

		case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
			tp->led_ctrl = LED_CTRL_MODE_MAC;

			/* Default to PHY_1_MODE if 0 (MAC_MODE) is read,
			 * as happens with some older 5700/5701 bootcode.
			 */
			if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
			    tg3_asic_rev(tp) == ASIC_REV_5701)
				tp->led_ctrl = LED_CTRL_MODE_PHY_1;

			break;

		case SHASTA_EXT_LED_SHARED:
			tp->led_ctrl = LED_CTRL_MODE_SHARED;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0 &&
			    tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A1)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		case SHASTA_EXT_LED_MAC:
			tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
			break;

		case SHASTA_EXT_LED_COMBO:
			tp->led_ctrl = LED_CTRL_MODE_COMBO;
			if (tg3_chip_rev_id(tp) != CHIPREV_ID_5750_A0)
				tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
						 LED_CTRL_MODE_PHY_2);
			break;

		}

		if ((tg3_asic_rev(tp) == ASIC_REV_5700 ||
		     tg3_asic_rev(tp) == ASIC_REV_5701) &&
		    tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
			tp->led_ctrl = LED_CTRL_MODE_PHY_2;

		if (tg3_chip_rev(tp) == CHIPREV_5784_AX)
			tp->led_ctrl = LED_CTRL_MODE_PHY_1;

		if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
			tg3_flag_set(tp, EEPROM_WRITE_PROT);
			if ((tp->pdev->subsystem_vendor ==
			     PCI_VENDOR_ID_ARIMA) &&
			    (tp->pdev->subsystem_device == 0x205a ||
			     tp->pdev->subsystem_device == 0x2063))
				tg3_flag_clear(tp, EEPROM_WRITE_PROT);
		} else {
			tg3_flag_clear(tp, EEPROM_WRITE_PROT);
			tg3_flag_set(tp, IS_NIC);
		}

		if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
			tg3_flag_set(tp, ENABLE_ASF);
			if (tg3_flag(tp, 5750_PLUS))
				tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
		}

		if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
		    tg3_flag(tp, 5750_PLUS))
			tg3_flag_set(tp, ENABLE_APE);

		if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
		    !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
			tg3_flag_clear(tp, WOL_CAP);

		if (tg3_flag(tp, WOL_CAP) &&
		    (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
			tg3_flag_set(tp, WOL_ENABLE);
			device_set_wakeup_enable(&tp->pdev->dev, true);
		}

		if (cfg2 & (1 << 17))
			tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;

		/* Serdes signal pre-emphasis in register 0x590 is set by
		 * the bootcode if bit 18 is set.
		 */
		if (cfg2 & (1 << 18))
			tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;

		if ((tg3_flag(tp, 57765_PLUS) ||
		     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
		      tg3_chip_rev(tp) != CHIPREV_5784_AX)) &&
		    (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
			tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;

		if (tg3_flag(tp, PCI_EXPRESS)) {
			u32 cfg3;

			tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
			if (tg3_asic_rev(tp) != ASIC_REV_5785 &&
			    !tg3_flag(tp, 57765_PLUS) &&
			    (cfg3 & NIC_SRAM_ASPM_DEBOUNCE))
				tg3_flag_set(tp, ASPM_WORKAROUND);
			if (cfg3 & NIC_SRAM_LNK_FLAP_AVOID)
				tp->phy_flags |= TG3_PHYFLG_KEEP_LINK_ON_PWRDN;
			if (cfg3 & NIC_SRAM_1G_ON_VAUX_OK)
				tp->phy_flags |= TG3_PHYFLG_1G_ON_VAUX_OK;
		}

		if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
			tg3_flag_set(tp, RGMII_INBAND_DISABLE);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
		if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
			tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
	}
done:
	if (tg3_flag(tp, WOL_CAP))
		device_set_wakeup_enable(&tp->pdev->dev,
					 tg3_flag(tp, WOL_ENABLE));
	else
		device_set_wakeup_capable(&tp->pdev->dev, false);
}

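/* Read one 32-bit word from the APE OTP region.  The NVRAM lock is held
 * across the access, and command completion is polled for up to 1 ms.
 */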
static int tg3_ape_otp_read(struct tg3 *tp, u32 offset, u32 *val)
{
	int i, err;
	u32 val2, off = offset * 8;

	err = tg3_nvram_lock(tp);
	if (err)
		return err;

	tg3_ape_write32(tp, TG3_APE_OTP_ADDR, off | APE_OTP_ADDR_CPU_ENABLE);
	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, APE_OTP_CTRL_PROG_EN |
			APE_OTP_CTRL_CMD_RD | APE_OTP_CTRL_START);
	tg3_ape_read32(tp, TG3_APE_OTP_CTRL);
	udelay(10);

	for (i = 0; i < 100; i++) {
		val2 = tg3_ape_read32(tp, TG3_APE_OTP_STATUS);
		if (val2 & APE_OTP_STATUS_CMD_DONE) {
			*val = tg3_ape_read32(tp, TG3_APE_OTP_RD_DATA);
			break;
		}
		udelay(10);
	}

	tg3_ape_write32(tp, TG3_APE_OTP_CTRL, 0);

	tg3_nvram_unlock(tp);
	if (val2 & APE_OTP_STATUS_CMD_DONE)
		return 0;

	return -EBUSY;
}

static int tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
{
	int i;
	u32 val;

	tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
	tw32(OTP_CTRL, cmd);

	/* Wait for up to 1 ms for command to execute. */
	for (i = 0; i < 100; i++) {
		val = tr32(OTP_STATUS);
		if (val & OTP_STATUS_CMD_DONE)
			break;
		udelay(10);
	}

	return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
}

/* Read the gphy configuration from the OTP region of the chip.  The gphy
 * configuration is a 32-bit value that straddles the alignment boundary.
 * We do two 32-bit reads and then shift and merge the results.
 */
static u32 tg3_read_otp_phycfg(struct tg3 *tp)
{
	u32 bhalf_otp, thalf_otp;

	tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
		return 0;

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	thalf_otp = tr32(OTP_READ_DATA);

	tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);

	if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
		return 0;

	bhalf_otp = tr32(OTP_READ_DATA);

	return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
}

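/* Start from a full autoneg advertisement: gigabit unless the PHY is
 * 10/100-only, plus 10/100 and TP for copper, or FIBRE for the serdes
 * flavours.
 */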
static void tg3_phy_init_link_config(struct tg3 *tp)
{
	u32 adv = ADVERTISED_Autoneg;

	if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		adv |= ADVERTISED_1000baseT_Half |
		       ADVERTISED_1000baseT_Full;

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
		adv |= ADVERTISED_100baseT_Half |
		       ADVERTISED_100baseT_Full |
		       ADVERTISED_10baseT_Half |
		       ADVERTISED_10baseT_Full |
		       ADVERTISED_TP;
	else
		adv |= ADVERTISED_FIBRE;

	tp->link_config.advertising = adv;
	tp->link_config.speed = SPEED_UNKNOWN;
	tp->link_config.duplex = DUPLEX_UNKNOWN;
	tp->link_config.autoneg = AUTONEG_ENABLE;
	tp->link_config.active_speed = SPEED_UNKNOWN;
	tp->link_config.active_duplex = DUPLEX_UNKNOWN;

	tp->old_link = -1;
}

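/* Identify the PHY.  Preference order is the ID read over MDIO (skipped
 * when ASF/APE firmware may own the PHY), then the ID recorded by
 * tg3_get_eeprom_hw_cfg(), then the hard-coded subsystem-ID table.
 */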
static int tg3_phy_probe(struct tg3 *tp)
{
	u32 hw_phy_id_1, hw_phy_id_2;
	u32 hw_phy_id, hw_phy_id_masked;
	int err;

	/* Flow control autonegotiation is the default behavior. */
	tg3_flag_set(tp, PAUSE_AUTONEG);
	tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;

	if (tg3_flag(tp, ENABLE_APE)) {
		switch (tp->pci_fn) {
		case 0:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY0;
			break;
		case 1:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY1;
			break;
		case 2:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY2;
			break;
		case 3:
			tp->phy_ape_lock = TG3_APE_LOCK_PHY3;
			break;
		}
	}

	if (!tg3_flag(tp, ENABLE_ASF) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
		tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
				   TG3_PHYFLG_KEEP_LINK_ON_PWRDN);

	if (tg3_flag(tp, USE_PHYLIB))
		return tg3_phy_init(tp);

	/* Reading the PHY ID register can conflict with ASF
	 * firmware access to the PHY hardware.
	 */
	err = 0;
	if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
		hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
	} else {
		/* Now read the physical PHY_ID from the chip and verify
		 * that it is sane.  If it doesn't look good, we fall back
		 * to the PHY ID found in the eeprom area, and failing that
		 * to the hard-coded subsys device table.
		 */
		err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
		err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);

		hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
		hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
		hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;

		hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
	}

	if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
		tp->phy_id = hw_phy_id;
		if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
			tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		else
			tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
	} else {
		if (tp->phy_id != TG3_PHY_ID_INVALID) {
			/* Do nothing, phy ID already set up in
			 * tg3_get_eeprom_hw_cfg().
			 */
		} else {
			struct subsys_tbl_ent *p;

			/* No eeprom signature?  Try the hardcoded
			 * subsys device table.
			 */
			p = tg3_lookup_by_subsys(tp);
			if (p) {
				tp->phy_id = p->phy_id;
			} else if (!tg3_flag(tp, IS_SSB_CORE)) {
				/* So far we have seen the IDs 0xbc050cd0,
				 * 0xbc050f80 and 0xbc050c30 on devices
				 * connected to a BCM4785, and there are
				 * probably more.  For now, just assume that
				 * the phy is supported when it is connected
				 * to an SSB core.
				 */
				return -ENODEV;
			}

			if (!tp->phy_id ||
			    tp->phy_id == TG3_PHY_ID_BCM8002)
				tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
		}
	}

	if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    (tg3_asic_rev(tp) == ASIC_REV_5719 ||
	     tg3_asic_rev(tp) == ASIC_REV_5720 ||
	     tg3_asic_rev(tp) == ASIC_REV_57766 ||
	     tg3_asic_rev(tp) == ASIC_REV_5762 ||
	     (tg3_asic_rev(tp) == ASIC_REV_5717 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_5717_A0) ||
	     (tg3_asic_rev(tp) == ASIC_REV_57765 &&
	      tg3_chip_rev_id(tp) != CHIPREV_ID_57765_A0)))
		tp->phy_flags |= TG3_PHYFLG_EEE_CAP;

	tg3_phy_init_link_config(tp);

	if (!(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN) &&
	    !(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
	    !tg3_flag(tp, ENABLE_APE) &&
	    !tg3_flag(tp, ENABLE_ASF)) {
		u32 bmsr, dummy;

		/* BMSR link status is latched; read it twice to get the
		 * current state.
		 */
		tg3_readphy(tp, MII_BMSR, &bmsr);
		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
		    (bmsr & BMSR_LSTATUS))
			goto skip_phy_reset;

		err = tg3_phy_reset(tp);
		if (err)
			return err;

		tg3_phy_set_wirespeed(tp);

		if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
			tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
					    tp->link_config.flowctrl);

			tg3_writephy(tp, MII_BMCR,
				     BMCR_ANENABLE | BMCR_ANRESTART);
		}
	}

skip_phy_reset:
	if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
		err = tg3_init_5401phy_dsp(tp);
		if (err)
			return err;

		err = tg3_init_5401phy_dsp(tp);
	}

	return err;
}

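/* Extract the board part number (and, on boards whose VPD MFR_ID is
 * "1028", i.e. Dell, the bootcode version) from the PCI VPD read-only
 * section, falling back to a name derived from the PCI device ID when no
 * usable VPD is found.
 */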
static void tg3_read_vpd(struct tg3 *tp)
{
	u8 *vpd_data;
	unsigned int block_end, rosize, len;
	u32 vpdlen;
	int j, i = 0;

	vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
	if (!vpd_data)
		goto out_no_vpd;

	i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	rosize = pci_vpd_lrdt_size(&vpd_data[i]);
	block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > vpdlen)
		goto out_not_found;

	j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j > 0) {
		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end || len != 4 ||
		    memcmp(&vpd_data[j], "1028", 4))
			goto partno;

		j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
					      PCI_VPD_RO_KEYWORD_VENDOR0);
		if (j < 0)
			goto partno;

		len = pci_vpd_info_field_size(&vpd_data[j]);

		j += PCI_VPD_INFO_FLD_HDR_SIZE;
		if (j + len > block_end)
			goto partno;

		if (len >= sizeof(tp->fw_ver))
			len = sizeof(tp->fw_ver) - 1;
		memset(tp->fw_ver, 0, sizeof(tp->fw_ver));
		snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len,
			 &vpd_data[j]);
	}

partno:
	i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
				      PCI_VPD_RO_KEYWORD_PARTNO);
	if (i < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[i]);

	i += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (len > TG3_BPN_SIZE ||
	    (len + i) > vpdlen)
		goto out_not_found;

	memcpy(tp->board_part_number, &vpd_data[i], len);

out_not_found:
	kfree(vpd_data);
	if (tp->board_part_number[0])
		return;

out_no_vpd:
	if (tg3_asic_rev(tp) == ASIC_REV_5717) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
		    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C)
			strcpy(tp->board_part_number, "BCM5717");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
			strcpy(tp->board_part_number, "BCM5718");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57780) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
			strcpy(tp->board_part_number, "BCM57780");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
			strcpy(tp->board_part_number, "BCM57760");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
			strcpy(tp->board_part_number, "BCM57790");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
			strcpy(tp->board_part_number, "BCM57788");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57765) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
			strcpy(tp->board_part_number, "BCM57761");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
			strcpy(tp->board_part_number, "BCM57765");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
			strcpy(tp->board_part_number, "BCM57781");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
			strcpy(tp->board_part_number, "BCM57785");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
			strcpy(tp->board_part_number, "BCM57791");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
			strcpy(tp->board_part_number, "BCM57795");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_57766) {
		if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
			strcpy(tp->board_part_number, "BCM57762");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
			strcpy(tp->board_part_number, "BCM57766");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
			strcpy(tp->board_part_number, "BCM57782");
		else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
			strcpy(tp->board_part_number, "BCM57786");
		else
			goto nomatch;
	} else if (tg3_asic_rev(tp) == ASIC_REV_5906) {
		strcpy(tp->board_part_number, "BCM95906");
	} else {
nomatch:
		strcpy(tp->board_part_number, "none");
	}
}

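/* A firmware image is considered valid when its first word matches
 * 0x0c000000 under the 0xfc000000 mask and the following word is zero.
 */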
static int tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
{
	u32 val;

	if (tg3_nvram_read(tp, offset, &val) ||
	    (val & 0xfc000000) != 0x0c000000 ||
	    tg3_nvram_read(tp, offset + 4, &val) ||
	    val != 0)
		return 0;

	return 1;
}

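/* Recover the bootcode version: newer images embed a printable version
 * string, older ones only a packed major/minor pair.
 */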
static void tg3_read_bc_ver(struct tg3 *tp)
{
	u32 val, offset, start, ver_offset;
	int i, dst_off;
	bool newver = false;

	if (tg3_nvram_read(tp, 0xc, &offset) ||
	    tg3_nvram_read(tp, 0x4, &start))
		return;

	offset = tg3_nvram_logical_addr(tp, offset);

	if (tg3_nvram_read(tp, offset, &val))
		return;

	if ((val & 0xfc000000) == 0x0c000000) {
		if (tg3_nvram_read(tp, offset + 4, &val))
			return;

		if (val == 0)
			newver = true;
	}

	dst_off = strlen(tp->fw_ver);

	if (newver) {
		if (TG3_VER_SIZE - dst_off < 16 ||
		    tg3_nvram_read(tp, offset + 8, &ver_offset))
			return;

		offset = offset + ver_offset - start;
		for (i = 0; i < 16; i += 4) {
			__be32 v;
			if (tg3_nvram_read_be32(tp, offset + i, &v))
				return;

			memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
		}
	} else {
		u32 major, minor;

		if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
			return;

		major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
			TG3_NVM_BCVER_MAJSFT;
		minor = ver_offset & TG3_NVM_BCVER_MINMSK;
		snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
			 "v%d.%02d", major, minor);
	}
}

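/* Hardware selfboot parts keep their version in the HWSB config word as
 * packed major/minor fields.
 */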
static void tg3_read_hwsb_ver(struct tg3 *tp)
{
	u32 val, major, minor;

	/* Use native endian representation */
	if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
		return;

	major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
		TG3_NVM_HWSB_CFG1_MAJSFT;
	minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
		TG3_NVM_HWSB_CFG1_MINSFT;

	snprintf(&tp->fw_ver[0], 32, "sb v%d.%02d", major, minor);
}

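/* Selfboot format 1 images store their edition data (major/minor/build)
 * at a revision-dependent offset; a non-zero build number is reported as
 * a trailing letter ('a' for build 1, and so on).
 */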
15322 static void tg3_read_sb_ver(struct tg3 *tp, u32 val)
15323 {
15324         u32 offset, major, minor, build;
15325
15326         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
15327
15328         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
15329                 return;
15330
15331         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
15332         case TG3_EEPROM_SB_REVISION_0:
15333                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
15334                 break;
15335         case TG3_EEPROM_SB_REVISION_2:
15336                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
15337                 break;
15338         case TG3_EEPROM_SB_REVISION_3:
15339                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
15340                 break;
15341         case TG3_EEPROM_SB_REVISION_4:
15342                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
15343                 break;
15344         case TG3_EEPROM_SB_REVISION_5:
15345                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
15346                 break;
15347         case TG3_EEPROM_SB_REVISION_6:
15348                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
15349                 break;
15350         default:
15351                 return;
15352         }
15353
15354         if (tg3_nvram_read(tp, offset, &val))
15355                 return;
15356
15357         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
15358                 TG3_EEPROM_SB_EDH_BLD_SHFT;
15359         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
15360                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
15361         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
15362
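              /* The version prints as "vM.mm" plus an optional build letter
               * 'a'-'z', so reject minors above two digits and builds
               * above 26.
               */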
15363         if (minor > 99 || build > 26)
15364                 return;
15365
15366         offset = strlen(tp->fw_ver);
15367         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
15368                  " v%d.%02d", major, minor);
15369
15370         if (build > 0) {
15371                 offset = strlen(tp->fw_ver);
15372                 if (offset < TG3_VER_SIZE - 1)
15373                         tp->fw_ver[offset] = 'a' + build - 1;
15374         }
15375 }
15376
15377 static void tg3_read_mgmtfw_ver(struct tg3 *tp)
15378 {
15379         u32 val, offset, start;
15380         int i, vlen;
15381
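              /* Walk the NVRAM directory looking for the ASF init entry;
               * the words around it locate the management firmware image
               * and its version string.
               */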
15382         for (offset = TG3_NVM_DIR_START;
15383              offset < TG3_NVM_DIR_END;
15384              offset += TG3_NVM_DIRENT_SIZE) {
15385                 if (tg3_nvram_read(tp, offset, &val))
15386                         return;
15387
15388                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
15389                         break;
15390         }
15391
15392         if (offset == TG3_NVM_DIR_END)
15393                 return;
15394
15395         if (!tg3_flag(tp, 5705_PLUS))
15396                 start = 0x08000000;
15397         else if (tg3_nvram_read(tp, offset - 4, &start))
15398                 return;
15399
15400         if (tg3_nvram_read(tp, offset + 4, &offset) ||
15401             !tg3_fw_img_is_valid(tp, offset) ||
15402             tg3_nvram_read(tp, offset + 8, &val))
15403                 return;
15404
15405         offset += val - start;
15406
15407         vlen = strlen(tp->fw_ver);
15408
15409         tp->fw_ver[vlen++] = ',';
15410         tp->fw_ver[vlen++] = ' ';
15411
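              /* Copy up to 16 bytes of version string, truncating the last
               * word if it would overflow fw_ver; the caller NUL-terminates
               * the buffer.
               */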
15412         for (i = 0; i < 4; i++) {
15413                 __be32 v;
15414                 if (tg3_nvram_read_be32(tp, offset, &v))
15415                         return;
15416
15417                 offset += sizeof(v);
15418
15419                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
15420                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
15421                         break;
15422                 }
15423
15424                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
15425                 vlen += sizeof(v);
15426         }
15427 }
15428
15429 static void tg3_probe_ncsi(struct tg3 *tp)
15430 {
15431         u32 apedata;
15432
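              /* Only trust the NCSI feature bit once the APE segment
               * signature is present and the firmware reports ready.
               */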
15433         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
15434         if (apedata != APE_SEG_SIG_MAGIC)
15435                 return;
15436
15437         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
15438         if (!(apedata & APE_FW_STATUS_READY))
15439                 return;
15440
15441         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI)
15442                 tg3_flag_set(tp, APE_HAS_NCSI);
15443 }
15444
15445 static void tg3_read_dash_ver(struct tg3 *tp)
15446 {
15447         int vlen;
15448         u32 apedata;
15449         char *fwtype;
15450
15451         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
15452
15453         if (tg3_flag(tp, APE_HAS_NCSI))
15454                 fwtype = "NCSI";
15455         else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725)
15456                 fwtype = "SMASH";
15457         else
15458                 fwtype = "DASH";
15459
15460         vlen = strlen(tp->fw_ver);
15461
15462         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
15463                  fwtype,
15464                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
15465                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
15466                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
15467                  (apedata & APE_FW_VERSION_BLDMSK));
15468 }
15469
15470 static void tg3_read_otp_ver(struct tg3 *tp)
15471 {
15472         u32 val, val2;
15473
15474         if (tg3_asic_rev(tp) != ASIC_REV_5762)
15475                 return;
15476
15477         if (!tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0, &val) &&
15478             !tg3_ape_otp_read(tp, OTP_ADDRESS_MAGIC0 + 4, &val2) &&
15479             TG3_OTP_MAGIC0_VALID(val)) {
15480                 u64 val64 = (u64) val << 32 | val2;
15481                 u32 ver = 0;
15482                 int i, vlen;
15483
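              /* Scan the 64-bit OTP value byte by byte from the LSB; the
               * last nonzero byte before the terminator is the version.
               */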
15484                 for (i = 0; i < 7; i++) {
15485                         if ((val64 & 0xff) == 0)
15486                                 break;
15487                         ver = val64 & 0xff;
15488                         val64 >>= 8;
15489                 }
15490                 vlen = strlen(tp->fw_ver);
15491                 snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " .%02d", ver);
15492         }
15493 }
15494
15495 static void tg3_read_fw_ver(struct tg3 *tp)
15496 {
15497         u32 val;
15498         bool vpd_vers = false;
15499
15500         if (tp->fw_ver[0] != 0)
15501                 vpd_vers = true;
15502
15503         if (tg3_flag(tp, NO_NVRAM)) {
15504                 strcat(tp->fw_ver, "sb");
15505                 tg3_read_otp_ver(tp);
15506                 return;
15507         }
15508
15509         if (tg3_nvram_read(tp, 0, &val))
15510                 return;
15511
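              /* NVRAM word 0 identifies the image format: full bootcode,
               * self-boot, and hardware self-boot images each encode their
               * version differently.
               */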
15512         if (val == TG3_EEPROM_MAGIC)
15513                 tg3_read_bc_ver(tp);
15514         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
15515                 tg3_read_sb_ver(tp, val);
15516         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
15517                 tg3_read_hwsb_ver(tp);
15518
15519         if (tg3_flag(tp, ENABLE_ASF)) {
15520                 if (tg3_flag(tp, ENABLE_APE)) {
15521                         tg3_probe_ncsi(tp);
15522                         if (!vpd_vers)
15523                                 tg3_read_dash_ver(tp);
15524                 } else if (!vpd_vers) {
15525                         tg3_read_mgmtfw_ver(tp);
15526                 }
15527         }
15528
15529         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
15530 }
15531
15532 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
15533 {
15534         if (tg3_flag(tp, LRG_PROD_RING_CAP))
15535                 return TG3_RX_RET_MAX_SIZE_5717;
15536         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
15537                 return TG3_RX_RET_MAX_SIZE_5700;
15538         else
15539                 return TG3_RX_RET_MAX_SIZE_5705;
15540 }
15541
15542 static const struct pci_device_id tg3_write_reorder_chipsets[] = {
15543         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
15544         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
15545         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
15546         { },
15547 };
15548
15549 static struct pci_dev *tg3_find_peer(struct tg3 *tp)
15550 {
15551         struct pci_dev *peer;
15552         unsigned int func, devnr = tp->pdev->devfn & ~7;
15553
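              /* Scan the other functions in this slot for the sibling
               * port of a dual-port device.
               */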
15554         for (func = 0; func < 8; func++) {
15555                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
15556                 if (peer && peer != tp->pdev)
15557                         break;
15558                 pci_dev_put(peer);
15559         }
15560         /* 5704 can be configured in single-port mode; set peer to
15561          * tp->pdev in that case.
15562          */
15563         if (!peer) {
15564                 peer = tp->pdev;
15565                 return peer;
15566         }
15567
15568         /*
15569          * We don't need to keep the refcount elevated; there's no way
15570          * to remove one half of this device without removing the other.
15571          */
15572         pci_dev_put(peer);
15573
15574         return peer;
15575 }
15576
15577 static void tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
15578 {
15579         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
15580         if (tg3_asic_rev(tp) == ASIC_REV_USE_PROD_ID_REG) {
15581                 u32 reg;
15582
15583                 /* All devices that use the alternate
15584                  * ASIC REV location have a CPMU.
15585                  */
15586                 tg3_flag_set(tp, CPMU_PRESENT);
15587
15588                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15589                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
15590                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15591                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15592                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
15593                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
15594                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
15595                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727)
15596                         reg = TG3PCI_GEN2_PRODID_ASICREV;
15597                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
15598                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
15599                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
15600                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
15601                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
15602                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
15603                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
15604                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
15605                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
15606                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
15607                         reg = TG3PCI_GEN15_PRODID_ASICREV;
15608                 else
15609                         reg = TG3PCI_PRODID_ASICREV;
15610
15611                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
15612         }
15613
15614         /* Wrong chip ID in 5752 A0. This code can be removed later
15615          * as A0 is not in production.
15616          */
15617         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5752_A0_HW)
15618                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
15619
15620         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_C0)
15621                 tp->pci_chip_rev_id = CHIPREV_ID_5720_A0;
15622
15623         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15624             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15625             tg3_asic_rev(tp) == ASIC_REV_5720)
15626                 tg3_flag_set(tp, 5717_PLUS);
15627
15628         if (tg3_asic_rev(tp) == ASIC_REV_57765 ||
15629             tg3_asic_rev(tp) == ASIC_REV_57766)
15630                 tg3_flag_set(tp, 57765_CLASS);
15631
15632         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS) ||
15633              tg3_asic_rev(tp) == ASIC_REV_5762)
15634                 tg3_flag_set(tp, 57765_PLUS);
15635
15636         /* Intentionally exclude ASIC_REV_5906 */
15637         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
15638             tg3_asic_rev(tp) == ASIC_REV_5787 ||
15639             tg3_asic_rev(tp) == ASIC_REV_5784 ||
15640             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15641             tg3_asic_rev(tp) == ASIC_REV_5785 ||
15642             tg3_asic_rev(tp) == ASIC_REV_57780 ||
15643             tg3_flag(tp, 57765_PLUS))
15644                 tg3_flag_set(tp, 5755_PLUS);
15645
15646         if (tg3_asic_rev(tp) == ASIC_REV_5780 ||
15647             tg3_asic_rev(tp) == ASIC_REV_5714)
15648                 tg3_flag_set(tp, 5780_CLASS);
15649
15650         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
15651             tg3_asic_rev(tp) == ASIC_REV_5752 ||
15652             tg3_asic_rev(tp) == ASIC_REV_5906 ||
15653             tg3_flag(tp, 5755_PLUS) ||
15654             tg3_flag(tp, 5780_CLASS))
15655                 tg3_flag_set(tp, 5750_PLUS);
15656
15657         if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
15658             tg3_flag(tp, 5750_PLUS))
15659                 tg3_flag_set(tp, 5705_PLUS);
15660 }
15661
15662 static bool tg3_10_100_only_device(struct tg3 *tp,
15663                                    const struct pci_device_id *ent)
15664 {
15665         u32 grc_misc_cfg = tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK;
15666
15667         if ((tg3_asic_rev(tp) == ASIC_REV_5703 &&
15668              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
15669             (tp->phy_flags & TG3_PHYFLG_IS_FET))
15670                 return true;
15671
15672         if (ent->driver_data & TG3_DRV_DATA_FLAG_10_100_ONLY) {
15673                 if (tg3_asic_rev(tp) == ASIC_REV_5705) {
15674                         if (ent->driver_data & TG3_DRV_DATA_FLAG_5705_10_100)
15675                                 return true;
15676                 } else {
15677                         return true;
15678                 }
15679         }
15680
15681         return false;
15682 }
15683
15684 static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
15685 {
15686         u32 misc_ctrl_reg;
15687         u32 pci_state_reg, grc_misc_cfg;
15688         u32 val;
15689         u16 pci_cmd;
15690         int err;
15691
15692         /* Force memory write invalidate off.  If we leave it on,
15693          * then on 5700_BX chips we have to enable a workaround.
15694          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
15695          * to match the cacheline size.  The Broadcom driver has this
15696          * workaround too, but it turns MWI off at all times and so never
15697          * uses it, which suggests that the workaround is insufficient.
15698          */
15699         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
15700         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
15701         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
15702
15703         /* Important! -- Make sure register accesses are byteswapped
15704          * correctly.  Also, for those chips that require it, make
15705          * sure that indirect register accesses are enabled before
15706          * the first operation.
15707          */
15708         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15709                               &misc_ctrl_reg);
15710         tp->misc_host_ctrl |= (misc_ctrl_reg &
15711                                MISC_HOST_CTRL_CHIPREV);
15712         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
15713                                tp->misc_host_ctrl);
15714
15715         tg3_detect_asic_rev(tp, misc_ctrl_reg);
15716
15717         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
15718          * we need to disable memory and use config. cycles
15719          * only to access all registers. The 5702/03 chips
15720          * can mistakenly decode the special cycles from the
15721          * ICH chipsets as memory write cycles, causing corruption
15722          * of register and memory space. Only certain ICH bridges
15723          * will drive special cycles with non-zero data during the
15724          * address phase which can fall within the 5703's address
15725          * range. This is not an ICH bug as the PCI spec allows
15726          * non-zero address during special cycles. However, only
15727          * these ICH bridges are known to drive non-zero addresses
15728          * during special cycles.
15729          *
15730          * Since special cycles do not cross PCI bridges, we only
15731          * enable this workaround if the 5703 is on the secondary
15732          * bus of these ICH bridges.
15733          */
15734         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A1) ||
15735             (tg3_chip_rev_id(tp) == CHIPREV_ID_5703_A2)) {
15736                 static struct tg3_dev_id {
15737                         u32     vendor;
15738                         u32     device;
15739                         u32     rev;
15740                 } ich_chipsets[] = {
15741                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
15742                           PCI_ANY_ID },
15743                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
15744                           PCI_ANY_ID },
15745                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
15746                           0xa },
15747                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
15748                           PCI_ANY_ID },
15749                         { },
15750                 };
15751                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
15752                 struct pci_dev *bridge = NULL;
15753
15754                 while (pci_id->vendor != 0) {
15755                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
15756                                                 bridge);
15757                         if (!bridge) {
15758                                 pci_id++;
15759                                 continue;
15760                         }
15761                         if (pci_id->rev != PCI_ANY_ID) {
15762                                 if (bridge->revision > pci_id->rev)
15763                                         continue;
15764                         }
15765                         if (bridge->subordinate &&
15766                             (bridge->subordinate->number ==
15767                              tp->pdev->bus->number)) {
15768                                 tg3_flag_set(tp, ICH_WORKAROUND);
15769                                 pci_dev_put(bridge);
15770                                 break;
15771                         }
15772                 }
15773         }
15774
15775         if (tg3_asic_rev(tp) == ASIC_REV_5701) {
15776                 static struct tg3_dev_id {
15777                         u32     vendor;
15778                         u32     device;
15779                 } bridge_chipsets[] = {
15780                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
15781                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
15782                         { },
15783                 };
15784                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
15785                 struct pci_dev *bridge = NULL;
15786
15787                 while (pci_id->vendor != 0) {
15788                         bridge = pci_get_device(pci_id->vendor,
15789                                                 pci_id->device,
15790                                                 bridge);
15791                         if (!bridge) {
15792                                 pci_id++;
15793                                 continue;
15794                         }
15795                         if (bridge->subordinate &&
15796                             (bridge->subordinate->number <=
15797                              tp->pdev->bus->number) &&
15798                             (bridge->subordinate->busn_res.end >=
15799                              tp->pdev->bus->number)) {
15800                                 tg3_flag_set(tp, 5701_DMA_BUG);
15801                                 pci_dev_put(bridge);
15802                                 break;
15803                         }
15804                 }
15805         }
15806
15807         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
15808          * DMA addresses wider than 40 bits.  This bridge may have
15809          * additional 57xx devices behind it in some 4-port NIC designs,
15810          * for example.  Any tg3 device found behind the bridge will
15811          * also need the 40-bit DMA workaround.
15812          */
15813         if (tg3_flag(tp, 5780_CLASS)) {
15814                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15815                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
15816         } else {
15817                 struct pci_dev *bridge = NULL;
15818
15819                 do {
15820                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
15821                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
15822                                                 bridge);
15823                         if (bridge && bridge->subordinate &&
15824                             (bridge->subordinate->number <=
15825                              tp->pdev->bus->number) &&
15826                             (bridge->subordinate->busn_res.end >=
15827                              tp->pdev->bus->number)) {
15828                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
15829                                 pci_dev_put(bridge);
15830                                 break;
15831                         }
15832                 } while (bridge);
15833         }
15834
15835         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
15836             tg3_asic_rev(tp) == ASIC_REV_5714)
15837                 tp->pdev_peer = tg3_find_peer(tp);
15838
15839         /* Determine TSO capabilities */
15840         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0)
15841                 ; /* Do nothing. HW bug. */
15842         else if (tg3_flag(tp, 57765_PLUS))
15843                 tg3_flag_set(tp, HW_TSO_3);
15844         else if (tg3_flag(tp, 5755_PLUS) ||
15845                  tg3_asic_rev(tp) == ASIC_REV_5906)
15846                 tg3_flag_set(tp, HW_TSO_2);
15847         else if (tg3_flag(tp, 5750_PLUS)) {
15848                 tg3_flag_set(tp, HW_TSO_1);
15849                 tg3_flag_set(tp, TSO_BUG);
15850                 if (tg3_asic_rev(tp) == ASIC_REV_5750 &&
15851                     tg3_chip_rev_id(tp) >= CHIPREV_ID_5750_C2)
15852                         tg3_flag_clear(tp, TSO_BUG);
15853         } else if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
15854                    tg3_asic_rev(tp) != ASIC_REV_5701 &&
15855                    tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) {
15856                 tg3_flag_set(tp, FW_TSO);
15857                 tg3_flag_set(tp, TSO_BUG);
15858                 if (tg3_asic_rev(tp) == ASIC_REV_5705)
15859                         tp->fw_needed = FIRMWARE_TG3TSO5;
15860                 else
15861                         tp->fw_needed = FIRMWARE_TG3TSO;
15862         }
15863
15864         /* Selectively allow TSO based on operating conditions */
15865         if (tg3_flag(tp, HW_TSO_1) ||
15866             tg3_flag(tp, HW_TSO_2) ||
15867             tg3_flag(tp, HW_TSO_3) ||
15868             tg3_flag(tp, FW_TSO)) {
15869                 /* For firmware TSO, assume ASF is disabled.
15870                  * We'll disable TSO later if we discover ASF
15871                  * is enabled in tg3_get_eeprom_hw_cfg().
15872                  */
15873                 tg3_flag_set(tp, TSO_CAPABLE);
15874         } else {
15875                 tg3_flag_clear(tp, TSO_CAPABLE);
15876                 tg3_flag_clear(tp, TSO_BUG);
15877                 tp->fw_needed = NULL;
15878         }
15879
15880         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0)
15881                 tp->fw_needed = FIRMWARE_TG3;
15882
15883         if (tg3_asic_rev(tp) == ASIC_REV_57766)
15884                 tp->fw_needed = FIRMWARE_TG357766;
15885
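              /* Interrupt modes: MSI is available on all 5750_PLUS parts
               * (with the per-chip exceptions below); MSI-X and multiple
               * queues only on 57765_PLUS.
               */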
15886         tp->irq_max = 1;
15887
15888         if (tg3_flag(tp, 5750_PLUS)) {
15889                 tg3_flag_set(tp, SUPPORT_MSI);
15890                 if (tg3_chip_rev(tp) == CHIPREV_5750_AX ||
15891                     tg3_chip_rev(tp) == CHIPREV_5750_BX ||
15892                     (tg3_asic_rev(tp) == ASIC_REV_5714 &&
15893                      tg3_chip_rev_id(tp) <= CHIPREV_ID_5714_A2 &&
15894                      tp->pdev_peer == tp->pdev))
15895                         tg3_flag_clear(tp, SUPPORT_MSI);
15896
15897                 if (tg3_flag(tp, 5755_PLUS) ||
15898                     tg3_asic_rev(tp) == ASIC_REV_5906) {
15899                         tg3_flag_set(tp, 1SHOT_MSI);
15900                 }
15901
15902                 if (tg3_flag(tp, 57765_PLUS)) {
15903                         tg3_flag_set(tp, SUPPORT_MSIX);
15904                         tp->irq_max = TG3_IRQ_MAX_VECS;
15905                 }
15906         }
15907
15908         tp->txq_max = 1;
15909         tp->rxq_max = 1;
15910         if (tp->irq_max > 1) {
15911                 tp->rxq_max = TG3_RSS_MAX_NUM_QS;
15912                 tg3_rss_init_dflt_indir_tbl(tp, TG3_RSS_MAX_NUM_QS);
15913
15914                 if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
15915                     tg3_asic_rev(tp) == ASIC_REV_5720)
15916                         tp->txq_max = tp->irq_max - 1;
15917         }
15918
15919         if (tg3_flag(tp, 5755_PLUS) ||
15920             tg3_asic_rev(tp) == ASIC_REV_5906)
15921                 tg3_flag_set(tp, SHORT_DMA_BUG);
15922
15923         if (tg3_asic_rev(tp) == ASIC_REV_5719)
15924                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
15925
15926         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
15927             tg3_asic_rev(tp) == ASIC_REV_5719 ||
15928             tg3_asic_rev(tp) == ASIC_REV_5720 ||
15929             tg3_asic_rev(tp) == ASIC_REV_5762)
15930                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
15931
15932         if (tg3_flag(tp, 57765_PLUS) &&
15933             tg3_chip_rev_id(tp) != CHIPREV_ID_5719_A0)
15934                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
15935
15936         if (!tg3_flag(tp, 5705_PLUS) ||
15937             tg3_flag(tp, 5780_CLASS) ||
15938             tg3_flag(tp, USE_JUMBO_BDFLAG))
15939                 tg3_flag_set(tp, JUMBO_CAPABLE);
15940
15941         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
15942                               &pci_state_reg);
15943
15944         if (pci_is_pcie(tp->pdev)) {
15945                 u16 lnkctl;
15946
15947                 tg3_flag_set(tp, PCI_EXPRESS);
15948
15949                 pcie_capability_read_word(tp->pdev, PCI_EXP_LNKCTL, &lnkctl);
15950                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
15951                         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
15952                                 tg3_flag_clear(tp, HW_TSO_2);
15953                                 tg3_flag_clear(tp, TSO_CAPABLE);
15954                         }
15955                         if (tg3_asic_rev(tp) == ASIC_REV_5784 ||
15956                             tg3_asic_rev(tp) == ASIC_REV_5761 ||
15957                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A0 ||
15958                             tg3_chip_rev_id(tp) == CHIPREV_ID_57780_A1)
15959                                 tg3_flag_set(tp, CLKREQ_BUG);
15960                 } else if (tg3_chip_rev_id(tp) == CHIPREV_ID_5717_A0) {
15961                         tg3_flag_set(tp, L1PLLPD_EN);
15962                 }
15963         } else if (tg3_asic_rev(tp) == ASIC_REV_5785) {
15964                 /* BCM5785 devices are effectively PCIe devices, and should
15965                  * follow PCIe codepaths, but do not have a PCIe capabilities
15966                  * section.
15967                  */
15968                 tg3_flag_set(tp, PCI_EXPRESS);
15969         } else if (!tg3_flag(tp, 5705_PLUS) ||
15970                    tg3_flag(tp, 5780_CLASS)) {
15971                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
15972                 if (!tp->pcix_cap) {
15973                         dev_err(&tp->pdev->dev,
15974                                 "Cannot find PCI-X capability, aborting\n");
15975                         return -EIO;
15976                 }
15977
15978                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
15979                         tg3_flag_set(tp, PCIX_MODE);
15980         }
15981
15982         /* If we have an AMD 762 or VIA K8T800 chipset, write
15983          * reordering to the mailbox registers done by the host
15984          * controller can cause major troubles.  We read back from
15985          * every mailbox register write to force the writes to be
15986          * posted to the chip in order.
15987          */
15988         if (pci_dev_present(tg3_write_reorder_chipsets) &&
15989             !tg3_flag(tp, PCI_EXPRESS))
15990                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
15991
15992         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
15993                              &tp->pci_cacheline_sz);
15994         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
15995                              &tp->pci_lat_timer);
15996         if (tg3_asic_rev(tp) == ASIC_REV_5703 &&
15997             tp->pci_lat_timer < 64) {
15998                 tp->pci_lat_timer = 64;
15999                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
16000                                       tp->pci_lat_timer);
16001         }
16002
16003         /* Important! -- It is critical that the PCI-X hw workaround
16004          * situation is decided before the first MMIO register access.
16005          */
16006         if (tg3_chip_rev(tp) == CHIPREV_5700_BX) {
16007                 /* 5700 BX chips need to have their TX producer index
16008                  * mailboxes written twice to workaround a bug.
16009                  */
16010                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
16011
16012                 /* If we are in PCI-X mode, enable register write workaround.
16013                  *
16014                  * The workaround is to use indirect register accesses
16015                  * for all chip writes except those to mailbox registers.
16016                  */
16017                 if (tg3_flag(tp, PCIX_MODE)) {
16018                         u32 pm_reg;
16019
16020                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16021
16022                         /* The chip can have its power management PCI config
16023                          * space registers clobbered due to this bug.
16024                          * So explicitly force the chip into D0 here.
16025                          */
16026                         pci_read_config_dword(tp->pdev,
16027                                               tp->pm_cap + PCI_PM_CTRL,
16028                                               &pm_reg);
16029                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
16030                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
16031                         pci_write_config_dword(tp->pdev,
16032                                                tp->pm_cap + PCI_PM_CTRL,
16033                                                pm_reg);
16034
16035                         /* Also, force SERR#/PERR# in PCI command. */
16036                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16037                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
16038                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16039                 }
16040         }
16041
16042         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
16043                 tg3_flag_set(tp, PCI_HIGH_SPEED);
16044         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
16045                 tg3_flag_set(tp, PCI_32BIT);
16046
16047         /* Chip-specific fixup from Broadcom driver */
16048         if ((tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0) &&
16049             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
16050                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
16051                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
16052         }
16053
16054         /* Default fast path register access methods */
16055         tp->read32 = tg3_read32;
16056         tp->write32 = tg3_write32;
16057         tp->read32_mbox = tg3_read32;
16058         tp->write32_mbox = tg3_write32;
16059         tp->write32_tx_mbox = tg3_write32;
16060         tp->write32_rx_mbox = tg3_write32;
16061
16062         /* Various workaround register access methods */
16063         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
16064                 tp->write32 = tg3_write_indirect_reg32;
16065         else if (tg3_asic_rev(tp) == ASIC_REV_5701 ||
16066                  (tg3_flag(tp, PCI_EXPRESS) &&
16067                   tg3_chip_rev_id(tp) == CHIPREV_ID_5750_A0)) {
16068                 /*
16069                  * Back-to-back register writes can cause problems on these
16070                  * chips; the workaround is to read back all register writes
16071                  * except those to mailbox registers.
16072                  *
16073                  * See tg3_write_flush_reg32().
16074                  */
16075                 tp->write32 = tg3_write_flush_reg32;
16076         }
16077
16078         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
16079                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
16080                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
16081                         tp->write32_rx_mbox = tg3_write_flush_reg32;
16082         }
16083
16084         if (tg3_flag(tp, ICH_WORKAROUND)) {
16085                 tp->read32 = tg3_read_indirect_reg32;
16086                 tp->write32 = tg3_write_indirect_reg32;
16087                 tp->read32_mbox = tg3_read_indirect_mbox;
16088                 tp->write32_mbox = tg3_write_indirect_mbox;
16089                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
16090                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
16091
16092                 iounmap(tp->regs);
16093                 tp->regs = NULL;
16094
16095                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
16096                 pci_cmd &= ~PCI_COMMAND_MEMORY;
16097                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
16098         }
16099         if (tg3_asic_rev(tp) == ASIC_REV_5906) {
16100                 tp->read32_mbox = tg3_read32_mbox_5906;
16101                 tp->write32_mbox = tg3_write32_mbox_5906;
16102                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
16103                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
16104         }
16105
16106         if (tp->write32 == tg3_write_indirect_reg32 ||
16107             (tg3_flag(tp, PCIX_MODE) &&
16108              (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16109               tg3_asic_rev(tp) == ASIC_REV_5701)))
16110                 tg3_flag_set(tp, SRAM_USE_CONFIG);
16111
16112         /* The memory arbiter has to be enabled in order for SRAM accesses
16113          * to succeed.  Normally on powerup the tg3 chip firmware will make
16114          * sure it is enabled, but other entities such as system netboot
16115          * code might disable it.
16116          */
16117         val = tr32(MEMARB_MODE);
16118         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
16119
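              /* Determine which PCI function this port is.  5704 and
               * 5780-class devices in PCI-X mode read it from PCI-X status,
               * and 5717/5719/5720 from CPMU status, rather than relying on
               * the devfn alone.
               */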
16120         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
16121         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16122             tg3_flag(tp, 5780_CLASS)) {
16123                 if (tg3_flag(tp, PCIX_MODE)) {
16124                         pci_read_config_dword(tp->pdev,
16125                                               tp->pcix_cap + PCI_X_STATUS,
16126                                               &val);
16127                         tp->pci_fn = val & 0x7;
16128                 }
16129         } else if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16130                    tg3_asic_rev(tp) == ASIC_REV_5719 ||
16131                    tg3_asic_rev(tp) == ASIC_REV_5720) {
16132                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
16133                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) != NIC_SRAM_CPMUSTAT_SIG)
16134                         val = tr32(TG3_CPMU_STATUS);
16135
16136                 if (tg3_asic_rev(tp) == ASIC_REV_5717)
16137                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5717) ? 1 : 0;
16138                 else
16139                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
16140                                      TG3_CPMU_STATUS_FSHFT_5719;
16141         }
16142
16143         if (tg3_flag(tp, FLUSH_POSTED_WRITES)) {
16144                 tp->write32_tx_mbox = tg3_write_flush_reg32;
16145                 tp->write32_rx_mbox = tg3_write_flush_reg32;
16146         }
16147
16148         /* Get eeprom hw config before calling tg3_set_power_state().
16149          * In particular, the TG3_FLAG_IS_NIC flag must be
16150          * determined before calling tg3_set_power_state() so that
16151          * we know whether or not to switch out of Vaux power.
16152          * When the flag is set, it means that GPIO1 is used for eeprom
16153          * write protect and also implies that it is a LOM where GPIOs
16154          * are not used to switch power.
16155          */
16156         tg3_get_eeprom_hw_cfg(tp);
16157
16158         if (tg3_flag(tp, FW_TSO) && tg3_flag(tp, ENABLE_ASF)) {
16159                 tg3_flag_clear(tp, TSO_CAPABLE);
16160                 tg3_flag_clear(tp, TSO_BUG);
16161                 tp->fw_needed = NULL;
16162         }
16163
16164         if (tg3_flag(tp, ENABLE_APE)) {
16165                 /* Allow reads and writes to the
16166                  * APE register and memory space.
16167                  */
16168                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
16169                                  PCISTATE_ALLOW_APE_SHMEM_WR |
16170                                  PCISTATE_ALLOW_APE_PSPACE_WR;
16171                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
16172                                        pci_state_reg);
16173
16174                 tg3_ape_lock_init(tp);
16175         }
16176
16177         /* Set up tp->grc_local_ctrl before calling
16178          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
16179          * will bring 5700's external PHY out of reset.
16180          * It is also used as eeprom write protect on LOMs.
16181          */
16182         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
16183         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16184             tg3_flag(tp, EEPROM_WRITE_PROT))
16185                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
16186                                        GRC_LCLCTRL_GPIO_OUTPUT1);
16187         /* Unused GPIO3 must be driven as output on 5752 because there
16188          * are no pull-up resistors on unused GPIO pins.
16189          */
16190         else if (tg3_asic_rev(tp) == ASIC_REV_5752)
16191                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
16192
16193         if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16194             tg3_asic_rev(tp) == ASIC_REV_57780 ||
16195             tg3_flag(tp, 57765_CLASS))
16196                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16197
16198         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
16199             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
16200                 /* Turn off the debug UART. */
16201                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
16202                 if (tg3_flag(tp, IS_NIC))
16203                         /* Keep VMain power. */
16204                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
16205                                               GRC_LCLCTRL_GPIO_OUTPUT0;
16206         }
16207
16208         if (tg3_asic_rev(tp) == ASIC_REV_5762)
16209                 tp->grc_local_ctrl |=
16210                         tr32(GRC_LOCAL_CTRL) & GRC_LCLCTRL_GPIO_UART_SEL;
16211
16212         /* Switch out of Vaux if it is a NIC */
16213         tg3_pwrsrc_switch_to_vmain(tp);
16214
16215         /* Derive initial jumbo mode from MTU assigned in
16216          * ether_setup() via the alloc_etherdev() call
16217          */
16218         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
16219                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
16220
16221         /* Determine WakeOnLan speed to use. */
16222         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16223             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16224             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16225             tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2) {
16226                 tg3_flag_clear(tp, WOL_SPEED_100MB);
16227         } else {
16228                 tg3_flag_set(tp, WOL_SPEED_100MB);
16229         }
16230
16231         if (tg3_asic_rev(tp) == ASIC_REV_5906)
16232                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
16233
16234         /* A few boards don't want the Ethernet@WireSpeed phy feature */
16235         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16236             (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16237              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A0) &&
16238              (tg3_chip_rev_id(tp) != CHIPREV_ID_5705_A1)) ||
16239             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
16240             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
16241                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
16242
16243         if (tg3_chip_rev(tp) == CHIPREV_5703_AX ||
16244             tg3_chip_rev(tp) == CHIPREV_5704_AX)
16245                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
16246         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5704_A0)
16247                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
16248
16249         if (tg3_flag(tp, 5705_PLUS) &&
16250             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
16251             tg3_asic_rev(tp) != ASIC_REV_5785 &&
16252             tg3_asic_rev(tp) != ASIC_REV_57780 &&
16253             !tg3_flag(tp, 57765_PLUS)) {
16254                 if (tg3_asic_rev(tp) == ASIC_REV_5755 ||
16255                     tg3_asic_rev(tp) == ASIC_REV_5787 ||
16256                     tg3_asic_rev(tp) == ASIC_REV_5784 ||
16257                     tg3_asic_rev(tp) == ASIC_REV_5761) {
16258                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
16259                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
16260                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
16261                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
16262                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
16263                 } else
16264                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
16265         }
16266
16267         if (tg3_asic_rev(tp) == ASIC_REV_5784 &&
16268             tg3_chip_rev(tp) != CHIPREV_5784_AX) {
16269                 tp->phy_otp = tg3_read_otp_phycfg(tp);
16270                 if (tp->phy_otp == 0)
16271                         tp->phy_otp = TG3_OTP_DEFAULT;
16272         }
16273
16274         if (tg3_flag(tp, CPMU_PRESENT))
16275                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
16276         else
16277                 tp->mi_mode = MAC_MI_MODE_BASE;
16278
16279         tp->coalesce_mode = 0;
16280         if (tg3_chip_rev(tp) != CHIPREV_5700_AX &&
16281             tg3_chip_rev(tp) != CHIPREV_5700_BX)
16282                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
16283
16284         /* Set these bits to enable statistics workaround. */
16285         if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
16286             tg3_chip_rev_id(tp) == CHIPREV_ID_5719_A0 ||
16287             tg3_chip_rev_id(tp) == CHIPREV_ID_5720_A0) {
16288                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
16289                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
16290         }
16291
16292         if (tg3_asic_rev(tp) == ASIC_REV_5785 ||
16293             tg3_asic_rev(tp) == ASIC_REV_57780)
16294                 tg3_flag_set(tp, USE_PHYLIB);
16295
16296         err = tg3_mdio_init(tp);
16297         if (err)
16298                 return err;
16299
16300         /* Initialize data/descriptor byte/word swapping. */
16301         val = tr32(GRC_MODE);
16302         if (tg3_asic_rev(tp) == ASIC_REV_5720 ||
16303             tg3_asic_rev(tp) == ASIC_REV_5762)
16304                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
16305                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
16306                         GRC_MODE_B2HRX_ENABLE |
16307                         GRC_MODE_HTX2B_ENABLE |
16308                         GRC_MODE_HOST_STACKUP);
16309         else
16310                 val &= GRC_MODE_HOST_STACKUP;
16311
16312         tw32(GRC_MODE, val | tp->grc_mode);
16313
16314         tg3_switch_clocks(tp);
16315
16316         /* Clear this out for sanity. */
16317         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
16318
16319         /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
16320         tw32(TG3PCI_REG_BASE_ADDR, 0);
16321
16322         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
16323                               &pci_state_reg);
16324         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
16325             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
16326                 if (tg3_chip_rev_id(tp) == CHIPREV_ID_5701_A0 ||
16327                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B0 ||
16328                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B2 ||
16329                     tg3_chip_rev_id(tp) == CHIPREV_ID_5701_B5) {
16330                         void __iomem *sram_base;
16331
16332                         /* Write some dummy words into the SRAM status block
16333                          * area and see if they read back correctly.  If the
16334                          * readback is bad, force-enable the PCI-X workaround.
16335                          */
16336                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
16337
16338                         writel(0x00000000, sram_base);
16339                         writel(0x00000000, sram_base + 4);
16340                         writel(0xffffffff, sram_base + 4);
16341                         if (readl(sram_base) != 0x00000000)
16342                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
16343                 }
16344         }
16345
16346         udelay(50);
16347         tg3_nvram_init(tp);
16348
16349         /* If the device has an NVRAM, no need to load patch firmware */
16350         if (tg3_asic_rev(tp) == ASIC_REV_57766 &&
16351             !tg3_flag(tp, NO_NVRAM))
16352                 tp->fw_needed = NULL;
16353
16354         grc_misc_cfg = tr32(GRC_MISC_CFG);
16355         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
16356
16357         if (tg3_asic_rev(tp) == ASIC_REV_5705 &&
16358             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
16359              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
16360                 tg3_flag_set(tp, IS_5788);
16361
16362         if (!tg3_flag(tp, IS_5788) &&
16363             tg3_asic_rev(tp) != ASIC_REV_5700)
16364                 tg3_flag_set(tp, TAGGED_STATUS);
16365         if (tg3_flag(tp, TAGGED_STATUS)) {
16366                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
16367                                       HOSTCC_MODE_CLRTICK_TXBD);
16368
16369                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
16370                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
16371                                        tp->misc_host_ctrl);
16372         }
16373
16374         /* Preserve the APE MAC_MODE bits */
16375         if (tg3_flag(tp, ENABLE_APE))
16376                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
16377         else
16378                 tp->mac_mode = 0;
16379
16380         if (tg3_10_100_only_device(tp, ent))
16381                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
16382
16383         err = tg3_phy_probe(tp);
16384         if (err) {
16385                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
16386                 /* ... but do not return immediately ... */
16387                 tg3_mdio_fini(tp);
16388         }
16389
16390         tg3_read_vpd(tp);
16391         tg3_read_fw_ver(tp);
16392
16393         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
16394                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16395         } else {
16396                 if (tg3_asic_rev(tp) == ASIC_REV_5700)
16397                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16398                 else
16399                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
16400         }
16401
16402         /* 5700 {AX,BX} chips have a broken status block link
16403          * change bit implementation, so we must use the
16404          * status register in those cases.
16405          */
16406         if (tg3_asic_rev(tp) == ASIC_REV_5700)
16407                 tg3_flag_set(tp, USE_LINKCHG_REG);
16408         else
16409                 tg3_flag_clear(tp, USE_LINKCHG_REG);
16410
16411         /* The led_ctrl is set during tg3_phy_probe; here we might
16412          * have to force the link status polling mechanism based
16413          * upon subsystem IDs.
16414          */
16415         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
16416             tg3_asic_rev(tp) == ASIC_REV_5701 &&
16417             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
16418                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
16419                 tg3_flag_set(tp, USE_LINKCHG_REG);
16420         }
16421
16422         /* For all SERDES we poll the MAC status register. */
16423         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
16424                 tg3_flag_set(tp, POLL_SERDES);
16425         else
16426                 tg3_flag_clear(tp, POLL_SERDES);
16427
16428         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
16429         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
16430         if (tg3_asic_rev(tp) == ASIC_REV_5701 &&
16431             tg3_flag(tp, PCIX_MODE)) {
16432                 tp->rx_offset = NET_SKB_PAD;
16433 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
16434                 tp->rx_copy_thresh = ~(u16)0;
16435 #endif
16436         }
16437
16438         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
16439         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
16440         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
16441
16442         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
16443
16444         /* Increment the rx prod index on the rx std ring by at most
16445          * 8 for these chips to work around hw errata.
16446          */
16447         if (tg3_asic_rev(tp) == ASIC_REV_5750 ||
16448             tg3_asic_rev(tp) == ASIC_REV_5752 ||
16449             tg3_asic_rev(tp) == ASIC_REV_5755)
16450                 tp->rx_std_max_post = 8;
16451
16452         if (tg3_flag(tp, ASPM_WORKAROUND))
16453                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
16454                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
16455
16456         return err;
16457 }
16458
16459 #ifdef CONFIG_SPARC
16460 static int tg3_get_macaddr_sparc(struct tg3 *tp)
16461 {
16462         struct net_device *dev = tp->dev;
16463         struct pci_dev *pdev = tp->pdev;
16464         struct device_node *dp = pci_device_to_OF_node(pdev);
16465         const unsigned char *addr;
16466         int len;
16467
16468         addr = of_get_property(dp, "local-mac-address", &len);
16469         if (addr && len == 6) {
16470                 memcpy(dev->dev_addr, addr, 6);
16471                 return 0;
16472         }
16473         return -ENODEV;
16474 }
16475
16476 static int tg3_get_default_macaddr_sparc(struct tg3 *tp)
16477 {
16478         struct net_device *dev = tp->dev;
16479
16480         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
16481         return 0;
16482 }
16483 #endif
16484
16485 static int tg3_get_device_address(struct tg3 *tp)
16486 {
16487         struct net_device *dev = tp->dev;
16488         u32 hi, lo, mac_offset;
16489         int addr_ok = 0;
16490         int err;
16491
16492 #ifdef CONFIG_SPARC
16493         if (!tg3_get_macaddr_sparc(tp))
16494                 return 0;
16495 #endif
16496
16497         if (tg3_flag(tp, IS_SSB_CORE)) {
16498                 err = ssb_gige_get_macaddr(tp->pdev, &dev->dev_addr[0]);
16499                 if (!err && is_valid_ether_addr(&dev->dev_addr[0]))
16500                         return 0;
16501         }
16502
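              /* The NVRAM location of the MAC address varies by device:
               * 0x7c by default, 0xcc for second ports, offset further for
               * functions 2/3 on 5717-class parts, and 0x10 on the 5906.
               */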
16503         mac_offset = 0x7c;
16504         if (tg3_asic_rev(tp) == ASIC_REV_5704 ||
16505             tg3_flag(tp, 5780_CLASS)) {
16506                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
16507                         mac_offset = 0xcc;
16508                 if (tg3_nvram_lock(tp))
16509                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
16510                 else
16511                         tg3_nvram_unlock(tp);
16512         } else if (tg3_flag(tp, 5717_PLUS)) {
16513                 if (tp->pci_fn & 1)
16514                         mac_offset = 0xcc;
16515                 if (tp->pci_fn > 1)
16516                         mac_offset += 0x18c;
16517         } else if (tg3_asic_rev(tp) == ASIC_REV_5906)
16518                 mac_offset = 0x10;
16519
16520         /* First try to get it from MAC address mailbox. */
16521         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
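              /* The upper 16 bits hold a 0x484b ("HK") signature when the
               * bootcode has stored a valid address in the mailbox.
               */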
16522         if ((hi >> 16) == 0x484b) {
16523                 dev->dev_addr[0] = (hi >>  8) & 0xff;
16524                 dev->dev_addr[1] = (hi >>  0) & 0xff;
16525
16526                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
16527                 dev->dev_addr[2] = (lo >> 24) & 0xff;
16528                 dev->dev_addr[3] = (lo >> 16) & 0xff;
16529                 dev->dev_addr[4] = (lo >>  8) & 0xff;
16530                 dev->dev_addr[5] = (lo >>  0) & 0xff;
16531
16532                 /* Some old bootcode may report a 0 MAC address in SRAM */
16533                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
16534         }
16535         if (!addr_ok) {
16536                 /* Next, try NVRAM. */
16537                 if (!tg3_flag(tp, NO_NVRAM) &&
16538                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
16539                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
16540                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
16541                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
16542                 }
16543                 /* Finally just fetch it out of the MAC control regs. */
16544                 else {
16545                         hi = tr32(MAC_ADDR_0_HIGH);
16546                         lo = tr32(MAC_ADDR_0_LOW);
16547
16548                         dev->dev_addr[5] = lo & 0xff;
16549                         dev->dev_addr[4] = (lo >> 8) & 0xff;
16550                         dev->dev_addr[3] = (lo >> 16) & 0xff;
16551                         dev->dev_addr[2] = (lo >> 24) & 0xff;
16552                         dev->dev_addr[1] = hi & 0xff;
16553                         dev->dev_addr[0] = (hi >> 8) & 0xff;
16554                 }
16555         }
16556
16557         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
16558 #ifdef CONFIG_SPARC
16559                 if (!tg3_get_default_macaddr_sparc(tp))
16560                         return 0;
16561 #endif
16562                 return -EINVAL;
16563         }
16564         return 0;
16565 }
16566
16567 #define BOUNDARY_SINGLE_CACHELINE       1
16568 #define BOUNDARY_MULTI_CACHELINE        2
16569
16570 static u32 tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
16571 {
16572         int cacheline_size;
16573         u8 byte;
16574         int goal;
16575
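              /* PCI_CACHE_LINE_SIZE is in 32-bit words; zero means it was
               * never programmed, so fall back to assuming 1024 bytes.
               */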
16576         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
16577         if (byte == 0)
16578                 cacheline_size = 1024;
16579         else
16580                 cacheline_size = (int) byte * 4;
16581
16582         /* On 5703 and later chips, the boundary bits have no
16583          * effect.
16584          */
16585         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16586             tg3_asic_rev(tp) != ASIC_REV_5701 &&
16587             !tg3_flag(tp, PCI_EXPRESS))
16588                 goto out;
16589
16590 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
16591         goal = BOUNDARY_MULTI_CACHELINE;
16592 #else
16593 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
16594         goal = BOUNDARY_SINGLE_CACHELINE;
16595 #else
16596         goal = 0;
16597 #endif
16598 #endif
16599
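              /* 57765_PLUS chips expose only a single cache-alignment
               * disable bit: set it when no boundary goal applies.
               */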
16600         if (tg3_flag(tp, 57765_PLUS)) {
16601                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
16602                 goto out;
16603         }
16604
16605         if (!goal)
16606                 goto out;
16607
16608         /* PCI controllers on most RISC systems tend to disconnect
16609          * when a device tries to burst across a cache-line boundary.
16610          * Therefore, letting tg3 do so just wastes PCI bandwidth.
16611          *
16612          * Unfortunately, for PCI-E there are only limited
16613          * write-side controls for this, and thus for reads
16614          * we will still get the disconnects.  We'll also waste
16615          * these PCI cycles for both read and write for chips
16616          * other than 5700 and 5701, which do not implement the
16617          * boundary bits.
16618          */
16619         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
16620                 switch (cacheline_size) {
16621                 case 16:
16622                 case 32:
16623                 case 64:
16624                 case 128:
16625                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16626                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
16627                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
16628                         } else {
16629                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16630                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16631                         }
16632                         break;
16633
16634                 case 256:
16635                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
16636                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
16637                         break;
16638
16639                 default:
16640                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
16641                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
16642                         break;
16643                 }
16644         } else if (tg3_flag(tp, PCI_EXPRESS)) {
16645                 switch (cacheline_size) {
16646                 case 16:
16647                 case 32:
16648                 case 64:
16649                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16650                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16651                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
16652                                 break;
16653                         }
16654                         /* fallthrough */
16655                 case 128:
16656                 default:
16657                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
16658                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
16659                         break;
16660                 }
16661         } else {
16662                 switch (cacheline_size) {
16663                 case 16:
16664                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16665                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
16666                                         DMA_RWCTRL_WRITE_BNDRY_16);
16667                                 break;
16668                         }
16669                         /* fallthrough */
16670                 case 32:
16671                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16672                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
16673                                         DMA_RWCTRL_WRITE_BNDRY_32);
16674                                 break;
16675                         }
16676                         /* fallthrough */
16677                 case 64:
16678                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16679                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
16680                                         DMA_RWCTRL_WRITE_BNDRY_64);
16681                                 break;
16682                         }
16683                         /* fallthrough */
16684                 case 128:
16685                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
16686                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
16687                                         DMA_RWCTRL_WRITE_BNDRY_128);
16688                                 break;
16689                         }
16690                         /* fallthrough */
16691                 case 256:
16692                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
16693                                 DMA_RWCTRL_WRITE_BNDRY_256);
16694                         break;
16695                 case 512:
16696                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
16697                                 DMA_RWCTRL_WRITE_BNDRY_512);
16698                         break;
16699                 case 1024:
16700                 default:
16701                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
16702                                 DMA_RWCTRL_WRITE_BNDRY_1024);
16703                         break;
16704                 }
16705         }
16706
16707 out:
16708         return val;
16709 }
16710
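/* Stage a single internal buffer descriptor in NIC SRAM and point the
 * read (host-to-device) or write (device-to-host) DMA engine at it,
 * then poll the matching completion FIFO until the descriptor returns.
 */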
16711 static int tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma,
16712                            int size, bool to_device)
16713 {
16714         struct tg3_internal_buffer_desc test_desc;
16715         u32 sram_dma_descs;
16716         int i, ret;
16717
16718         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
16719
16720         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
16721         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
16722         tw32(RDMAC_STATUS, 0);
16723         tw32(WDMAC_STATUS, 0);
16724
16725         tw32(BUFMGR_MODE, 0);
16726         tw32(FTQ_RESET, 0);
16727
16728         test_desc.addr_hi = ((u64) buf_dma) >> 32;
16729         test_desc.addr_lo = buf_dma & 0xffffffff;
16730         test_desc.nic_mbuf = 0x00002100;
16731         test_desc.len = size;
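        /* addr_hi/addr_lo carry the 64-bit host DMA address.  nic_mbuf
         * (0x2100) appears to be the on-chip SRAM offset the data is
         * staged at; the disabled verification loop further down reads
         * the buffer back from that same offset.
         */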
16732
16733         /*
16734          * HP ZX1 systems were seeing test failures for 5701 cards
16735          * running at 33MHz the *second* time the tg3 driver was
16736          * loaded after an initial scan.
16737          *
16738          * Broadcom tells me:
16739          *   ...the DMA engine is connected to the GRC block and a DMA
16740          *   reset may affect the GRC block in some unpredictable way...
16741          *   The behavior of resets to individual blocks has not been tested.
16742          *
16743          * Broadcom noted the GRC reset will also reset all sub-components.
16744          */
16745         if (to_device) {
16746                 test_desc.cqid_sqid = (13 << 8) | 2;
16747
16748                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
16749                 udelay(40);
16750         } else {
16751                 test_desc.cqid_sqid = (16 << 8) | 7;
16752
16753                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
16754                 udelay(40);
16755         }
16756         test_desc.flags = 0x00000005;
16757
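        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * through the PCI memory window registers.
         */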
16758         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
16759                 u32 val;
16760
16761                 val = *(((u32 *)&test_desc) + i);
16762                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
16763                                        sram_dma_descs + (i * sizeof(u32)));
16764                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
16765         }
16766         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
16767
16768         if (to_device)
16769                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
16770         else
16771                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
16772
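        /* Poll the completion FIFO for up to 4 ms (40 x 100 us); its
         * low 16 bits echo the descriptor address once the DMA is done.
         */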
16773         ret = -ENODEV;
16774         for (i = 0; i < 40; i++) {
16775                 u32 val;
16776
16777                 if (to_device)
16778                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
16779                 else
16780                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
16781                 if ((val & 0xffff) == sram_dma_descs) {
16782                         ret = 0;
16783                         break;
16784                 }
16785
16786                 udelay(100);
16787         }
16788
16789         return ret;
16790 }
16791
16792 #define TEST_BUFFER_SIZE        0x2000
16793
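/* Hosts known to expose the 5700/5701 write-DMA bug even though they
 * pass the test in tg3_test_dma(); the 16-byte write boundary is
 * forced when one of these bridges is present.
 */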
16794 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
16795         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
16796         { },
16797 };
16798
16799 static int tg3_test_dma(struct tg3 *tp)
16800 {
16801         dma_addr_t buf_dma;
16802         u32 *buf, saved_dma_rwctrl;
16803         int ret = 0;
16804
16805         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
16806                                  &buf_dma, GFP_KERNEL);
16807         if (!buf) {
16808                 ret = -ENOMEM;
16809                 goto out_nofree;
16810         }
16811
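        /* The 0x7/0x6 values are presumably the PCI bus commands
         * (Memory Write and Memory Read) the DMA engine issues for
         * its transfers.
         */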
16812         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
16813                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
16814
16815         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
16816
16817         if (tg3_flag(tp, 57765_PLUS))
16818                 goto out;
16819
16820         if (tg3_flag(tp, PCI_EXPRESS)) {
16821                 /* DMA read watermark not used on PCIE */
16822                 tp->dma_rwctrl |= 0x00180000;
16823         } else if (!tg3_flag(tp, PCIX_MODE)) {
16824                 if (tg3_asic_rev(tp) == ASIC_REV_5705 ||
16825                     tg3_asic_rev(tp) == ASIC_REV_5750)
16826                         tp->dma_rwctrl |= 0x003f0000;
16827                 else
16828                         tp->dma_rwctrl |= 0x003f000f;
16829         } else {
16830                 if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16831                     tg3_asic_rev(tp) == ASIC_REV_5704) {
16832                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
16833                         u32 read_water = 0x7;
16834
16835                         /* If the 5704 is behind the EPB bridge, we can
16836                          * do the less restrictive ONE_DMA workaround for
16837                          * better performance.
16838                          */
16839                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
16840                             tg3_asic_rev(tp) == ASIC_REV_5704)
16841                                 tp->dma_rwctrl |= 0x8000;
16842                         else if (ccval == 0x6 || ccval == 0x7)
16843                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16844
16845                         if (tg3_asic_rev(tp) == ASIC_REV_5703)
16846                                 read_water = 4;
16847                         /* Set bit 23 to enable PCIX hw bug fix */
16848                         tp->dma_rwctrl |=
16849                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
16850                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
16851                                 (1 << 23);
16852                 } else if (tg3_asic_rev(tp) == ASIC_REV_5780) {
16853                         /* 5780 always in PCIX mode */
16854                         tp->dma_rwctrl |= 0x00144000;
16855                 } else if (tg3_asic_rev(tp) == ASIC_REV_5714) {
16856                         /* 5714 always in PCIX mode */
16857                         tp->dma_rwctrl |= 0x00148000;
16858                 } else {
16859                         tp->dma_rwctrl |= 0x001b000f;
16860                 }
16861         }
16862         if (tg3_flag(tp, ONE_DMA_AT_ONCE))
16863                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
16864
16865         if (tg3_asic_rev(tp) == ASIC_REV_5703 ||
16866             tg3_asic_rev(tp) == ASIC_REV_5704)
16867                 tp->dma_rwctrl &= 0xfffffff0;
16868
16869         if (tg3_asic_rev(tp) == ASIC_REV_5700 ||
16870             tg3_asic_rev(tp) == ASIC_REV_5701) {
16871                 /* Remove this if it causes problems for some boards. */
16872                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
16873
16874                 /* On 5700/5701 chips, we need to set this bit.
16875                  * Otherwise the chip will issue cacheline transactions
16876                  * to streamable DMA memory with not all the byte
16877                  * enables turned on.  This is an error on several
16878                  * RISC PCI controllers, in particular sparc64.
16879                  *
16880                  * On 5703/5704 chips, this bit has been reassigned
16881                  * a different meaning.  In particular, it is used
16882                  * on those chips to enable a PCI-X workaround.
16883                  */
16884                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
16885         }
16886
16887         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16888
16889 #if 0
16890         /* Unneeded, already done by tg3_get_invariants.  */
16891         tg3_switch_clocks(tp);
16892 #endif
16893
16894         if (tg3_asic_rev(tp) != ASIC_REV_5700 &&
16895             tg3_asic_rev(tp) != ASIC_REV_5701)
16896                 goto out;
16897
16898         /* It is best to perform DMA test with maximum write burst size
16899          * to expose the 5700/5701 write DMA bug.
16900          */
16901         saved_dma_rwctrl = tp->dma_rwctrl;
16902         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16903         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16904
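        /* Write a known pattern to the chip and read it back.  On a
         * mismatch, tighten the write boundary to 16 bytes and retry;
         * only corruption at that tightest setting is a hard failure.
         */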
16905         while (1) {
16906                 u32 *p = buf, i;
16907
16908                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
16909                         p[i] = i;
16910
16911                 /* Send the buffer to the chip. */
16912                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, true);
16913                 if (ret) {
16914                         dev_err(&tp->pdev->dev,
16915                                 "%s: Buffer write failed. err = %d\n",
16916                                 __func__, ret);
16917                         break;
16918                 }
16919
16920 #if 0
16921                 /* validate data reached card RAM correctly. */
16922                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16923                         u32 val;
16924                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
16925                         if (le32_to_cpu(val) != p[i]) {
16926                                 dev_err(&tp->pdev->dev,
16927                                         "%s: Buffer corrupted on device! "
16928                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
16929                                 /* ret = -ENODEV here? */
16930                         }
16931                         p[i] = 0;
16932                 }
16933 #endif
16934                 /* Now read it back. */
16935                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, false);
16936                 if (ret) {
16937                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
16938                                 "err = %d\n", __func__, ret);
16939                         break;
16940                 }
16941
16942                 /* Verify it. */
16943                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
16944                         if (p[i] == i)
16945                                 continue;
16946
16947                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16948                             DMA_RWCTRL_WRITE_BNDRY_16) {
16949                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16950                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16951                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16952                                 break;
16953                         } else {
16954                                 dev_err(&tp->pdev->dev,
16955                                         "%s: Buffer corrupted on read back! "
16956                                         "(%d != %d)\n", __func__, p[i], i);
16957                                 ret = -ENODEV;
16958                                 goto out;
16959                         }
16960                 }
16961
16962                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
16963                         /* Success. */
16964                         ret = 0;
16965                         break;
16966                 }
16967         }
16968         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
16969             DMA_RWCTRL_WRITE_BNDRY_16) {
16970                 /* DMA test passed without adjusting DMA boundary,
16971                  * now look for chipsets that are known to expose the
16972                  * DMA bug without failing the test.
16973                  */
16974                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
16975                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
16976                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
16977                 } else {
16978                         /* Safe to use the calculated DMA boundary. */
16979                         tp->dma_rwctrl = saved_dma_rwctrl;
16980                 }
16981
16982                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
16983         }
16984
16985 out:
16986         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
16987 out_nofree:
16988         return ret;
16989 }
16990
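/* Seed the buffer manager watermarks (mbuf and DMA descriptor low/high
 * water marks) with per-chip-family defaults for both standard and
 * jumbo frames.
 */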
16991 static void tg3_init_bufmgr_config(struct tg3 *tp)
16992 {
16993         if (tg3_flag(tp, 57765_PLUS)) {
16994                 tp->bufmgr_config.mbuf_read_dma_low_water =
16995                         DEFAULT_MB_RDMA_LOW_WATER_5705;
16996                 tp->bufmgr_config.mbuf_mac_rx_low_water =
16997                         DEFAULT_MB_MACRX_LOW_WATER_57765;
16998                 tp->bufmgr_config.mbuf_high_water =
16999                         DEFAULT_MB_HIGH_WATER_57765;
17000
17001                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17002                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17003                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17004                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
17005                 tp->bufmgr_config.mbuf_high_water_jumbo =
17006                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
17007         } else if (tg3_flag(tp, 5705_PLUS)) {
17008                 tp->bufmgr_config.mbuf_read_dma_low_water =
17009                         DEFAULT_MB_RDMA_LOW_WATER_5705;
17010                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17011                         DEFAULT_MB_MACRX_LOW_WATER_5705;
17012                 tp->bufmgr_config.mbuf_high_water =
17013                         DEFAULT_MB_HIGH_WATER_5705;
17014                 if (tg3_asic_rev(tp) == ASIC_REV_5906) {
17015                         tp->bufmgr_config.mbuf_mac_rx_low_water =
17016                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
17017                         tp->bufmgr_config.mbuf_high_water =
17018                                 DEFAULT_MB_HIGH_WATER_5906;
17019                 }
17020
17021                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17022                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
17023                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17024                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
17025                 tp->bufmgr_config.mbuf_high_water_jumbo =
17026                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
17027         } else {
17028                 tp->bufmgr_config.mbuf_read_dma_low_water =
17029                         DEFAULT_MB_RDMA_LOW_WATER;
17030                 tp->bufmgr_config.mbuf_mac_rx_low_water =
17031                         DEFAULT_MB_MACRX_LOW_WATER;
17032                 tp->bufmgr_config.mbuf_high_water =
17033                         DEFAULT_MB_HIGH_WATER;
17034
17035                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
17036                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
17037                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
17038                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
17039                 tp->bufmgr_config.mbuf_high_water_jumbo =
17040                         DEFAULT_MB_HIGH_WATER_JUMBO;
17041         }
17042
17043         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
17044         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
17045 }
17046
17047 static char *tg3_phy_string(struct tg3 *tp)
17048 {
17049         switch (tp->phy_id & TG3_PHY_ID_MASK) {
17050         case TG3_PHY_ID_BCM5400:        return "5400";
17051         case TG3_PHY_ID_BCM5401:        return "5401";
17052         case TG3_PHY_ID_BCM5411:        return "5411";
17053         case TG3_PHY_ID_BCM5701:        return "5701";
17054         case TG3_PHY_ID_BCM5703:        return "5703";
17055         case TG3_PHY_ID_BCM5704:        return "5704";
17056         case TG3_PHY_ID_BCM5705:        return "5705";
17057         case TG3_PHY_ID_BCM5750:        return "5750";
17058         case TG3_PHY_ID_BCM5752:        return "5752";
17059         case TG3_PHY_ID_BCM5714:        return "5714";
17060         case TG3_PHY_ID_BCM5780:        return "5780";
17061         case TG3_PHY_ID_BCM5755:        return "5755";
17062         case TG3_PHY_ID_BCM5787:        return "5787";
17063         case TG3_PHY_ID_BCM5784:        return "5784";
17064         case TG3_PHY_ID_BCM5756:        return "5722/5756";
17065         case TG3_PHY_ID_BCM5906:        return "5906";
17066         case TG3_PHY_ID_BCM5761:        return "5761";
17067         case TG3_PHY_ID_BCM5718C:       return "5718C";
17068         case TG3_PHY_ID_BCM5718S:       return "5718S";
17069         case TG3_PHY_ID_BCM57765:       return "57765";
17070         case TG3_PHY_ID_BCM5719C:       return "5719C";
17071         case TG3_PHY_ID_BCM5720C:       return "5720C";
17072         case TG3_PHY_ID_BCM5762:        return "5762C";
17073         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
17074         case 0:                 return "serdes";
17075         default:                return "unknown";
17076         }
17077 }
17078
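/* Format a human-readable description of the bus interface (type,
 * clock and width) for the probe banner; str must be large enough
 * for the longest variant.
 */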
17079 static char *tg3_bus_string(struct tg3 *tp, char *str)
17080 {
17081         if (tg3_flag(tp, PCI_EXPRESS)) {
17082                 strcpy(str, "PCI Express");
17083                 return str;
17084         } else if (tg3_flag(tp, PCIX_MODE)) {
17085                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
17086
17087                 strcpy(str, "PCIX:");
17088
17089                 if ((clock_ctrl == 7) ||
17090                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
17091                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
17092                         strcat(str, "133MHz");
17093                 else if (clock_ctrl == 0)
17094                         strcat(str, "33MHz");
17095                 else if (clock_ctrl == 2)
17096                         strcat(str, "50MHz");
17097                 else if (clock_ctrl == 4)
17098                         strcat(str, "66MHz");
17099                 else if (clock_ctrl == 6)
17100                         strcat(str, "100MHz");
17101         } else {
17102                 strcpy(str, "PCI:");
17103                 if (tg3_flag(tp, PCI_HIGH_SPEED))
17104                         strcat(str, "66MHz");
17105                 else
17106                         strcat(str, "33MHz");
17107         }
17108         if (tg3_flag(tp, PCI_32BIT))
17109                 strcat(str, ":32-bit");
17110         else
17111                 strcat(str, ":64-bit");
17112         return str;
17113 }
17114
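/* Seed the ethtool coalescing defaults.  5705 and later parts have no
 * per-interrupt coalescing controls and no statistics block timer, so
 * those fields are zeroed for them.
 */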
17115 static void tg3_init_coal(struct tg3 *tp)
17116 {
17117         struct ethtool_coalesce *ec = &tp->coal;
17118
17119         memset(ec, 0, sizeof(*ec));
17120         ec->cmd = ETHTOOL_GCOALESCE;
17121         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
17122         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
17123         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
17124         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
17125         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
17126         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
17127         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
17128         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
17129         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
17130
17131         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
17132                                  HOSTCC_MODE_CLRTICK_TXBD)) {
17133                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
17134                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
17135                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
17136                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
17137         }
17138
17139         if (tg3_flag(tp, 5705_PLUS)) {
17140                 ec->rx_coalesce_usecs_irq = 0;
17141                 ec->tx_coalesce_usecs_irq = 0;
17142                 ec->stats_block_coalesce_usecs = 0;
17143         }
17144 }
17145
17146 static int tg3_init_one(struct pci_dev *pdev,
17147                                   const struct pci_device_id *ent)
17148 {
17149         struct net_device *dev;
17150         struct tg3 *tp;
17151         int i, err, pm_cap;
17152         u32 sndmbx, rcvmbx, intmbx;
17153         char str[40];
17154         u64 dma_mask, persist_dma_mask;
17155         netdev_features_t features = 0;
17156
17157         printk_once(KERN_INFO "%s\n", version);
17158
17159         err = pci_enable_device(pdev);
17160         if (err) {
17161                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
17162                 return err;
17163         }
17164
17165         err = pci_request_regions(pdev, DRV_MODULE_NAME);
17166         if (err) {
17167                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
17168                 goto err_out_disable_pdev;
17169         }
17170
17171         pci_set_master(pdev);
17172
17173         /* Find power-management capability. */
17174         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
17175         if (pm_cap == 0) {
17176                 dev_err(&pdev->dev,
17177                         "Cannot find Power Management capability, aborting\n");
17178                 err = -EIO;
17179                 goto err_out_free_res;
17180         }
17181
17182         err = pci_set_power_state(pdev, PCI_D0);
17183         if (err) {
17184                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
17185                 goto err_out_free_res;
17186         }
17187
17188         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
17189         if (!dev) {
17190                 err = -ENOMEM;
17191                 goto err_out_power_down;
17192         }
17193
17194         SET_NETDEV_DEV(dev, &pdev->dev);
17195
17196         tp = netdev_priv(dev);
17197         tp->pdev = pdev;
17198         tp->dev = dev;
17199         tp->pm_cap = pm_cap;
17200         tp->rx_mode = TG3_DEF_RX_MODE;
17201         tp->tx_mode = TG3_DEF_TX_MODE;
17202         tp->irq_sync = 1;
17203
17204         if (tg3_debug > 0)
17205                 tp->msg_enable = tg3_debug;
17206         else
17207                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
17208
17209         if (pdev_is_ssb_gige_core(pdev)) {
17210                 tg3_flag_set(tp, IS_SSB_CORE);
17211                 if (ssb_gige_must_flush_posted_writes(pdev))
17212                         tg3_flag_set(tp, FLUSH_POSTED_WRITES);
17213                 if (ssb_gige_one_dma_at_once(pdev))
17214                         tg3_flag_set(tp, ONE_DMA_AT_ONCE);
17215                 if (ssb_gige_have_roboswitch(pdev))
17216                         tg3_flag_set(tp, ROBOSWITCH);
17217                 if (ssb_gige_is_rgmii(pdev))
17218                         tg3_flag_set(tp, RGMII_MODE);
17219         }
17220
17221         /* The word/byte swap settings here govern register access byte
17222          * swapping.  DMA data byte swapping is controlled by the GRC_MODE
17223          * setting below.
17224          */
17225         tp->misc_host_ctrl =
17226                 MISC_HOST_CTRL_MASK_PCI_INT |
17227                 MISC_HOST_CTRL_WORD_SWAP |
17228                 MISC_HOST_CTRL_INDIR_ACCESS |
17229                 MISC_HOST_CTRL_PCISTATE_RW;
17230
17231         /* The NONFRM (non-frame) byte/word swap controls take effect
17232          * on descriptor entries, anything which isn't packet data.
17233          *
17234          * The StrongARM chips on the board (one for tx, one for rx)
17235          * are running in big-endian mode.
17236          */
17237         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
17238                         GRC_MODE_WSWAP_NONFRM_DATA);
17239 #ifdef __BIG_ENDIAN
17240         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
17241 #endif
17242         spin_lock_init(&tp->lock);
17243         spin_lock_init(&tp->indirect_lock);
17244         INIT_WORK(&tp->reset_task, tg3_reset_task);
17245
17246         tp->regs = pci_ioremap_bar(pdev, BAR_0);
17247         if (!tp->regs) {
17248                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
17249                 err = -ENOMEM;
17250                 goto err_out_free_dev;
17251         }
17252
17253         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
17254             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
17255             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
17256             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
17257             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
17258             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717_C ||
17259             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
17260             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
17261             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720 ||
17262             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5762 ||
17263             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5725 ||
17264             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5727) {
17265                 tg3_flag_set(tp, ENABLE_APE);
17266                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
17267                 if (!tp->aperegs) {
17268                         dev_err(&pdev->dev,
17269                                 "Cannot map APE registers, aborting\n");
17270                         err = -ENOMEM;
17271                         goto err_out_iounmap;
17272                 }
17273         }
17274
17275         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
17276         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
17277
17278         dev->ethtool_ops = &tg3_ethtool_ops;
17279         dev->watchdog_timeo = TG3_TX_TIMEOUT;
17280         dev->netdev_ops = &tg3_netdev_ops;
17281         dev->irq = pdev->irq;
17282
17283         err = tg3_get_invariants(tp, ent);
17284         if (err) {
17285                 dev_err(&pdev->dev,
17286                         "Problem fetching invariants of chip, aborting\n");
17287                 goto err_out_apeunmap;
17288         }
17289
17290         /* The EPB bridge inside 5714, 5715, and 5780 and any
17291          * device behind the EPB cannot support DMA addresses > 40-bit.
17292          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
17293          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
17294          * do DMA address check in tg3_start_xmit().
17295          */
17296         if (tg3_flag(tp, IS_5788))
17297                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
17298         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
17299                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
17300 #ifdef CONFIG_HIGHMEM
17301                 dma_mask = DMA_BIT_MASK(64);
17302 #endif
17303         } else
17304                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
17305
17306         /* Configure DMA attributes. */
17307         if (dma_mask > DMA_BIT_MASK(32)) {
17308                 err = pci_set_dma_mask(pdev, dma_mask);
17309                 if (!err) {
17310                         features |= NETIF_F_HIGHDMA;
17311                         err = pci_set_consistent_dma_mask(pdev,
17312                                                           persist_dma_mask);
17313                         if (err < 0) {
17314                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
17315                                         "DMA for consistent allocations\n");
17316                                 goto err_out_apeunmap;
17317                         }
17318                 }
17319         }
17320         if (err || dma_mask == DMA_BIT_MASK(32)) {
17321                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
17322                 if (err) {
17323                         dev_err(&pdev->dev,
17324                                 "No usable DMA configuration, aborting\n");
17325                         goto err_out_apeunmap;
17326                 }
17327         }
17328
17329         tg3_init_bufmgr_config(tp);
17330
17331         /* 5700 B0 chips do not support checksumming correctly due
17332          * to hardware bugs.
17333          */
17334         if (tg3_chip_rev_id(tp) != CHIPREV_ID_5700_B0) {
17335                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
17336
17337                 if (tg3_flag(tp, 5755_PLUS))
17338                         features |= NETIF_F_IPV6_CSUM;
17339         }
17340
17341         /* TSO is on by default on chips that support hardware TSO.
17342          * Firmware TSO on older chips gives lower performance, so it
17343          * is off by default, but can be enabled using ethtool.
17344          */
17345         if ((tg3_flag(tp, HW_TSO_1) ||
17346              tg3_flag(tp, HW_TSO_2) ||
17347              tg3_flag(tp, HW_TSO_3)) &&
17348             (features & NETIF_F_IP_CSUM))
17349                 features |= NETIF_F_TSO;
17350         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
17351                 if (features & NETIF_F_IPV6_CSUM)
17352                         features |= NETIF_F_TSO6;
17353                 if (tg3_flag(tp, HW_TSO_3) ||
17354                     tg3_asic_rev(tp) == ASIC_REV_5761 ||
17355                     (tg3_asic_rev(tp) == ASIC_REV_5784 &&
17356                      tg3_chip_rev(tp) != CHIPREV_5784_AX) ||
17357                     tg3_asic_rev(tp) == ASIC_REV_5785 ||
17358                     tg3_asic_rev(tp) == ASIC_REV_57780)
17359                         features |= NETIF_F_TSO_ECN;
17360         }
17361
17362         dev->features |= features | NETIF_F_HW_VLAN_CTAG_TX |
17363                          NETIF_F_HW_VLAN_CTAG_RX;
17364         dev->vlan_features |= features;
17365
17366         /*
17367          * Add loopback capability only for a subset of devices that support
17368          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
17369          * loopback for the remaining devices.
17370          */
17371         if (tg3_asic_rev(tp) != ASIC_REV_5780 &&
17372             !tg3_flag(tp, CPMU_PRESENT))
17373                 /* Add the loopback capability */
17374                 features |= NETIF_F_LOOPBACK;
17375
17376         dev->hw_features |= features;
17377
17378         if (tg3_chip_rev_id(tp) == CHIPREV_ID_5705_A1 &&
17379             !tg3_flag(tp, TSO_CAPABLE) &&
17380             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
17381                 tg3_flag_set(tp, MAX_RXPEND_64);
17382                 tp->rx_pending = 63;
17383         }
17384
17385         err = tg3_get_device_address(tp);
17386         if (err) {
17387                 dev_err(&pdev->dev,
17388                         "Could not obtain valid ethernet address, aborting\n");
17389                 goto err_out_apeunmap;
17390         }
17391
17392         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
17393         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
17394         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
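        /* Walk the per-vector interrupt, receive-return and send-host
         * mailbox registers.  The blocks are not uniformly strided in
         * the register map, hence the stepping adjustments at the
         * bottom of the loop.
         */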
17395         for (i = 0; i < tp->irq_max; i++) {
17396                 struct tg3_napi *tnapi = &tp->napi[i];
17397
17398                 tnapi->tp = tp;
17399                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
17400
17401                 tnapi->int_mbox = intmbx;
17402                 if (i <= 4)
17403                         intmbx += 0x8;
17404                 else
17405                         intmbx += 0x4;
17406
17407                 tnapi->consmbox = rcvmbx;
17408                 tnapi->prodmbox = sndmbx;
17409
17410                 if (i)
17411                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
17412                 else
17413                         tnapi->coal_now = HOSTCC_MODE_NOW;
17414
17415                 if (!tg3_flag(tp, SUPPORT_MSIX))
17416                         break;
17417
17418                 /*
17419                  * If we support MSIX, we'll be using RSS.  If we're using
17420                  * RSS, the first vector only handles link interrupts and the
17421                  * remaining vectors handle rx and tx interrupts.  Reuse the
17422                  * mailbox values for the next iteration.  The values we setup
17423                  * above are still useful for the single vectored mode.
17424                  */
17425                 if (!i)
17426                         continue;
17427
17428                 rcvmbx += 0x8;
17429
17430                 if (sndmbx & 0x4)
17431                         sndmbx -= 0x4;
17432                 else
17433                         sndmbx += 0xc;
17434         }
17435
17436         /*
17437          * Reset the chip in case a UNDI or EFI driver did not shut it
17438          * down.  The DMA self test will enable the WDMAC and we'd see
17439          * (spurious) pending DMA on the PCI bus at that point.
17440          */
17441         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
17442             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
17443                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
17444                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17445         }
17446
17447         err = tg3_test_dma(tp);
17448         if (err) {
17449                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
17450                 goto err_out_apeunmap;
17451         }
17452
17453         tg3_init_coal(tp);
17454
17455         pci_set_drvdata(pdev, dev);
17456
17457         if (tg3_asic_rev(tp) == ASIC_REV_5719 ||
17458             tg3_asic_rev(tp) == ASIC_REV_5720 ||
17459             tg3_asic_rev(tp) == ASIC_REV_5762)
17460                 tg3_flag_set(tp, PTP_CAPABLE);
17461
17462         if (tg3_flag(tp, 5717_PLUS)) {
17463                 /* Resume a low-power mode */
17464                 tg3_frob_aux_power(tp, false);
17465         }
17466
17467         tg3_timer_init(tp);
17468
17469         tg3_carrier_off(tp);
17470
17471         err = register_netdev(dev);
17472         if (err) {
17473                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
17474                 goto err_out_apeunmap;
17475         }
17476
17477         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
17478                     tp->board_part_number,
17479                     tg3_chip_rev_id(tp),
17480                     tg3_bus_string(tp, str),
17481                     dev->dev_addr);
17482
17483         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
17484                 struct phy_device *phydev;
17485                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
17486                 netdev_info(dev,
17487                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
17488                             phydev->drv->name, dev_name(&phydev->dev));
17489         } else {
17490                 char *ethtype;
17491
17492                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
17493                         ethtype = "10/100Base-TX";
17494                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
17495                         ethtype = "1000Base-SX";
17496                 else
17497                         ethtype = "10/100/1000Base-T";
17498
17499                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
17500                             "(WireSpeed[%d], EEE[%d])\n",
17501                             tg3_phy_string(tp), ethtype,
17502                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
17503                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
17504         }
17505
17506         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
17507                     (dev->features & NETIF_F_RXCSUM) != 0,
17508                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
17509                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
17510                     tg3_flag(tp, ENABLE_ASF) != 0,
17511                     tg3_flag(tp, TSO_CAPABLE) != 0);
17512         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
17513                     tp->dma_rwctrl,
17514                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
17515                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
17516
17517         pci_save_state(pdev);
17518
17519         return 0;
17520
17521 err_out_apeunmap:
17522         if (tp->aperegs) {
17523                 iounmap(tp->aperegs);
17524                 tp->aperegs = NULL;
17525         }
17526
17527 err_out_iounmap:
17528         if (tp->regs) {
17529                 iounmap(tp->regs);
17530                 tp->regs = NULL;
17531         }
17532
17533 err_out_free_dev:
17534         free_netdev(dev);
17535
17536 err_out_power_down:
17537         pci_set_power_state(pdev, PCI_D3hot);
17538
17539 err_out_free_res:
17540         pci_release_regions(pdev);
17541
17542 err_out_disable_pdev:
17543         pci_disable_device(pdev);
17544         pci_set_drvdata(pdev, NULL);
17545         return err;
17546 }
17547
17548 static void tg3_remove_one(struct pci_dev *pdev)
17549 {
17550         struct net_device *dev = pci_get_drvdata(pdev);
17551
17552         if (dev) {
17553                 struct tg3 *tp = netdev_priv(dev);
17554
17555                 release_firmware(tp->fw);
17556
17557                 tg3_reset_task_cancel(tp);
17558
17559                 if (tg3_flag(tp, USE_PHYLIB)) {
17560                         tg3_phy_fini(tp);
17561                         tg3_mdio_fini(tp);
17562                 }
17563
17564                 unregister_netdev(dev);
17565                 if (tp->aperegs) {
17566                         iounmap(tp->aperegs);
17567                         tp->aperegs = NULL;
17568                 }
17569                 if (tp->regs) {
17570                         iounmap(tp->regs);
17571                         tp->regs = NULL;
17572                 }
17573                 free_netdev(dev);
17574                 pci_release_regions(pdev);
17575                 pci_disable_device(pdev);
17576                 pci_set_drvdata(pdev, NULL);
17577         }
17578 }
17579
17580 #ifdef CONFIG_PM_SLEEP
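/* System-sleep hooks: suspend quiesces the PHY, NAPI and the timer,
 * halts the chip and prepares its wake-up state; if that preparation
 * fails, the hardware is restarted so the interface stays usable.
 */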
17581 static int tg3_suspend(struct device *device)
17582 {
17583         struct pci_dev *pdev = to_pci_dev(device);
17584         struct net_device *dev = pci_get_drvdata(pdev);
17585         struct tg3 *tp = netdev_priv(dev);
17586         int err;
17587
17588         if (!netif_running(dev))
17589                 return 0;
17590
17591         tg3_reset_task_cancel(tp);
17592         tg3_phy_stop(tp);
17593         tg3_netif_stop(tp);
17594
17595         tg3_timer_stop(tp);
17596
17597         tg3_full_lock(tp, 1);
17598         tg3_disable_ints(tp);
17599         tg3_full_unlock(tp);
17600
17601         netif_device_detach(dev);
17602
17603         tg3_full_lock(tp, 0);
17604         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
17605         tg3_flag_clear(tp, INIT_COMPLETE);
17606         tg3_full_unlock(tp);
17607
17608         err = tg3_power_down_prepare(tp);
17609         if (err) {
17610                 int err2;
17611
17612                 tg3_full_lock(tp, 0);
17613
17614                 tg3_flag_set(tp, INIT_COMPLETE);
17615                 err2 = tg3_restart_hw(tp, true);
17616                 if (err2)
17617                         goto out;
17618
17619                 tg3_timer_start(tp);
17620
17621                 netif_device_attach(dev);
17622                 tg3_netif_start(tp);
17623
17624 out:
17625                 tg3_full_unlock(tp);
17626
17627                 if (!err2)
17628                         tg3_phy_start(tp);
17629         }
17630
17631         return err;
17632 }
17633
17634 static int tg3_resume(struct device *device)
17635 {
17636         struct pci_dev *pdev = to_pci_dev(device);
17637         struct net_device *dev = pci_get_drvdata(pdev);
17638         struct tg3 *tp = netdev_priv(dev);
17639         int err;
17640
17641         if (!netif_running(dev))
17642                 return 0;
17643
17644         netif_device_attach(dev);
17645
17646         tg3_full_lock(tp, 0);
17647
17648         tg3_flag_set(tp, INIT_COMPLETE);
17649         err = tg3_restart_hw(tp,
17650                              !(tp->phy_flags & TG3_PHYFLG_KEEP_LINK_ON_PWRDN));
17651         if (err)
17652                 goto out;
17653
17654         tg3_timer_start(tp);
17655
17656         tg3_netif_start(tp);
17657
17658 out:
17659         tg3_full_unlock(tp);
17660
17661         if (!err)
17662                 tg3_phy_start(tp);
17663
17664         return err;
17665 }
17666 #endif /* CONFIG_PM_SLEEP */
17667
17668 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
17669
17670 /**
17671  * tg3_io_error_detected - called when PCI error is detected
17672  * @pdev: Pointer to PCI device
17673  * @state: The current pci connection state
17674  *
17675  * This function is called after a PCI bus error affecting
17676  * this device has been detected.
17677  */
17678 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
17679                                               pci_channel_state_t state)
17680 {
17681         struct net_device *netdev = pci_get_drvdata(pdev);
17682         struct tg3 *tp = netdev_priv(netdev);
17683         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
17684
17685         netdev_info(netdev, "PCI I/O error detected\n");
17686
17687         rtnl_lock();
17688
17689         if (!netif_running(netdev))
17690                 goto done;
17691
17692         tg3_phy_stop(tp);
17693
17694         tg3_netif_stop(tp);
17695
17696         tg3_timer_stop(tp);
17697
17698         /* Make sure that the reset task doesn't run */
17699         tg3_reset_task_cancel(tp);
17700
17701         netif_device_detach(netdev);
17702
17703         /* Clean up software state, even if MMIO is blocked */
17704         tg3_full_lock(tp, 0);
17705         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
17706         tg3_full_unlock(tp);
17707
17708 done:
17709         if (state == pci_channel_io_perm_failure)
17710                 err = PCI_ERS_RESULT_DISCONNECT;
17711         else
17712                 pci_disable_device(pdev);
17713
17714         rtnl_unlock();
17715
17716         return err;
17717 }
17718
17719 /**
17720  * tg3_io_slot_reset - called after the PCI bus has been reset.
17721  * @pdev: Pointer to PCI device
17722  *
17723  * Restart the card from scratch, as if from a cold-boot.
17724  * At this point, the card has experienced a hard reset,
17725  * followed by fixups by BIOS, and has its config space
17726  * set up identically to what it was at cold boot.
17727  */
17728 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
17729 {
17730         struct net_device *netdev = pci_get_drvdata(pdev);
17731         struct tg3 *tp = netdev_priv(netdev);
17732         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
17733         int err;
17734
17735         rtnl_lock();
17736
17737         if (pci_enable_device(pdev)) {
17738                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
17739                 goto done;
17740         }
17741
17742         pci_set_master(pdev);
17743         pci_restore_state(pdev);
17744         pci_save_state(pdev);
17745
17746         if (!netif_running(netdev)) {
17747                 rc = PCI_ERS_RESULT_RECOVERED;
17748                 goto done;
17749         }
17750
17751         err = tg3_power_up(tp);
17752         if (err)
17753                 goto done;
17754
17755         rc = PCI_ERS_RESULT_RECOVERED;
17756
17757 done:
17758         rtnl_unlock();
17759
17760         return rc;
17761 }
17762
17763 /**
17764  * tg3_io_resume - called when traffic can start flowing again.
17765  * @pdev: Pointer to PCI device
17766  *
17767  * This callback is called when the error recovery driver tells
17768  * us that it's OK to resume normal operation.
17769  */
17770 static void tg3_io_resume(struct pci_dev *pdev)
17771 {
17772         struct net_device *netdev = pci_get_drvdata(pdev);
17773         struct tg3 *tp = netdev_priv(netdev);
17774         int err;
17775
17776         rtnl_lock();
17777
17778         if (!netif_running(netdev))
17779                 goto done;
17780
17781         tg3_full_lock(tp, 0);
17782         tg3_flag_set(tp, INIT_COMPLETE);
17783         err = tg3_restart_hw(tp, true);
17784         if (err) {
17785                 tg3_full_unlock(tp);
17786                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
17787                 goto done;
17788         }
17789
17790         netif_device_attach(netdev);
17791
17792         tg3_timer_start(tp);
17793
17794         tg3_netif_start(tp);
17795
17796         tg3_full_unlock(tp);
17797
17798         tg3_phy_start(tp);
17799
17800 done:
17801         rtnl_unlock();
17802 }
17803
17804 static const struct pci_error_handlers tg3_err_handler = {
17805         .error_detected = tg3_io_error_detected,
17806         .slot_reset     = tg3_io_slot_reset,
17807         .resume         = tg3_io_resume
17808 };
17809
17810 static struct pci_driver tg3_driver = {
17811         .name           = DRV_MODULE_NAME,
17812         .id_table       = tg3_pci_tbl,
17813         .probe          = tg3_init_one,
17814         .remove         = tg3_remove_one,
17815         .err_handler    = &tg3_err_handler,
17816         .driver.pm      = &tg3_pm_ops,
17817 };
17818
17819 static int __init tg3_init(void)
17820 {
17821         return pci_register_driver(&tg3_driver);
17822 }
17823
17824 static void __exit tg3_cleanup(void)
17825 {
17826         pci_unregister_driver(&tg3_driver);
17827 }
17828
17829 module_init(tg3_init);
17830 module_exit(tg3_cleanup);