/*
 * r8169.c: RealTek 8169/8168/8101 ethernet driver.
 *
 * Copyright (c) 2002 ShuChen <shuchen@realtek.com.tw>
 * Copyright (c) 2003 - 2007 Francois Romieu <romieu@fr.zoreil.com>
 * Copyright (c) a lot of people too. Please respect their work.
 *
 * See MAINTAINERS file for support contact information.
 */
11 #include <linux/module.h>
12 #include <linux/moduleparam.h>
13 #include <linux/pci.h>
14 #include <linux/netdevice.h>
15 #include <linux/etherdevice.h>
16 #include <linux/delay.h>
17 #include <linux/ethtool.h>
18 #include <linux/mii.h>
19 #include <linux/if_vlan.h>
20 #include <linux/crc32.h>
23 #include <linux/tcp.h>
24 #include <linux/init.h>
25 #include <linux/interrupt.h>
26 #include <linux/dma-mapping.h>
27 #include <linux/pm_runtime.h>
28 #include <linux/firmware.h>
29 #include <linux/pci-aspm.h>
30 #include <linux/prefetch.h>
35 #define RTL8169_VERSION "2.3LK-NAPI"
36 #define MODULENAME "r8169"
37 #define PFX MODULENAME ": "
39 #define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
40 #define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
41 #define FIRMWARE_8168E_1 "rtl_nic/rtl8168e-1.fw"
42 #define FIRMWARE_8168E_2 "rtl_nic/rtl8168e-2.fw"
43 #define FIRMWARE_8168E_3 "rtl_nic/rtl8168e-3.fw"
44 #define FIRMWARE_8168F_1 "rtl_nic/rtl8168f-1.fw"
45 #define FIRMWARE_8168F_2 "rtl_nic/rtl8168f-2.fw"
46 #define FIRMWARE_8105E_1 "rtl_nic/rtl8105e-1.fw"
47 #define FIRMWARE_8402_1 "rtl_nic/rtl8402-1.fw"
48 #define FIRMWARE_8411_1 "rtl_nic/rtl8411-1.fw"
49 #define FIRMWARE_8106E_1 "rtl_nic/rtl8106e-1.fw"
50 #define FIRMWARE_8168G_1 "rtl_nic/rtl8168g-1.fw"
53 #define assert(expr) \
55 printk( "Assertion failed! %s,%s,%s,line=%d\n", \
56 #expr,__FILE__,__func__,__LINE__); \
58 #define dprintk(fmt, args...) \
59 do { printk(KERN_DEBUG PFX fmt, ## args); } while (0)
61 #define assert(expr) do {} while (0)
62 #define dprintk(fmt, args...) do {} while (0)
63 #endif /* RTL8169_DEBUG */
65 #define R8169_MSG_DEFAULT \
66 (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
68 #define TX_SLOTS_AVAIL(tp) \
69 (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
71 /* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
72 #define TX_FRAGS_READY_FOR(tp,nr_frags) \
73 (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
75 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
76 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
77 static const int multicast_filter_limit = 32;
79 #define MAX_READ_REQUEST_SHIFT 12
80 #define TX_DMA_BURST 7 /* Maximum PCI burst, '7' is unlimited */
81 #define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
83 #define R8169_REGS_SIZE 256
84 #define R8169_NAPI_WEIGHT 64
85 #define NUM_TX_DESC 64 /* Number of Tx descriptor registers */
86 #define NUM_RX_DESC 256U /* Number of Rx descriptor registers */
87 #define R8169_TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc))
88 #define R8169_RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc))
90 #define RTL8169_TX_TIMEOUT (6*HZ)
91 #define RTL8169_PHY_TIMEOUT (10*HZ)
93 /* write/read MMIO register */
94 #define RTL_W8(reg, val8) writeb ((val8), ioaddr + (reg))
95 #define RTL_W16(reg, val16) writew ((val16), ioaddr + (reg))
96 #define RTL_W32(reg, val32) writel ((val32), ioaddr + (reg))
97 #define RTL_R8(reg) readb (ioaddr + (reg))
98 #define RTL_R16(reg) readw (ioaddr + (reg))
99 #define RTL_R32(reg) readl (ioaddr + (reg))
102 RTL_GIGA_MAC_VER_01 = 0,
143 RTL_GIGA_MAC_NONE = 0xff,
146 enum rtl_tx_desc_version {
151 #define JUMBO_1K ETH_DATA_LEN
152 #define JUMBO_4K (4*1024 - ETH_HLEN - 2)
153 #define JUMBO_6K (6*1024 - ETH_HLEN - 2)
154 #define JUMBO_7K (7*1024 - ETH_HLEN - 2)
155 #define JUMBO_9K (9*1024 - ETH_HLEN - 2)
157 #define _R(NAME,TD,FW,SZ,B) { \
165 static const struct {
167 enum rtl_tx_desc_version txd_version;
171 } rtl_chip_infos[] = {
173 [RTL_GIGA_MAC_VER_01] =
174 _R("RTL8169", RTL_TD_0, NULL, JUMBO_7K, true),
175 [RTL_GIGA_MAC_VER_02] =
176 _R("RTL8169s", RTL_TD_0, NULL, JUMBO_7K, true),
177 [RTL_GIGA_MAC_VER_03] =
178 _R("RTL8110s", RTL_TD_0, NULL, JUMBO_7K, true),
179 [RTL_GIGA_MAC_VER_04] =
180 _R("RTL8169sb/8110sb", RTL_TD_0, NULL, JUMBO_7K, true),
181 [RTL_GIGA_MAC_VER_05] =
182 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
183 [RTL_GIGA_MAC_VER_06] =
184 _R("RTL8169sc/8110sc", RTL_TD_0, NULL, JUMBO_7K, true),
186 [RTL_GIGA_MAC_VER_07] =
187 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
188 [RTL_GIGA_MAC_VER_08] =
189 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
190 [RTL_GIGA_MAC_VER_09] =
191 _R("RTL8102e", RTL_TD_1, NULL, JUMBO_1K, true),
192 [RTL_GIGA_MAC_VER_10] =
193 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
194 [RTL_GIGA_MAC_VER_11] =
195 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
196 [RTL_GIGA_MAC_VER_12] =
197 _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
198 [RTL_GIGA_MAC_VER_13] =
199 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
200 [RTL_GIGA_MAC_VER_14] =
201 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
202 [RTL_GIGA_MAC_VER_15] =
203 _R("RTL8100e", RTL_TD_0, NULL, JUMBO_1K, true),
204 [RTL_GIGA_MAC_VER_16] =
205 _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
206 [RTL_GIGA_MAC_VER_17] =
207 _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
208 [RTL_GIGA_MAC_VER_18] =
209 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
210 [RTL_GIGA_MAC_VER_19] =
211 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
212 [RTL_GIGA_MAC_VER_20] =
213 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
214 [RTL_GIGA_MAC_VER_21] =
215 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
216 [RTL_GIGA_MAC_VER_22] =
217 _R("RTL8168c/8111c", RTL_TD_1, NULL, JUMBO_6K, false),
218 [RTL_GIGA_MAC_VER_23] =
219 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
220 [RTL_GIGA_MAC_VER_24] =
221 _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
222 [RTL_GIGA_MAC_VER_25] =
223 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_1,
225 [RTL_GIGA_MAC_VER_26] =
226 _R("RTL8168d/8111d", RTL_TD_1, FIRMWARE_8168D_2,
228 [RTL_GIGA_MAC_VER_27] =
229 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
230 [RTL_GIGA_MAC_VER_28] =
231 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
232 [RTL_GIGA_MAC_VER_29] =
233 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
235 [RTL_GIGA_MAC_VER_30] =
236 _R("RTL8105e", RTL_TD_1, FIRMWARE_8105E_1,
238 [RTL_GIGA_MAC_VER_31] =
239 _R("RTL8168dp/8111dp", RTL_TD_1, NULL, JUMBO_9K, false),
240 [RTL_GIGA_MAC_VER_32] =
241 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_1,
243 [RTL_GIGA_MAC_VER_33] =
244 _R("RTL8168e/8111e", RTL_TD_1, FIRMWARE_8168E_2,
246 [RTL_GIGA_MAC_VER_34] =
247 _R("RTL8168evl/8111evl",RTL_TD_1, FIRMWARE_8168E_3,
249 [RTL_GIGA_MAC_VER_35] =
250 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_1,
252 [RTL_GIGA_MAC_VER_36] =
253 _R("RTL8168f/8111f", RTL_TD_1, FIRMWARE_8168F_2,
255 [RTL_GIGA_MAC_VER_37] =
256 _R("RTL8402", RTL_TD_1, FIRMWARE_8402_1,
258 [RTL_GIGA_MAC_VER_38] =
259 _R("RTL8411", RTL_TD_1, FIRMWARE_8411_1,
261 [RTL_GIGA_MAC_VER_39] =
262 _R("RTL8106e", RTL_TD_1, FIRMWARE_8106E_1,
264 [RTL_GIGA_MAC_VER_40] =
265 _R("RTL8168g/8111g", RTL_TD_1, FIRMWARE_8168G_1,
267 [RTL_GIGA_MAC_VER_41] =
268 _R("RTL8168g/8111g", RTL_TD_1, NULL, JUMBO_9K, false),
278 static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
279 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8129), 0, 0, RTL_CFG_0 },
280 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8136), 0, 0, RTL_CFG_2 },
281 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8167), 0, 0, RTL_CFG_0 },
282 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8168), 0, 0, RTL_CFG_1 },
283 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8169), 0, 0, RTL_CFG_0 },
284 { PCI_VENDOR_ID_DLINK, 0x4300,
285 PCI_VENDOR_ID_DLINK, 0x4b10, 0, 0, RTL_CFG_1 },
286 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4300), 0, 0, RTL_CFG_0 },
287 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4302), 0, 0, RTL_CFG_0 },
288 { PCI_DEVICE(PCI_VENDOR_ID_AT, 0xc107), 0, 0, RTL_CFG_0 },
289 { PCI_DEVICE(0x16ec, 0x0116), 0, 0, RTL_CFG_0 },
290 { PCI_VENDOR_ID_LINKSYS, 0x1032,
291 PCI_ANY_ID, 0x0024, 0, 0, RTL_CFG_0 },
293 PCI_ANY_ID, 0x2410, 0, 0, RTL_CFG_2 },
297 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
299 static int rx_buf_sz = 16383;
306 MAC0 = 0, /* Ethernet hardware address. */
308 MAR0 = 8, /* Multicast filter. */
309 CounterAddrLow = 0x10,
310 CounterAddrHigh = 0x14,
311 TxDescStartAddrLow = 0x20,
312 TxDescStartAddrHigh = 0x24,
313 TxHDescStartAddrLow = 0x28,
314 TxHDescStartAddrHigh = 0x2c,
323 #define TXCFG_AUTO_FIFO (1 << 7) /* 8111e-vl */
324 #define TXCFG_EMPTY (1 << 11) /* 8111e-vl */
327 #define RX128_INT_EN (1 << 15) /* 8111c and later */
328 #define RX_MULTI_EN (1 << 14) /* 8111c only */
329 #define RXCFG_FIFO_SHIFT 13
330 /* No threshold before first PCI xfer */
331 #define RX_FIFO_THRESH (7 << RXCFG_FIFO_SHIFT)
332 #define RXCFG_DMA_SHIFT 8
333 /* Unlimited maximum PCI burst. */
334 #define RX_DMA_BURST (7 << RXCFG_DMA_SHIFT)
341 #define PME_SIGNAL (1 << 5) /* 8168c and later */
352 RxDescAddrLow = 0xe4,
353 RxDescAddrHigh = 0xe8,
354 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
356 #define NoEarlyTx 0x3f /* Max value : no early transmit. */
358 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
360 #define TxPacketMax (8064 >> 7)
361 #define EarlySize 0x27
364 FuncEventMask = 0xf4,
365 FuncPresetState = 0xf8,
366 FuncForceEvent = 0xfc,
369 enum rtl8110_registers {
375 enum rtl8168_8101_registers {
378 #define CSIAR_FLAG 0x80000000
379 #define CSIAR_WRITE_CMD 0x80000000
380 #define CSIAR_BYTE_ENABLE 0x0f
381 #define CSIAR_BYTE_ENABLE_SHIFT 12
382 #define CSIAR_ADDR_MASK 0x0fff
383 #define CSIAR_FUNC_CARD 0x00000000
384 #define CSIAR_FUNC_SDIO 0x00010000
385 #define CSIAR_FUNC_NIC 0x00020000
388 #define EPHYAR_FLAG 0x80000000
389 #define EPHYAR_WRITE_CMD 0x80000000
390 #define EPHYAR_REG_MASK 0x1f
391 #define EPHYAR_REG_SHIFT 16
392 #define EPHYAR_DATA_MASK 0xffff
394 #define PFM_EN (1 << 6)
396 #define FIX_NAK_1 (1 << 4)
397 #define FIX_NAK_2 (1 << 3)
400 #define NOW_IS_OOB (1 << 7)
401 #define TX_EMPTY (1 << 5)
402 #define RX_EMPTY (1 << 4)
403 #define RXTX_EMPTY (TX_EMPTY | RX_EMPTY)
404 #define EN_NDP (1 << 3)
405 #define EN_OOB_RESET (1 << 2)
406 #define LINK_LIST_RDY (1 << 1)
408 #define EFUSEAR_FLAG 0x80000000
409 #define EFUSEAR_WRITE_CMD 0x80000000
410 #define EFUSEAR_READ_CMD 0x00000000
411 #define EFUSEAR_REG_MASK 0x03ff
412 #define EFUSEAR_REG_SHIFT 8
413 #define EFUSEAR_DATA_MASK 0xff
416 enum rtl8168_registers {
421 #define ERIAR_FLAG 0x80000000
422 #define ERIAR_WRITE_CMD 0x80000000
423 #define ERIAR_READ_CMD 0x00000000
424 #define ERIAR_ADDR_BYTE_ALIGN 4
425 #define ERIAR_TYPE_SHIFT 16
426 #define ERIAR_EXGMAC (0x00 << ERIAR_TYPE_SHIFT)
427 #define ERIAR_MSIX (0x01 << ERIAR_TYPE_SHIFT)
428 #define ERIAR_ASF (0x02 << ERIAR_TYPE_SHIFT)
429 #define ERIAR_MASK_SHIFT 12
430 #define ERIAR_MASK_0001 (0x1 << ERIAR_MASK_SHIFT)
431 #define ERIAR_MASK_0011 (0x3 << ERIAR_MASK_SHIFT)
432 #define ERIAR_MASK_0101 (0x5 << ERIAR_MASK_SHIFT)
433 #define ERIAR_MASK_1111 (0xf << ERIAR_MASK_SHIFT)
434 EPHY_RXER_NUM = 0x7c,
435 OCPDR = 0xb0, /* OCP GPHY access */
436 #define OCPDR_WRITE_CMD 0x80000000
437 #define OCPDR_READ_CMD 0x00000000
438 #define OCPDR_REG_MASK 0x7f
439 #define OCPDR_GPHY_REG_SHIFT 16
440 #define OCPDR_DATA_MASK 0xffff
442 #define OCPAR_FLAG 0x80000000
443 #define OCPAR_GPHY_WRITE_CMD 0x8000f060
444 #define OCPAR_GPHY_READ_CMD 0x0000f060
446 RDSAR1 = 0xd0, /* 8168c only. Undocumented on 8168dp */
447 MISC = 0xf0, /* 8168e only. */
448 #define TXPLA_RST (1 << 29)
449 #define DISABLE_LAN_EN (1 << 23) /* Enable GPIO pin */
450 #define PWM_EN (1 << 22)
451 #define RXDV_GATED_EN (1 << 19)
452 #define EARLY_TALLY_EN (1 << 16)
455 enum rtl_register_content {
456 /* InterruptStatusBits */
460 TxDescUnavail = 0x0080,
484 /* TXPoll register p.5 */
485 HPQ = 0x80, /* Poll cmd on the high prio queue */
486 NPQ = 0x40, /* Poll cmd on the low prio queue */
487 FSWInt = 0x01, /* Forced software interrupt */
491 Cfg9346_Unlock = 0xc0,
496 AcceptBroadcast = 0x08,
497 AcceptMulticast = 0x04,
499 AcceptAllPhys = 0x01,
500 #define RX_CONFIG_ACCEPT_MASK 0x3f
503 TxInterFrameGapShift = 24,
504 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
506 /* Config1 register p.24 */
509 Speed_down = (1 << 4),
513 PMEnable = (1 << 0), /* Power Management Enable */
515 /* Config2 register p. 25 */
516 MSIEnable = (1 << 5), /* 8169 only. Reserved in the 8168. */
517 PCI_Clock_66MHz = 0x01,
518 PCI_Clock_33MHz = 0x00,
520 /* Config3 register p.25 */
521 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
522 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
523 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
524 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
526 /* Config4 register */
527 Jumbo_En1 = (1 << 1), /* 8168 only. Reserved in the 8168b */
529 /* Config5 register p.27 */
530 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
531 MWF = (1 << 5), /* Accept Multicast wakeup frame */
532 UWF = (1 << 4), /* Accept Unicast wakeup frame */
534 LanWake = (1 << 1), /* LanWake enable/disable */
535 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
538 TBIReset = 0x80000000,
539 TBILoopback = 0x40000000,
540 TBINwEnable = 0x20000000,
541 TBINwRestart = 0x10000000,
542 TBILinkOk = 0x02000000,
543 TBINwComplete = 0x01000000,
546 EnableBist = (1 << 15), // 8168 8101
547 Mac_dbgo_oe = (1 << 14), // 8168 8101
548 Normal_mode = (1 << 13), // unused
549 Force_half_dup = (1 << 12), // 8168 8101
550 Force_rxflow_en = (1 << 11), // 8168 8101
551 Force_txflow_en = (1 << 10), // 8168 8101
552 Cxpl_dbg_sel = (1 << 9), // 8168 8101
553 ASF = (1 << 8), // 8168 8101
554 PktCntrDisable = (1 << 7), // 8168 8101
555 Mac_dbgo_sel = 0x001c, // 8168
560 INTT_0 = 0x0000, // 8168
561 INTT_1 = 0x0001, // 8168
562 INTT_2 = 0x0002, // 8168
563 INTT_3 = 0x0003, // 8168
565 /* rtl8169_PHYstatus */
576 TBILinkOK = 0x02000000,
578 /* DumpCounterCommand */
583 /* First doubleword. */
584 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
585 RingEnd = (1 << 30), /* End of descriptor ring */
586 FirstFrag = (1 << 29), /* First segment of a packet */
587 LastFrag = (1 << 28), /* Final segment of a packet */
591 enum rtl_tx_desc_bit {
592 /* First doubleword. */
593 TD_LSO = (1 << 27), /* Large Send Offload */
594 #define TD_MSS_MAX 0x07ffu /* MSS value */
596 /* Second doubleword. */
597 TxVlanTag = (1 << 17), /* Add VLAN tag */
600 /* 8169, 8168b and 810x except 8102e. */
601 enum rtl_tx_desc_bit_0 {
602 /* First doubleword. */
603 #define TD0_MSS_SHIFT 16 /* MSS position (11 bits) */
604 TD0_TCP_CS = (1 << 16), /* Calculate TCP/IP checksum */
605 TD0_UDP_CS = (1 << 17), /* Calculate UDP/IP checksum */
606 TD0_IP_CS = (1 << 18), /* Calculate IP checksum */
609 /* 8102e, 8168c and beyond. */
610 enum rtl_tx_desc_bit_1 {
611 /* Second doubleword. */
612 #define TD1_MSS_SHIFT 18 /* MSS position (11 bits) */
613 TD1_IP_CS = (1 << 29), /* Calculate IP checksum */
614 TD1_TCP_CS = (1 << 30), /* Calculate TCP/IP checksum */
615 TD1_UDP_CS = (1 << 31), /* Calculate UDP/IP checksum */
618 static const struct rtl_tx_desc_info {
625 } tx_desc_info [] = {
628 .udp = TD0_IP_CS | TD0_UDP_CS,
629 .tcp = TD0_IP_CS | TD0_TCP_CS
631 .mss_shift = TD0_MSS_SHIFT,
636 .udp = TD1_IP_CS | TD1_UDP_CS,
637 .tcp = TD1_IP_CS | TD1_TCP_CS
639 .mss_shift = TD1_MSS_SHIFT,
644 enum rtl_rx_desc_bit {
646 PID1 = (1 << 18), /* Protocol ID bit 1/2 */
647 PID0 = (1 << 17), /* Protocol ID bit 2/2 */
649 #define RxProtoUDP (PID1)
650 #define RxProtoTCP (PID0)
651 #define RxProtoIP (PID1 | PID0)
652 #define RxProtoMask RxProtoIP
654 IPFail = (1 << 16), /* IP checksum failed */
655 UDPFail = (1 << 15), /* UDP/IP checksum failed */
656 TCPFail = (1 << 14), /* TCP/IP checksum failed */
657 RxVlanTag = (1 << 16), /* VLAN tag available */
660 #define RsvdMask 0x3fffc000
677 u8 __pad[sizeof(void *) - sizeof(u32)];
681 RTL_FEATURE_WOL = (1 << 0),
682 RTL_FEATURE_MSI = (1 << 1),
683 RTL_FEATURE_GMII = (1 << 2),
686 struct rtl8169_counters {
693 __le32 tx_one_collision;
694 __le32 tx_multi_collision;
703 RTL_FLAG_TASK_ENABLED,
704 RTL_FLAG_TASK_SLOW_PENDING,
705 RTL_FLAG_TASK_RESET_PENDING,
706 RTL_FLAG_TASK_PHY_PENDING,
710 struct rtl8169_stats {
713 struct u64_stats_sync syncp;
716 struct rtl8169_private {
717 void __iomem *mmio_addr; /* memory map physical address */
718 struct pci_dev *pci_dev;
719 struct net_device *dev;
720 struct napi_struct napi;
724 u32 cur_rx; /* Index into the Rx descriptor buffer of next Rx pkt. */
725 u32 cur_tx; /* Index into the Tx descriptor buffer of next Rx pkt. */
727 struct rtl8169_stats rx_stats;
728 struct rtl8169_stats tx_stats;
729 struct TxDesc *TxDescArray; /* 256-aligned Tx descriptor ring */
730 struct RxDesc *RxDescArray; /* 256-aligned Rx descriptor ring */
731 dma_addr_t TxPhyAddr;
732 dma_addr_t RxPhyAddr;
733 void *Rx_databuff[NUM_RX_DESC]; /* Rx data buffers */
734 struct ring_info tx_skb[NUM_TX_DESC]; /* Tx data buffers */
735 struct timer_list timer;
741 void (*write)(struct rtl8169_private *, int, int);
742 int (*read)(struct rtl8169_private *, int);
745 struct pll_power_ops {
746 void (*down)(struct rtl8169_private *);
747 void (*up)(struct rtl8169_private *);
751 void (*enable)(struct rtl8169_private *);
752 void (*disable)(struct rtl8169_private *);
756 void (*write)(struct rtl8169_private *, int, int);
757 u32 (*read)(struct rtl8169_private *, int);
760 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
761 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
762 void (*phy_reset_enable)(struct rtl8169_private *tp);
763 void (*hw_start)(struct net_device *);
764 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
765 unsigned int (*link_ok)(void __iomem *);
766 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
769 DECLARE_BITMAP(flags, RTL_FLAG_MAX);
771 struct work_struct work;
776 struct mii_if_info mii;
777 struct rtl8169_counters counters;
782 const struct firmware *fw;
784 #define RTL_VER_SIZE 32
786 char version[RTL_VER_SIZE];
788 struct rtl_fw_phy_action {
793 #define RTL_FIRMWARE_UNKNOWN ERR_PTR(-EAGAIN)
798 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
799 MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
800 module_param(use_dac, int, 0);
801 MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
802 module_param_named(debug, debug.msg_enable, int, 0);
803 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
804 MODULE_LICENSE("GPL");
805 MODULE_VERSION(RTL8169_VERSION);
806 MODULE_FIRMWARE(FIRMWARE_8168D_1);
807 MODULE_FIRMWARE(FIRMWARE_8168D_2);
808 MODULE_FIRMWARE(FIRMWARE_8168E_1);
809 MODULE_FIRMWARE(FIRMWARE_8168E_2);
810 MODULE_FIRMWARE(FIRMWARE_8168E_3);
811 MODULE_FIRMWARE(FIRMWARE_8105E_1);
812 MODULE_FIRMWARE(FIRMWARE_8168F_1);
813 MODULE_FIRMWARE(FIRMWARE_8168F_2);
814 MODULE_FIRMWARE(FIRMWARE_8402_1);
815 MODULE_FIRMWARE(FIRMWARE_8411_1);
816 MODULE_FIRMWARE(FIRMWARE_8106E_1);
817 MODULE_FIRMWARE(FIRMWARE_8168G_1);
/* Acquire the mutex serializing the driver's deferred work state (tp->wk). */
819 static void rtl_lock_work(struct rtl8169_private *tp)
821 mutex_lock(&tp->wk.mutex);
/* Release the deferred-work mutex taken by rtl_lock_work(). */
824 static void rtl_unlock_work(struct rtl8169_private *tp)
826 mutex_unlock(&tp->wk.mutex);
/* Program the PCIe maximum read request size (PCI_EXP_DEVCTL_READRQ field). */
829 static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
831 pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL,
832 PCI_EXP_DEVCTL_READRQ, force);
836 bool (*check)(struct rtl8169_private *);
840 static void rtl_udelay(unsigned int d)
/*
 * Poll condition @c up to @n times, calling @delay(@d) between attempts,
 * until c->check(tp) reads @high.  Logs an error on timeout.
 * NOTE(review): the delay call and the return statements are elided in
 * this extract -- confirm the success/timeout return values in the full
 * source before relying on them.
 */
845 static bool rtl_loop_wait(struct rtl8169_private *tp, const struct rtl_cond *c,
846 void (*delay)(unsigned int), unsigned int d, int n,
851 for (i = 0; i < n; i++) {
853 if (c->check(tp) == high)
856 netif_err(tp, drv, tp->dev, "%s == %d (loop: %d, delay: %d).\n",
857 c->msg, !high, n, d);
861 static bool rtl_udelay_loop_wait_high(struct rtl8169_private *tp,
862 const struct rtl_cond *c,
863 unsigned int d, int n)
865 return rtl_loop_wait(tp, c, rtl_udelay, d, n, true);
868 static bool rtl_udelay_loop_wait_low(struct rtl8169_private *tp,
869 const struct rtl_cond *c,
870 unsigned int d, int n)
872 return rtl_loop_wait(tp, c, rtl_udelay, d, n, false);
875 static bool rtl_msleep_loop_wait_high(struct rtl8169_private *tp,
876 const struct rtl_cond *c,
877 unsigned int d, int n)
879 return rtl_loop_wait(tp, c, msleep, d, n, true);
882 static bool rtl_msleep_loop_wait_low(struct rtl8169_private *tp,
883 const struct rtl_cond *c,
884 unsigned int d, int n)
886 return rtl_loop_wait(tp, c, msleep, d, n, false);
889 #define DECLARE_RTL_COND(name) \
890 static bool name ## _check(struct rtl8169_private *); \
892 static const struct rtl_cond name = { \
893 .check = name ## _check, \
897 static bool name ## _check(struct rtl8169_private *tp)
/* OCPAR flag bit -- polled by the OCP accessors to detect completion. */
899 DECLARE_RTL_COND(rtl_ocpar_cond)
901 void __iomem *ioaddr = tp->mmio_addr;
903 return RTL_R32(OCPAR) & OCPAR_FLAG;
/*
 * Read an OCP register: program OCPAR with the byte-enable mask (bits
 * 15:12) and register address, then poll rtl_ocpar_cond for completion.
 * NOTE(review): the value returned on success is elided in this extract --
 * presumably a read of OCPDR; confirm against the full source.
 */
906 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
908 void __iomem *ioaddr = tp->mmio_addr;
910 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
912 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 100, 20) ?
916 static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
918 void __iomem *ioaddr = tp->mmio_addr;
920 RTL_W32(OCPDR, data);
921 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
923 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 100, 20);
/* ERIAR flag bit -- polled by the ERI accessors to detect completion. */
926 DECLARE_RTL_COND(rtl_eriar_cond)
928 void __iomem *ioaddr = tp->mmio_addr;
930 return RTL_R32(ERIAR) & ERIAR_FLAG;
/*
 * Hand an out-of-band command to the 8168 firmware: issue an ERI write at
 * address 0xe8, wait for it to complete, then ring the OCP doorbell
 * (register 0x30).  NOTE(review): the ERIDR data write is elided in this
 * extract -- presumably it carries @cmd; confirm against the full source.
 */
933 static void rtl8168_oob_notify(struct rtl8169_private *tp, u8 cmd)
935 void __iomem *ioaddr = tp->mmio_addr;
938 RTL_W32(ERIAR, 0x800010e8);
941 if (!rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 5))
944 ocp_write(tp, 0x1, 0x30, 0x00000001);
947 #define OOB_CMD_RESET 0x00
948 #define OOB_CMD_DRIVER_START 0x05
949 #define OOB_CMD_DRIVER_STOP 0x06
951 static u16 rtl8168_get_ocp_reg(struct rtl8169_private *tp)
953 return (tp->mac_version == RTL_GIGA_MAC_VER_31) ? 0xb8 : 0x10;
/* True when bit 11 of the chip-dependent OCP status register is set. */
956 DECLARE_RTL_COND(rtl_ocp_read_cond)
960 reg = rtl8168_get_ocp_reg(tp);
962 return ocp_read(tp, 0x0f, reg) & 0x00000800;
/* Tell the firmware the driver is starting, then poll for its acknowledge. */
965 static void rtl8168_driver_start(struct rtl8169_private *tp)
967 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
969 rtl_msleep_loop_wait_high(tp, &rtl_ocp_read_cond, 10, 10);
/* Tell the firmware the driver is stopping, then poll for its acknowledge. */
972 static void rtl8168_driver_stop(struct rtl8169_private *tp)
974 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
976 rtl_msleep_loop_wait_low(tp, &rtl_ocp_read_cond, 10, 10);
979 static int r8168dp_check_dash(struct rtl8169_private *tp)
981 u16 reg = rtl8168_get_ocp_reg(tp);
983 return (ocp_read(tp, 0x0f, reg) & 0x00008000) ? 1 : 0;
/*
 * Validate an OCP register offset: reject values that are odd or exceed
 * 16 bits, logging an error.  NOTE(review): the return statements are
 * elided in this extract -- the error branch presumably returns true.
 */
986 static bool rtl_ocp_reg_failure(struct rtl8169_private *tp, u32 reg)
988 if (reg & 0xffff0001) {
989 netif_err(tp, drv, tp->dev, "Invalid ocp reg %x!\n", reg);
/* GPHY_OCP flag bit -- polled by the PHY OCP accessors to detect completion. */
995 DECLARE_RTL_COND(rtl_ocp_gphy_cond)
997 void __iomem *ioaddr = tp->mmio_addr;
999 return RTL_R32(GPHY_OCP) & OCPAR_FLAG;
/* Write @data to PHY OCP register @reg and wait for the access to finish. */
1002 static void r8168_phy_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1004 void __iomem *ioaddr = tp->mmio_addr;
1006 if (rtl_ocp_reg_failure(tp, reg))
1009 RTL_W32(GPHY_OCP, OCPAR_FLAG | (reg << 15) | data);
1011 rtl_udelay_loop_wait_low(tp, &rtl_ocp_gphy_cond, 25, 10);
/* Read PHY OCP register @reg; returns the low 16 bits, or ~0 on timeout. */
1014 static u16 r8168_phy_ocp_read(struct rtl8169_private *tp, u32 reg)
1016 void __iomem *ioaddr = tp->mmio_addr;
1018 if (rtl_ocp_reg_failure(tp, reg))
1021 RTL_W32(GPHY_OCP, reg << 15);
1023 return rtl_udelay_loop_wait_high(tp, &rtl_ocp_gphy_cond, 25, 10) ?
1024 (RTL_R32(GPHY_OCP) & 0xffff) : ~0;
/* PHY OCP read-modify-write: set bits @p, then clear bits @m. */
1027 static void rtl_w1w0_phy_ocp(struct rtl8169_private *tp, int reg, int p, int m)
1031 val = r8168_phy_ocp_read(tp, reg);
1032 r8168_phy_ocp_write(tp, reg, (val | p) & ~m);
/* Write @data to MAC OCP register @reg through OCPDR. */
1035 static void r8168_mac_ocp_write(struct rtl8169_private *tp, u32 reg, u32 data)
1037 void __iomem *ioaddr = tp->mmio_addr;
1039 if (rtl_ocp_reg_failure(tp, reg))
1042 RTL_W32(OCPDR, OCPAR_FLAG | (reg << 15) | data);
/* Read MAC OCP register @reg through OCPDR (truncated to u16 on return). */
1045 static u16 r8168_mac_ocp_read(struct rtl8169_private *tp, u32 reg)
1047 void __iomem *ioaddr = tp->mmio_addr;
1049 if (rtl_ocp_reg_failure(tp, reg))
1052 RTL_W32(OCPDR, reg << 15);
1054 return RTL_R32(OCPDR);
1057 #define OCP_STD_PHY_BASE 0xa400
/*
 * MDIO write for 8168g-style chips: MII registers are mapped onto PHY OCP
 * space at tp->ocp_base (two bytes per register).
 * NOTE(review): the branch deciding when ocp_base is updated (presumably
 * on a page-select write) and the non-standard-base adjustment are elided
 * in this extract -- confirm against the full source.
 */
1059 static void r8168g_mdio_write(struct rtl8169_private *tp, int reg, int value)
1062 tp->ocp_base = value ? value << 4 : OCP_STD_PHY_BASE;
1066 if (tp->ocp_base != OCP_STD_PHY_BASE)
1069 r8168_phy_ocp_write(tp, tp->ocp_base + reg * 2, value);
/*
 * MDIO read counterpart of r8168g_mdio_write(): translate the MII register
 * number into PHY OCP space at tp->ocp_base.
 * NOTE(review): the adjustment applied when ocp_base is non-standard is
 * elided in this extract.
 */
1072 static int r8168g_mdio_read(struct rtl8169_private *tp, int reg)
1074 if (tp->ocp_base != OCP_STD_PHY_BASE)
1077 return r8168_phy_ocp_read(tp, tp->ocp_base + reg * 2);
/* PHYAR bit 31 -- polled by the classic MII accessors to detect completion. */
1080 DECLARE_RTL_COND(rtl_phyar_cond)
1082 void __iomem *ioaddr = tp->mmio_addr;
1084 return RTL_R32(PHYAR) & 0x80000000;
/* Classic 8169 MII write: program PHYAR, then wait for bit 31 to clear. */
1087 static void r8169_mdio_write(struct rtl8169_private *tp, int reg, int value)
1089 void __iomem *ioaddr = tp->mmio_addr;
1091 RTL_W32(PHYAR, 0x80000000 | (reg & 0x1f) << 16 | (value & 0xffff));
1093 rtl_udelay_loop_wait_low(tp, &rtl_phyar_cond, 25, 20);
1095 * According to hardware specs a 20us delay is required after write
1096 * complete indication, but before sending next command.
/* Classic 8169 MII read: returns the 16-bit register value, or ~0 on timeout. */
1101 static int r8169_mdio_read(struct rtl8169_private *tp, int reg)
1103 void __iomem *ioaddr = tp->mmio_addr;
1106 RTL_W32(PHYAR, 0x0 | (reg & 0x1f) << 16);
1108 value = rtl_udelay_loop_wait_high(tp, &rtl_phyar_cond, 25, 20) ?
1109 RTL_R32(PHYAR) & 0xffff : ~0;
1112 * According to hardware specs a 20us delay is required after read
1113 * complete indication, but before sending next command.
/* Early 8168dp MDIO helper: issue one OCP-mediated PHY access cycle. */
1120 static void r8168dp_1_mdio_access(struct rtl8169_private *tp, int reg, u32 data)
1122 void __iomem *ioaddr = tp->mmio_addr;
1124 RTL_W32(OCPDR, data | ((reg & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
1125 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
1126 RTL_W32(EPHY_RXER_NUM, 0);
1128 rtl_udelay_loop_wait_low(tp, &rtl_ocpar_cond, 1000, 100);
1131 static void r8168dp_1_mdio_write(struct rtl8169_private *tp, int reg, int value)
1133 r8168dp_1_mdio_access(tp, reg,
1134 OCPDR_WRITE_CMD | (value & OCPDR_DATA_MASK));
/* MDIO read on early 8168dp parts; returns ~0 on timeout. */
1137 static int r8168dp_1_mdio_read(struct rtl8169_private *tp, int reg)
1139 void __iomem *ioaddr = tp->mmio_addr;
1141 r8168dp_1_mdio_access(tp, reg, OCPDR_READ_CMD);
1144 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
1145 RTL_W32(EPHY_RXER_NUM, 0);
1147 return rtl_udelay_loop_wait_high(tp, &rtl_ocpar_cond, 1000, 100) ?
1148 RTL_R32(OCPDR) & OCPDR_DATA_MASK : ~0;
1151 #define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
1153 static void r8168dp_2_mdio_start(void __iomem *ioaddr)
1155 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
1158 static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
1160 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
/* Later 8168dp MII write: bracket the classic 8169 write with start/stop. */
1163 static void r8168dp_2_mdio_write(struct rtl8169_private *tp, int reg, int value)
1165 void __iomem *ioaddr = tp->mmio_addr;
1167 r8168dp_2_mdio_start(ioaddr);
1169 r8169_mdio_write(tp, reg, value);
1171 r8168dp_2_mdio_stop(ioaddr);
/* Later 8168dp MII read: bracket the classic 8169 read with start/stop. */
1174 static int r8168dp_2_mdio_read(struct rtl8169_private *tp, int reg)
1176 void __iomem *ioaddr = tp->mmio_addr;
1179 r8168dp_2_mdio_start(ioaddr);
1181 value = r8169_mdio_read(tp, reg);
1183 r8168dp_2_mdio_stop(ioaddr);
/* Chip-agnostic PHY write: dispatch through the per-chip tp->mdio_ops. */
1188 static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
1190 tp->mdio_ops.write(tp, location, val);
/* Chip-agnostic PHY read: dispatch through the per-chip tp->mdio_ops. */
1193 static int rtl_readphy(struct rtl8169_private *tp, int location)
1195 return tp->mdio_ops.read(tp, location);
/* OR @value into PHY register @reg_addr (read-modify-write). */
static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
{
	int cur = rtl_readphy(tp, reg_addr);

	rtl_writephy(tp, reg_addr, cur | value);
}
/* PHY read-modify-write: set bits @p, then clear bits @m. */
1203 static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
1207 val = rtl_readphy(tp, reg_addr);
1208 rtl_writephy(tp, reg_addr, (val | p) & ~m);
/* mii_if_info glue: write a PHY register (phy_id is unused here). */
1211 static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
1214 struct rtl8169_private *tp = netdev_priv(dev);
1216 rtl_writephy(tp, location, val);
/* mii_if_info glue: read a PHY register (phy_id is unused here). */
1219 static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
1221 struct rtl8169_private *tp = netdev_priv(dev);
1223 return rtl_readphy(tp, location);
/* EPHYAR flag bit -- polled by the EPHY accessors to detect completion. */
1226 DECLARE_RTL_COND(rtl_ephyar_cond)
1228 void __iomem *ioaddr = tp->mmio_addr;
1230 return RTL_R32(EPHYAR) & EPHYAR_FLAG;
/* Write @value to EPHY register @reg_addr and wait for completion. */
1233 static void rtl_ephy_write(struct rtl8169_private *tp, int reg_addr, int value)
1235 void __iomem *ioaddr = tp->mmio_addr;
1237 RTL_W32(EPHYAR, EPHYAR_WRITE_CMD | (value & EPHYAR_DATA_MASK) |
1238 (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1240 rtl_udelay_loop_wait_low(tp, &rtl_ephyar_cond, 10, 100);
/* Read EPHY register @reg_addr; returns the 16-bit value, or ~0 on timeout. */
1245 static u16 rtl_ephy_read(struct rtl8169_private *tp, int reg_addr)
1247 void __iomem *ioaddr = tp->mmio_addr;
1249 RTL_W32(EPHYAR, (reg_addr & EPHYAR_REG_MASK) << EPHYAR_REG_SHIFT);
1251 return rtl_udelay_loop_wait_high(tp, &rtl_ephyar_cond, 10, 100) ?
1252 RTL_R32(EPHYAR) & EPHYAR_DATA_MASK : ~0;
/*
 * Write @val to ERI register @addr.  @mask selects the byte lanes and
 * @type the target block (EXGMAC/MSIX/ASF).  @addr must be dword-aligned
 * and @mask non-zero (BUG_ON otherwise).
 */
1255 static void rtl_eri_write(struct rtl8169_private *tp, int addr, u32 mask,
1258 void __iomem *ioaddr = tp->mmio_addr;
1260 BUG_ON((addr & 3) || (mask == 0));
1261 RTL_W32(ERIDR, val);
1262 RTL_W32(ERIAR, ERIAR_WRITE_CMD | type | mask | addr);
1264 rtl_udelay_loop_wait_low(tp, &rtl_eriar_cond, 100, 100);
/* Read a full dword from ERI register @addr; returns ~0 on timeout. */
1267 static u32 rtl_eri_read(struct rtl8169_private *tp, int addr, int type)
1269 void __iomem *ioaddr = tp->mmio_addr;
1271 RTL_W32(ERIAR, ERIAR_READ_CMD | type | ERIAR_MASK_1111 | addr);
1273 return rtl_udelay_loop_wait_high(tp, &rtl_eriar_cond, 100, 100) ?
1274 RTL_R32(ERIDR) : ~0;
/* ERI read-modify-write: clear bits @m, then set bits @p. */
1277 static void rtl_w1w0_eri(struct rtl8169_private *tp, int addr, u32 mask, u32 p,
1282 val = rtl_eri_read(tp, addr, type);
1283 rtl_eri_write(tp, addr, mask, (val & ~m) | p, type);
/* Replay a table of @len ERI register writes against the EXGMAC block. */
1292 static void rtl_write_exgmac_batch(struct rtl8169_private *tp,
1293 const struct exgmac_reg *r, int len)
1296 rtl_eri_write(tp, r->addr, r->mask, r->val, ERIAR_EXGMAC);
/* EFUSEAR flag bit -- polled by the efuse reader to detect completion. */
1301 DECLARE_RTL_COND(rtl_efusear_cond)
1303 void __iomem *ioaddr = tp->mmio_addr;
1305 return RTL_R32(EFUSEAR) & EFUSEAR_FLAG;
/*
 * Read one byte from the 8168d efuse.  On timeout the ~0 sentinel is
 * truncated to 0xff by the u8 return type.
 */
1308 static u8 rtl8168d_efuse_read(struct rtl8169_private *tp, int reg_addr)
1310 void __iomem *ioaddr = tp->mmio_addr;
1312 RTL_W32(EFUSEAR, (reg_addr & EFUSEAR_REG_MASK) << EFUSEAR_REG_SHIFT);
1314 return rtl_udelay_loop_wait_high(tp, &rtl_efusear_cond, 100, 300) ?
1315 RTL_R32(EFUSEAR) & EFUSEAR_DATA_MASK : ~0;
/* Snapshot the pending interrupt events from IntrStatus. */
1318 static u16 rtl_get_events(struct rtl8169_private *tp)
1320 void __iomem *ioaddr = tp->mmio_addr;
1322 return RTL_R16(IntrStatus);
/* Acknowledge (clear) the interrupt status bits in @bits. */
1325 static void rtl_ack_events(struct rtl8169_private *tp, u16 bits)
1327 void __iomem *ioaddr = tp->mmio_addr;
1329 RTL_W16(IntrStatus, bits);
/* Mask every chip interrupt source. */
1333 static void rtl_irq_disable(struct rtl8169_private *tp)
1335 void __iomem *ioaddr = tp->mmio_addr;
1337 RTL_W16(IntrMask, 0);
/* Unmask exactly the interrupt sources in @bits. */
1341 static void rtl_irq_enable(struct rtl8169_private *tp, u16 bits)
1343 void __iomem *ioaddr = tp->mmio_addr;
1345 RTL_W16(IntrMask, bits);
1348 #define RTL_EVENT_NAPI_RX (RxOK | RxErr)
1349 #define RTL_EVENT_NAPI_TX (TxOK | TxErr)
1350 #define RTL_EVENT_NAPI (RTL_EVENT_NAPI_RX | RTL_EVENT_NAPI_TX)
/* Enable the NAPI rx/tx events plus the chip-specific slow-path events. */
1352 static void rtl_irq_enable_all(struct rtl8169_private *tp)
1354 rtl_irq_enable(tp, RTL_EVENT_NAPI | tp->event_slow);
/* Disable interrupts and clear any already-latched events. */
1357 static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
1359 void __iomem *ioaddr = tp->mmio_addr;
1361 rtl_irq_disable(tp);
1362 rtl_ack_events(tp, RTL_EVENT_NAPI | tp->event_slow);
/* Non-zero while a TBI (fiber) reset is still in progress. */
1366 static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
1368 void __iomem *ioaddr = tp->mmio_addr;
1370 return RTL_R32(TBICSR) & TBIReset;
/* Non-zero while the copper PHY still reports BMCR_RESET. */
1373 static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
1375 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
/* TBI (fiber) link-state probe via TBICSR. */
1378 static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
1380 return RTL_R32(TBICSR) & TBILinkOk;
/* Copper (xMII) link-state probe via the PHYstatus register. */
1383 static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
1385 return RTL_R8(PHYstatus) & LinkStatus;
/* Kick off a TBI reset by setting TBIReset in TBICSR. */
1388 static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
1390 void __iomem *ioaddr = tp->mmio_addr;
1392 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
/* Kick off a copper PHY reset by setting BMCR_RESET in MII_BMCR. */
1395 static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
1399 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
1400 rtl_writephy(tp, MII_BMCR, val & 0xffff);
/*
 * Apply chip-specific ERI register fixups after a link state change.
 * The values written depend on the negotiated speed as reported by the
 * PHYstatus register, with separate branches for the VER_34/38, VER_35/36
 * and VER_37 MAC versions.
 * NOTE(review): continuation lines of several multi-line calls (the final
 * ERIAR_* argument) are elided in this excerpt.
 */
1403 static void rtl_link_chg_patch(struct rtl8169_private *tp)
1405 void __iomem *ioaddr = tp->mmio_addr;
1406 struct net_device *dev = tp->dev;
/* Nothing to patch while the interface is down. */
1408 if (!netif_running(dev))
1411 if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
1412 tp->mac_version == RTL_GIGA_MAC_VER_38) {
1413 if (RTL_R8(PHYstatus) & _1000bpsF) {
1414 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1416 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1418 } else if (RTL_R8(PHYstatus) & _100bps) {
1419 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1421 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1424 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1426 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1429 /* Reset packet filter */
1430 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01,
1432 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00,
1434 } else if (tp->mac_version == RTL_GIGA_MAC_VER_35 ||
1435 tp->mac_version == RTL_GIGA_MAC_VER_36) {
1436 if (RTL_R8(PHYstatus) & _1000bpsF) {
1437 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x00000011,
1439 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x00000005,
1442 rtl_eri_write(tp, 0x1bc, ERIAR_MASK_1111, 0x0000001f,
1444 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_1111, 0x0000003f,
1447 } else if (tp->mac_version == RTL_GIGA_MAC_VER_37) {
1448 if (RTL_R8(PHYstatus) & _10bps) {
1449 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x4d02,
1451 rtl_eri_write(tp, 0x1dc, ERIAR_MASK_0011, 0x0060,
1454 rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000,
/*
 * Re-evaluate the link state and update the carrier accordingly.
 * On link-up: apply the chip ERI fixups and turn the carrier on; on
 * link-down: turn the carrier off.  The @pm flag relates to the runtime-PM
 * calls below -- the guarding 'if (pm)' lines appear to be elided from this
 * excerpt, so confirm the exact gating against the full source.
 */
1460 static void __rtl8169_check_link_status(struct net_device *dev,
1461 struct rtl8169_private *tp,
1462 void __iomem *ioaddr, bool pm)
1464 if (tp->link_ok(ioaddr)) {
1465 rtl_link_chg_patch(tp);
1466 /* This is to cancel a scheduled suspend if there's one. */
1468 pm_request_resume(&tp->pci_dev->dev);
1469 netif_carrier_on(dev);
/* Rate-limit the "link up" message to avoid log flooding on flaps. */
1470 if (net_ratelimit())
1471 netif_info(tp, ifup, dev, "link up\n");
1473 netif_carrier_off(dev);
1474 netif_info(tp, ifdown, dev, "link down\n");
/* With no link, allow the device to runtime-suspend after 5 seconds. */
1476 pm_schedule_suspend(&tp->pci_dev->dev, 5000);
1480 static void rtl8169_check_link_status(struct net_device *dev,
1481 struct rtl8169_private *tp,
1482 void __iomem *ioaddr)
1484 __rtl8169_check_link_status(dev, tp, ioaddr, false);
/* Union of every Wake-on-LAN trigger this driver advertises via ethtool. */
1487 #define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
/*
 * Read the chip's Config1/Config3/Config5 registers and translate the
 * hardware wake-up bits into ethtool WAKE_* flags.
 * NOTE(review): the early-return when PMEnable is clear and the Config5
 * bit tests (presumably UWF/BWF/MWF) are elided from this excerpt.
 */
1489 static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
1491 void __iomem *ioaddr = tp->mmio_addr;
/* Without PMEnable in Config1 no wake source can be active. */
1495 options = RTL_R8(Config1);
1496 if (!(options & PMEnable))
1499 options = RTL_R8(Config3);
1500 if (options & LinkUp)
1501 wolopts |= WAKE_PHY;
1502 if (options & MagicPacket)
1503 wolopts |= WAKE_MAGIC;
1505 options = RTL_R8(Config5);
1507 wolopts |= WAKE_UCAST;
1509 wolopts |= WAKE_BCAST;
1511 wolopts |= WAKE_MCAST;
/*
 * ethtool get_wol hook: report supported and currently enabled WoL options.
 * NOTE(review): the matching rtl_lock_work() call before the reads appears
 * to be elided from this excerpt (the unlock below implies it).
 */
1516 static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1518 struct rtl8169_private *tp = netdev_priv(dev);
1522 wol->supported = WAKE_ANY;
1523 wol->wolopts = __rtl8169_get_wol(tp);
1525 rtl_unlock_work(tp);
/*
 * Program the hardware wake-up configuration from ethtool WAKE_* flags.
 * A table maps each WAKE_* option to the config register and bit that
 * controls it; the register bank is unlocked via Cfg9346 around the writes.
 */
1528 static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
1530 void __iomem *ioaddr = tp->mmio_addr;
1532 static const struct {
1537 { WAKE_PHY, Config3, LinkUp },
1538 { WAKE_MAGIC, Config3, MagicPacket },
1539 { WAKE_UCAST, Config5, UWF },
1540 { WAKE_BCAST, Config5, BWF },
1541 { WAKE_MCAST, Config5, MWF },
1542 { WAKE_ANY, Config5, LanWake }
/* Unlock the config registers for writing. */
1546 RTL_W8(Cfg9346, Cfg9346_Unlock);
/* Set or clear each option's bit in its config register. */
1548 for (i = 0; i < ARRAY_SIZE(cfg); i++) {
1549 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
1550 if (wolopts & cfg[i].opt)
1551 options |= cfg[i].mask;
1552 RTL_W8(cfg[i].reg, options);
/* PME signalling lives in different registers depending on MAC version. */
1555 switch (tp->mac_version) {
1556 case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
1557 options = RTL_R8(Config1) & ~PMEnable;
1559 options |= PMEnable;
1560 RTL_W8(Config1, options);
1563 options = RTL_R8(Config2) & ~PME_SIGNAL;
1565 options |= PME_SIGNAL;
1566 RTL_W8(Config2, options);
/* Re-lock the config registers. */
1570 RTL_W8(Cfg9346, Cfg9346_Lock);
/*
 * ethtool set_wol hook: record the WoL feature flag, program the hardware,
 * and tell the PM core whether this device may wake the system.
 * NOTE(review): the lock call and the condition selecting between the two
 * feature-flag updates are elided from this excerpt.
 */
1573 static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1575 struct rtl8169_private *tp = netdev_priv(dev);
1580 tp->features |= RTL_FEATURE_WOL;
1582 tp->features &= ~RTL_FEATURE_WOL;
1583 __rtl8169_set_wol(tp, wol->wolopts);
1585 rtl_unlock_work(tp);
/* Non-zero wolopts means the device is allowed to wake the system. */
1587 device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
1592 static const char *rtl_lookup_firmware_name(struct rtl8169_private *tp)
1594 return rtl_chip_infos[tp->mac_version].fw_name;
/*
 * ethtool get_drvinfo hook: fill in driver name, version, PCI bus id and,
 * when a firmware image was successfully loaded, its version string.
 */
1597 static void rtl8169_get_drvinfo(struct net_device *dev,
1598 struct ethtool_drvinfo *info)
1600 struct rtl8169_private *tp = netdev_priv(dev);
1601 struct rtl_fw *rtl_fw = tp->rtl_fw;
1603 strlcpy(info->driver, MODULENAME, sizeof(info->driver));
1604 strlcpy(info->version, RTL8169_VERSION, sizeof(info->version));
1605 strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info));
/* Compile-time guarantee that the firmware version string fits. */
1606 BUILD_BUG_ON(sizeof(info->fw_version) < sizeof(rtl_fw->version));
/* tp->rtl_fw may hold an ERR_PTR / NULL sentinel when no fw is loaded. */
1607 if (!IS_ERR_OR_NULL(rtl_fw))
1608 strlcpy(info->fw_version, rtl_fw->version,
1609 sizeof(info->fw_version));
1612 static int rtl8169_get_regs_len(struct net_device *dev)
1614 return R8169_REGS_SIZE;
/*
 * Speed/duplex configuration for TBI (fiber) chips.  Only forced
 * 1000/full or autonegotiation are meaningful on fiber; anything else is
 * refused with a warning.
 * NOTE(review): the declarations and the return path are elided from this
 * excerpt.
 */
1617 static int rtl8169_set_speed_tbi(struct net_device *dev,
1618 u8 autoneg, u16 speed, u8 duplex, u32 ignored)
1620 struct rtl8169_private *tp = netdev_priv(dev);
1621 void __iomem *ioaddr = tp->mmio_addr;
1625 reg = RTL_R32(TBICSR);
1626 if ((autoneg == AUTONEG_DISABLE) && (speed == SPEED_1000) &&
1627 (duplex == DUPLEX_FULL)) {
/* Forced 1000/full: turn TBI autonegotiation off. */
1628 RTL_W32(TBICSR, reg & ~(TBINwEnable | TBINwRestart));
1629 } else if (autoneg == AUTONEG_ENABLE)
/* Enable and restart TBI autonegotiation. */
1630 RTL_W32(TBICSR, reg | TBINwEnable | TBINwRestart);
1632 netif_warn(tp, link, dev,
1633 "incorrect speed setting refused in TBI mode\n");
/*
 * Speed/duplex configuration for copper (MII) chips.
 * With autonegotiation enabled, translate the ethtool ADVERTISED_* mask
 * into MII_ADVERTISE / MII_CTRL1000 advertisement bits and restart
 * autonegotiation.  With autonegotiation disabled, force the speed and
 * duplex through MII_BMCR.
 * NOTE(review): several lines are elided in this excerpt (e.g. the forced
 * 10Mbps BMCR assignment and the error paths).
 */
1640 static int rtl8169_set_speed_xmii(struct net_device *dev,
1641 u8 autoneg, u16 speed, u8 duplex, u32 adv)
1643 struct rtl8169_private *tp = netdev_priv(dev);
1644 int giga_ctrl, bmcr;
/* Select PHY page 0 before touching standard MII registers. */
1647 rtl_writephy(tp, 0x1f, 0x0000);
1649 if (autoneg == AUTONEG_ENABLE) {
1652 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
1653 auto_nego &= ~(ADVERTISE_10HALF | ADVERTISE_10FULL |
1654 ADVERTISE_100HALF | ADVERTISE_100FULL);
1656 if (adv & ADVERTISED_10baseT_Half)
1657 auto_nego |= ADVERTISE_10HALF;
1658 if (adv & ADVERTISED_10baseT_Full)
1659 auto_nego |= ADVERTISE_10FULL;
1660 if (adv & ADVERTISED_100baseT_Half)
1661 auto_nego |= ADVERTISE_100HALF;
1662 if (adv & ADVERTISED_100baseT_Full)
1663 auto_nego |= ADVERTISE_100FULL;
/* Always advertise both symmetric and asymmetric pause. */
1665 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1667 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
1668 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
1670 /* The 8100e/8101e/8102e do Fast Ethernet only. */
1671 if (tp->mii.supports_gmii) {
1672 if (adv & ADVERTISED_1000baseT_Half)
1673 giga_ctrl |= ADVERTISE_1000HALF;
1674 if (adv & ADVERTISED_1000baseT_Full)
1675 giga_ctrl |= ADVERTISE_1000FULL;
1676 } else if (adv & (ADVERTISED_1000baseT_Half |
1677 ADVERTISED_1000baseT_Full)) {
1678 netif_info(tp, link, dev,
1679 "PHY does not support 1000Mbps\n");
1683 bmcr = BMCR_ANENABLE | BMCR_ANRESTART;
1685 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
1686 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
1690 if (speed == SPEED_10)
1692 else if (speed == SPEED_100)
1693 bmcr = BMCR_SPEED100;
1697 if (duplex == DUPLEX_FULL)
1698 bmcr |= BMCR_FULLDPLX;
1701 rtl_writephy(tp, MII_BMCR, bmcr);
/* Extra PHY writes needed by the VER_02/VER_03 chips for forced 100Mbps. */
1703 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
1704 tp->mac_version == RTL_GIGA_MAC_VER_03) {
1705 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
1706 rtl_writephy(tp, 0x17, 0x2138);
1707 rtl_writephy(tp, 0x0e, 0x0260);
1709 rtl_writephy(tp, 0x17, 0x2108);
1710 rtl_writephy(tp, 0x0e, 0x0000);
/*
 * Apply a speed/duplex/advertising configuration via the chip-specific
 * tp->set_speed hook, then (re)arm the PHY timer when gigabit
 * autonegotiation was requested on a running interface.
 */
1719 static int rtl8169_set_speed(struct net_device *dev,
1720 u8 autoneg, u16 speed, u8 duplex, u32 advertising)
1722 struct rtl8169_private *tp = netdev_priv(dev);
1725 ret = tp->set_speed(dev, autoneg, speed, duplex, advertising);
1729 if (netif_running(dev) && (autoneg == AUTONEG_ENABLE) &&
1730 (advertising & ADVERTISED_1000baseT_Full)) {
1731 mod_timer(&tp->timer, jiffies + RTL8169_PHY_TIMEOUT);
/*
 * ethtool set_settings hook: cancel the pending PHY timer, then hand the
 * requested parameters to rtl8169_set_speed().
 * NOTE(review): the matching rtl_lock_work() call appears to be elided
 * from this excerpt (the unlock below implies it).
 */
1737 static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1739 struct rtl8169_private *tp = netdev_priv(dev);
1742 del_timer_sync(&tp->timer);
1745 ret = rtl8169_set_speed(dev, cmd->autoneg, ethtool_cmd_speed(cmd),
1746 cmd->duplex, cmd->advertising);
1747 rtl_unlock_work(tp);
1752 static netdev_features_t rtl8169_fix_features(struct net_device *dev,
1753 netdev_features_t features)
1755 struct rtl8169_private *tp = netdev_priv(dev);
1757 if (dev->mtu > TD_MSS_MAX)
1758 features &= ~NETIF_F_ALL_TSO;
1760 if (dev->mtu > JUMBO_1K &&
1761 !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
1762 features &= ~NETIF_F_IP_CSUM;
/*
 * Push a changed feature set to the hardware: RX checksum and VLAN RX
 * stripping live in CPlusCmd, while RXALL (accept error/runt frames) is
 * controlled via RxConfig.  Returns early when none of the relevant
 * feature bits changed.
 */
1767 static void __rtl8169_set_features(struct net_device *dev,
1768 netdev_features_t features)
1770 struct rtl8169_private *tp = netdev_priv(dev);
1771 netdev_features_t changed = features ^ dev->features;
1772 void __iomem *ioaddr = tp->mmio_addr;
/* Nothing to do if none of the hardware-backed features changed. */
1774 if (!(changed & (NETIF_F_RXALL | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)))
1777 if (changed & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX)) {
1778 if (features & NETIF_F_RXCSUM)
1779 tp->cp_cmd |= RxChkSum;
1781 tp->cp_cmd &= ~RxChkSum;
/* NOTE(review): this tests dev->features while the RXCSUM branch above
 * tests the new 'features' -- looks inconsistent; confirm against the
 * full source / upstream history. */
1783 if (dev->features & NETIF_F_HW_VLAN_RX)
1784 tp->cp_cmd |= RxVlan;
1786 tp->cp_cmd &= ~RxVlan;
1788 RTL_W16(CPlusCmd, tp->cp_cmd);
1791 if (changed & NETIF_F_RXALL) {
1792 int tmp = (RTL_R32(RxConfig) & ~(AcceptErr | AcceptRunt));
1793 if (features & NETIF_F_RXALL)
1794 tmp |= (AcceptErr | AcceptRunt);
1795 RTL_W32(RxConfig, tmp);
/*
 * ndo_set_features hook: apply the new feature set under the work lock.
 * NOTE(review): the matching rtl_lock_work() call appears to be elided
 * from this excerpt (the unlock below implies it).
 */
1799 static int rtl8169_set_features(struct net_device *dev,
1800 netdev_features_t features)
1802 struct rtl8169_private *tp = netdev_priv(dev);
1805 __rtl8169_set_features(dev, features);
1806 rtl_unlock_work(tp);
1812 static inline u32 rtl8169_tx_vlan_tag(struct sk_buff *skb)
1814 return (vlan_tx_tag_present(skb)) ?
1815 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
1818 static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb)
1820 u32 opts2 = le32_to_cpu(desc->opts2);
1822 if (opts2 & RxVlanTag)
1823 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
/*
 * ethtool get_settings for TBI (fiber) chips: fiber is always reported as
 * 1000/full on an internal transceiver; only the autoneg flag is read back
 * from TBICSR.
 */
1826 static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
1828 struct rtl8169_private *tp = netdev_priv(dev);
1829 void __iomem *ioaddr = tp->mmio_addr;
1833 SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE;
1834 cmd->port = PORT_FIBRE;
1835 cmd->transceiver = XCVR_INTERNAL;
1837 status = RTL_R32(TBICSR);
1838 cmd->advertising = (status & TBINwEnable) ? ADVERTISED_Autoneg : 0;
1839 cmd->autoneg = !!(status & TBINwEnable);
1841 ethtool_cmd_speed_set(cmd, SPEED_1000);
1842 cmd->duplex = DUPLEX_FULL; /* Always set */
1847 static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
1849 struct rtl8169_private *tp = netdev_priv(dev);
1851 return mii_ethtool_gset(&tp->mii, cmd);
/*
 * ethtool get_settings hook: dispatch to the chip-specific tp->get_settings
 * (TBI or XMII variant) under the work lock.
 * NOTE(review): the matching rtl_lock_work() call appears to be elided
 * from this excerpt (the unlock below implies it).
 */
1854 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1856 struct rtl8169_private *tp = netdev_priv(dev);
1860 rc = tp->get_settings(dev, cmd);
1861 rtl_unlock_work(tp);
/*
 * ethtool get_regs hook: copy up to R8169_REGS_SIZE bytes of the MMIO
 * register window into the caller's buffer.
 * NOTE(review): the matching rtl_lock_work() call appears to be elided
 * from this excerpt (the unlock below implies it).
 */
1866 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1869 struct rtl8169_private *tp = netdev_priv(dev);
/* Never copy more than the register window actually covers. */
1871 if (regs->len > R8169_REGS_SIZE)
1872 regs->len = R8169_REGS_SIZE;
1875 memcpy_fromio(p, tp->mmio_addr, regs->len);
1876 rtl_unlock_work(tp);
1879 static u32 rtl8169_get_msglevel(struct net_device *dev)
1881 struct rtl8169_private *tp = netdev_priv(dev);
1883 return tp->msg_enable;
1886 static void rtl8169_set_msglevel(struct net_device *dev, u32 value)
1888 struct rtl8169_private *tp = netdev_priv(dev);
1890 tp->msg_enable = value;
/* ethtool statistics names; the order must match the data[] indices filled
 * in by rtl8169_get_ethtool_stats().
 * NOTE(review): most entries of this table are elided in this excerpt. */
1893 static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
1900 "tx_single_collisions",
1901 "tx_multi_collisions",
/*
 * ethtool get_sset_count hook.
 * NOTE(review): the switch on @sset (and the default error return) is
 * elided from this excerpt; only the stats-count return is visible.
 */
1909 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
1913 return ARRAY_SIZE(rtl8169_gstrings);
/* Poll condition used while dumping tally counters: reads CounterAddrLow
 * and returns the state of the CounterDump bit (the dump trigger written
 * by rtl8169_update_counters()). */
1919 DECLARE_RTL_COND(rtl_counters_cond)
1921 void __iomem *ioaddr = tp->mmio_addr;
1923 return RTL_R32(CounterAddrLow) & CounterDump;
/*
 * Ask the NIC to DMA its hardware tally counters into a coherent buffer,
 * then cache them in tp->counters.  The counter address registers are
 * cleared and the buffer freed afterwards.
 * NOTE(review): the early return after a failed dma_alloc_coherent() and
 * some declarations are elided from this excerpt -- confirm the allocation
 * is checked in the full source.
 */
1926 static void rtl8169_update_counters(struct net_device *dev)
1928 struct rtl8169_private *tp = netdev_priv(dev);
1929 void __iomem *ioaddr = tp->mmio_addr;
1930 struct device *d = &tp->pci_dev->dev;
1931 struct rtl8169_counters *counters;
1936 * Some chips are unable to dump tally counters when the receiver
1939 if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
1942 counters = dma_alloc_coherent(d, sizeof(*counters), &paddr, GFP_KERNEL);
/* Program the 64-bit DMA address, then set CounterDump to start the dump. */
1946 RTL_W32(CounterAddrHigh, (u64)paddr >> 32);
1947 cmd = (u64)paddr & DMA_BIT_MASK(32);
1948 RTL_W32(CounterAddrLow, cmd);
1949 RTL_W32(CounterAddrLow, cmd | CounterDump);
/* Hardware clears CounterDump when the dump completes; poll for that. */
1951 if (rtl_udelay_loop_wait_low(tp, &rtl_counters_cond, 10, 1000))
1952 memcpy(&tp->counters, counters, sizeof(*counters));
1954 RTL_W32(CounterAddrLow, 0);
1955 RTL_W32(CounterAddrHigh, 0);
1957 dma_free_coherent(d, sizeof(*counters), counters, paddr);
/*
 * ethtool get_ethtool_stats hook: refresh the cached hardware counters and
 * copy them into data[] in the order declared by rtl8169_gstrings.
 */
1960 static void rtl8169_get_ethtool_stats(struct net_device *dev,
1961 struct ethtool_stats *stats, u64 *data)
1963 struct rtl8169_private *tp = netdev_priv(dev);
1967 rtl8169_update_counters(dev);
/* Counter fields are little-endian and of mixed width (64/32/16 bit). */
1969 data[0] = le64_to_cpu(tp->counters.tx_packets);
1970 data[1] = le64_to_cpu(tp->counters.rx_packets);
1971 data[2] = le64_to_cpu(tp->counters.tx_errors);
1972 data[3] = le32_to_cpu(tp->counters.rx_errors);
1973 data[4] = le16_to_cpu(tp->counters.rx_missed);
1974 data[5] = le16_to_cpu(tp->counters.align_errors);
1975 data[6] = le32_to_cpu(tp->counters.tx_one_collision);
1976 data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
1977 data[8] = le64_to_cpu(tp->counters.rx_unicast);
1978 data[9] = le64_to_cpu(tp->counters.rx_broadcast);
1979 data[10] = le32_to_cpu(tp->counters.rx_multicast);
1980 data[11] = le16_to_cpu(tp->counters.tx_aborted);
1981 data[12] = le16_to_cpu(tp->counters.tx_underun);
/*
 * ethtool get_strings hook: copy out the statistics name table.
 * NOTE(review): the switch on @stringset is elided from this excerpt.
 */
1984 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1988 memcpy(data, *rtl8169_gstrings, sizeof(rtl8169_gstrings));
/* ethtool operations table wiring the hooks defined above into the core. */
1993 static const struct ethtool_ops rtl8169_ethtool_ops = {
1994 .get_drvinfo = rtl8169_get_drvinfo,
1995 .get_regs_len = rtl8169_get_regs_len,
1996 .get_link = ethtool_op_get_link,
1997 .get_settings = rtl8169_get_settings,
1998 .set_settings = rtl8169_set_settings,
1999 .get_msglevel = rtl8169_get_msglevel,
2000 .set_msglevel = rtl8169_set_msglevel,
2001 .get_regs = rtl8169_get_regs,
2002 .get_wol = rtl8169_get_wol,
2003 .set_wol = rtl8169_set_wol,
2004 .get_strings = rtl8169_get_strings,
2005 .get_sset_count = rtl8169_get_sset_count,
2006 .get_ethtool_stats = rtl8169_get_ethtool_stats,
2007 .get_ts_info = ethtool_op_get_ts_info,
/*
 * Identify the chip by matching TxConfig against a {mask, val} table and
 * store the result in tp->mac_version.  The table's terminating entry
 * (mask 0, val 0) matches anything, so an unrecognized chip falls through
 * to RTL_GIGA_MAC_NONE and the caller-supplied default_version is used.
 */
2010 static void rtl8169_get_mac_version(struct rtl8169_private *tp,
2011 struct net_device *dev, u8 default_version)
2013 void __iomem *ioaddr = tp->mmio_addr;
2015 * The driver currently handles the 8168Bf and the 8168Be identically
2016 * but they can be identified more specifically through the test below
2019 * (RTL_R32(TxConfig) & 0x700000) == 0x500000 ? 8168Bf : 8168Be
2021 * Same thing for the 8101Eb and the 8101Ec:
2023 * (RTL_R32(TxConfig) & 0x700000) == 0x200000 ? 8101Eb : 8101Ec
2025 static const struct rtl_mac_info {
2031 { 0x7cf00000, 0x4c100000, RTL_GIGA_MAC_VER_41 },
2032 { 0x7cf00000, 0x4c000000, RTL_GIGA_MAC_VER_40 },
2035 { 0x7c800000, 0x48800000, RTL_GIGA_MAC_VER_38 },
2036 { 0x7cf00000, 0x48100000, RTL_GIGA_MAC_VER_36 },
2037 { 0x7cf00000, 0x48000000, RTL_GIGA_MAC_VER_35 },
2040 { 0x7c800000, 0x2c800000, RTL_GIGA_MAC_VER_34 },
2041 { 0x7cf00000, 0x2c200000, RTL_GIGA_MAC_VER_33 },
2042 { 0x7cf00000, 0x2c100000, RTL_GIGA_MAC_VER_32 },
2043 { 0x7c800000, 0x2c000000, RTL_GIGA_MAC_VER_33 },
2046 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
2047 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
2048 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
2050 /* 8168DP family. */
2051 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
2052 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
2053 { 0x7cf00000, 0x28b00000, RTL_GIGA_MAC_VER_31 },
2056 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
2057 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
2058 { 0x7cf00000, 0x3c800000, RTL_GIGA_MAC_VER_18 },
2059 { 0x7c800000, 0x3c800000, RTL_GIGA_MAC_VER_24 },
2060 { 0x7cf00000, 0x3c000000, RTL_GIGA_MAC_VER_19 },
2061 { 0x7cf00000, 0x3c200000, RTL_GIGA_MAC_VER_20 },
2062 { 0x7cf00000, 0x3c300000, RTL_GIGA_MAC_VER_21 },
2063 { 0x7cf00000, 0x3c400000, RTL_GIGA_MAC_VER_22 },
2064 { 0x7c800000, 0x3c000000, RTL_GIGA_MAC_VER_22 },
2067 { 0x7cf00000, 0x38000000, RTL_GIGA_MAC_VER_12 },
2068 { 0x7cf00000, 0x38500000, RTL_GIGA_MAC_VER_17 },
2069 { 0x7c800000, 0x38000000, RTL_GIGA_MAC_VER_17 },
2070 { 0x7c800000, 0x30000000, RTL_GIGA_MAC_VER_11 },
2073 { 0x7cf00000, 0x44900000, RTL_GIGA_MAC_VER_39 },
2074 { 0x7c800000, 0x44800000, RTL_GIGA_MAC_VER_39 },
2075 { 0x7c800000, 0x44000000, RTL_GIGA_MAC_VER_37 },
2076 { 0x7cf00000, 0x40b00000, RTL_GIGA_MAC_VER_30 },
2077 { 0x7cf00000, 0x40a00000, RTL_GIGA_MAC_VER_30 },
2078 { 0x7cf00000, 0x40900000, RTL_GIGA_MAC_VER_29 },
2079 { 0x7c800000, 0x40800000, RTL_GIGA_MAC_VER_30 },
2080 { 0x7cf00000, 0x34a00000, RTL_GIGA_MAC_VER_09 },
2081 { 0x7cf00000, 0x24a00000, RTL_GIGA_MAC_VER_09 },
2082 { 0x7cf00000, 0x34900000, RTL_GIGA_MAC_VER_08 },
2083 { 0x7cf00000, 0x24900000, RTL_GIGA_MAC_VER_08 },
2084 { 0x7cf00000, 0x34800000, RTL_GIGA_MAC_VER_07 },
2085 { 0x7cf00000, 0x24800000, RTL_GIGA_MAC_VER_07 },
2086 { 0x7cf00000, 0x34000000, RTL_GIGA_MAC_VER_13 },
2087 { 0x7cf00000, 0x34300000, RTL_GIGA_MAC_VER_10 },
2088 { 0x7cf00000, 0x34200000, RTL_GIGA_MAC_VER_16 },
2089 { 0x7c800000, 0x34800000, RTL_GIGA_MAC_VER_09 },
2090 { 0x7c800000, 0x24800000, RTL_GIGA_MAC_VER_09 },
2091 { 0x7c800000, 0x34000000, RTL_GIGA_MAC_VER_16 },
2092 /* FIXME: where did these entries come from ? -- FR */
2093 { 0xfc800000, 0x38800000, RTL_GIGA_MAC_VER_15 },
2094 { 0xfc800000, 0x30800000, RTL_GIGA_MAC_VER_14 },
2097 { 0xfc800000, 0x98000000, RTL_GIGA_MAC_VER_06 },
2098 { 0xfc800000, 0x18000000, RTL_GIGA_MAC_VER_05 },
2099 { 0xfc800000, 0x10000000, RTL_GIGA_MAC_VER_04 },
2100 { 0xfc800000, 0x04000000, RTL_GIGA_MAC_VER_03 },
2101 { 0xfc800000, 0x00800000, RTL_GIGA_MAC_VER_02 },
2102 { 0xfc800000, 0x00000000, RTL_GIGA_MAC_VER_01 },
2105 { 0x00000000, 0x00000000, RTL_GIGA_MAC_NONE }
2107 const struct rtl_mac_info *p = mac_info;
/* Walk the table until an entry's mask/val matches TxConfig. */
2110 reg = RTL_R32(TxConfig);
2111 while ((reg & p->mask) != p->val)
2113 tp->mac_version = p->mac_version;
2115 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
2116 netif_notice(tp, probe, dev,
2117 "unknown MAC, using family default\n");
2118 tp->mac_version = default_version;
2122 static void rtl8169_print_mac_version(struct rtl8169_private *tp)
2124 dprintk("mac_version = 0x%02x\n", tp->mac_version);
/*
 * Write a table of {reg, val} pairs to the PHY, one rtl_writephy() per
 * entry.
 * NOTE(review): the loop construct and the pointer advance are elided from
 * this excerpt; only the per-entry write is visible.
 */
2132 static void rtl_writephy_batch(struct rtl8169_private *tp,
2133 const struct phy_reg *regs, int len)
2136 rtl_writephy(tp, regs->reg, regs->val);
/* Opcodes of the firmware "phy action" bytecode.  Each 32-bit word encodes
 * the opcode in its top nibble (action & 0xf0000000), a register number in
 * bits 16-27 and immediate data in the low 16 bits -- see
 * rtl_fw_data_ok()/rtl_phy_write_fw() below for the decoding. */
2141 #define PHY_READ 0x00000000
2142 #define PHY_DATA_OR 0x10000000
2143 #define PHY_DATA_AND 0x20000000
2144 #define PHY_BJMPN 0x30000000
2145 #define PHY_READ_EFUSE 0x40000000
2146 #define PHY_READ_MAC_BYTE 0x50000000
2147 #define PHY_WRITE_MAC_BYTE 0x60000000
2148 #define PHY_CLEAR_READCOUNT 0x70000000
2149 #define PHY_WRITE 0x80000000
2150 #define PHY_READCOUNT_EQ_SKIP 0x90000000
2151 #define PHY_COMP_EQ_SKIPN 0xa0000000
2152 #define PHY_COMP_NEQ_SKIPN 0xb0000000
2153 #define PHY_WRITE_PREVIOUS 0xc0000000
2154 #define PHY_SKIPN 0xd0000000
2155 #define PHY_DELAY_MS 0xe0000000
2156 #define PHY_WRITE_ERI_WORD 0xf0000000
/* NOTE(review): the enclosing struct declaration (the firmware container
 * that holds this version string alongside fw/phy_action, used via
 * rtl_fw->version elsewhere in this file) is elided from this excerpt. */
2160 char version[RTL_VER_SIZE];
/* Size in bytes of one 32-bit phy_action opcode word. */
2166 #define FW_OPCODE_SIZE sizeof(typeof(*((struct rtl_fw_phy_action *)0)->code))
/*
 * Validate the firmware container and locate the phy_action bytecode.
 * Two formats are handled: a headered image (fw_info with magic == 0,
 * checksum, fw_start/fw_len and an embedded version string) and a raw
 * image that is nothing but opcodes, in which case the version falls back
 * to the firmware file name.  Sets pa->code/pa->size on success.
 * NOTE(review): several return statements and the checksum comparison are
 * elided from this excerpt.
 */
2168 static bool rtl_fw_format_ok(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2170 const struct firmware *fw = rtl_fw->fw;
2171 struct fw_info *fw_info = (struct fw_info *)fw->data;
2172 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2173 char *version = rtl_fw->version;
/* Too small to contain even a single opcode. */
2176 if (fw->size < FW_OPCODE_SIZE)
2179 if (!fw_info->magic) {
2180 size_t i, size, start;
2183 if (fw->size < sizeof(*fw_info))
/* Whole-file checksum over every byte of the image. */
2186 for (i = 0; i < fw->size; i++)
2187 checksum += fw->data[i];
/* Bounds-check the opcode region declared by the header. */
2191 start = le32_to_cpu(fw_info->fw_start);
2192 if (start > fw->size)
2195 size = le32_to_cpu(fw_info->fw_len);
2196 if (size > (fw->size - start) / FW_OPCODE_SIZE)
2199 memcpy(version, fw_info->version, RTL_VER_SIZE);
2201 pa->code = (__le32 *)(fw->data + start);
/* Raw format: the image must be a whole number of opcodes. */
2204 if (fw->size % FW_OPCODE_SIZE)
2207 strlcpy(version, rtl_lookup_firmware_name(tp), RTL_VER_SIZE);
2209 pa->code = (__le32 *)fw->data;
2210 pa->size = fw->size / FW_OPCODE_SIZE;
/* Ensure the version string is NUL-terminated in all cases. */
2212 version[RTL_VER_SIZE - 1] = 0;
/*
 * Statically verify the phy_action bytecode before executing it: every
 * opcode must be known, and every backward/forward jump target must stay
 * inside the program (preventing out-of-bounds interpretation later in
 * rtl_phy_write_fw()).
 * NOTE(review): the 'return false'/'goto out' statements inside the error
 * branches are elided from this excerpt.
 */
2219 static bool rtl_fw_data_ok(struct rtl8169_private *tp, struct net_device *dev,
2220 struct rtl_fw_phy_action *pa)
2225 for (index = 0; index < pa->size; index++) {
2226 u32 action = le32_to_cpu(pa->code[index]);
/* Register / jump-distance operand lives in bits 16..27. */
2227 u32 regno = (action & 0x0fff0000) >> 16;
2229 switch(action & 0xf0000000) {
2233 case PHY_READ_EFUSE:
2234 case PHY_CLEAR_READCOUNT:
2236 case PHY_WRITE_PREVIOUS:
/* Backward jump must not reach before the program start. */
2241 if (regno > index) {
2242 netif_err(tp, ifup, tp->dev,
2243 "Out of range of firmware\n");
/* Skip-2 opcode needs two following instructions. */
2247 case PHY_READCOUNT_EQ_SKIP:
2248 if (index + 2 >= pa->size) {
2249 netif_err(tp, ifup, tp->dev,
2250 "Out of range of firmware\n");
/* Conditional forward skips must land inside the program. */
2254 case PHY_COMP_EQ_SKIPN:
2255 case PHY_COMP_NEQ_SKIPN:
2257 if (index + 1 + regno >= pa->size) {
2258 netif_err(tp, ifup, tp->dev,
2259 "Out of range of firmware\n");
/* Opcodes defined but not implemented by this driver. */
2264 case PHY_READ_MAC_BYTE:
2265 case PHY_WRITE_MAC_BYTE:
2266 case PHY_WRITE_ERI_WORD:
2268 netif_err(tp, ifup, tp->dev,
2269 "Invalid action 0x%08x\n", action);
2278 static int rtl_check_firmware(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2280 struct net_device *dev = tp->dev;
2283 if (!rtl_fw_format_ok(tp, rtl_fw)) {
2284 netif_err(tp, ifup, dev, "invalid firwmare\n");
2288 if (rtl_fw_data_ok(tp, dev, &rtl_fw->phy_action))
/*
 * Interpret the firmware phy_action bytecode (validated earlier by
 * rtl_fw_data_ok()): a small virtual machine with one data register
 * ('predata'), a read counter ('count') and skip/jump opcodes, driving
 * rtl_readphy()/rtl_writephy()/efuse reads.
 * NOTE(review): several opcode cases and the index-advance statements are
 * elided from this excerpt; the visible cases match the opcode table above.
 */
2294 static void rtl_phy_write_fw(struct rtl8169_private *tp, struct rtl_fw *rtl_fw)
2296 struct rtl_fw_phy_action *pa = &rtl_fw->phy_action;
2300 predata = count = 0;
2302 for (index = 0; index < pa->size; ) {
2303 u32 action = le32_to_cpu(pa->code[index]);
2304 u32 data = action & 0x0000ffff;
2305 u32 regno = (action & 0x0fff0000) >> 16;
2310 switch(action & 0xf0000000) {
/* PHY_READ: latch a PHY register into the data register. */
2312 predata = rtl_readphy(tp, regno);
2327 case PHY_READ_EFUSE:
2328 predata = rtl8168d_efuse_read(tp, regno);
2331 case PHY_CLEAR_READCOUNT:
/* PHY_WRITE: immediate write of 'data' to PHY register 'regno'. */
2336 rtl_writephy(tp, regno, data);
/* Skip the next instruction when the read counter equals 'data'. */
2339 case PHY_READCOUNT_EQ_SKIP:
2340 index += (count == data) ? 2 : 1;
2342 case PHY_COMP_EQ_SKIPN:
2343 if (predata == data)
2347 case PHY_COMP_NEQ_SKIPN:
2348 if (predata != data)
/* Write back the previously latched value. */
2352 case PHY_WRITE_PREVIOUS:
2353 rtl_writephy(tp, regno, predata);
/* Unimplemented opcodes -- rejected earlier by rtl_fw_data_ok(). */
2364 case PHY_READ_MAC_BYTE:
2365 case PHY_WRITE_MAC_BYTE:
2366 case PHY_WRITE_ERI_WORD:
/*
 * Release the loaded firmware (if any) and reset tp->rtl_fw to the
 * "unknown" sentinel.
 * NOTE(review): a line appears to be elided between release_firmware()
 * and the final assignment -- presumably freeing the tp->rtl_fw container
 * itself; confirm against the full source.
 */
2373 static void rtl_release_firmware(struct rtl8169_private *tp)
2375 if (!IS_ERR_OR_NULL(tp->rtl_fw)) {
2376 release_firmware(tp->rtl_fw->fw);
2379 tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
2382 static void rtl_apply_firmware(struct rtl8169_private *tp)
2384 struct rtl_fw *rtl_fw = tp->rtl_fw;
2386 /* TODO: release firmware once rtl_phy_write_fw signals failures. */
2387 if (!IS_ERR_OR_NULL(rtl_fw))
2388 rtl_phy_write_fw(tp, rtl_fw);
/*
 * Apply the firmware only after checking that PHY register @reg reads back
 * @val; warn otherwise.
 * NOTE(review): the line between the warning and the rtl_apply_firmware()
 * call is elided -- whether the apply sits in an 'else' branch cannot be
 * determined from this excerpt; confirm against the full source.
 */
2391 static void rtl_apply_firmware_cond(struct rtl8169_private *tp, u8 reg, u16 val)
2393 if (rtl_readphy(tp, reg) != val)
2394 netif_warn(tp, hw, tp->dev, "chipset not ready for firmware\n");
2396 rtl_apply_firmware(tp);
/* PHY setup for the 8169s: batch-write its register/value table.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2399 static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
2401 static const struct phy_reg phy_reg_init[] = {
2463 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for the 8169sb: batch-write its register/value table.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2466 static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
2468 static const struct phy_reg phy_reg_init[] = {
2474 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * Board-specific PHY tweak applied only when the PCI subsystem IDs match
 * one particular Gigabyte board (vendor GIGABYTE, device 0xe000).
 * NOTE(review): the early-return body of the guard is elided from this
 * excerpt.
 */
2477 static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
2479 struct pci_dev *pdev = tp->pci_dev;
2481 if ((pdev->subsystem_vendor != PCI_VENDOR_ID_GIGABYTE) ||
2482 (pdev->subsystem_device != 0xe000))
2485 rtl_writephy(tp, 0x1f, 0x0001);
2486 rtl_writephy(tp, 0x10, 0xf01b);
2487 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for the 8169scd: batch-write its table, then apply the
 * board-specific quirk above.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2490 static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
2492 static const struct phy_reg phy_reg_init[] = {
2532 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2534 rtl8169scd_hw_phy_config_quirk(tp);
/* PHY setup for the 8169sce: batch-write its register/value table.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2537 static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
2539 static const struct phy_reg phy_reg_init[] = {
2587 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for the 8168bb: set bit 0 of register 0x16 on page 1, then
 * batch-write the table.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2590 static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
2592 static const struct phy_reg phy_reg_init[] = {
2597 rtl_writephy(tp, 0x1f, 0x0001);
2598 rtl_patchphy(tp, 0x16, 1 << 0);
2600 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for the 8168be/bf: batch-write its register/value table.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2603 static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
2605 static const struct phy_reg phy_reg_init[] = {
2611 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for the 8168cp (variant 1): batch-write its table.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2614 static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
2616 static const struct phy_reg phy_reg_init[] = {
2624 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for the 8168cp (variant 2): set bit 5 in registers 0x14 and
 * 0x0d on page 0, then batch-write the table.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2627 static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
2629 static const struct phy_reg phy_reg_init[] = {
2635 rtl_writephy(tp, 0x1f, 0x0000);
2636 rtl_patchphy(tp, 0x14, 1 << 5);
2637 rtl_patchphy(tp, 0x0d, 1 << 5);
2639 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for the 8168c rev.1: batch-write the table, then set bit 5 in
 * registers 0x14 and 0x0d and return to page 0.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2642 static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
2644 static const struct phy_reg phy_reg_init[] = {
2664 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2666 rtl_patchphy(tp, 0x14, 1 << 5);
2667 rtl_patchphy(tp, 0x0d, 1 << 5);
2668 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for the 8168c rev.2: batch-write the table, then set bits in
 * registers 0x16, 0x14 and 0x0d and return to page 0.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2671 static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
2673 static const struct phy_reg phy_reg_init[] = {
2691 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2693 rtl_patchphy(tp, 0x16, 1 << 0);
2694 rtl_patchphy(tp, 0x14, 1 << 5);
2695 rtl_patchphy(tp, 0x0d, 1 << 5);
2696 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for the 8168c rev.3: batch-write the table, then set bits in
 * registers 0x16, 0x14 and 0x0d and return to page 0.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2699 static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
2701 static const struct phy_reg phy_reg_init[] = {
2713 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2715 rtl_patchphy(tp, 0x16, 1 << 0);
2716 rtl_patchphy(tp, 0x14, 1 << 5);
2717 rtl_patchphy(tp, 0x0d, 1 << 5);
2718 rtl_writephy(tp, 0x1f, 0x0000);
/* The 8168c rev.4 uses the exact same PHY setup as rev.3. */
static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
{
	rtl8168c_3_hw_phy_config(tp);
}
/*
 * PHY setup for the 8168d (variant 1): base register table, regulator
 * tuning, an efuse-dependent branch (two different fixups depending on
 * whether efuse register 0x01 reads 0xb1), PLL tuning, and finally the
 * conditional firmware application.
 * NOTE(review): the table entries and several statements are elided in
 * this excerpt.
 */
2726 static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
2728 static const struct phy_reg phy_reg_init_0[] = {
2729 /* Channel Estimation */
2750 * Enhance line driver power
2759 * Can not link to 1Gbps with bad cable
2760 * Decrease SNR threshold form 21.07dB to 19.04dB
2769 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2773 * Fine Tune Switching regulator parameter
2775 rtl_writephy(tp, 0x1f, 0x0002);
2776 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2777 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
/* Two different fixups depending on an efuse-probed hardware revision. */
2779 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2780 static const struct phy_reg phy_reg_init[] = {
2790 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2792 val = rtl_readphy(tp, 0x0d);
2794 if ((val & 0x00ff) != 0x006c) {
2795 static const u32 set[] = {
2796 0x0065, 0x0066, 0x0067, 0x0068,
2797 0x0069, 0x006a, 0x006b, 0x006c
2801 rtl_writephy(tp, 0x1f, 0x0002);
2804 for (i = 0; i < ARRAY_SIZE(set); i++)
2805 rtl_writephy(tp, 0x0d, val | set[i]);
2808 static const struct phy_reg phy_reg_init[] = {
2816 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2819 /* RSET couple improve */
2820 rtl_writephy(tp, 0x1f, 0x0002);
2821 rtl_patchphy(tp, 0x0d, 0x0300);
2822 rtl_patchphy(tp, 0x0f, 0x0010);
2824 /* Fine tune PLL performance */
2825 rtl_writephy(tp, 0x1f, 0x0002);
2826 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2827 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2829 rtl_writephy(tp, 0x1f, 0x0005);
2830 rtl_writephy(tp, 0x05, 0x001b);
/* Apply firmware only if the chip reports the expected MII_EXPANSION value. */
2832 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xbf00);
2834 rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY setup for the 8168d (variant 2): same structure as variant 1 --
 * base table, efuse-dependent branch, PLL and regulator tuning -- but with
 * a different firmware condition value (0xb300).
 * NOTE(review): the table entries and several statements are elided in
 * this excerpt.
 */
2837 static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2839 static const struct phy_reg phy_reg_init_0[] = {
2840 /* Channel Estimation */
2861 * Enhance line driver power
2870 * Can not link to 1Gbps with bad cable
2871 * Decrease SNR threshold form 21.07dB to 19.04dB
2880 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
/* Two different fixups depending on an efuse-probed hardware revision. */
2882 if (rtl8168d_efuse_read(tp, 0x01) == 0xb1) {
2883 static const struct phy_reg phy_reg_init[] = {
2894 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2896 val = rtl_readphy(tp, 0x0d);
2897 if ((val & 0x00ff) != 0x006c) {
2898 static const u32 set[] = {
2899 0x0065, 0x0066, 0x0067, 0x0068,
2900 0x0069, 0x006a, 0x006b, 0x006c
2904 rtl_writephy(tp, 0x1f, 0x0002);
2907 for (i = 0; i < ARRAY_SIZE(set); i++)
2908 rtl_writephy(tp, 0x0d, val | set[i]);
2911 static const struct phy_reg phy_reg_init[] = {
2919 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2922 /* Fine tune PLL performance */
2923 rtl_writephy(tp, 0x1f, 0x0002);
2924 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2925 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2927 /* Switching regulator Slew rate */
2928 rtl_writephy(tp, 0x1f, 0x0002);
2929 rtl_patchphy(tp, 0x0f, 0x0017);
2931 rtl_writephy(tp, 0x1f, 0x0005);
2932 rtl_writephy(tp, 0x05, 0x001b);
/* Apply firmware only if the chip reports the expected MII_EXPANSION value. */
2934 rtl_apply_firmware_cond(tp, MII_EXPANSION, 0xb300);
2936 rtl_writephy(tp, 0x1f, 0x0000);
/* PHY setup for the 8168d (variant 3): batch-write its register table.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
2939 static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2941 static const struct phy_reg phy_reg_init[] = {
2997 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* PHY setup for the 8168d (variant 4): batch-write the table, then set
 * bit 5 of register 0x0d.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt. */
3000 static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
3002 static const struct phy_reg phy_reg_init[] = {
3012 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3013 rtl_patchphy(tp, 0x0d, 1 << 5);
/*
 * PHY setup for the 8168e (variant 1): apply the firmware, batch-write the
 * base table, then a sequence of page-switched read-modify-write tweaks
 * (the inline comments name each group).  Ends with a register 0x0d/0x0e
 * indirect-access sequence.
 * NOTE(review): the phy_reg_init table entries are elided in this excerpt.
 */
3016 static void rtl8168e_1_hw_phy_config(struct rtl8169_private *tp)
3018 static const struct phy_reg phy_reg_init[] = {
3019 /* Enable Delay cap */
3025 /* Channel estimation fine tune */
3034 /* Update PFM & 10M TX idle timer */
3046 rtl_apply_firmware(tp);
3048 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3050 /* DCO enable for 10M IDLE Power */
3051 rtl_writephy(tp, 0x1f, 0x0007);
3052 rtl_writephy(tp, 0x1e, 0x0023);
3053 rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3054 rtl_writephy(tp, 0x1f, 0x0000);
3056 /* For impedance matching */
3057 rtl_writephy(tp, 0x1f, 0x0002);
3058 rtl_w1w0_phy(tp, 0x08, 0x8000, 0x7f00);
3059 rtl_writephy(tp, 0x1f, 0x0000);
3061 /* PHY auto speed down */
3062 rtl_writephy(tp, 0x1f, 0x0007);
3063 rtl_writephy(tp, 0x1e, 0x002d);
3064 rtl_w1w0_phy(tp, 0x18, 0x0050, 0x0000);
3065 rtl_writephy(tp, 0x1f, 0x0000);
3066 rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3068 rtl_writephy(tp, 0x1f, 0x0005);
3069 rtl_writephy(tp, 0x05, 0x8b86);
3070 rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3071 rtl_writephy(tp, 0x1f, 0x0000);
3073 rtl_writephy(tp, 0x1f, 0x0005);
3074 rtl_writephy(tp, 0x05, 0x8b85);
3075 rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3076 rtl_writephy(tp, 0x1f, 0x0007);
3077 rtl_writephy(tp, 0x1e, 0x0020);
3078 rtl_w1w0_phy(tp, 0x15, 0x0000, 0x1100);
3079 rtl_writephy(tp, 0x1f, 0x0006);
3080 rtl_writephy(tp, 0x00, 0x5a00);
3081 rtl_writephy(tp, 0x1f, 0x0000);
3082 rtl_writephy(tp, 0x0d, 0x0007);
3083 rtl_writephy(tp, 0x0e, 0x003c);
3084 rtl_writephy(tp, 0x0d, 0x4007);
3085 rtl_writephy(tp, 0x0e, 0x0000);
3086 rtl_writephy(tp, 0x0d, 0x0000);
/*
 * Program the MAC address into the ExGMAC registers: the six address bytes
 * are packed into three 16-bit words and written to registers 0xe0/0xe4
 * and mirrored at 0xf0/0xf4 with a 16-bit shift.
 * NOTE(review): the declaration line of the w[] word array is elided from
 * this excerpt.
 */
3089 static void rtl_rar_exgmac_set(struct rtl8169_private *tp, u8 *addr)
3092 addr[0] | (addr[1] << 8),
3093 addr[2] | (addr[3] << 8),
3094 addr[4] | (addr[5] << 8)
3096 const struct exgmac_reg e[] = {
3097 { .addr = 0xe0, ERIAR_MASK_1111, .val = w[0] | (w[1] << 16) },
3098 { .addr = 0xe4, ERIAR_MASK_1111, .val = w[2] },
3099 { .addr = 0xf0, ERIAR_MASK_1111, .val = w[0] << 16 },
3100 { .addr = 0xf4, ERIAR_MASK_1111, .val = w[1] | (w[2] << 16) }
3103 rtl_write_exgmac_batch(tp, e, ARRAY_SIZE(e));
/*
 * PHY init for RTL_GIGA_MAC_VER_34 (dispatched from rtl_hw_phy_config):
 * load the PHY firmware patch, replay the vendor register table, then apply
 * per-page tweaks.  Writes of 0x1f select a PHY register page; the other
 * register/value pairs are Realtek-specified magic with no public docs —
 * do not alter them without vendor reference material.
 */
3106 static void rtl8168e_2_hw_phy_config(struct rtl8169_private *tp)
3108 	static const struct phy_reg phy_reg_init[] = {
3109 		/* Enable Delay cap */
3118 		/* Channel estimation fine tune */
3135 	rtl_apply_firmware(tp);
3137 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3139 	/* For 4-corner performance improve */
3140 	rtl_writephy(tp, 0x1f, 0x0005);
3141 	rtl_writephy(tp, 0x05, 0x8b80);
3142 	rtl_w1w0_phy(tp, 0x17, 0x0006, 0x0000);
3143 	rtl_writephy(tp, 0x1f, 0x0000);
3145 	/* PHY auto speed down */
3146 	rtl_writephy(tp, 0x1f, 0x0004);
3147 	rtl_writephy(tp, 0x1f, 0x0007);
3148 	rtl_writephy(tp, 0x1e, 0x002d);
3149 	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3150 	rtl_writephy(tp, 0x1f, 0x0002);
3151 	rtl_writephy(tp, 0x1f, 0x0000);
3152 	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3154 	/* improve 10M EEE waveform */
3155 	rtl_writephy(tp, 0x1f, 0x0005);
3156 	rtl_writephy(tp, 0x05, 0x8b86);
3157 	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3158 	rtl_writephy(tp, 0x1f, 0x0000);
3160 	/* Improve 2-pair detection performance */
3161 	rtl_writephy(tp, 0x1f, 0x0005);
3162 	rtl_writephy(tp, 0x05, 0x8b85);
3163 	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3164 	rtl_writephy(tp, 0x1f, 0x0000);
/* EEE (Energy Efficient Ethernet) related setup via ERI + PHY pages. */
3167 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_1111, 0x0000, 0x0003, ERIAR_EXGMAC);
3168 	rtl_writephy(tp, 0x1f, 0x0005);
3169 	rtl_writephy(tp, 0x05, 0x8b85);
3170 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3171 	rtl_writephy(tp, 0x1f, 0x0004);
3172 	rtl_writephy(tp, 0x1f, 0x0007);
3173 	rtl_writephy(tp, 0x1e, 0x0020);
3174 	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3175 	rtl_writephy(tp, 0x1f, 0x0002);
3176 	rtl_writephy(tp, 0x1f, 0x0000);
3177 	rtl_writephy(tp, 0x0d, 0x0007);
3178 	rtl_writephy(tp, 0x0e, 0x003c);
3179 	rtl_writephy(tp, 0x0d, 0x4007);
3180 	rtl_writephy(tp, 0x0e, 0x0000);
3181 	rtl_writephy(tp, 0x0d, 0x0000);
/* NOTE(review): presumably green/link feature bits on page 3 — vendor magic. */
3184 	rtl_writephy(tp, 0x1f, 0x0003);
3185 	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3186 	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3187 	rtl_writephy(tp, 0x1f, 0x0000);
3189 	/* Broken BIOS workaround: feed GigaMAC registers with MAC address. */
3190 	rtl_rar_exgmac_set(tp, tp->dev->dev_addr);
/*
 * Common PHY tweaks shared by all RTL8168F-class chips; called by the
 * per-variant config routines (rtl8168f_1/_2 and rtl8411 below).
 * 0x1f writes select a PHY register page; values are vendor magic.
 */
3193 static void rtl8168f_hw_phy_config(struct rtl8169_private *tp)
3195 	/* For 4-corner performance improve */
3196 	rtl_writephy(tp, 0x1f, 0x0005);
3197 	rtl_writephy(tp, 0x05, 0x8b80);
3198 	rtl_w1w0_phy(tp, 0x06, 0x0006, 0x0000);
3199 	rtl_writephy(tp, 0x1f, 0x0000);
3201 	/* PHY auto speed down */
3202 	rtl_writephy(tp, 0x1f, 0x0007);
3203 	rtl_writephy(tp, 0x1e, 0x002d);
3204 	rtl_w1w0_phy(tp, 0x18, 0x0010, 0x0000);
3205 	rtl_writephy(tp, 0x1f, 0x0000);
3206 	rtl_w1w0_phy(tp, 0x14, 0x8000, 0x0000);
3208 	/* Improve 10M EEE waveform */
3209 	rtl_writephy(tp, 0x1f, 0x0005);
3210 	rtl_writephy(tp, 0x05, 0x8b86);
3211 	rtl_w1w0_phy(tp, 0x06, 0x0001, 0x0000);
3212 	rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY init for RTL_GIGA_MAC_VER_35: firmware patch + vendor register table,
 * then the shared 8168F tweaks and a 2-pair detection fix.
 */
3215 static void rtl8168f_1_hw_phy_config(struct rtl8169_private *tp)
3217 	static const struct phy_reg phy_reg_init[] = {
3218 		/* Channel estimation fine tune */
3223 		/* Modify green table for giga & fnet */
3240 		/* Modify green table for 10M */
3246 		/* Disable hiimpedance detection (RTCT) */
3252 	rtl_apply_firmware(tp);
3254 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/* Apply the tweaks shared with the other 8168F-class variants. */
3256 	rtl8168f_hw_phy_config(tp);
3258 	/* Improve 2-pair detection performance */
3259 	rtl_writephy(tp, 0x1f, 0x0005);
3260 	rtl_writephy(tp, 0x05, 0x8b85);
3261 	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3262 	rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY init for RTL_GIGA_MAC_VER_36: only the firmware patch plus the
 * shared 8168F tweaks — no extra per-variant register table is needed.
 */
3265 static void rtl8168f_2_hw_phy_config(struct rtl8169_private *tp)
3267 	rtl_apply_firmware(tp);
3269 	rtl8168f_hw_phy_config(tp);
/*
 * PHY init for RTL_GIGA_MAC_VER_38 (RTL8411): firmware patch, shared 8168F
 * tweaks, then 8411-specific green-table / EEE / "uc same-seed" fixes.
 * 0x1f writes select a PHY register page; values are vendor magic.
 */
3272 static void rtl8411_hw_phy_config(struct rtl8169_private *tp)
3274 	static const struct phy_reg phy_reg_init[] = {
3275 		/* Channel estimation fine tune */
3280 		/* Modify green table for giga & fnet */
3297 		/* Modify green table for 10M */
3303 		/* Disable hiimpedance detection (RTCT) */
3310 	rtl_apply_firmware(tp);
3312 	rtl8168f_hw_phy_config(tp);
3314 	/* Improve 2-pair detection performance */
3315 	rtl_writephy(tp, 0x1f, 0x0005);
3316 	rtl_writephy(tp, 0x05, 0x8b85);
3317 	rtl_w1w0_phy(tp, 0x06, 0x4000, 0x0000);
3318 	rtl_writephy(tp, 0x1f, 0x0000);
3320 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3322 	/* Modify green table for giga */
3323 	rtl_writephy(tp, 0x1f, 0x0005);
3324 	rtl_writephy(tp, 0x05, 0x8b54);
3325 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3326 	rtl_writephy(tp, 0x05, 0x8b5d);
3327 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0800);
3328 	rtl_writephy(tp, 0x05, 0x8a7c);
3329 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3330 	rtl_writephy(tp, 0x05, 0x8a7f);
3331 	rtl_w1w0_phy(tp, 0x06, 0x0100, 0x0000);
3332 	rtl_writephy(tp, 0x05, 0x8a82);
3333 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3334 	rtl_writephy(tp, 0x05, 0x8a85);
3335 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3336 	rtl_writephy(tp, 0x05, 0x8a88);
3337 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x0100);
3338 	rtl_writephy(tp, 0x1f, 0x0000);
3340 	/* uc same-seed solution */
3341 	rtl_writephy(tp, 0x1f, 0x0005);
3342 	rtl_writephy(tp, 0x05, 0x8b85);
3343 	rtl_w1w0_phy(tp, 0x06, 0x8000, 0x0000);
3344 	rtl_writephy(tp, 0x1f, 0x0000);
/* EEE-related setup via ERI + PHY pages. */
3347 	rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x00, 0x03, ERIAR_EXGMAC);
3348 	rtl_writephy(tp, 0x1f, 0x0005);
3349 	rtl_writephy(tp, 0x05, 0x8b85);
3350 	rtl_w1w0_phy(tp, 0x06, 0x0000, 0x2000);
3351 	rtl_writephy(tp, 0x1f, 0x0004);
3352 	rtl_writephy(tp, 0x1f, 0x0007);
3353 	rtl_writephy(tp, 0x1e, 0x0020);
3354 	rtl_w1w0_phy(tp, 0x15, 0x0000, 0x0100);
3355 	rtl_writephy(tp, 0x1f, 0x0000);
3356 	rtl_writephy(tp, 0x0d, 0x0007);
3357 	rtl_writephy(tp, 0x0e, 0x003c);
3358 	rtl_writephy(tp, 0x0d, 0x4007);
3359 	rtl_writephy(tp, 0x0e, 0x0000);
3360 	rtl_writephy(tp, 0x0d, 0x0000);
/* NOTE(review): page-3 bit clears, same pattern as rtl8168e_2 — vendor magic. */
3363 	rtl_writephy(tp, 0x1f, 0x0003);
3364 	rtl_w1w0_phy(tp, 0x19, 0x0000, 0x0001);
3365 	rtl_w1w0_phy(tp, 0x10, 0x0000, 0x0400);
3366 	rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY init for RTL_GIGA_MAC_VER_40 (RTL8168G): upload a MAC OCP patch blob
 * used during GPHY reset, load the PHY firmware, then tune the PHY through
 * OCP register accesses.  All register addresses/values are vendor magic.
 */
3369 static void rtl8168g_1_hw_phy_config(struct rtl8169_private *tp)
3371 	static const u16 mac_ocp_patch[] = {
3372 		0xe008, 0xe01b, 0xe01d, 0xe01f,
3373 		0xe021, 0xe023, 0xe025, 0xe027,
3374 		0x49d2, 0xf10d, 0x766c, 0x49e2,
3375 		0xf00a, 0x1ec0, 0x8ee1, 0xc60a,
3377 		0x77c0, 0x4870, 0x9fc0, 0x1ea0,
3378 		0xc707, 0x8ee1, 0x9d6c, 0xc603,
3379 		0xbe00, 0xb416, 0x0076, 0xe86c,
3380 		0xc602, 0xbe00, 0x0000, 0xc602,
3382 		0xbe00, 0x0000, 0xc602, 0xbe00,
3383 		0x0000, 0xc602, 0xbe00, 0x0000,
3384 		0xc602, 0xbe00, 0x0000, 0xc602,
3385 		0xbe00, 0x0000, 0xc602, 0xbe00,
3387 		0x0000, 0x0000, 0x0000, 0x0000
3391 	/* Patch code for GPHY reset */
/* Patch words are 16-bit, hence the 2*i stride from base 0xf800. */
3392 	for (i = 0; i < ARRAY_SIZE(mac_ocp_patch); i++)
3393 		r8168_mac_ocp_write(tp, 0xf800 + 2*i, mac_ocp_patch[i]);
3394 	r8168_mac_ocp_write(tp, 0xfc26, 0x8000);
3395 	r8168_mac_ocp_write(tp, 0xfc28, 0x0075);
3397 	rtl_apply_firmware(tp);
/* Mirror bit 0x0100 of OCP reg 0xa460 into 0xbcc4 bit 0x8000 (inverted). */
3399 	if (r8168_phy_ocp_read(tp, 0xa460) & 0x0100)
3400 		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x8000);
3402 		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x8000, 0x0000);
3404 	if (r8168_phy_ocp_read(tp, 0xa466) & 0x0100)
3405 		rtl_w1w0_phy_ocp(tp, 0xc41a, 0x0002, 0x0000);
3407 		rtl_w1w0_phy_ocp(tp, 0xbcc4, 0x0000, 0x0002);
3409 	rtl_w1w0_phy_ocp(tp, 0xa442, 0x000c, 0x0000);
3410 	rtl_w1w0_phy_ocp(tp, 0xa4b2, 0x0004, 0x0000);
3412 	r8168_phy_ocp_write(tp, 0xa436, 0x8012);
3413 	rtl_w1w0_phy_ocp(tp, 0xa438, 0x8000, 0x0000);
3415 	rtl_w1w0_phy_ocp(tp, 0xc422, 0x4000, 0x2000);
/*
 * PHY init for RTL_GIGA_MAC_VER_07/08/09 (RTL8102E family): set three
 * individual PHY bits on page 0, then replay the vendor register table.
 */
3418 static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
3420 	static const struct phy_reg phy_reg_init[] = {
3427 	rtl_writephy(tp, 0x1f, 0x0000);
3428 	rtl_patchphy(tp, 0x11, 1 << 12);
3429 	rtl_patchphy(tp, 0x19, 1 << 13);
3430 	rtl_patchphy(tp, 0x10, 1 << 15);
3432 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY init for RTL_GIGA_MAC_VER_29/30 (RTL8105E): ALDPS power saving must
 * be disabled before the ram code (firmware) is loaded, then the vendor
 * register table is replayed.
 */
3435 static void rtl8105e_hw_phy_config(struct rtl8169_private *tp)
3437 	static const struct phy_reg phy_reg_init[] = {
3451 	/* Disable ALDPS before ram code */
3452 	rtl_writephy(tp, 0x1f, 0x0000);
3453 	rtl_writephy(tp, 0x18, 0x0310);
3456 	rtl_apply_firmware(tp);
3458 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
/*
 * PHY init for RTL_GIGA_MAC_VER_37 (RTL8402): disable ALDPS, load the PHY
 * firmware, then apply EEE-related ERI and PHY page-4 settings.
 */
3461 static void rtl8402_hw_phy_config(struct rtl8169_private *tp)
3463 	/* Disable ALDPS before setting firmware */
3464 	rtl_writephy(tp, 0x1f, 0x0000);
3465 	rtl_writephy(tp, 0x18, 0x0310);
3468 	rtl_apply_firmware(tp);
/* EEE setting via ERI register 0x1b0 plus PHY page 4 tweaks. */
3471 	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3472 	rtl_writephy(tp, 0x1f, 0x0004);
3473 	rtl_writephy(tp, 0x10, 0x401f);
3474 	rtl_writephy(tp, 0x19, 0x7030);
3475 	rtl_writephy(tp, 0x1f, 0x0000);
/*
 * PHY init for RTL_GIGA_MAC_VER_39 (RTL8106E): disable ALDPS, load the PHY
 * firmware, replay the vendor register table between two ERI writes.
 */
3478 static void rtl8106e_hw_phy_config(struct rtl8169_private *tp)
3480 	static const struct phy_reg phy_reg_init[] = {
3487 	/* Disable ALDPS before ram code */
3488 	rtl_writephy(tp, 0x1f, 0x0000);
3489 	rtl_writephy(tp, 0x18, 0x0310);
3492 	rtl_apply_firmware(tp);
3494 	rtl_eri_write(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
3495 	rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
3497 	rtl_eri_write(tp, 0x1d0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
/*
 * Dispatch to the per-chip PHY configuration routine based on the detected
 * mac_version.  Versions without a case entry here (and VER_01/VER_31,
 * which have no config call visible) get no PHY-specific setup.
 */
3500 static void rtl_hw_phy_config(struct net_device *dev)
3502 	struct rtl8169_private *tp = netdev_priv(dev);
/* Log which mac_version was detected (debug aid). */
3504 	rtl8169_print_mac_version(tp);
3506 	switch (tp->mac_version) {
3507 	case RTL_GIGA_MAC_VER_01:
3509 	case RTL_GIGA_MAC_VER_02:
3510 	case RTL_GIGA_MAC_VER_03:
3511 		rtl8169s_hw_phy_config(tp);
3513 	case RTL_GIGA_MAC_VER_04:
3514 		rtl8169sb_hw_phy_config(tp);
3516 	case RTL_GIGA_MAC_VER_05:
3517 		rtl8169scd_hw_phy_config(tp);
3519 	case RTL_GIGA_MAC_VER_06:
3520 		rtl8169sce_hw_phy_config(tp);
3522 	case RTL_GIGA_MAC_VER_07:
3523 	case RTL_GIGA_MAC_VER_08:
3524 	case RTL_GIGA_MAC_VER_09:
3525 		rtl8102e_hw_phy_config(tp);
3527 	case RTL_GIGA_MAC_VER_11:
3528 		rtl8168bb_hw_phy_config(tp);
3530 	case RTL_GIGA_MAC_VER_12:
3531 		rtl8168bef_hw_phy_config(tp);
3533 	case RTL_GIGA_MAC_VER_17:
3534 		rtl8168bef_hw_phy_config(tp);
3536 	case RTL_GIGA_MAC_VER_18:
3537 		rtl8168cp_1_hw_phy_config(tp);
3539 	case RTL_GIGA_MAC_VER_19:
3540 		rtl8168c_1_hw_phy_config(tp);
3542 	case RTL_GIGA_MAC_VER_20:
3543 		rtl8168c_2_hw_phy_config(tp);
3545 	case RTL_GIGA_MAC_VER_21:
3546 		rtl8168c_3_hw_phy_config(tp);
3548 	case RTL_GIGA_MAC_VER_22:
3549 		rtl8168c_4_hw_phy_config(tp);
3551 	case RTL_GIGA_MAC_VER_23:
3552 	case RTL_GIGA_MAC_VER_24:
3553 		rtl8168cp_2_hw_phy_config(tp);
3555 	case RTL_GIGA_MAC_VER_25:
3556 		rtl8168d_1_hw_phy_config(tp);
3558 	case RTL_GIGA_MAC_VER_26:
3559 		rtl8168d_2_hw_phy_config(tp);
3561 	case RTL_GIGA_MAC_VER_27:
3562 		rtl8168d_3_hw_phy_config(tp);
3564 	case RTL_GIGA_MAC_VER_28:
3565 		rtl8168d_4_hw_phy_config(tp);
3567 	case RTL_GIGA_MAC_VER_29:
3568 	case RTL_GIGA_MAC_VER_30:
3569 		rtl8105e_hw_phy_config(tp);
3571 	case RTL_GIGA_MAC_VER_31:
3574 	case RTL_GIGA_MAC_VER_32:
3575 	case RTL_GIGA_MAC_VER_33:
3576 		rtl8168e_1_hw_phy_config(tp);
3578 	case RTL_GIGA_MAC_VER_34:
3579 		rtl8168e_2_hw_phy_config(tp);
3581 	case RTL_GIGA_MAC_VER_35:
3582 		rtl8168f_1_hw_phy_config(tp);
3584 	case RTL_GIGA_MAC_VER_36:
3585 		rtl8168f_2_hw_phy_config(tp);
3588 	case RTL_GIGA_MAC_VER_37:
3589 		rtl8402_hw_phy_config(tp);
3592 	case RTL_GIGA_MAC_VER_38:
3593 		rtl8411_hw_phy_config(tp);
3596 	case RTL_GIGA_MAC_VER_39:
3597 		rtl8106e_hw_phy_config(tp);
3600 	case RTL_GIGA_MAC_VER_40:
3601 		rtl8168g_1_hw_phy_config(tp);
3604 	case RTL_GIGA_MAC_VER_41:
/*
 * Periodic PHY watchdog (runs from the work/timer machinery): if a PHY
 * reset is still pending, or the link is up, do nothing more; otherwise
 * warn and re-trigger a PHY reset.  Always re-arms tp->timer at the end.
 */
3610 static void rtl_phy_work(struct rtl8169_private *tp)
3612 	struct timer_list *timer = &tp->timer;
3613 	void __iomem *ioaddr = tp->mmio_addr;
3614 	unsigned long timeout = RTL8169_PHY_TIMEOUT;
/* This watchdog is never used on the TBI-only first chip revision. */
3616 	assert(tp->mac_version > RTL_GIGA_MAC_VER_01);
3618 	if (tp->phy_reset_pending(tp)) {
3620 		 * A busy loop could burn quite a few cycles on nowadays CPU.
3621 		 * Let's delay the execution of the timer for a few ticks.
3627 	if (tp->link_ok(ioaddr))
3630 	netif_warn(tp, link, tp->dev, "PHY reset until link up\n");
3632 	tp->phy_reset_enable(tp);
/* Re-arm the watchdog timer regardless of the path taken above. */
3635 	mod_timer(timer, jiffies + timeout);
3638 static void rtl_schedule_task(struct rtl8169_private *tp, enum rtl_flag flag)
3640 if (!test_and_set_bit(flag, tp->wk.flags))
3641 schedule_work(&tp->wk.work);
3644 static void rtl8169_phy_timer(unsigned long __opaque)
3646 struct net_device *dev = (struct net_device *)__opaque;
3647 struct rtl8169_private *tp = netdev_priv(dev);
3649 rtl_schedule_task(tp, RTL_FLAG_TASK_PHY_PENDING);
/*
 * Undo the PCI-level setup done at probe time: release the BARs, clear
 * memory-write-invalidate and disable the PCI device.
 */
3652 static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
3653 				  void __iomem *ioaddr)
3656 	pci_release_regions(pdev);
3657 	pci_clear_mwi(pdev);
3658 	pci_disable_device(pdev);
/* Condition helper: true while a PHY reset is still in progress. */
3662 DECLARE_RTL_COND(rtl_phy_reset_cond)
3664 	return tp->phy_reset_pending(tp);
/*
 * Trigger a PHY reset and poll (1 ms steps, up to 100 iterations) until
 * the reset-pending condition clears.
 */
3667 static void rtl8169_phy_reset(struct net_device *dev,
3668 			      struct rtl8169_private *tp)
3670 	tp->phy_reset_enable(tp);
3671 	rtl_msleep_loop_wait_low(tp, &rtl_phy_reset_cond, 1, 100);
/*
 * True when the chip is the original 8169 (VER_01) and the TBI (fiber)
 * interface is enabled in the PHY status register.
 */
3674 static bool rtl_tbi_enabled(struct rtl8169_private *tp)
3676 	void __iomem *ioaddr = tp->mmio_addr;
3678 	return (tp->mac_version == RTL_GIGA_MAC_VER_01) &&
3679 	       (RTL_R8(PHYstatus) & TBI_Enable);
/*
 * Bring the PHY up at open time: per-chip PHY config, PCI latency/cache
 * tweaks for old chip revisions, a PHY reset, and finally autonegotiation
 * advertising all supported speeds (gigabit only if the MII supports it).
 */
3682 static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
3684 	void __iomem *ioaddr = tp->mmio_addr;
3686 	rtl_hw_phy_config(dev);
3688 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06) {
3689 		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
/* Conservative PCI latency timer for these older parts. */
3693 	pci_write_config_byte(tp->pci_dev, PCI_LATENCY_TIMER, 0x40);
3695 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
3696 		pci_write_config_byte(tp->pci_dev, PCI_CACHE_LINE_SIZE, 0x08);
3698 	if (tp->mac_version == RTL_GIGA_MAC_VER_02) {
3699 		dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
3701 		dprintk("Set PHY Reg 0x0bh = 0x00h\n");
3702 		rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
3705 	rtl8169_phy_reset(dev, tp);
/* Advertise 10/100 always; add 1000 only when the MII reports GMII. */
3707 	rtl8169_set_speed(dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
3708 			  ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3709 			  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3710 			  (tp->mii.supports_gmii ?
3711 			   ADVERTISED_1000baseT_Half |
3712 			   ADVERTISED_1000baseT_Full : 0));
3714 	if (rtl_tbi_enabled(tp))
3715 		netif_info(tp, link, dev, "TBI auto-negotiating\n");
/*
 * Program the unicast MAC address into the MAC0/MAC4 registers.  The config
 * registers must be unlocked (Cfg9346) around the writes; VER_34 chips also
 * need the address mirrored into the EXGMAC registers.
 */
3718 static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
3720 	void __iomem *ioaddr = tp->mmio_addr;
3724 	RTL_W8(Cfg9346, Cfg9346_Unlock);
/* High 2 bytes first, then the low 4 — two 32-bit register writes. */
3726 	RTL_W32(MAC4, addr[4] | addr[5] << 8);
3729 	RTL_W32(MAC0, addr[0] | addr[1] << 8 | addr[2] << 16 | addr[3] << 24);
3732 	if (tp->mac_version == RTL_GIGA_MAC_VER_34)
3733 		rtl_rar_exgmac_set(tp, addr);
3735 	RTL_W8(Cfg9346, Cfg9346_Lock);
3737 	rtl_unlock_work(tp);
/*
 * ndo_set_mac_address handler: validate the requested address, store it in
 * the net_device and push it to the hardware registers.
 * Returns -EADDRNOTAVAIL for an invalid ethernet address.
 */
3740 static int rtl_set_mac_address(struct net_device *dev, void *p)
3742 	struct rtl8169_private *tp = netdev_priv(dev);
3743 	struct sockaddr *addr = p;
3745 	if (!is_valid_ether_addr(addr->sa_data))
3746 		return -EADDRNOTAVAIL;
3748 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
3750 	rtl_rar_set(tp, dev->dev_addr);
3755 static int rtl8169_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
3757 struct rtl8169_private *tp = netdev_priv(dev);
3758 struct mii_ioctl_data *data = if_mii(ifr);
3760 return netif_running(dev) ? tp->do_ioctl(tp, data, cmd) : -ENODEV;
/*
 * MII ioctl handler for copper (xMII) chips: report the fixed internal PHY
 * id, and read/write PHY registers on behalf of SIOCGMIIREG/SIOCSMIIREG.
 */
3763 static int rtl_xmii_ioctl(struct rtl8169_private *tp,
3764 			  struct mii_ioctl_data *data, int cmd)
3768 		data->phy_id = 32; /* Internal PHY */
3772 		data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
3776 		rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
/* TBI-attached chips have no MII registers to expose via ioctl. */
3782 static int rtl_tbi_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd)
3787 static void rtl_disable_msi(struct pci_dev *pdev, struct rtl8169_private *tp)
3789 if (tp->features & RTL_FEATURE_MSI) {
3790 pci_disable_msi(pdev);
3791 tp->features &= ~RTL_FEATURE_MSI;
/*
 * Select the MDIO accessor pair for this chip generation: the 8168DP
 * variants and the 8168G need indirect access paths; everything else uses
 * the plain r8169 PHY register window.
 */
3795 static void rtl_init_mdio_ops(struct rtl8169_private *tp)
3797 	struct mdio_ops *ops = &tp->mdio_ops;
3799 	switch (tp->mac_version) {
3800 	case RTL_GIGA_MAC_VER_27:
3801 		ops->write	= r8168dp_1_mdio_write;
3802 		ops->read	= r8168dp_1_mdio_read;
3804 	case RTL_GIGA_MAC_VER_28:
3805 	case RTL_GIGA_MAC_VER_31:
3806 		ops->write	= r8168dp_2_mdio_write;
3807 		ops->read	= r8168dp_2_mdio_read;
3809 	case RTL_GIGA_MAC_VER_40:
3810 	case RTL_GIGA_MAC_VER_41:
3811 		ops->write	= r8168g_mdio_write;
3812 		ops->read	= r8168g_mdio_read;
3815 		ops->write	= r8169_mdio_write;
3816 		ops->read	= r8169_mdio_read;
/*
 * Before suspending with Wake-on-LAN armed, renegotiate down to the lowest
 * speed the link partner advertised (10, then 100, else everything) to
 * save power while still keeping the link alive for wake packets.
 */
3821 static void rtl_speed_down(struct rtl8169_private *tp)
3826 	rtl_writephy(tp, 0x1f, 0x0000);
/* Read the link partner ability register to pick the cheapest speed. */
3827 	lpa = rtl_readphy(tp, MII_LPA);
3829 	if (lpa & (LPA_10HALF | LPA_10FULL))
3830 		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full;
3831 	else if (lpa & (LPA_100HALF | LPA_100FULL))
3832 		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3833 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full;
3835 		adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
3836 		      ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
3837 		      (tp->mii.supports_gmii ?
3838 		       ADVERTISED_1000baseT_Half |
3839 		       ADVERTISED_1000baseT_Full : 0);
3841 	rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL,
/*
 * WoL suspend quirk: on the listed chip generations the receiver must keep
 * accepting broadcast/multicast/unicast frames while suspended, otherwise
 * wake packets would be filtered out before they can trigger a wakeup.
 */
3845 static void rtl_wol_suspend_quirk(struct rtl8169_private *tp)
3847 	void __iomem *ioaddr = tp->mmio_addr;
3849 	switch (tp->mac_version) {
3850 	case RTL_GIGA_MAC_VER_25:
3851 	case RTL_GIGA_MAC_VER_26:
3852 	case RTL_GIGA_MAC_VER_29:
3853 	case RTL_GIGA_MAC_VER_30:
3854 	case RTL_GIGA_MAC_VER_32:
3855 	case RTL_GIGA_MAC_VER_33:
3856 	case RTL_GIGA_MAC_VER_34:
3857 	case RTL_GIGA_MAC_VER_37:
3858 	case RTL_GIGA_MAC_VER_38:
3859 	case RTL_GIGA_MAC_VER_39:
3860 	case RTL_GIGA_MAC_VER_40:
3861 	case RTL_GIGA_MAC_VER_41:
3862 		RTL_W32(RxConfig, RTL_R32(RxConfig) |
3863 			AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
/*
 * If any wake source is armed, apply the suspend quirk; the return value
 * tells callers whether the PLL may be powered down.
 */
3870 static bool rtl_wol_pll_power_down(struct rtl8169_private *tp)
3872 	if (!(__rtl8169_get_wol(tp) & WAKE_ANY))
3876 	rtl_wol_suspend_quirk(tp);
/* Power the 810x-class PHY down by setting BMCR power-down on page 0. */
3881 static void r810x_phy_power_down(struct rtl8169_private *tp)
3883 	rtl_writephy(tp, 0x1f, 0x0000);
3884 	rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/* Power the PHY back up: clearing PDOWN by re-enabling autonegotiation. */
3887 static void r810x_phy_power_up(struct rtl8169_private *tp)
3889 	rtl_writephy(tp, 0x1f, 0x0000);
3890 	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * PLL power-down for 810x-class chips: skipped entirely when Wake-on-LAN
 * needs the link alive; otherwise the PHY is powered down and, on chips
 * other than the listed early versions, the PLL is gated via PMCH bit 7.
 */
3893 static void r810x_pll_power_down(struct rtl8169_private *tp)
3895 	void __iomem *ioaddr = tp->mmio_addr;
3897 	if (rtl_wol_pll_power_down(tp))
3900 	r810x_phy_power_down(tp);
3902 	switch (tp->mac_version) {
3903 	case RTL_GIGA_MAC_VER_07:
3904 	case RTL_GIGA_MAC_VER_08:
3905 	case RTL_GIGA_MAC_VER_09:
3906 	case RTL_GIGA_MAC_VER_10:
3907 	case RTL_GIGA_MAC_VER_13:
3908 	case RTL_GIGA_MAC_VER_16:
/* PMCH bit 7 gates the PLL on the remaining chip versions. */
3911 		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
/* Reverse of the above: ungate the PLL, then power the PHY up. */
3916 static void r810x_pll_power_up(struct rtl8169_private *tp)
3918 	void __iomem *ioaddr = tp->mmio_addr;
3920 	r810x_phy_power_up(tp);
3922 	switch (tp->mac_version) {
3923 	case RTL_GIGA_MAC_VER_07:
3924 	case RTL_GIGA_MAC_VER_08:
3925 	case RTL_GIGA_MAC_VER_09:
3926 	case RTL_GIGA_MAC_VER_10:
3927 	case RTL_GIGA_MAC_VER_13:
3928 	case RTL_GIGA_MAC_VER_16:
3931 		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
/*
 * Power the 8168-class PHY up.  The listed chip versions additionally need
 * PHY register 0x0e cleared; all versions finish by re-enabling
 * autonegotiation (which clears BMCR_PDOWN).
 */
3936 static void r8168_phy_power_up(struct rtl8169_private *tp)
3938 	rtl_writephy(tp, 0x1f, 0x0000);
3939 	switch (tp->mac_version) {
3940 	case RTL_GIGA_MAC_VER_11:
3941 	case RTL_GIGA_MAC_VER_12:
3942 	case RTL_GIGA_MAC_VER_17:
3943 	case RTL_GIGA_MAC_VER_18:
3944 	case RTL_GIGA_MAC_VER_19:
3945 	case RTL_GIGA_MAC_VER_20:
3946 	case RTL_GIGA_MAC_VER_21:
3947 	case RTL_GIGA_MAC_VER_22:
3948 	case RTL_GIGA_MAC_VER_23:
3949 	case RTL_GIGA_MAC_VER_24:
3950 	case RTL_GIGA_MAC_VER_25:
3951 	case RTL_GIGA_MAC_VER_26:
3952 	case RTL_GIGA_MAC_VER_27:
3953 	case RTL_GIGA_MAC_VER_28:
3954 	case RTL_GIGA_MAC_VER_31:
3955 		rtl_writephy(tp, 0x0e, 0x0000);
3960 	rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
/*
 * Power the 8168-class PHY down.  VER_32/33 keep autoneg enabled while
 * powering down; the listed versions first write 0x0200 to register 0x0e.
 */
3963 static void r8168_phy_power_down(struct rtl8169_private *tp)
3965 	rtl_writephy(tp, 0x1f, 0x0000);
3966 	switch (tp->mac_version) {
3967 	case RTL_GIGA_MAC_VER_32:
3968 	case RTL_GIGA_MAC_VER_33:
3969 		rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE | BMCR_PDOWN);
3972 	case RTL_GIGA_MAC_VER_11:
3973 	case RTL_GIGA_MAC_VER_12:
3974 	case RTL_GIGA_MAC_VER_17:
3975 	case RTL_GIGA_MAC_VER_18:
3976 	case RTL_GIGA_MAC_VER_19:
3977 	case RTL_GIGA_MAC_VER_20:
3978 	case RTL_GIGA_MAC_VER_21:
3979 	case RTL_GIGA_MAC_VER_22:
3980 	case RTL_GIGA_MAC_VER_23:
3981 	case RTL_GIGA_MAC_VER_24:
3982 	case RTL_GIGA_MAC_VER_25:
3983 	case RTL_GIGA_MAC_VER_26:
3984 	case RTL_GIGA_MAC_VER_27:
3985 	case RTL_GIGA_MAC_VER_28:
3986 	case RTL_GIGA_MAC_VER_31:
3987 		rtl_writephy(tp, 0x0e, 0x0200);
3989 		rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
/*
 * PLL power-down for 8168-class chips.  Skipped when a DASH management
 * agent is present (8168DP), when ASF is active (VER_23/24), or when
 * Wake-on-LAN requires the link.  VER_32/33 also get an EPHY tweak before
 * checking WoL.  Finally the PHY is powered down and, on the listed
 * versions, the PLL gated via PMCH bit 7.
 */
3994 static void r8168_pll_power_down(struct rtl8169_private *tp)
3996 	void __iomem *ioaddr = tp->mmio_addr;
3998 	if ((tp->mac_version == RTL_GIGA_MAC_VER_27 ||
3999 	     tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4000 	     tp->mac_version == RTL_GIGA_MAC_VER_31) &&
4001 	    r8168dp_check_dash(tp)) {
4005 	if ((tp->mac_version == RTL_GIGA_MAC_VER_23 ||
4006 	     tp->mac_version == RTL_GIGA_MAC_VER_24) &&
4007 	    (RTL_R16(CPlusCmd) & ASF)) {
4011 	if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
4012 	    tp->mac_version == RTL_GIGA_MAC_VER_33)
4013 		rtl_ephy_write(tp, 0x19, 0xff64);
4015 	if (rtl_wol_pll_power_down(tp))
4018 	r8168_phy_power_down(tp);
4020 	switch (tp->mac_version) {
4021 	case RTL_GIGA_MAC_VER_25:
4022 	case RTL_GIGA_MAC_VER_26:
4023 	case RTL_GIGA_MAC_VER_27:
4024 	case RTL_GIGA_MAC_VER_28:
4025 	case RTL_GIGA_MAC_VER_31:
4026 	case RTL_GIGA_MAC_VER_32:
4027 	case RTL_GIGA_MAC_VER_33:
4028 		RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
/* Reverse of the above: ungate the PLL first, then power the PHY up. */
4033 static void r8168_pll_power_up(struct rtl8169_private *tp)
4035 	void __iomem *ioaddr = tp->mmio_addr;
4037 	switch (tp->mac_version) {
4038 	case RTL_GIGA_MAC_VER_25:
4039 	case RTL_GIGA_MAC_VER_26:
4040 	case RTL_GIGA_MAC_VER_27:
4041 	case RTL_GIGA_MAC_VER_28:
4042 	case RTL_GIGA_MAC_VER_31:
4043 	case RTL_GIGA_MAC_VER_32:
4044 	case RTL_GIGA_MAC_VER_33:
4045 		RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
4049 	r8168_phy_power_up(tp);
/* Invoke an optional chip-specific operation (no-op when unset). */
4052 static void rtl_generic_op(struct rtl8169_private *tp,
4053 			   void (*op)(struct rtl8169_private *))
/* Thin wrappers dispatching to the per-chip PLL power handlers. */
4059 static void rtl_pll_power_down(struct rtl8169_private *tp)
4061 	rtl_generic_op(tp, tp->pll_power_ops.down);
4064 static void rtl_pll_power_up(struct rtl8169_private *tp)
4066 	rtl_generic_op(tp, tp->pll_power_ops.up);
/*
 * Select the PLL power handler pair for this chip: 810x-class chips use
 * the r810x handlers, 8168-class chips the r8168 handlers; any version
 * not listed gets no PLL power management.
 */
4069 static void rtl_init_pll_power_ops(struct rtl8169_private *tp)
4071 	struct pll_power_ops *ops = &tp->pll_power_ops;
4073 	switch (tp->mac_version) {
4074 	case RTL_GIGA_MAC_VER_07:
4075 	case RTL_GIGA_MAC_VER_08:
4076 	case RTL_GIGA_MAC_VER_09:
4077 	case RTL_GIGA_MAC_VER_10:
4078 	case RTL_GIGA_MAC_VER_16:
4079 	case RTL_GIGA_MAC_VER_29:
4080 	case RTL_GIGA_MAC_VER_30:
4081 	case RTL_GIGA_MAC_VER_37:
4082 	case RTL_GIGA_MAC_VER_39:
4083 		ops->down	= r810x_pll_power_down;
4084 		ops->up		= r810x_pll_power_up;
4087 	case RTL_GIGA_MAC_VER_11:
4088 	case RTL_GIGA_MAC_VER_12:
4089 	case RTL_GIGA_MAC_VER_17:
4090 	case RTL_GIGA_MAC_VER_18:
4091 	case RTL_GIGA_MAC_VER_19:
4092 	case RTL_GIGA_MAC_VER_20:
4093 	case RTL_GIGA_MAC_VER_21:
4094 	case RTL_GIGA_MAC_VER_22:
4095 	case RTL_GIGA_MAC_VER_23:
4096 	case RTL_GIGA_MAC_VER_24:
4097 	case RTL_GIGA_MAC_VER_25:
4098 	case RTL_GIGA_MAC_VER_26:
4099 	case RTL_GIGA_MAC_VER_27:
4100 	case RTL_GIGA_MAC_VER_28:
4101 	case RTL_GIGA_MAC_VER_31:
4102 	case RTL_GIGA_MAC_VER_32:
4103 	case RTL_GIGA_MAC_VER_33:
4104 	case RTL_GIGA_MAC_VER_34:
4105 	case RTL_GIGA_MAC_VER_35:
4106 	case RTL_GIGA_MAC_VER_36:
4107 	case RTL_GIGA_MAC_VER_38:
4108 	case RTL_GIGA_MAC_VER_40:
4109 	case RTL_GIGA_MAC_VER_41:
4110 		ops->down	= r8168_pll_power_down;
4111 		ops->up		= r8168_pll_power_up;
/*
 * Program the baseline RxConfig for this chip generation: old 8169/810x
 * parts use the FIFO threshold, 8168C-era parts and VER_34 add the
 * multi-fragment enable, and everything newer uses the plain 128-byte
 * interrupt coalescing setting.
 */
4121 static void rtl_init_rxcfg(struct rtl8169_private *tp)
4123 	void __iomem *ioaddr = tp->mmio_addr;
4125 	switch (tp->mac_version) {
4126 	case RTL_GIGA_MAC_VER_01:
4127 	case RTL_GIGA_MAC_VER_02:
4128 	case RTL_GIGA_MAC_VER_03:
4129 	case RTL_GIGA_MAC_VER_04:
4130 	case RTL_GIGA_MAC_VER_05:
4131 	case RTL_GIGA_MAC_VER_06:
4132 	case RTL_GIGA_MAC_VER_10:
4133 	case RTL_GIGA_MAC_VER_11:
4134 	case RTL_GIGA_MAC_VER_12:
4135 	case RTL_GIGA_MAC_VER_13:
4136 	case RTL_GIGA_MAC_VER_14:
4137 	case RTL_GIGA_MAC_VER_15:
4138 	case RTL_GIGA_MAC_VER_16:
4139 	case RTL_GIGA_MAC_VER_17:
4140 		RTL_W32(RxConfig, RX_FIFO_THRESH | RX_DMA_BURST);
4142 	case RTL_GIGA_MAC_VER_18:
4143 	case RTL_GIGA_MAC_VER_19:
4144 	case RTL_GIGA_MAC_VER_20:
4145 	case RTL_GIGA_MAC_VER_21:
4146 	case RTL_GIGA_MAC_VER_22:
4147 	case RTL_GIGA_MAC_VER_23:
4148 	case RTL_GIGA_MAC_VER_24:
4149 	case RTL_GIGA_MAC_VER_34:
4150 		RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
4153 		RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
4158 static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
4160 tp->dirty_tx = tp->cur_tx = tp->cur_rx = 0;
/*
 * Enable jumbo-frame support via the chip-specific handler, with the
 * config registers unlocked (Cfg9346) around the call.
 */
4163 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
4165 	void __iomem *ioaddr = tp->mmio_addr;
4167 	RTL_W8(Cfg9346, Cfg9346_Unlock);
4168 	rtl_generic_op(tp, tp->jumbo_ops.enable);
4169 	RTL_W8(Cfg9346, Cfg9346_Lock);
/* Mirror image of rtl_hw_jumbo_enable() for the disable path. */
4172 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
4174 	void __iomem *ioaddr = tp->mmio_addr;
4176 	RTL_W8(Cfg9346, Cfg9346_Unlock);
4177 	rtl_generic_op(tp, tp->jumbo_ops.disable);
4178 	RTL_W8(Cfg9346, Cfg9346_Lock);
/*
 * 8168C-class jumbo enable: set both jumbo bits and reduce the PCIe max
 * read request size (0x2 => smaller requests while jumbo is active).
 */
4181 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
4183 	void __iomem *ioaddr = tp->mmio_addr;
4185 	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4186 	RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
4187 	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/* Reverse of the enable path; 0x5 restores the normal read request size. */
4190 static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
4192 	void __iomem *ioaddr = tp->mmio_addr;
4194 	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4195 	RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
4196 	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/* 8168DP jumbo enable/disable: only the Config3 Jumbo_En0 bit is toggled. */
4199 static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
4201 	void __iomem *ioaddr = tp->mmio_addr;
4203 	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4206 static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
4208 	void __iomem *ioaddr = tp->mmio_addr;
4210 	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
/*
 * 8168E jumbo enable: raise the max TX packet size, set the jumbo bits in
 * Config3/Config4 and shrink the PCIe max read request size.
 */
4213 static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
4215 	void __iomem *ioaddr = tp->mmio_addr;
4217 	RTL_W8(MaxTxPacketSize, 0x3f);
4218 	RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
4219 	RTL_W8(Config4, RTL_R8(Config4) | 0x01);
4220 	rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
/* Reverse: restore the standard 0x0c max TX size and normal PCIe setting. */
4223 static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
4225 	void __iomem *ioaddr = tp->mmio_addr;
4227 	RTL_W8(MaxTxPacketSize, 0x0c);
4228 	RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
4229 	RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
4230 	rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
/*
 * 8168B rev 0 jumbo handling is purely a PCIe tuning change (read request
 * size + no-snoop); rev 1 additionally toggles Config4 bit 0.
 */
4233 static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
4235 	rtl_tx_performance_tweak(tp->pci_dev,
4236 		(0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
4239 static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
4241 	rtl_tx_performance_tweak(tp->pci_dev,
4242 		(0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
/* Rev 1 builds on rev 0 and also sets Config4 bit 0. */
4245 static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
4247 	void __iomem *ioaddr = tp->mmio_addr;
4249 	r8168b_0_hw_jumbo_enable(tp);
4251 	RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
4254 static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
4256 	void __iomem *ioaddr = tp->mmio_addr;
4258 	r8168b_0_hw_jumbo_disable(tp);
4260 	RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/*
 * Select the jumbo-frame enable/disable handler pair for this chip.
 * Chips with no jumbo support (8169 family, 810x, and 8168G) leave the
 * ops unset so rtl_generic_op() turns the calls into no-ops.
 */
4263 static void rtl_init_jumbo_ops(struct rtl8169_private *tp)
4265 	struct jumbo_ops *ops = &tp->jumbo_ops;
4267 	switch (tp->mac_version) {
4268 	case RTL_GIGA_MAC_VER_11:
4269 		ops->disable	= r8168b_0_hw_jumbo_disable;
4270 		ops->enable	= r8168b_0_hw_jumbo_enable;
4272 	case RTL_GIGA_MAC_VER_12:
4273 	case RTL_GIGA_MAC_VER_17:
4274 		ops->disable	= r8168b_1_hw_jumbo_disable;
4275 		ops->enable	= r8168b_1_hw_jumbo_enable;
4277 	case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
4278 	case RTL_GIGA_MAC_VER_19:
4279 	case RTL_GIGA_MAC_VER_20:
4280 	case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
4281 	case RTL_GIGA_MAC_VER_22:
4282 	case RTL_GIGA_MAC_VER_23:
4283 	case RTL_GIGA_MAC_VER_24:
4284 	case RTL_GIGA_MAC_VER_25:
4285 	case RTL_GIGA_MAC_VER_26:
4286 		ops->disable	= r8168c_hw_jumbo_disable;
4287 		ops->enable	= r8168c_hw_jumbo_enable;
4289 	case RTL_GIGA_MAC_VER_27:
4290 	case RTL_GIGA_MAC_VER_28:
4291 		ops->disable	= r8168dp_hw_jumbo_disable;
4292 		ops->enable	= r8168dp_hw_jumbo_enable;
4294 	case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
4295 	case RTL_GIGA_MAC_VER_32:
4296 	case RTL_GIGA_MAC_VER_33:
4297 	case RTL_GIGA_MAC_VER_34:
4298 		ops->disable	= r8168e_hw_jumbo_disable;
4299 		ops->enable	= r8168e_hw_jumbo_enable;
4303 	 * No action needed for jumbo frames with 8169.
4304 	 * No jumbo for 810x at all.
4306 	case RTL_GIGA_MAC_VER_40:
4307 	case RTL_GIGA_MAC_VER_41:
4309 		ops->disable	= NULL;
/* Condition helper: true while the soft reset bit is still self-clearing. */
4315 DECLARE_RTL_COND(rtl_chipcmd_cond)
4317 	void __iomem *ioaddr = tp->mmio_addr;
4319 	return RTL_R8(ChipCmd) & CmdReset;
/*
 * Issue a chip soft reset and poll (100 us steps, up to 100 iterations)
 * until the hardware clears the CmdReset bit.
 */
4322 static void rtl_hw_reset(struct rtl8169_private *tp)
4324 	void __iomem *ioaddr = tp->mmio_addr;
4326 	RTL_W8(ChipCmd, CmdReset);
4328 	rtl_udelay_loop_wait_low(tp, &rtl_chipcmd_cond, 100, 100);
/*
 * Load the chip-specific firmware patch from userspace (request_firmware)
 * and validate it.  On any failure the driver continues without firmware
 * after logging a warning — firmware is an optional fixup, not required.
 */
4331 static void rtl_request_uncached_firmware(struct rtl8169_private *tp)
4333 	struct rtl_fw *rtl_fw;
4337 	name = rtl_lookup_firmware_name(tp);
4339 		goto out_no_firmware;
4341 	rtl_fw = kzalloc(sizeof(*rtl_fw), GFP_KERNEL);
4345 	rc = request_firmware(&rtl_fw->fw, name, &tp->pci_dev->dev);
/* Sanity-check the blob format before accepting it. */
4349 	rc = rtl_check_firmware(tp, rtl_fw);
4351 		goto err_release_firmware;
4353 	tp->rtl_fw = rtl_fw;
4357 err_release_firmware:
4358 	release_firmware(rtl_fw->fw);
4362 	netif_warn(tp, ifup, tp->dev, "unable to load firmware patch %s (%d)\n",
/* Only attempt the load once; tp->rtl_fw caches success or failure. */
4369 static void rtl_request_firmware(struct rtl8169_private *tp)
4371 	if (IS_ERR(tp->rtl_fw))
4372 		rtl_request_uncached_firmware(tp);
4375 static void rtl_rx_close(struct rtl8169_private *tp)
4377 void __iomem *ioaddr = tp->mmio_addr;
4379 RTL_W32(RxConfig, RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK);
/* Condition helper: true while the normal-priority TX queue is polling. */
4382 DECLARE_RTL_COND(rtl_npq_cond)
4384 	void __iomem *ioaddr = tp->mmio_addr;
4386 	return RTL_R8(TxPoll) & NPQ;
/* Condition helper: true once the TX FIFO reports empty. */
4389 DECLARE_RTL_COND(rtl_txcfg_empty_cond)
4391 	void __iomem *ioaddr = tp->mmio_addr;
4393 	return RTL_R32(TxConfig) & TXCFG_EMPTY;
/*
 * Quiesce the hardware before a reset: mask/ack interrupts, then wait for
 * in-flight TX DMA to drain using the mechanism appropriate to the chip
 * generation (NPQ polling for 8168DP, StopReq + TXCFG_EMPTY for the newer
 * chips, plain StopReq otherwise).
 */
4396 static void rtl8169_hw_reset(struct rtl8169_private *tp)
4398 	void __iomem *ioaddr = tp->mmio_addr;
4400 	/* Disable interrupts */
4401 	rtl8169_irq_mask_and_ack(tp);
4405 	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
4406 	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
4407 	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
4408 		rtl_udelay_loop_wait_low(tp, &rtl_npq_cond, 20, 42*42);
4409 	} else if (tp->mac_version == RTL_GIGA_MAC_VER_34 ||
4410 		   tp->mac_version == RTL_GIGA_MAC_VER_35 ||
4411 		   tp->mac_version == RTL_GIGA_MAC_VER_36 ||
4412 		   tp->mac_version == RTL_GIGA_MAC_VER_37 ||
4413 		   tp->mac_version == RTL_GIGA_MAC_VER_40 ||
4414 		   tp->mac_version == RTL_GIGA_MAC_VER_41 ||
4415 		   tp->mac_version == RTL_GIGA_MAC_VER_38) {
4416 		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
4417 		rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 666);
4419 		RTL_W8(ChipCmd, RTL_R8(ChipCmd) | StopReq);
/* Program DMA burst size and interframe gap into TxConfig. */
4426 static void rtl_set_rx_tx_config_registers(struct rtl8169_private *tp)
4428 	void __iomem *ioaddr = tp->mmio_addr;
4430 	/* Set DMA burst size and Interframe Gap Time */
4431 	RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
4432 		(InterFrameGap << TxInterFrameGapShift));
/*
 * Common start path: delegate chip setup to the per-chip hw_start handler,
 * then unmask all interrupts.
 */
4435 static void rtl_hw_start(struct net_device *dev)
4437 	struct rtl8169_private *tp = netdev_priv(dev);
4441 	rtl_irq_enable_all(tp);
/*
 * Tell the NIC where the TX and RX descriptor rings live in DMA memory,
 * splitting each 64-bit bus address across a high/low register pair.
 */
4444 static void rtl_set_rx_tx_desc_registers(struct rtl8169_private *tp,
4445 					 void __iomem *ioaddr)
4448 	 * Magic spell: some iop3xx ARM board needs the TxDescAddrHigh
4449 	 * register to be written before TxDescAddrLow to work.
4450 	 * Switching from MMIO to I/O access fixes the issue as well.
4452 	RTL_W32(TxDescStartAddrHigh, ((u64) tp->TxPhyAddr) >> 32);
4453 	RTL_W32(TxDescStartAddrLow, ((u64) tp->TxPhyAddr) & DMA_BIT_MASK(32));
4454 	RTL_W32(RxDescAddrHigh, ((u64) tp->RxPhyAddr) >> 32);
4455 	RTL_W32(RxDescAddrLow, ((u64) tp->RxPhyAddr) & DMA_BIT_MASK(32));
/* Read CPlusCmd and write the same value back (read-modify-write flush). */
4458 static u16 rtl_rw_cpluscmd(void __iomem *ioaddr)
4462 	cmd = RTL_R16(CPlusCmd);
4463 	RTL_W16(CPlusCmd, cmd);
/*
 * Set the RX size filter one byte above the buffer size, effectively
 * disabling length-based filtering (a tight limit hurt performance).
 */
4467 static void rtl_set_rx_max_size(void __iomem *ioaddr, unsigned int rx_buf_sz)
4469 	/* Low hurts. Let's disable the filtering. */
4470 	RTL_W16(RxMaxSize, rx_buf_sz + 1);
/*
 * Write a chip- and PCI-clock-dependent magic value to register 0x7c on
 * 8110SCd/SCe parts; other versions match no table entry and are skipped.
 */
4473 static void rtl8169_set_magic_reg(void __iomem *ioaddr, unsigned mac_version)
4475 	static const struct rtl_cfg2_info {
4480 		{ RTL_GIGA_MAC_VER_05, PCI_Clock_33MHz, 0x000fff00 }, // 8110SCd
4481 		{ RTL_GIGA_MAC_VER_05, PCI_Clock_66MHz, 0x000fffff },
4482 		{ RTL_GIGA_MAC_VER_06, PCI_Clock_33MHz, 0x00ffff00 }, // 8110SCe
4483 		{ RTL_GIGA_MAC_VER_06, PCI_Clock_66MHz, 0x00ffffff }
4485 	const struct rtl_cfg2_info *p = cfg2_info;
/* Determine the PCI bus clock from Config2 to pick the right row. */
4489 	clk = RTL_R8(Config2) & PCI_Clock_66MHz;
4490 	for (i = 0; i < ARRAY_SIZE(cfg2_info); i++, p++) {
4491 		if ((p->mac_version == mac_version) && (p->clk == clk)) {
4492 			RTL_W32(0x7c, p->val);
/* Program the receive filter (RxConfig accept bits + MAR0/MAR4 multicast
 * hash) from the netdev flags and multicast list. */
4498 static void rtl_set_rx_mode(struct net_device *dev)
4500 struct rtl8169_private *tp = netdev_priv(dev);
4501 void __iomem *ioaddr = tp->mmio_addr;
4502 u32 mc_filter[2]; /* Multicast hash filter */
4506 if (dev->flags & IFF_PROMISC) {
4507 /* Unconditionally log net taps. */
4508 netif_notice(tp, link, dev, "Promiscuous mode enabled\n");
4510 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
4512 mc_filter[1] = mc_filter[0] = 0xffffffff;
4513 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
4514 (dev->flags & IFF_ALLMULTI)) {
4515 /* Too many to filter perfectly -- accept all multicasts. */
4516 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
4517 mc_filter[1] = mc_filter[0] = 0xffffffff;
4519 struct netdev_hw_addr *ha;
4521 rx_mode = AcceptBroadcast | AcceptMyPhys;
4522 mc_filter[1] = mc_filter[0] = 0;
/* Hash each multicast address into one of 64 filter bits (top 6 CRC bits). */
4523 netdev_for_each_mc_addr(ha, dev) {
4524 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
4525 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
4526 rx_mode |= AcceptMulticast;
4530 if (dev->features & NETIF_F_RXALL)
4531 rx_mode |= (AcceptErr | AcceptRunt);
4533 tmp = (RTL_R32(RxConfig) & ~RX_CONFIG_ACCEPT_MASK) | rx_mode;
/* Chips newer than VER_06 expect the hash words byte-swapped and swapped
 * with each other. */
4535 if (tp->mac_version > RTL_GIGA_MAC_VER_06) {
4536 u32 data = mc_filter[0];
4538 mc_filter[0] = swab32(mc_filter[1]);
4539 mc_filter[1] = swab32(data);
/* NOTE(review): VER_35 forces the filter wide open — presumably a multicast
 * filtering erratum on that chip; confirm against vendor docs. */
4542 if (tp->mac_version == RTL_GIGA_MAC_VER_35)
4543 mc_filter[1] = mc_filter[0] = 0xffffffff;
4545 RTL_W32(MAR0 + 4, mc_filter[1]);
4546 RTL_W32(MAR0 + 0, mc_filter[0]);
4548 RTL_W32(RxConfig, tmp);
/* Hardware init for the original 8169/8110 family (MAC_VER_01..06). Some
 * chips need Tx/Rx enabled and TxConfig programmed before the descriptor
 * registers are set, others after — hence the duplicated version checks. */
4551 static void rtl_hw_start_8169(struct net_device *dev)
4553 struct rtl8169_private *tp = netdev_priv(dev);
4554 void __iomem *ioaddr = tp->mmio_addr;
4555 struct pci_dev *pdev = tp->pci_dev;
4557 if (tp->mac_version == RTL_GIGA_MAC_VER_05) {
4558 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | PCIMulRW);
4559 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08);
4562 RTL_W8(Cfg9346, Cfg9346_Unlock);
4563 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4564 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4565 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4566 tp->mac_version == RTL_GIGA_MAC_VER_04)
4567 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4571 RTL_W8(EarlyTxThres, NoEarlyTx);
4573 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
4575 if (tp->mac_version == RTL_GIGA_MAC_VER_01 ||
4576 tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4577 tp->mac_version == RTL_GIGA_MAC_VER_03 ||
4578 tp->mac_version == RTL_GIGA_MAC_VER_04)
4579 rtl_set_rx_tx_config_registers(tp);
4581 tp->cp_cmd |= rtl_rw_cpluscmd(ioaddr) | PCIMulRW;
4583 if (tp->mac_version == RTL_GIGA_MAC_VER_02 ||
4584 tp->mac_version == RTL_GIGA_MAC_VER_03) {
4585 dprintk("Set MAC Reg C+CR Offset 0xE0. "
4586 "Bit-3 and bit-14 MUST be 1\n");
4587 tp->cp_cmd |= (1 << 14);
4590 RTL_W16(CPlusCmd, tp->cp_cmd);
4592 rtl8169_set_magic_reg(ioaddr, tp->mac_version);
4595 * Undocumented corner. Supposedly:
4596 * (TxTimer << 12) | (TxPackets << 8) | (RxTimer << 4) | RxPackets
4598 RTL_W16(IntrMitigate, 0x0000);
4600 rtl_set_rx_tx_desc_registers(tp, ioaddr);
4602 if (tp->mac_version != RTL_GIGA_MAC_VER_01 &&
4603 tp->mac_version != RTL_GIGA_MAC_VER_02 &&
4604 tp->mac_version != RTL_GIGA_MAC_VER_03 &&
4605 tp->mac_version != RTL_GIGA_MAC_VER_04) {
4606 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
4607 rtl_set_rx_tx_config_registers(tp);
4610 RTL_W8(Cfg9346, Cfg9346_Lock);
4612 /* Initially a 10 us delay. Turned it into a PCI commit. - FR */
4615 RTL_W32(RxMissed, 0);
4617 rtl_set_rx_mode(dev);
4619 /* no early-rx interrupts */
4620 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* CSI write dispatcher: no-op if the chip has no CSI write op installed. */
4623 static void rtl_csi_write(struct rtl8169_private *tp, int addr, int value)
4625 if (tp->csi_ops.write)
4626 tp->csi_ops.write(tp, addr, value);
/* CSI read dispatcher; returns all-ones if no CSI read op is installed. */
4629 static u32 rtl_csi_read(struct rtl8169_private *tp, int addr)
4631 return tp->csi_ops.read ? tp->csi_ops.read(tp, addr) : ~0;
/* Read-modify-write the top byte of CSI register 0x070c with 'bits'. */
4634 static void rtl_csi_access_enable(struct rtl8169_private *tp, u32 bits)
4638 csi = rtl_csi_read(tp, 0x070c) & 0x00ffffff;
4639 rtl_csi_write(tp, 0x070c, csi | bits);
/* CSI access enable, variant 1 (top byte 0x17). */
4642 static void rtl_csi_access_enable_1(struct rtl8169_private *tp)
4644 rtl_csi_access_enable(tp, 0x17000000);
/* CSI access enable, variant 2 (top byte 0x27). */
4647 static void rtl_csi_access_enable_2(struct rtl8169_private *tp)
4649 rtl_csi_access_enable(tp, 0x27000000);
/* Poll condition: CSIAR command-complete flag. */
4652 DECLARE_RTL_COND(rtl_csiar_cond)
4654 void __iomem *ioaddr = tp->mmio_addr;
4656 return RTL_R32(CSIAR) & CSIAR_FLAG;
/* Generic CSI write: load CSIDR, issue the write command via CSIAR, then
 * poll until the flag goes low (10 us steps, up to 100 tries). */
4659 static void r8169_csi_write(struct rtl8169_private *tp, int addr, int value)
4661 void __iomem *ioaddr = tp->mmio_addr;
4663 RTL_W32(CSIDR, value);
4664 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4665 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4667 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* Generic CSI read: issue the read command, poll for completion, then read
 * CSIDR; returns all-ones on timeout. */
4670 static u32 r8169_csi_read(struct rtl8169_private *tp, int addr)
4672 void __iomem *ioaddr = tp->mmio_addr;
4674 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) |
4675 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4677 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4678 RTL_R32(CSIDR) : ~0;
/* 8402-family CSI write: same as the generic write but with an extra
 * function-select field in CSIAR (continuation line elided in this view). */
4681 static void r8402_csi_write(struct rtl8169_private *tp, int addr, int value)
4683 void __iomem *ioaddr = tp->mmio_addr;
4685 RTL_W32(CSIDR, value);
4686 RTL_W32(CSIAR, CSIAR_WRITE_CMD | (addr & CSIAR_ADDR_MASK) |
4687 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT |
4690 rtl_udelay_loop_wait_low(tp, &rtl_csiar_cond, 10, 100);
/* 8402-family CSI read: selects the NIC function via CSIAR_FUNC_NIC. */
4693 static u32 r8402_csi_read(struct rtl8169_private *tp, int addr)
4695 void __iomem *ioaddr = tp->mmio_addr;
4697 RTL_W32(CSIAR, (addr & CSIAR_ADDR_MASK) | CSIAR_FUNC_NIC |
4698 CSIAR_BYTE_ENABLE << CSIAR_BYTE_ENABLE_SHIFT);
4700 return rtl_udelay_loop_wait_high(tp, &rtl_csiar_cond, 10, 100) ?
4701 RTL_R32(CSIDR) : ~0;
/* Select the CSI access ops for this chip: old chips get none (listed
 * cases, ops left NULL), 8402/8411 (VER_37/38) get the function-aware
 * variants, everything else the generic ones. */
4704 static void rtl_init_csi_ops(struct rtl8169_private *tp)
4706 struct csi_ops *ops = &tp->csi_ops;
4708 switch (tp->mac_version) {
4709 case RTL_GIGA_MAC_VER_01:
4710 case RTL_GIGA_MAC_VER_02:
4711 case RTL_GIGA_MAC_VER_03:
4712 case RTL_GIGA_MAC_VER_04:
4713 case RTL_GIGA_MAC_VER_05:
4714 case RTL_GIGA_MAC_VER_06:
4715 case RTL_GIGA_MAC_VER_10:
4716 case RTL_GIGA_MAC_VER_11:
4717 case RTL_GIGA_MAC_VER_12:
4718 case RTL_GIGA_MAC_VER_13:
4719 case RTL_GIGA_MAC_VER_14:
4720 case RTL_GIGA_MAC_VER_15:
4721 case RTL_GIGA_MAC_VER_16:
4722 case RTL_GIGA_MAC_VER_17:
4727 case RTL_GIGA_MAC_VER_37:
4728 case RTL_GIGA_MAC_VER_38:
4729 ops->write = r8402_csi_write;
4730 ops->read = r8402_csi_read;
4734 ops->write = r8169_csi_write;
4735 ops->read = r8169_csi_read;
/* struct ephy_info member (definition partially elided in this view). */
4741 unsigned int offset;
/* Apply a table of EPHY patches: for each entry, clear e->mask bits in the
 * current register value, OR in e->bits, and write back to e->offset. */
4746 static void rtl_ephy_init(struct rtl8169_private *tp, const struct ephy_info *e,
4752 w = (rtl_ephy_read(tp, e->offset) & ~e->mask) | e->bits;
4753 rtl_ephy_write(tp, e->offset, w);
/* Clear the PCIe CLKREQ enable bit in the Link Control register. */
4758 static void rtl_disable_clock_request(struct pci_dev *pdev)
4760 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
4761 PCI_EXP_LNKCTL_CLKREQ_EN);
/* Set the PCIe CLKREQ enable bit in the Link Control register. */
4764 static void rtl_enable_clock_request(struct pci_dev *pdev)
4766 pcie_capability_set_word(pdev, PCI_EXP_LNKCTL,
4767 PCI_EXP_LNKCTL_CLKREQ_EN);
/* CPlusCmd bits cleared on 8168 chips (mask body elided in this view). */
4770 #define R8168_CPCMD_QUIRK_MASK (\
/* 8168b/8168bb hardware init (MAC_VER_11). */
4781 static void rtl_hw_start_8168bb(struct rtl8169_private *tp)
4783 void __iomem *ioaddr = tp->mmio_addr;
4784 struct pci_dev *pdev = tp->pci_dev;
4786 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4788 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* PCIe tuning only at standard MTU; jumbo paths use different settings. */
4790 if (tp->dev->mtu <= ETH_DATA_LEN) {
4791 rtl_tx_performance_tweak(pdev, (0x5 << MAX_READ_REQUEST_SHIFT) |
4792 PCI_EXP_DEVCTL_NOSNOOP_EN);
/* 8168be/f init (MAC_VER_12/17): 8168bb init plus max Tx packet size and
 * clearing Config4 bit 0. */
4796 static void rtl_hw_start_8168bef(struct rtl8169_private *tp)
4798 void __iomem *ioaddr = tp->mmio_addr;
4800 rtl_hw_start_8168bb(tp);
4802 RTL_W8(MaxTxPacketSize, TxPacketMax);
4804 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
/* Shared tail of the 8168c/cp init variants: speed-down, beacon off, PCIe
 * tweak at standard MTU, CLKREQ off, CPlusCmd quirk bits cleared. */
4807 static void __rtl_hw_start_8168cp(struct rtl8169_private *tp)
4809 void __iomem *ioaddr = tp->mmio_addr;
4810 struct pci_dev *pdev = tp->pci_dev;
4812 RTL_W8(Config1, RTL_R8(Config1) | Speed_down);
4814 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4816 if (tp->dev->mtu <= ETH_DATA_LEN)
4817 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4819 rtl_disable_clock_request(pdev);
4821 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168cp variant 1 init: EPHY patch table then the shared cp tail. */
4824 static void rtl_hw_start_8168cp_1(struct rtl8169_private *tp)
4826 static const struct ephy_info e_info_8168cp[] = {
4827 { 0x01, 0, 0x0001 },
4828 { 0x02, 0x0800, 0x1000 },
4829 { 0x03, 0, 0x0042 },
4830 { 0x06, 0x0080, 0x0000 },
4834 rtl_csi_access_enable_2(tp);
4836 rtl_ephy_init(tp, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
4838 __rtl_hw_start_8168cp(tp);
/* 8168cp variant 2 init (no EPHY patches, no speed-down/CLKREQ changes). */
4841 static void rtl_hw_start_8168cp_2(struct rtl8169_private *tp)
4843 void __iomem *ioaddr = tp->mmio_addr;
4844 struct pci_dev *pdev = tp->pci_dev;
4846 rtl_csi_access_enable_2(tp);
4848 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
4850 if (tp->dev->mtu <= ETH_DATA_LEN)
4851 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4853 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168cp variant 3 init: adds a DBG_REG magic write and max Tx size. */
4856 static void rtl_hw_start_8168cp_3(struct rtl8169_private *tp)
4858 void __iomem *ioaddr = tp->mmio_addr;
4859 struct pci_dev *pdev = tp->pci_dev;
4861 rtl_csi_access_enable_2(tp);
4863 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* Undocumented debug-register value (comment line elided in this view). */
4866 RTL_W8(DBG_REG, 0x20);
4868 RTL_W8(MaxTxPacketSize, TxPacketMax);
4870 if (tp->dev->mtu <= ETH_DATA_LEN)
4871 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4873 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168c variant 1 init: NAK workaround bits in DBG_REG plus EPHY patches. */
4876 static void rtl_hw_start_8168c_1(struct rtl8169_private *tp)
4878 void __iomem *ioaddr = tp->mmio_addr;
4879 static const struct ephy_info e_info_8168c_1[] = {
4880 { 0x02, 0x0800, 0x1000 },
4881 { 0x03, 0, 0x0002 },
4882 { 0x06, 0x0080, 0x0000 }
4885 rtl_csi_access_enable_2(tp);
4887 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
4889 rtl_ephy_init(tp, e_info_8168c_1, ARRAY_SIZE(e_info_8168c_1));
4891 __rtl_hw_start_8168cp(tp);
/* 8168c variant 2 init: two EPHY patches then the shared cp tail. */
4894 static void rtl_hw_start_8168c_2(struct rtl8169_private *tp)
4896 static const struct ephy_info e_info_8168c_2[] = {
4897 { 0x01, 0, 0x0001 },
4898 { 0x03, 0x0400, 0x0220 }
4901 rtl_csi_access_enable_2(tp);
4903 rtl_ephy_init(tp, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
4905 __rtl_hw_start_8168cp(tp);
/* 8168c variant 3 init: identical to variant 2. */
4908 static void rtl_hw_start_8168c_3(struct rtl8169_private *tp)
4910 rtl_hw_start_8168c_2(tp);
/* 8168c variant 4 init: CSI enable plus the shared cp tail (no EPHY). */
4913 static void rtl_hw_start_8168c_4(struct rtl8169_private *tp)
4915 rtl_csi_access_enable_2(tp);
4917 __rtl_hw_start_8168cp(tp);
/* 8168d init (MAC_VER_25/26/27). */
4920 static void rtl_hw_start_8168d(struct rtl8169_private *tp)
4922 void __iomem *ioaddr = tp->mmio_addr;
4923 struct pci_dev *pdev = tp->pci_dev;
4925 rtl_csi_access_enable_2(tp);
4927 rtl_disable_clock_request(pdev);
4929 RTL_W8(MaxTxPacketSize, TxPacketMax);
4931 if (tp->dev->mtu <= ETH_DATA_LEN)
4932 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4934 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
/* 8168dp init (MAC_VER_31). */
4937 static void rtl_hw_start_8168dp(struct rtl8169_private *tp)
4939 void __iomem *ioaddr = tp->mmio_addr;
4940 struct pci_dev *pdev = tp->pci_dev;
4942 rtl_csi_access_enable_1(tp);
4944 if (tp->dev->mtu <= ETH_DATA_LEN)
4945 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4947 RTL_W8(MaxTxPacketSize, TxPacketMax);
4949 rtl_disable_clock_request(pdev);
/* 8168d variant 4 init (MAC_VER_28): CSI enable, PCIe tweak, max Tx size,
 * a table of EPHY patches, then CLKREQ enable.
 *
 * Unlike rtl_ephy_init() this loop keeps the entry's own mask convention
 * ((w & e->mask) | e->bits); several table entries are elided in this view,
 * so that convention is left untouched. */
4952 static void rtl_hw_start_8168d_4(struct rtl8169_private *tp)
4954 void __iomem *ioaddr = tp->mmio_addr;
4955 struct pci_dev *pdev = tp->pci_dev;
4956 static const struct ephy_info e_info_8168d_4[] = {
4958 { 0x19, 0x20, 0x50 },
4963 rtl_csi_access_enable_1(tp);
4965 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
4967 RTL_W8(MaxTxPacketSize, TxPacketMax);
4969 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
4970 const struct ephy_info *e = e_info_8168d_4 + i;
4973 w = rtl_ephy_read(tp, e->offset);
/* Fix: write the patched value back to the register it was read from
 * (e->offset). The old code wrote every patch to hard-coded EPHY reg 0x03,
 * clobbering it repeatedly and never applying the intended patches —
 * compare the read-modify-write pattern in rtl_ephy_init(). */
4974 rtl_ephy_write(tp, e->offset, (w & e->mask) | e->bits);
4977 rtl_enable_clock_request(pdev);
/* 8168e variant 1 init (MAC_VER_32/33): large EPHY patch table, PCIe tweak
 * at standard MTU, Tx FIFO pointer reset via TXPLA_RST pulse, SPI off. */
4980 static void rtl_hw_start_8168e_1(struct rtl8169_private *tp)
4982 void __iomem *ioaddr = tp->mmio_addr;
4983 struct pci_dev *pdev = tp->pci_dev;
4984 static const struct ephy_info e_info_8168e_1[] = {
4985 { 0x00, 0x0200, 0x0100 },
4986 { 0x00, 0x0000, 0x0004 },
4987 { 0x06, 0x0002, 0x0001 },
4988 { 0x06, 0x0000, 0x0030 },
4989 { 0x07, 0x0000, 0x2000 },
4990 { 0x00, 0x0000, 0x0020 },
4991 { 0x03, 0x5800, 0x2000 },
4992 { 0x03, 0x0000, 0x0001 },
4993 { 0x01, 0x0800, 0x1000 },
4994 { 0x07, 0x0000, 0x4000 },
4995 { 0x1e, 0x0000, 0x2000 },
4996 { 0x19, 0xffff, 0xfe6c },
4997 { 0x0a, 0x0000, 0x0040 }
5000 rtl_csi_access_enable_2(tp);
5002 rtl_ephy_init(tp, e_info_8168e_1, ARRAY_SIZE(e_info_8168e_1));
5004 if (tp->dev->mtu <= ETH_DATA_LEN)
5005 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5007 RTL_W8(MaxTxPacketSize, TxPacketMax);
5009 rtl_disable_clock_request(pdev);
5011 /* Reset tx FIFO pointer */
5012 RTL_W32(MISC, RTL_R32(MISC) | TXPLA_RST);
5013 RTL_W32(MISC, RTL_R32(MISC) & ~TXPLA_RST);
5015 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168e variant 2 init (MAC_VER_34): EPHY patches, ERI (EXGMAC) register
 * programming, early Tx size, auto-FIFO, EEE LED tuning, PFM/PWM enable. */
5018 static void rtl_hw_start_8168e_2(struct rtl8169_private *tp)
5020 void __iomem *ioaddr = tp->mmio_addr;
5021 struct pci_dev *pdev = tp->pci_dev;
5022 static const struct ephy_info e_info_8168e_2[] = {
5023 { 0x09, 0x0000, 0x0080 },
5024 { 0x19, 0x0000, 0x0224 }
5027 rtl_csi_access_enable_1(tp);
5029 rtl_ephy_init(tp, e_info_8168e_2, ARRAY_SIZE(e_info_8168e_2));
5031 if (tp->dev->mtu <= ETH_DATA_LEN)
5032 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5034 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5035 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5036 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5037 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5038 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5039 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x07ff0060, ERIAR_EXGMAC);
5040 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5041 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5043 RTL_W8(MaxTxPacketSize, EarlySize);
5045 rtl_disable_clock_request(pdev);
5047 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5048 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5050 /* Adjust EEE LED frequency */
5051 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5053 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5054 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5055 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* Common 8168f init shared by 8168f_1 and 8411: ERI programming, early Tx
 * size, auto-FIFO, PFM/PWM, SPI off. */
5058 static void rtl_hw_start_8168f(struct rtl8169_private *tp)
5060 void __iomem *ioaddr = tp->mmio_addr;
5061 struct pci_dev *pdev = tp->pci_dev;
5063 rtl_csi_access_enable_2(tp);
5065 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5067 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5068 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5069 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00100002, ERIAR_EXGMAC);
5070 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
/* Toggle ERI 0xdc bit 0 low then high (required programming sequence). */
5071 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5072 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5073 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5074 rtl_w1w0_eri(tp, 0x1d0, ERIAR_MASK_0001, 0x10, 0x00, ERIAR_EXGMAC);
5075 rtl_eri_write(tp, 0xcc, ERIAR_MASK_1111, 0x00000050, ERIAR_EXGMAC);
5076 rtl_eri_write(tp, 0xd0, ERIAR_MASK_1111, 0x00000060, ERIAR_EXGMAC);
5078 RTL_W8(MaxTxPacketSize, EarlySize);
5080 rtl_disable_clock_request(pdev);
5082 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5083 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5084 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5085 RTL_W32(MISC, RTL_R32(MISC) | PWM_EN);
5086 RTL_W8(Config5, RTL_R8(Config5) & ~Spi_en);
/* 8168f variant 1 init (MAC_VER_35/36): common 8168f init plus its own
 * EPHY patches and EEE LED frequency adjustment. */
5089 static void rtl_hw_start_8168f_1(struct rtl8169_private *tp)
5091 void __iomem *ioaddr = tp->mmio_addr;
5092 static const struct ephy_info e_info_8168f_1[] = {
5093 { 0x06, 0x00c0, 0x0020 },
5094 { 0x08, 0x0001, 0x0002 },
5095 { 0x09, 0x0000, 0x0080 },
5096 { 0x19, 0x0000, 0x0224 }
5099 rtl_hw_start_8168f(tp);
5101 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5103 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0xff00, ERIAR_EXGMAC);
5105 /* Adjust EEE LED frequency */
5106 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
/* 8411 init (MAC_VER_38): common 8168f init with a different EPHY table
 * and a different 0x0d4 ERI mask than 8168f_1. */
5109 static void rtl_hw_start_8411(struct rtl8169_private *tp)
5111 static const struct ephy_info e_info_8168f_1[] = {
5112 { 0x06, 0x00c0, 0x0020 },
5113 { 0x0f, 0xffff, 0x5200 },
5114 { 0x1e, 0x0000, 0x4000 },
5115 { 0x19, 0x0000, 0x0224 }
5118 rtl_hw_start_8168f(tp);
5120 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5122 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0c00, 0x0000, ERIAR_EXGMAC);
/* 8168g variant 1 init (MAC_VER_40/41): ERI programming, CSI enable, PCIe
 * tweak, ungate RXDV, early Tx size, EEE LED tuning. */
5125 static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5127 void __iomem *ioaddr = tp->mmio_addr;
5128 struct pci_dev *pdev = tp->pci_dev;
5130 rtl_eri_write(tp, 0xc8, ERIAR_MASK_0101, 0x080002, ERIAR_EXGMAC);
5131 rtl_eri_write(tp, 0xcc, ERIAR_MASK_0001, 0x38, ERIAR_EXGMAC);
5132 rtl_eri_write(tp, 0xd0, ERIAR_MASK_0001, 0x48, ERIAR_EXGMAC);
5133 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00100006, ERIAR_EXGMAC);
5135 rtl_csi_access_enable_1(tp);
5137 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5139 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5140 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5142 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5143 RTL_W32(MISC, RTL_R32(MISC) & ~RXDV_GATED_EN);
5144 RTL_W8(MaxTxPacketSize, EarlySize);
5146 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5147 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5149 /* Adjust EEE LED frequency */
5150 RTL_W8(EEE_LED, RTL_R8(EEE_LED) & ~0x07);
5152 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x02, ERIAR_EXGMAC);
/* Top-level hardware init for the 8168 family: common register setup, then
 * dispatch to the per-chip init routine by mac_version. */
5155 static void rtl_hw_start_8168(struct net_device *dev)
5157 struct rtl8169_private *tp = netdev_priv(dev);
5158 void __iomem *ioaddr = tp->mmio_addr;
5160 RTL_W8(Cfg9346, Cfg9346_Unlock);
5162 RTL_W8(MaxTxPacketSize, TxPacketMax);
5164 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5166 tp->cp_cmd |= RTL_R16(CPlusCmd) | PktCntrDisable | INTT_1;
5168 RTL_W16(CPlusCmd, tp->cp_cmd);
5170 RTL_W16(IntrMitigate, 0x5151);
5172 /* Work around for RxFIFO overflow. */
5173 if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
5174 tp->event_slow |= RxFIFOOver | PCSTimeout;
5175 tp->event_slow &= ~RxOverflow;
5178 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5180 rtl_set_rx_mode(dev);
5182 RTL_W32(TxConfig, (TX_DMA_BURST << TxDMAShift) |
5183 (InterFrameGap << TxInterFrameGapShift));
5187 switch (tp->mac_version) {
5188 case RTL_GIGA_MAC_VER_11:
5189 rtl_hw_start_8168bb(tp);
5192 case RTL_GIGA_MAC_VER_12:
5193 case RTL_GIGA_MAC_VER_17:
5194 rtl_hw_start_8168bef(tp);
5197 case RTL_GIGA_MAC_VER_18:
5198 rtl_hw_start_8168cp_1(tp);
5201 case RTL_GIGA_MAC_VER_19:
5202 rtl_hw_start_8168c_1(tp);
5205 case RTL_GIGA_MAC_VER_20:
5206 rtl_hw_start_8168c_2(tp);
5209 case RTL_GIGA_MAC_VER_21:
5210 rtl_hw_start_8168c_3(tp);
5213 case RTL_GIGA_MAC_VER_22:
5214 rtl_hw_start_8168c_4(tp);
5217 case RTL_GIGA_MAC_VER_23:
5218 rtl_hw_start_8168cp_2(tp);
5221 case RTL_GIGA_MAC_VER_24:
5222 rtl_hw_start_8168cp_3(tp);
5225 case RTL_GIGA_MAC_VER_25:
5226 case RTL_GIGA_MAC_VER_26:
5227 case RTL_GIGA_MAC_VER_27:
5228 rtl_hw_start_8168d(tp);
5231 case RTL_GIGA_MAC_VER_28:
5232 rtl_hw_start_8168d_4(tp);
5235 case RTL_GIGA_MAC_VER_31:
5236 rtl_hw_start_8168dp(tp);
5239 case RTL_GIGA_MAC_VER_32:
5240 case RTL_GIGA_MAC_VER_33:
5241 rtl_hw_start_8168e_1(tp);
5243 case RTL_GIGA_MAC_VER_34:
5244 rtl_hw_start_8168e_2(tp);
5247 case RTL_GIGA_MAC_VER_35:
5248 case RTL_GIGA_MAC_VER_36:
5249 rtl_hw_start_8168f_1(tp);
5252 case RTL_GIGA_MAC_VER_38:
5253 rtl_hw_start_8411(tp);
5256 case RTL_GIGA_MAC_VER_40:
5257 case RTL_GIGA_MAC_VER_41:
5258 rtl_hw_start_8168g_1(tp);
/* default: unknown chipset — log and fall through to common epilogue. */
5262 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
5263 dev->name, tp->mac_version);
5267 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5269 RTL_W8(Cfg9346, Cfg9346_Lock);
5271 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xF000);
/* CPlusCmd bits cleared on 810x chips (mask body elided in this view). */
5274 #define R810X_CPCMD_QUIRK_MASK (\
/* 8102e variant 1 init (MAC_VER_07): NAK fix, PCIe tweak, Config1/3 setup,
 * LED configuration, then EPHY patches. */
5285 static void rtl_hw_start_8102e_1(struct rtl8169_private *tp)
5287 void __iomem *ioaddr = tp->mmio_addr;
5288 struct pci_dev *pdev = tp->pci_dev;
5289 static const struct ephy_info e_info_8102e_1[] = {
5290 { 0x01, 0, 0x6e65 },
5291 { 0x02, 0, 0x091f },
5292 { 0x03, 0, 0xc2f9 },
5293 { 0x06, 0, 0xafb5 },
5294 { 0x07, 0, 0x0e00 },
5295 { 0x19, 0, 0xec80 },
5296 { 0x01, 0, 0x2e65 },
5301 rtl_csi_access_enable_2(tp);
5303 RTL_W8(DBG_REG, FIX_NAK_1);
5305 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5308 LEDS1 | LEDS0 | Speed_down | MEMMAP | IOMAP | VPD | PMEnable);
5309 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* Both LED bits set is an invalid combination — drop LEDS0. */
5311 cfg1 = RTL_R8(Config1);
5312 if ((cfg1 & LEDS0) && (cfg1 & LEDS1))
5313 RTL_W8(Config1, cfg1 & ~LEDS0);
5315 rtl_ephy_init(tp, e_info_8102e_1, ARRAY_SIZE(e_info_8102e_1));
/* 8102e variant 2 init (MAC_VER_09): PCIe tweak plus Config1/3 setup. */
5318 static void rtl_hw_start_8102e_2(struct rtl8169_private *tp)
5320 void __iomem *ioaddr = tp->mmio_addr;
5321 struct pci_dev *pdev = tp->pci_dev;
5323 rtl_csi_access_enable_2(tp);
5325 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
5327 RTL_W8(Config1, MEMMAP | IOMAP | VPD | PMEnable);
5328 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
/* 8102e variant 3 init (MAC_VER_08): variant 2 plus one EPHY write. */
5331 static void rtl_hw_start_8102e_3(struct rtl8169_private *tp)
5333 rtl_hw_start_8102e_2(tp);
5335 rtl_ephy_write(tp, 0x03, 0xc2f9);
/* 8105e variant 1 init (MAC_VER_29): ASPM workaround, tally-counter tweak,
 * MCU/DLLPR bits, then EPHY patches. */
5338 static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5340 void __iomem *ioaddr = tp->mmio_addr;
5341 static const struct ephy_info e_info_8105e_1[] = {
5342 { 0x07, 0, 0x4000 },
5343 { 0x19, 0, 0x0200 },
5344 { 0x19, 0, 0x0020 },
5345 { 0x1e, 0, 0x2000 },
5346 { 0x03, 0, 0x0001 },
5347 { 0x19, 0, 0x0100 },
5348 { 0x19, 0, 0x0004 },
5352 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5353 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5355 /* Disable Early Tally Counter */
5356 RTL_W32(FuncEvent, RTL_R32(FuncEvent) & ~0x010000);
5358 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5359 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5361 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
/* 8105e variant 2 init (MAC_VER_30): variant 1 plus EPHY 0x1e bit 15. */
5364 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
5366 rtl_hw_start_8105e_1(tp);
5367 rtl_ephy_write(tp, 0x1e, rtl_ephy_read(tp, 0x1e) | 0x8000);
/* 8402 init (MAC_VER_37): ASPM workaround, auto-FIFO, EPHY patch, PCIe
 * tweak, then ERI (EXGMAC) register programming. */
5370 static void rtl_hw_start_8402(struct rtl8169_private *tp)
5372 void __iomem *ioaddr = tp->mmio_addr;
5373 static const struct ephy_info e_info_8402[] = {
5374 { 0x19, 0xffff, 0xff64 },
5378 rtl_csi_access_enable_2(tp);
5380 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5381 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5383 RTL_W32(TxConfig, RTL_R32(TxConfig) | TXCFG_AUTO_FIFO);
5384 RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
5386 rtl_ephy_init(tp, e_info_8402, ARRAY_SIZE(e_info_8402));
5388 rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
5390 rtl_eri_write(tp, 0xc8, ERIAR_MASK_1111, 0x00000002, ERIAR_EXGMAC);
5391 rtl_eri_write(tp, 0xe8, ERIAR_MASK_1111, 0x00000006, ERIAR_EXGMAC);
5392 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x00, 0x01, ERIAR_EXGMAC);
5393 rtl_w1w0_eri(tp, 0xdc, ERIAR_MASK_0001, 0x01, 0x00, ERIAR_EXGMAC);
5394 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5395 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5396 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
/* 8106 init (MAC_VER_39): ASPM workaround plus MISC/MCU/DLLPR setup. */
5399 static void rtl_hw_start_8106(struct rtl8169_private *tp)
5401 void __iomem *ioaddr = tp->mmio_addr;
5403 /* Force LAN exit from ASPM if Rx/Tx are not idle */
5404 RTL_W32(FuncEvent, RTL_R32(FuncEvent) | 0x002800);
5406 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5407 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5408 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
/* Top-level hardware init for the 8101/810x family: per-chip dispatch, then
 * the common register epilogue (sizes, CPlusCmd quirk mask, rings, mode). */
5411 static void rtl_hw_start_8101(struct net_device *dev)
5413 struct rtl8169_private *tp = netdev_priv(dev);
5414 void __iomem *ioaddr = tp->mmio_addr;
5415 struct pci_dev *pdev = tp->pci_dev;
5417 if (tp->mac_version >= RTL_GIGA_MAC_VER_30)
5418 tp->event_slow &= ~RxFIFOOver;
5420 if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
5421 tp->mac_version == RTL_GIGA_MAC_VER_16)
5422 pcie_capability_set_word(pdev, PCI_EXP_DEVCTL,
5423 PCI_EXP_DEVCTL_NOSNOOP_EN);
5425 RTL_W8(Cfg9346, Cfg9346_Unlock);
5427 switch (tp->mac_version) {
5428 case RTL_GIGA_MAC_VER_07:
5429 rtl_hw_start_8102e_1(tp);
5432 case RTL_GIGA_MAC_VER_08:
5433 rtl_hw_start_8102e_3(tp);
5436 case RTL_GIGA_MAC_VER_09:
5437 rtl_hw_start_8102e_2(tp);
5440 case RTL_GIGA_MAC_VER_29:
5441 rtl_hw_start_8105e_1(tp);
5443 case RTL_GIGA_MAC_VER_30:
5444 rtl_hw_start_8105e_2(tp);
5447 case RTL_GIGA_MAC_VER_37:
5448 rtl_hw_start_8402(tp);
5451 case RTL_GIGA_MAC_VER_39:
5452 rtl_hw_start_8106(tp);
5456 RTL_W8(Cfg9346, Cfg9346_Lock);
5458 RTL_W8(MaxTxPacketSize, TxPacketMax);
5460 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
5462 tp->cp_cmd &= ~R810X_CPCMD_QUIRK_MASK;
5463 RTL_W16(CPlusCmd, tp->cp_cmd);
5465 RTL_W16(IntrMitigate, 0x0000);
5467 rtl_set_rx_tx_desc_registers(tp, ioaddr);
5469 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
5470 rtl_set_rx_tx_config_registers(tp);
5474 rtl_set_rx_mode(dev);
5476 RTL_W16(MultiIntr, RTL_R16(MultiIntr) & 0xf000);
/* ndo_change_mtu: validate the new MTU against this chip's jumbo limit and
 * switch the jumbo-frame hardware config accordingly. */
5479 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
5481 struct rtl8169_private *tp = netdev_priv(dev);
5483 if (new_mtu < ETH_ZLEN ||
5484 new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
5487 if (new_mtu > ETH_DATA_LEN)
5488 rtl_hw_jumbo_enable(tp);
5490 rtl_hw_jumbo_disable(tp);
5493 netdev_update_features(dev);
/* Poison an Rx descriptor so the NIC will not use it: bogus DMA address
 * and ownership/size bits cleared. */
5498 static inline void rtl8169_make_unusable_by_asic(struct RxDesc *desc)
5500 desc->addr = cpu_to_le64(0x0badbadbadbadbadull);
5501 desc->opts1 &= ~cpu_to_le32(DescOwn | RsvdMask);
/* Unmap and release one Rx data buffer, then poison its descriptor.
 * (kfree of *data_buff is in lines elided from this view.) */
5504 static void rtl8169_free_rx_databuff(struct rtl8169_private *tp,
5505 void **data_buff, struct RxDesc *desc)
5507 dma_unmap_single(&tp->pci_dev->dev, le64_to_cpu(desc->addr), rx_buf_sz,
5512 rtl8169_make_unusable_by_asic(desc);
/* Hand an Rx descriptor back to the NIC: set DescOwn and the buffer size,
 * preserving the RingEnd marker. */
5515 static inline void rtl8169_mark_to_asic(struct RxDesc *desc, u32 rx_buf_sz)
5517 u32 eor = le32_to_cpu(desc->opts1) & RingEnd;
5519 desc->opts1 = cpu_to_le32(DescOwn | eor | rx_buf_sz);
/* Install a DMA mapping into an Rx descriptor and give it to the NIC. */
5522 static inline void rtl8169_map_to_asic(struct RxDesc *desc, dma_addr_t mapping,
5525 desc->addr = cpu_to_le64(mapping);
5527 rtl8169_mark_to_asic(desc, rx_buf_sz);
/* Round a buffer pointer up to the next 16-byte boundary (NIC alignment
 * requirement for Rx buffers). */
5530 static inline void *rtl8169_align(void *data)
5532 return (void *)ALIGN((long)data, 16);
/* Allocate and DMA-map one Rx data buffer on the device's NUMA node; if the
 * first allocation is not 16-byte aligned, over-allocate by 15 bytes and
 * align manually. Installs the mapping into 'desc'. Returns the buffer, or
 * NULL on allocation/mapping failure (error paths elided in this view). */
5535 static struct sk_buff *rtl8169_alloc_rx_data(struct rtl8169_private *tp,
5536 struct RxDesc *desc)
5540 struct device *d = &tp->pci_dev->dev;
5541 struct net_device *dev = tp->dev;
5542 int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
5544 data = kmalloc_node(rx_buf_sz, GFP_KERNEL, node);
5548 if (rtl8169_align(data) != data) {
5550 data = kmalloc_node(rx_buf_sz + 15, GFP_KERNEL, node);
5555 mapping = dma_map_single(d, rtl8169_align(data), rx_buf_sz,
5557 if (unlikely(dma_mapping_error(d, mapping))) {
5558 if (net_ratelimit())
5559 netif_err(tp, drv, tp->dev, "Failed to map RX DMA!\n");
5563 rtl8169_map_to_asic(desc, mapping, rx_buf_sz);
/* Free every allocated Rx buffer in the ring. */
5571 static void rtl8169_rx_clear(struct rtl8169_private *tp)
5575 for (i = 0; i < NUM_RX_DESC; i++) {
5576 if (tp->Rx_databuff[i]) {
5577 rtl8169_free_rx_databuff(tp, tp->Rx_databuff + i,
5578 tp->RxDescArray + i);
/* Set the RingEnd bit so the NIC wraps back to the first descriptor. */
5583 static inline void rtl8169_mark_as_last_descriptor(struct RxDesc *desc)
5585 desc->opts1 |= cpu_to_le32(RingEnd);
/* Populate every empty Rx ring slot with a fresh buffer; on allocation
 * failure, tear down what was filled (error path partially elided here). */
5588 static int rtl8169_rx_fill(struct rtl8169_private *tp)
5592 for (i = 0; i < NUM_RX_DESC; i++) {
5595 if (tp->Rx_databuff[i])
5598 data = rtl8169_alloc_rx_data(tp, tp->RxDescArray + i);
5600 rtl8169_make_unusable_by_asic(tp->RxDescArray + i);
5603 tp->Rx_databuff[i] = data;
5606 rtl8169_mark_as_last_descriptor(tp->RxDescArray + NUM_RX_DESC - 1);
5610 rtl8169_rx_clear(tp);
/* Reset ring indexes, zero the Tx/Rx bookkeeping arrays, and fill the Rx
 * ring; returns rtl8169_rx_fill()'s status. */
5614 static int rtl8169_init_ring(struct net_device *dev)
5616 struct rtl8169_private *tp = netdev_priv(dev);
5618 rtl8169_init_ring_indexes(tp);
5620 memset(tp->tx_skb, 0x0, NUM_TX_DESC * sizeof(struct ring_info));
5621 memset(tp->Rx_databuff, 0x0, NUM_RX_DESC * sizeof(void *));
5623 return rtl8169_rx_fill(tp);
/* Unmap one Tx buffer's DMA mapping (descriptor cleanup elided here). */
5626 static void rtl8169_unmap_tx_skb(struct device *d, struct ring_info *tx_skb,
5627 struct TxDesc *desc)
5629 unsigned int len = tx_skb->len;
5631 dma_unmap_single(d, le64_to_cpu(desc->addr), len, DMA_TO_DEVICE);
/* Release n Tx ring entries starting at 'start' (modulo ring size):
 * unmap each mapped buffer and count dropped packets. */
5639 static void rtl8169_tx_clear_range(struct rtl8169_private *tp, u32 start,
5644 for (i = 0; i < n; i++) {
5645 unsigned int entry = (start + i) % NUM_TX_DESC;
5646 struct ring_info *tx_skb = tp->tx_skb + entry;
5647 unsigned int len = tx_skb->len;
5650 struct sk_buff *skb = tx_skb->skb;
5652 rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5653 tp->TxDescArray + entry);
5655 tp->dev->stats.tx_dropped++;
/* Drop everything still queued in the Tx ring and reset its indexes. */
5663 static void rtl8169_tx_clear(struct rtl8169_private *tp)
5665 rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
5666 tp->cur_tx = tp->dirty_tx = 0;
/* Worker: full device reset — quiesce NAPI and the queue, reset the chip,
 * recycle the Rx ring in place, drop the Tx ring, then restart. */
5669 static void rtl_reset_work(struct rtl8169_private *tp)
5671 struct net_device *dev = tp->dev;
5674 napi_disable(&tp->napi);
5675 netif_stop_queue(dev);
/* Wait for in-flight softirq/NAPI work before touching the rings. */
5676 synchronize_sched();
5678 rtl8169_hw_reset(tp);
5680 for (i = 0; i < NUM_RX_DESC; i++)
5681 rtl8169_mark_to_asic(tp->RxDescArray + i, rx_buf_sz);
5683 rtl8169_tx_clear(tp);
5684 rtl8169_init_ring_indexes(tp);
5686 napi_enable(&tp->napi);
5688 netif_wake_queue(dev);
5689 rtl8169_check_link_status(dev, tp, tp->mmio_addr);
/* ndo_tx_timeout: schedule a device reset from process context. */
5692 static void rtl8169_tx_timeout(struct net_device *dev)
5694 struct rtl8169_private *tp = netdev_priv(dev);
5696 rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/* Map an skb's paged fragments into consecutive Tx descriptors. Returns the
 * number of fragments mapped, or unwinds already-mapped ones on DMA error
 * (return statements elided in this view). The skb pointer is stored only
 * on the last fragment's entry, which also gets LastFrag. */
5699 static int rtl8169_xmit_frags(struct rtl8169_private *tp, struct sk_buff *skb,
5702 struct skb_shared_info *info = skb_shinfo(skb);
5703 unsigned int cur_frag, entry;
5704 struct TxDesc * uninitialized_var(txd);
5705 struct device *d = &tp->pci_dev->dev;
5708 for (cur_frag = 0; cur_frag < info->nr_frags; cur_frag++) {
5709 const skb_frag_t *frag = info->frags + cur_frag;
5714 entry = (entry + 1) % NUM_TX_DESC;
5716 txd = tp->TxDescArray + entry;
5717 len = skb_frag_size(frag);
5718 addr = skb_frag_address(frag);
5719 mapping = dma_map_single(d, addr, len, DMA_TO_DEVICE);
5720 if (unlikely(dma_mapping_error(d, mapping))) {
5721 if (net_ratelimit())
5722 netif_err(tp, drv, tp->dev,
5723 "Failed to map TX fragments DMA!\n");
5727 /* Anti gcc 2.95.3 bugware (sic) */
5728 status = opts[0] | len |
5729 (RingEnd * !((entry + 1) % NUM_TX_DESC));
5731 txd->opts1 = cpu_to_le32(status);
5732 txd->opts2 = cpu_to_le32(opts[1]);
5733 txd->addr = cpu_to_le64(mapping);
5735 tp->tx_skb[entry].len = len;
5739 tp->tx_skb[entry].skb = skb;
5740 txd->opts1 |= cpu_to_le32(LastFrag);
/* Error path: unmap the fragments queued so far. */
5746 rtl8169_tx_clear_range(tp, tp->cur_tx + 1, cur_frag);
/* Fill the TSO/checksum-offload bits into the descriptor opts word chosen
 * by this chip's Tx descriptor version: MSS for GSO packets, else TCP/UDP
 * checksum flags for CHECKSUM_PARTIAL (IPv4 header assumed here). */
5750 static inline void rtl8169_tso_csum(struct rtl8169_private *tp,
5751 struct sk_buff *skb, u32 *opts)
5753 const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
5754 u32 mss = skb_shinfo(skb)->gso_size;
5755 int offset = info->opts_offset;
5759 opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
5760 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
5761 const struct iphdr *ip = ip_hdr(skb);
5763 if (ip->protocol == IPPROTO_TCP)
5764 opts[offset] |= info->checksum.tcp;
5765 else if (ip->protocol == IPPROTO_UDP)
5766 opts[offset] |= info->checksum.udp;
/* ndo_start_xmit: map the skb head and fragments into the Tx ring, publish
 * the first descriptor last (DescOwn ordering), kick the chip via TxPoll,
 * and stop the queue when the ring cannot hold a max-fragment packet.
 * Error labels and memory barriers are elided in this view. */
5772 static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
5773 struct net_device *dev)
5775 struct rtl8169_private *tp = netdev_priv(dev);
5776 unsigned int entry = tp->cur_tx % NUM_TX_DESC;
5777 struct TxDesc *txd = tp->TxDescArray + entry;
5778 void __iomem *ioaddr = tp->mmio_addr;
5779 struct device *d = &tp->pci_dev->dev;
5785 if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
5786 netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
5790 if (unlikely(le32_to_cpu(txd->opts1) & DescOwn))
5793 len = skb_headlen(skb);
5794 mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
5795 if (unlikely(dma_mapping_error(d, mapping))) {
5796 if (net_ratelimit())
5797 netif_err(tp, drv, dev, "Failed to map TX DMA!\n");
5801 tp->tx_skb[entry].len = len;
5802 txd->addr = cpu_to_le64(mapping);
5804 opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
5807 rtl8169_tso_csum(tp, skb, opts);
5809 frags = rtl8169_xmit_frags(tp, skb, opts);
/* Single-fragment packet: head descriptor is both first and last. */
5813 opts[0] |= FirstFrag;
5815 opts[0] |= FirstFrag | LastFrag;
5816 tp->tx_skb[entry].skb = skb;
5819 txd->opts2 = cpu_to_le32(opts[1]);
5821 skb_tx_timestamp(skb);
5825 /* Anti gcc 2.95.3 bugware (sic) */
5826 status = opts[0] | len | (RingEnd * !((entry + 1) % NUM_TX_DESC));
5827 txd->opts1 = cpu_to_le32(status);
5829 tp->cur_tx += frags + 1;
5833 RTL_W8(TxPoll, NPQ);
5837 if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5838 /* Avoid wrongly optimistic queue wake-up: rtl_tx thread must
5839 * not miss a ring update when it notices a stopped queue.
5842 netif_stop_queue(dev);
5843 /* Sync with rtl_tx:
5844 * - publish queue status and cur_tx ring index (write barrier)
5845 * - refresh dirty_tx ring index (read barrier).
5846 * May the current thread have a pessimistic view of the ring
5847 * status and forget to wake up queue, a racing rtl_tx thread
5851 if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
5852 netif_wake_queue(dev);
5855 return NETDEV_TX_OK;
/* Error exits (labels elided): unmap head, drop, or report ring-full. */
5858 rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
5861 dev->stats.tx_dropped++;
5862 return NETDEV_TX_OK;
5865 netif_stop_queue(dev);
5866 dev->stats.tx_dropped++;
5867 return NETDEV_TX_BUSY;
/*
 * rtl8169_pcierr_interrupt - recover from a PCI bus error (SYSErr).
 *
 * Logs the PCI command/status registers, rewrites PCI_COMMAND, clears
 * the sticky error bits in PCI_STATUS, optionally disables 64-bit DAC
 * addressing, then resets the chip and schedules a reset task.
 */
5870 static void rtl8169_pcierr_interrupt(struct net_device *dev)
5872 	struct rtl8169_private *tp = netdev_priv(dev);
5873 	struct pci_dev *pdev = tp->pci_dev;
5874 	u16 pci_status, pci_cmd;
5876 	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
5877 	pci_read_config_word(pdev, PCI_STATUS, &pci_status);
5879 	netif_err(tp, intr, dev, "PCI error (cmd = 0x%04x, status = 0x%04x)\n",
5880 		  pci_cmd, pci_status);
5883 	 * The recovery sequence below admits a very elaborated explanation:
5884 	 * - it seems to work;
5885 	 * - I did not see what else could be done;
5886 	 * - it makes iop3xx happy.
5888 	 * Feel free to adjust to your needs.
	/* On parity-broken platforms disable parity checking entirely,
	 * otherwise (re)enable SERR + parity reporting. */
5890 	if (pdev->broken_parity_status)
5891 		pci_cmd &= ~PCI_COMMAND_PARITY;
5893 		pci_cmd |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY;
5895 	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
	/* PCI_STATUS error bits are write-one-to-clear. */
5897 	pci_write_config_word(pdev, PCI_STATUS,
5898 		pci_status & (PCI_STATUS_DETECTED_PARITY |
5899 		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
5900 		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
5902 	/* The infamous DAC f*ckup only happens at boot time */
5903 	if ((tp->cp_cmd & PCIDAC) && !tp->cur_rx) {
5904 		void __iomem *ioaddr = tp->mmio_addr;
5906 		netif_info(tp, intr, dev, "disabling PCI DAC\n");
5907 		tp->cp_cmd &= ~PCIDAC;
5908 		RTL_W16(CPlusCmd, tp->cp_cmd);
5909 		dev->features &= ~NETIF_F_HIGHDMA;
5912 	rtl8169_hw_reset(tp);
	/* Defer the full ring/chip re-init to process context. */
5914 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * rtl_tx - reclaim completed Tx descriptors (NAPI context).
 *
 * Walks the ring from dirty_tx to cur_tx, unmaps finished buffers,
 * accounts stats on the last fragment of each packet, and wakes the
 * queue if it was stopped.  NOTE(review): listing is elided; loop
 * increments and closing braces are not visible here.
 */
5917 static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
5919 	unsigned int dirty_tx, tx_left;
5921 	dirty_tx = tp->dirty_tx;
5923 	tx_left = tp->cur_tx - dirty_tx;
5925 	while (tx_left > 0) {
5926 		unsigned int entry = dirty_tx % NUM_TX_DESC;
5927 		struct ring_info *tx_skb = tp->tx_skb + entry;
5931 		status = le32_to_cpu(tp->TxDescArray[entry].opts1);
		/* Still owned by the NIC: stop reclaiming here. */
5932 		if (status & DescOwn)
5935 		rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
5936 				     tp->TxDescArray + entry);
		/* Only the final fragment carries the skb; account and free. */
5937 		if (status & LastFrag) {
5938 			u64_stats_update_begin(&tp->tx_stats.syncp);
5939 			tp->tx_stats.packets++;
5940 			tp->tx_stats.bytes += tx_skb->skb->len;
5941 			u64_stats_update_end(&tp->tx_stats.syncp);
5942 			dev_kfree_skb(tx_skb->skb);
5949 	if (tp->dirty_tx != dirty_tx) {
5950 		tp->dirty_tx = dirty_tx;
5951 		/* Sync with rtl8169_start_xmit:
5952 		 * - publish dirty_tx ring index (write barrier)
5953 		 * - refresh cur_tx ring index and queue status (read barrier)
5954 		 * May the current thread miss the stopped queue condition,
5955 		 * a racing xmit thread can only have a right view of the
5959 		if (netif_queue_stopped(dev) &&
5960 		    TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
5961 			netif_wake_queue(dev);
5964 		 * 8168 hack: TxPoll requests are lost when the Tx packets are
5965 		 * too close. Let's kick an extra TxPoll request when a burst
5966 		 * of start_xmit activity is detected (if it is not detected,
5967 		 * it is slow enough). -- FR
5969 		if (tp->cur_tx != dirty_tx) {
5970 			void __iomem *ioaddr = tp->mmio_addr;
5972 			RTL_W8(TxPoll, NPQ);
/*
 * rtl8169_fragmented_frame - non-zero when the Rx descriptor status does
 * not carry both FirstFrag and LastFrag, i.e. the frame spans several
 * descriptors (unsupported; treated as an over-MTU symptom by rtl_rx()).
 */
5977 static inline int rtl8169_fragmented_frame(u32 status)
5979 	return (status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag);
/*
 * rtl8169_rx_csum - propagate the hardware Rx checksum verdict to @skb.
 * Marks CHECKSUM_UNNECESSARY only when the chip recognized the protocol
 * (TCP/UDP) and did not flag a checksum failure; otherwise leaves the
 * skb for software checksumming.
 */
5982 static inline void rtl8169_rx_csum(struct sk_buff *skb, u32 opts1)
5984 	u32 status = opts1 & RxProtoMask;
5986 	if (((status == RxProtoTCP) && !(opts1 & TCPFail)) ||
5987 	    ((status == RxProtoUDP) && !(opts1 & UDPFail)))
5988 		skb->ip_summed = CHECKSUM_UNNECESSARY;
5990 		skb_checksum_none_assert(skb);
/*
 * rtl8169_try_rx_copy - allocate a fresh skb and copy a received packet
 * out of the (still-mapped) Rx buffer, so the DMA buffer can be reused.
 * Syncs the buffer for CPU before the copy and back to the device after.
 * NOTE(review): listing is elided; the allocation-failure check and the
 * return statement are not visible here.
 */
5993 static struct sk_buff *rtl8169_try_rx_copy(void *data,
5994 					   struct rtl8169_private *tp,
5998 	struct sk_buff *skb;
5999 	struct device *d = &tp->pci_dev->dev;
6001 	data = rtl8169_align(data);
6002 	dma_sync_single_for_cpu(d, addr, pkt_size, DMA_FROM_DEVICE);
6004 	skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
6006 		memcpy(skb->data, data, pkt_size);
6007 	dma_sync_single_for_device(d, addr, pkt_size, DMA_FROM_DEVICE);
/*
 * rtl_rx - receive path (NAPI context), bounded by @budget.
 *
 * Walks the Rx ring, accounts error descriptors, copies good frames into
 * fresh skbs and hands them to GRO, then returns ownership of each
 * descriptor to the NIC.  Returns the number of packets processed
 * (derived from the cur_rx advance).  NOTE(review): listing is elided;
 * some branches and closing braces are not visible here.
 */
6012 static int rtl_rx(struct net_device *dev, struct rtl8169_private *tp, u32 budget)
6014 	unsigned int cur_rx, rx_left;
6017 	cur_rx = tp->cur_rx;
6019 	for (rx_left = min(budget, NUM_RX_DESC); rx_left > 0; rx_left--, cur_rx++) {
6020 		unsigned int entry = cur_rx % NUM_RX_DESC;
6021 		struct RxDesc *desc = tp->RxDescArray + entry;
		/* opts1_mask hides overflow bits on chips that misreport them. */
6025 		status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
		/* Descriptor still owned by the NIC: nothing more to reap. */
6027 		if (status & DescOwn)
		/* Hardware-flagged receive error: classify and count it. */
6029 		if (unlikely(status & RxRES)) {
6030 			netif_info(tp, rx_err, dev, "Rx ERROR. status = %08x\n",
6032 			dev->stats.rx_errors++;
6033 			if (status & (RxRWT | RxRUNT))
6034 				dev->stats.rx_length_errors++;
6036 				dev->stats.rx_crc_errors++;
			/* FIFO overflow needs a full chip reset to recover. */
6037 			if (status & RxFOVF) {
6038 				rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
6039 				dev->stats.rx_fifo_errors++;
			/* With RXALL, still deliver runt/CRC-damaged frames. */
6041 			if ((status & (RxRUNT | RxCRC)) &&
6042 			    !(status & (RxRWT | RxFOVF)) &&
6043 			    (dev->features & NETIF_F_RXALL))
6046 			struct sk_buff *skb;
6051 			addr = le64_to_cpu(desc->addr);
			/* Strip the 4-byte FCS unless the user asked to keep it. */
6052 			if (likely(!(dev->features & NETIF_F_RXFCS)))
6053 				pkt_size = (status & 0x00003fff) - 4;
6055 				pkt_size = status & 0x00003fff;
6058 			 * The driver does not support incoming fragmented
6059 			 * frames. They are seen as a symptom of over-mtu
6062 			if (unlikely(rtl8169_fragmented_frame(status))) {
6063 				dev->stats.rx_dropped++;
6064 				dev->stats.rx_length_errors++;
6065 				goto release_descriptor;
6068 			skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry],
6069 						  tp, pkt_size, addr);
6071 				dev->stats.rx_dropped++;
6072 				goto release_descriptor;
6075 			rtl8169_rx_csum(skb, status);
6076 			skb_put(skb, pkt_size);
6077 			skb->protocol = eth_type_trans(skb, dev);
6079 			rtl8169_rx_vlan_tag(desc, skb);
6081 			napi_gro_receive(&tp->napi, skb);
6083 			u64_stats_update_begin(&tp->rx_stats.syncp);
6084 			tp->rx_stats.packets++;
6085 			tp->rx_stats.bytes += pkt_size;
6086 			u64_stats_update_end(&tp->rx_stats.syncp);
		/* Give the descriptor back to the NIC for reuse. */
6091 		rtl8169_mark_to_asic(desc, rx_buf_sz);
6094 	count = cur_rx - tp->cur_rx;
6095 	tp->cur_rx = cur_rx;
/*
 * rtl8169_interrupt - hard IRQ handler.
 *
 * Minimal work in interrupt context: if any interesting event (NAPI or
 * slow) is pending, mask chip interrupts and schedule NAPI; everything
 * else is handled in rtl8169_poll().  0xffff means the chip is gone
 * (e.g. hot-unplugged) and is ignored.
 */
6100 static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
6102 	struct net_device *dev = dev_instance;
6103 	struct rtl8169_private *tp = netdev_priv(dev);
6107 	status = rtl_get_events(tp);
6108 	if (status && status != 0xffff) {
6109 		status &= RTL_EVENT_NAPI | tp->event_slow;
6113 			rtl_irq_disable(tp);
6114 			napi_schedule(&tp->napi);
6117 	return IRQ_RETVAL(handled);
6121  * Workqueue context.
/*
 * rtl_slow_event_work - handle rare/slow interrupt events out of the
 * hot path: Rx FIFO overflow workarounds, PCI system errors, and link
 * changes.  Re-enables the full interrupt mask when done.
 */
6123 static void rtl_slow_event_work(struct rtl8169_private *tp)
6125 	struct net_device *dev = tp->dev;
6128 	status = rtl_get_events(tp) & tp->event_slow;
6129 	rtl_ack_events(tp, status);
6131 	if (unlikely(status & RxFIFOOver)) {
6132 		switch (tp->mac_version) {
6133 		/* Work around for rx fifo overflow */
6134 		case RTL_GIGA_MAC_VER_11:
6135 			netif_stop_queue(dev);
6136 			/* XXX - Hack alert. See rtl_task(). */
6137 			set_bit(RTL_FLAG_TASK_RESET_PENDING, tp->wk.flags);
6143 	if (unlikely(status & SYSErr))
6144 		rtl8169_pcierr_interrupt(dev);
6146 	if (status & LinkChg)
6147 		__rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
6149 	rtl_irq_enable_all(tp);
/*
 * rtl_task - single workqueue entry point dispatching pending driver
 * work items (slow events, reset, PHY) by testing-and-clearing their
 * flag bits in table order.  Bails out if the interface is not running
 * or the task machinery has been disabled (close/suspend paths).
 */
6152 static void rtl_task(struct work_struct *work)
6154 	static const struct {
6156 		void (*action)(struct rtl8169_private *);
6158 		/* XXX - keep rtl_slow_event_work() as first element. */
6159 		{ RTL_FLAG_TASK_SLOW_PENDING,	rtl_slow_event_work },
6160 		{ RTL_FLAG_TASK_RESET_PENDING,	rtl_reset_work },
6161 		{ RTL_FLAG_TASK_PHY_PENDING,	rtl_phy_work }
6163 	struct rtl8169_private *tp =
6164 		container_of(work, struct rtl8169_private, wk.work);
6165 	struct net_device *dev = tp->dev;
6170 	if (!netif_running(dev) ||
6171 	    !test_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags))
6174 	for (i = 0; i < ARRAY_SIZE(rtl_work); i++) {
		/* Atomically consume the pending bit before acting on it. */
6177 		pending = test_and_clear_bit(rtl_work[i].bitnr, tp->wk.flags);
6179 			rtl_work[i].action(tp);
6183 	rtl_unlock_work(tp);
/*
 * rtl8169_poll - NAPI poll callback: run Rx up to @budget, reap Tx, and
 * punt slow events to the workqueue.  Slow events are acked by their
 * handler, so they are masked out of both the ack and the re-enable
 * mask here.  Interrupts are re-enabled only when the budget was not
 * exhausted (napi_complete path).
 */
6186 static int rtl8169_poll(struct napi_struct *napi, int budget)
6188 	struct rtl8169_private *tp = container_of(napi, struct rtl8169_private, napi);
6189 	struct net_device *dev = tp->dev;
6190 	u16 enable_mask = RTL_EVENT_NAPI | tp->event_slow;
6194 	status = rtl_get_events(tp);
6195 	rtl_ack_events(tp, status & ~tp->event_slow);
6197 	if (status & RTL_EVENT_NAPI_RX)
6198 		work_done = rtl_rx(dev, tp, (u32) budget);
6200 	if (status & RTL_EVENT_NAPI_TX)
6203 	if (status & tp->event_slow) {
		/* Keep slow sources masked until the work item handles them. */
6204 		enable_mask &= ~tp->event_slow;
6206 		rtl_schedule_task(tp, RTL_FLAG_TASK_SLOW_PENDING);
6209 	if (work_done < budget) {
6210 		napi_complete(napi);
6212 		rtl_irq_enable(tp, enable_mask);
/*
 * rtl8169_rx_missed - fold the chip's RxMissed counter into netdev
 * stats and clear it.  Only meaningful on early chips (<= VER_06);
 * later versions bail out.
 */
6219 static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
6221 	struct rtl8169_private *tp = netdev_priv(dev);
6223 	if (tp->mac_version > RTL_GIGA_MAC_VER_06)
	/* Counter is 24 bits wide and resets on write. */
6226 	dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
6227 	RTL_W32(RxMissed, 0);
/*
 * rtl8169_down - stop the device: kill the PHY timer, disable NAPI and
 * the Tx queue, reset the chip, then tear down both rings once no
 * context can touch them anymore.
 */
6230 static void rtl8169_down(struct net_device *dev)
6232 	struct rtl8169_private *tp = netdev_priv(dev);
6233 	void __iomem *ioaddr = tp->mmio_addr;
6235 	del_timer_sync(&tp->timer);
6237 	napi_disable(&tp->napi);
6238 	netif_stop_queue(dev);
6240 	rtl8169_hw_reset(tp);
6242 	 * At this point device interrupts can not be enabled in any function,
6243 	 * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task)
6244 	 * and napi is disabled (rtl8169_poll).
	/* Harvest the final RxMissed count before the registers go away. */
6246 	rtl8169_rx_missed(dev, ioaddr);
6248 	/* Give a racing hard_start_xmit a few cycles to complete. */
6249 	synchronize_sched();
6251 	rtl8169_tx_clear(tp);
6253 	rtl8169_rx_clear(tp);
6255 	rtl_pll_power_down(tp);
/*
 * rtl8169_close - ndo_stop handler: update HW counters, disable the
 * work machinery, free the IRQ and the DMA-coherent descriptor rings.
 * Wrapped in a runtime-PM get/put so the device is awake throughout.
 * NOTE(review): listing is elided; the rtl8169_down() call and lock
 * acquisition are not visible here.
 */
6258 static int rtl8169_close(struct net_device *dev)
6260 	struct rtl8169_private *tp = netdev_priv(dev);
6261 	struct pci_dev *pdev = tp->pci_dev;
6263 	pm_runtime_get_sync(&pdev->dev);
6265 	/* Update counters before going down */
6266 	rtl8169_update_counters(dev);
6269 	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6272 	rtl_unlock_work(tp);
6274 	free_irq(pdev->irq, dev);
6276 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6278 	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
	/* NULL ring pointers double as "device torn down" markers for the
	 * runtime-PM callbacks. */
6280 	tp->TxDescArray = NULL;
6281 	tp->RxDescArray = NULL;
6283 	pm_runtime_put_sync(&pdev->dev);
6288 #ifdef CONFIG_NET_POLL_CONTROLLER
/*
 * rtl8169_netpoll - netconsole/netpoll hook: invoke the interrupt
 * handler directly since real IRQs may be disabled.
 */
6289 static void rtl8169_netpoll(struct net_device *dev)
6291 	struct rtl8169_private *tp = netdev_priv(dev);
6293 	rtl8169_interrupt(tp->pci_dev->irq, dev);
/*
 * rtl_open - ndo_open handler: allocate the Tx/Rx descriptor rings,
 * initialize them, load firmware, request the IRQ, bring up PHY/PLL and
 * start the queue.  Error paths unwind in reverse order via labels.
 * NOTE(review): listing is elided; some gotos/labels and the hw_start
 * call are not visible here.
 */
6297 static int rtl_open(struct net_device *dev)
6299 	struct rtl8169_private *tp = netdev_priv(dev);
6300 	void __iomem *ioaddr = tp->mmio_addr;
6301 	struct pci_dev *pdev = tp->pci_dev;
6302 	int retval = -ENOMEM;
6304 	pm_runtime_get_sync(&pdev->dev);
6307 	 * Rx and Tx descriptors needs 256 bytes alignment.
6308 	 * dma_alloc_coherent provides more.
6310 	tp->TxDescArray = dma_alloc_coherent(&pdev->dev, R8169_TX_RING_BYTES,
6311 					     &tp->TxPhyAddr, GFP_KERNEL);
6312 	if (!tp->TxDescArray)
6313 		goto err_pm_runtime_put;
6315 	tp->RxDescArray = dma_alloc_coherent(&pdev->dev, R8169_RX_RING_BYTES,
6316 					     &tp->RxPhyAddr, GFP_KERNEL);
6317 	if (!tp->RxDescArray)
6320 	retval = rtl8169_init_ring(dev);
6324 	INIT_WORK(&tp->wk.work, rtl_task);
	/* Best-effort firmware load; driver works without it. */
6328 	rtl_request_firmware(tp);
	/* IRQ is shared unless MSI was successfully enabled at probe. */
6330 	retval = request_irq(pdev->irq, rtl8169_interrupt,
6331 			     (tp->features & RTL_FEATURE_MSI) ? 0 : IRQF_SHARED,
6334 		goto err_release_fw_2;
6338 	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6340 	napi_enable(&tp->napi);
6342 	rtl8169_init_phy(dev, tp);
6344 	__rtl8169_set_features(dev, dev->features);
6346 	rtl_pll_power_up(tp);
6350 	netif_start_queue(dev);
6352 	rtl_unlock_work(tp);
6354 	tp->saved_wolopts = 0;
6355 	pm_runtime_put_noidle(&pdev->dev);
6357 	rtl8169_check_link_status(dev, tp, ioaddr);
	/* ---- error unwind: firmware, rings, runtime-PM reference ---- */
6362 	rtl_release_firmware(tp);
6363 	rtl8169_rx_clear(tp);
6365 	dma_free_coherent(&pdev->dev, R8169_RX_RING_BYTES, tp->RxDescArray,
6367 	tp->RxDescArray = NULL;
6369 	dma_free_coherent(&pdev->dev, R8169_TX_RING_BYTES, tp->TxDescArray,
6371 	tp->TxDescArray = NULL;
6373 	pm_runtime_put_noidle(&pdev->dev);
/*
 * rtl8169_get_stats64 - ndo_get_stats64 handler.
 *
 * Reads the u64 packet/byte counters under their seqcount retry loops
 * (consistent snapshots on 32-bit SMP), copies the plain error counters
 * from dev->stats, and refreshes rx_missed from hardware first when the
 * interface is up.
 */
6377 static struct rtnl_link_stats64 *
6378 rtl8169_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6380 	struct rtl8169_private *tp = netdev_priv(dev);
6381 	void __iomem *ioaddr = tp->mmio_addr;
6384 	if (netif_running(dev))
6385 		rtl8169_rx_missed(dev, ioaddr);
6388 		start = u64_stats_fetch_begin_bh(&tp->rx_stats.syncp);
6389 		stats->rx_packets = tp->rx_stats.packets;
6390 		stats->rx_bytes	= tp->rx_stats.bytes;
6391 	} while (u64_stats_fetch_retry_bh(&tp->rx_stats.syncp, start));
6395 		start = u64_stats_fetch_begin_bh(&tp->tx_stats.syncp);
6396 		stats->tx_packets = tp->tx_stats.packets;
6397 		stats->tx_bytes	= tp->tx_stats.bytes;
6398 	} while (u64_stats_fetch_retry_bh(&tp->tx_stats.syncp, start));
6400 	stats->rx_dropped	= dev->stats.rx_dropped;
6401 	stats->tx_dropped	= dev->stats.tx_dropped;
6402 	stats->rx_length_errors = dev->stats.rx_length_errors;
6403 	stats->rx_errors	= dev->stats.rx_errors;
6404 	stats->rx_crc_errors	= dev->stats.rx_crc_errors;
6405 	stats->rx_fifo_errors	= dev->stats.rx_fifo_errors;
6406 	stats->rx_missed_errors = dev->stats.rx_missed_errors;
/*
 * rtl8169_net_suspend - common quiesce path for system and runtime
 * suspend and for shutdown: detach the netdev, stop the queue and NAPI,
 * disable the task machinery and power down the PLL.  No-op when the
 * interface is not running.
 */
6411 static void rtl8169_net_suspend(struct net_device *dev)
6413 	struct rtl8169_private *tp = netdev_priv(dev);
6415 	if (!netif_running(dev))
6418 	netif_device_detach(dev);
6419 	netif_stop_queue(dev);
6422 	napi_disable(&tp->napi);
6423 	clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6424 	rtl_unlock_work(tp);
6426 	rtl_pll_power_down(tp);
/*
 * rtl8169_suspend - dev_pm_ops .suspend/.freeze/.poweroff callback;
 * delegates to the common quiesce helper.
 */
6431 static int rtl8169_suspend(struct device *device)
6433 	struct pci_dev *pdev = to_pci_dev(device);
6434 	struct net_device *dev = pci_get_drvdata(pdev);
6436 	rtl8169_net_suspend(dev);
/*
 * __rtl8169_resume - common resume tail: reattach the netdev, power the
 * PLL back up, re-enable NAPI and the task machinery, and schedule a
 * reset to re-initialize the chip.
 */
6441 static void __rtl8169_resume(struct net_device *dev)
6443 	struct rtl8169_private *tp = netdev_priv(dev);
6445 	netif_device_attach(dev);
6447 	rtl_pll_power_up(tp);
6450 	napi_enable(&tp->napi);
6451 	set_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
6452 	rtl_unlock_work(tp);
6454 	rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
/*
 * rtl8169_resume - dev_pm_ops .resume/.thaw/.restore callback:
 * re-initialize the PHY and, if the interface was up, run the common
 * resume tail.
 */
6457 static int rtl8169_resume(struct device *device)
6459 	struct pci_dev *pdev = to_pci_dev(device);
6460 	struct net_device *dev = pci_get_drvdata(pdev);
6461 	struct rtl8169_private *tp = netdev_priv(dev);
6463 	rtl8169_init_phy(dev, tp);
6465 	if (netif_running(dev))
6466 		__rtl8169_resume(dev);
/*
 * rtl8169_runtime_suspend - runtime-PM suspend: save the current WoL
 * options, arm wake-on-any so link events can wake us, then quiesce.
 * A NULL TxDescArray means the interface is closed, so there is nothing
 * to do.
 */
6471 static int rtl8169_runtime_suspend(struct device *device)
6473 	struct pci_dev *pdev = to_pci_dev(device);
6474 	struct net_device *dev = pci_get_drvdata(pdev);
6475 	struct rtl8169_private *tp = netdev_priv(dev);
6477 	if (!tp->TxDescArray)
6481 	tp->saved_wolopts = __rtl8169_get_wol(tp);
6482 	__rtl8169_set_wol(tp, WAKE_ANY);
6483 	rtl_unlock_work(tp);
6485 	rtl8169_net_suspend(dev);
/*
 * rtl8169_runtime_resume - runtime-PM resume: restore the WoL options
 * saved at runtime-suspend time, re-init the PHY and run the common
 * resume tail.  No-op when the interface is closed.
 */
6490 static int rtl8169_runtime_resume(struct device *device)
6492 	struct pci_dev *pdev = to_pci_dev(device);
6493 	struct net_device *dev = pci_get_drvdata(pdev);
6494 	struct rtl8169_private *tp = netdev_priv(dev);
6496 	if (!tp->TxDescArray)
6500 	__rtl8169_set_wol(tp, tp->saved_wolopts);
6501 	tp->saved_wolopts = 0;
6502 	rtl_unlock_work(tp);
6504 	rtl8169_init_phy(dev, tp);
6506 	__rtl8169_resume(dev);
/*
 * rtl8169_runtime_idle - runtime-PM idle check: refuse auto-suspend
 * (-EBUSY) while the interface is open (rings allocated).
 */
6511 static int rtl8169_runtime_idle(struct device *device)
6513 	struct pci_dev *pdev = to_pci_dev(device);
6514 	struct net_device *dev = pci_get_drvdata(pdev);
6515 	struct rtl8169_private *tp = netdev_priv(dev);
6517 	return tp->TxDescArray ? -EBUSY : 0;
/*
 * Power-management operations table.  System sleep entry points all map
 * onto the same suspend/resume pair; runtime PM has dedicated handlers.
 * RTL8169_PM_OPS is NULL when CONFIG_PM is off so the pci_driver field
 * can be assigned unconditionally.
 */
6520 static const struct dev_pm_ops rtl8169_pm_ops = {
6521 	.suspend		= rtl8169_suspend,
6522 	.resume			= rtl8169_resume,
6523 	.freeze			= rtl8169_suspend,
6524 	.thaw			= rtl8169_resume,
6525 	.poweroff		= rtl8169_suspend,
6526 	.restore		= rtl8169_resume,
6527 	.runtime_suspend	= rtl8169_runtime_suspend,
6528 	.runtime_resume		= rtl8169_runtime_resume,
6529 	.runtime_idle		= rtl8169_runtime_idle,
6532 #define RTL8169_PM_OPS	(&rtl8169_pm_ops)
6534 #else /* !CONFIG_PM */
6536 #define RTL8169_PM_OPS	NULL
6538 #endif /* !CONFIG_PM */
/*
 * rtl_wol_shutdown_quirk - 8168b-family shutdown workaround: WoL only
 * works if the receiver stays enabled, so clear bus mastering and leave
 * only CmdRxEnb set before powering off.
 */
6540 static void rtl_wol_shutdown_quirk(struct rtl8169_private *tp)
6542 	void __iomem *ioaddr = tp->mmio_addr;
6544 	/* WoL fails with 8168b when the receiver is disabled. */
6545 	switch (tp->mac_version) {
6546 	case RTL_GIGA_MAC_VER_11:
6547 	case RTL_GIGA_MAC_VER_12:
6548 	case RTL_GIGA_MAC_VER_17:
6549 		pci_clear_master(tp->pci_dev);
6551 		RTL_W8(ChipCmd, CmdRxEnb);
/*
 * rtl_shutdown - pci_driver .shutdown callback: quiesce the device,
 * restore the permanent MAC address (in case the user changed it),
 * reset the chip, and when powering off arm WoL / drop to D3hot.
 */
6560 static void rtl_shutdown(struct pci_dev *pdev)
6562 	struct net_device *dev = pci_get_drvdata(pdev);
6563 	struct rtl8169_private *tp = netdev_priv(dev);
6564 	struct device *d = &pdev->dev;
6566 	pm_runtime_get_sync(d);
6568 	rtl8169_net_suspend(dev);
6570 	/* Restore original MAC address */
6571 	rtl_rar_set(tp, dev->perm_addr);
6573 	rtl8169_hw_reset(tp);
6575 	if (system_state == SYSTEM_POWER_OFF) {
		/* Apply WoL quirks only if some wake source is armed. */
6576 		if (__rtl8169_get_wol(tp) & WAKE_ANY) {
6577 			rtl_wol_suspend_quirk(tp);
6578 			rtl_wol_shutdown_quirk(tp);
6581 		pci_wake_from_d3(pdev, true);
6582 		pci_set_power_state(pdev, PCI_D3hot);
6585 	pm_runtime_put_noidle(d);
/*
 * rtl_remove_one - pci_driver .remove callback: stop DASH firmware on
 * the chips that run it, cancel outstanding work, unregister the
 * netdev, release firmware/MSI/board resources, and restore the
 * permanent MAC address.
 */
6588 static void rtl_remove_one(struct pci_dev *pdev)
6590 	struct net_device *dev = pci_get_drvdata(pdev);
6591 	struct rtl8169_private *tp = netdev_priv(dev);
	/* These chips carry embedded management firmware that must be
	 * stopped explicitly (mirrors rtl8168_driver_start at probe). */
6593 	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6594 	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6595 	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
6596 		rtl8168_driver_stop(tp);
6599 	cancel_work_sync(&tp->wk.work);
6601 	netif_napi_del(&tp->napi);
6603 	unregister_netdev(dev);
6605 	rtl_release_firmware(tp);
	/* Balance the pm_runtime_put_noidle() done at probe time. */
6607 	if (pci_dev_run_wake(pdev))
6608 		pm_runtime_get_noresume(&pdev->dev);
6610 	/* restore original MAC address */
6611 	rtl_rar_set(tp, dev->perm_addr);
6613 	rtl_disable_msi(pdev, tp);
6614 	rtl8169_release_board(pdev, dev, tp->mmio_addr);
6615 	pci_set_drvdata(pdev, NULL);
/* Netdevice operations table wiring the stack callbacks to this driver. */
6618 static const struct net_device_ops rtl_netdev_ops = {
6619 	.ndo_open		= rtl_open,
6620 	.ndo_stop		= rtl8169_close,
6621 	.ndo_get_stats64	= rtl8169_get_stats64,
6622 	.ndo_start_xmit		= rtl8169_start_xmit,
6623 	.ndo_tx_timeout		= rtl8169_tx_timeout,
6624 	.ndo_validate_addr	= eth_validate_addr,
6625 	.ndo_change_mtu		= rtl8169_change_mtu,
6626 	.ndo_fix_features	= rtl8169_fix_features,
6627 	.ndo_set_features	= rtl8169_set_features,
6628 	.ndo_set_mac_address	= rtl_set_mac_address,
6629 	.ndo_do_ioctl		= rtl8169_ioctl,
6630 	.ndo_set_rx_mode	= rtl_set_rx_mode,
6631 #ifdef CONFIG_NET_POLL_CONTROLLER
6632 	.ndo_poll_controller	= rtl8169_netpoll,
/*
 * Per-family configuration: hw_start routine, BAR region, slow-event
 * interrupt mask, feature flags and fallback MAC version.  Indexed by
 * the driver_data field of the PCI ID table (8169 / 8168 / 8101
 * entries; listing is elided so the index enumerators are not visible).
 */
6637 static const struct rtl_cfg_info {
6638 	void (*hw_start)(struct net_device *);
6639 	unsigned int region;
6644 } rtl_cfg_infos [] = {
6646 		.hw_start	= rtl_hw_start_8169,
6649 		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver,
6650 		.features	= RTL_FEATURE_GMII,
6651 		.default_ver	= RTL_GIGA_MAC_VER_01,
6654 		.hw_start	= rtl_hw_start_8168,
6657 		.event_slow	= SYSErr | LinkChg | RxOverflow,
6658 		.features	= RTL_FEATURE_GMII | RTL_FEATURE_MSI,
6659 		.default_ver	= RTL_GIGA_MAC_VER_11,
6662 		.hw_start	= rtl_hw_start_8101,
6665 		.event_slow	= SYSErr | LinkChg | RxOverflow | RxFIFOOver |
6667 		.features	= RTL_FEATURE_MSI,
6668 		.default_ver	= RTL_GIGA_MAC_VER_13,
6672 /* Cfg9346_Unlock assumed. */
/*
 * rtl_try_msi - try to enable MSI when the config supports it; returns
 * RTL_FEATURE_MSI on success, falling back to INTx otherwise.  Also
 * clears the chip's MSIEnable bit in Config2 on old (<= VER_06) parts.
 * NOTE(review): listing is elided; the msi variable init and return are
 * not visible here.
 */
6673 static unsigned rtl_try_msi(struct rtl8169_private *tp,
6674 			    const struct rtl_cfg_info *cfg)
6676 	void __iomem *ioaddr = tp->mmio_addr;
6680 	cfg2 = RTL_R8(Config2) & ~MSIEnable;
6681 	if (cfg->features & RTL_FEATURE_MSI) {
6682 		if (pci_enable_msi(tp->pci_dev)) {
6683 			netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
6686 			msi = RTL_FEATURE_MSI;
6689 	if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
6690 		RTL_W8(Config2, cfg2);
/* Poll condition: the MCU reports its link list as ready. */
6694 DECLARE_RTL_COND(rtl_link_list_ready_cond)
6696 	void __iomem *ioaddr = tp->mmio_addr;
6698 	return RTL_R8(MCU) & LINK_LIST_RDY;
/* Poll condition: both Rx and Tx FIFOs report empty. */
6701 DECLARE_RTL_COND(rtl_rxtx_empty_cond)
6703 	void __iomem *ioaddr = tp->mmio_addr;
6705 	return (RTL_R8(MCU) & RXTX_EMPTY) == RXTX_EMPTY;
/*
 * rtl_hw_init_8168g - one-time hardware bring-up for 8168G-class chips:
 * gate RXDV, wait for Tx config / FIFOs to drain, stop Tx/Rx, leave OOB
 * mode, then toggle an OCP register (0xe8de) around two waits for the
 * MCU link list to become ready.  NOTE(review): listing is elided; the
 * bit manipulations on 'data' between the OCP read/write pairs are not
 * visible here.
 */
6708 static void rtl_hw_init_8168g(struct rtl8169_private *tp)
6710 	void __iomem *ioaddr = tp->mmio_addr;
6713 	tp->ocp_base = OCP_STD_PHY_BASE;
6715 	RTL_W32(MISC, RTL_R32(MISC) | RXDV_GATED_EN);
6717 	if (!rtl_udelay_loop_wait_high(tp, &rtl_txcfg_empty_cond, 100, 42))
6720 	if (!rtl_udelay_loop_wait_high(tp, &rtl_rxtx_empty_cond, 100, 42))
6723 	RTL_W8(ChipCmd, RTL_R8(ChipCmd) & ~(CmdTxEnb | CmdRxEnb));
6725 	RTL_W8(MCU, RTL_R8(MCU) & ~NOW_IS_OOB);
6727 	data = r8168_mac_ocp_read(tp, 0xe8de);
6729 	r8168_mac_ocp_write(tp, 0xe8de, data);
6731 	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
6734 	data = r8168_mac_ocp_read(tp, 0xe8de);
6736 	r8168_mac_ocp_write(tp, 0xe8de, data);
6738 	if (!rtl_udelay_loop_wait_high(tp, &rtl_link_list_ready_cond, 100, 42))
/*
 * rtl_hw_initialize - dispatch one-time hardware init by MAC version;
 * currently only the 8168G family (VER_40/41) needs special handling.
 */
6742 static void rtl_hw_initialize(struct rtl8169_private *tp)
6744 	switch (tp->mac_version) {
6745 	case RTL_GIGA_MAC_VER_40:
6746 	case RTL_GIGA_MAC_VER_41:
6747 		rtl_hw_init_8168g(tp);
/*
 * rtl_init_one - PCI probe: allocate the netdev, enable and map the
 * device, identify the chip, configure MII/MSI/WoL/features, and
 * register the network interface.  Error paths unwind via labels at the
 * bottom.  NOTE(review): listing is elided; several statements, gotos
 * and labels of the original body are not visible here.
 */
6756 rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6758 	const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
6759 	const unsigned int region = cfg->region;
6760 	struct rtl8169_private *tp;
6761 	struct mii_if_info *mii;
6762 	struct net_device *dev;
6763 	void __iomem *ioaddr;
6767 	if (netif_msg_drv(&debug)) {
6768 		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
6769 		       MODULENAME, RTL8169_VERSION);
6772 	dev = alloc_etherdev(sizeof (*tp));
6778 	SET_NETDEV_DEV(dev, &pdev->dev);
6779 	dev->netdev_ops = &rtl_netdev_ops;
6780 	tp = netdev_priv(dev);
6783 	tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
	/* Generic MII library glue for ethtool/ioctl PHY access. */
6787 	mii->mdio_read = rtl_mdio_read;
6788 	mii->mdio_write = rtl_mdio_write;
6789 	mii->phy_id_mask = 0x1f;
6790 	mii->reg_num_mask = 0x1f;
6791 	mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
6793 	/* disable ASPM completely as that cause random device stop working
6794 	 * problems as well as full system hangs for some PCIe devices users */
6795 	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
6796 				     PCIE_LINK_STATE_CLKPM);
6798 	/* enable device (incl. PCI PM wakeup and hotplug setup) */
6799 	rc = pci_enable_device(pdev);
6801 		netif_err(tp, probe, dev, "enable failure\n");
6802 		goto err_out_free_dev_1;
	/* Memory-Write-Invalidate is an optimization; failure is benign. */
6805 	if (pci_set_mwi(pdev) < 0)
6806 		netif_info(tp, probe, dev, "Mem-Wr-Inval unavailable\n");
6808 	/* make sure PCI base addr 1 is MMIO */
6809 	if (!(pci_resource_flags(pdev, region) & IORESOURCE_MEM)) {
6810 		netif_err(tp, probe, dev,
6811 			  "region #%d not an MMIO resource, aborting\n",
6817 	/* check for weird/broken PCI region reporting */
6818 	if (pci_resource_len(pdev, region) < R8169_REGS_SIZE) {
6819 		netif_err(tp, probe, dev,
6820 			  "Invalid PCI region size(s), aborting\n");
6825 	rc = pci_request_regions(pdev, MODULENAME);
6827 		netif_err(tp, probe, dev, "could not request regions\n");
6831 	tp->cp_cmd = RxChkSum;
	/* Prefer a 64-bit DMA mask (DAC) when the arch and module option
	 * allow it; otherwise fall back to 32-bit addressing. */
6833 	if ((sizeof(dma_addr_t) > 4) &&
6834 	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
6835 		tp->cp_cmd |= PCIDAC;
6836 		dev->features |= NETIF_F_HIGHDMA;
6838 		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
6840 			netif_err(tp, probe, dev, "DMA configuration failed\n");
6841 			goto err_out_free_res_3;
6845 	/* ioremap MMIO region */
6846 	ioaddr = ioremap(pci_resource_start(pdev, region), R8169_REGS_SIZE);
6848 		netif_err(tp, probe, dev, "cannot remap MMIO, aborting\n");
6850 		goto err_out_free_res_3;
6852 	tp->mmio_addr = ioaddr;
6854 	if (!pci_is_pcie(pdev))
6855 		netif_info(tp, probe, dev, "not PCI Express\n");
6857 	/* Identify chip attached to board */
6858 	rtl8169_get_mac_version(tp, dev, cfg->default_ver);
6862 	rtl_irq_disable(tp);
6864 	rtl_hw_initialize(tp);
	/* Clear any event left pending from the BIOS/bootloader. */
6868 	rtl_ack_events(tp, 0xffff);
6870 	pci_set_master(pdev);
6873 	 * Pretend we are using VLANs; This bypasses a nasty bug where
6874 	 * Interrupts stop flowing on high load on 8110SCd controllers.
6876 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6877 		tp->cp_cmd |= RxVlan;
	/* Install per-chip-version operation tables. */
6879 	rtl_init_mdio_ops(tp);
6880 	rtl_init_pll_power_ops(tp);
6881 	rtl_init_jumbo_ops(tp);
6882 	rtl_init_csi_ops(tp);
6884 	rtl8169_print_mac_version(tp);
6886 	chipset = tp->mac_version;
6887 	tp->txd_version = rtl_chip_infos[chipset].txd_version;
	/* Probe WoL capability and MSI while the config regs are unlocked. */
6889 	RTL_W8(Cfg9346, Cfg9346_Unlock);
6890 	RTL_W8(Config1, RTL_R8(Config1) | PMEnable);
6891 	RTL_W8(Config5, RTL_R8(Config5) & PMEStatus);
6892 	if ((RTL_R8(Config3) & (LinkUp | MagicPacket)) != 0)
6893 		tp->features |= RTL_FEATURE_WOL;
6894 	if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
6895 		tp->features |= RTL_FEATURE_WOL;
6896 	tp->features |= rtl_try_msi(tp, cfg);
6897 	RTL_W8(Cfg9346, Cfg9346_Lock);
	/* TBI (fiber) vs. xMII (copper) attachments use different PHY ops. */
6899 	if (rtl_tbi_enabled(tp)) {
6900 		tp->set_speed = rtl8169_set_speed_tbi;
6901 		tp->get_settings = rtl8169_gset_tbi;
6902 		tp->phy_reset_enable = rtl8169_tbi_reset_enable;
6903 		tp->phy_reset_pending = rtl8169_tbi_reset_pending;
6904 		tp->link_ok = rtl8169_tbi_link_ok;
6905 		tp->do_ioctl = rtl_tbi_ioctl;
6907 		tp->set_speed = rtl8169_set_speed_xmii;
6908 		tp->get_settings = rtl8169_gset_xmii;
6909 		tp->phy_reset_enable = rtl8169_xmii_reset_enable;
6910 		tp->phy_reset_pending = rtl8169_xmii_reset_pending;
6911 		tp->link_ok = rtl8169_xmii_link_ok;
6912 		tp->do_ioctl = rtl_xmii_ioctl;
6915 	mutex_init(&tp->wk.mutex);
6917 	/* Get MAC address */
6918 	for (i = 0; i < ETH_ALEN; i++)
6919 		dev->dev_addr[i] = RTL_R8(MAC0 + i);
6921 	SET_ETHTOOL_OPS(dev, &rtl8169_ethtool_ops);
6922 	dev->watchdog_timeo = RTL8169_TX_TIMEOUT;
6924 	netif_napi_add(dev, &tp->napi, rtl8169_poll, R8169_NAPI_WEIGHT);
6926 	/* don't enable SG, IP_CSUM and TSO by default - it might not work
6927 	 * properly for all devices */
6928 	dev->features |= NETIF_F_RXCSUM |
6929 		NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6931 	dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6932 		NETIF_F_RXCSUM | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6933 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
6936 	if (tp->mac_version == RTL_GIGA_MAC_VER_05)
6937 		/* 8110SCd requires hardware Rx VLAN - disallow toggling */
6938 		dev->hw_features &= ~NETIF_F_HW_VLAN_RX;
6940 	dev->hw_features |= NETIF_F_RXALL;
6941 	dev->hw_features |= NETIF_F_RXFCS;
6943 	tp->hw_start = cfg->hw_start;
6944 	tp->event_slow = cfg->event_slow;
	/* VER_01 reports Rx overflow bits differently; see rtl_rx(). */
6946 	tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
6947 		~(RxBOVF | RxFOVF) : ~0;
6949 	init_timer(&tp->timer);
6950 	tp->timer.data = (unsigned long) dev;
6951 	tp->timer.function = rtl8169_phy_timer;
6953 	tp->rtl_fw = RTL_FIRMWARE_UNKNOWN;
6955 	rc = register_netdev(dev);
6959 	pci_set_drvdata(pdev, dev);
6961 	netif_info(tp, probe, dev, "%s at 0x%p, %pM, XID %08x IRQ %d\n",
6962 		   rtl_chip_infos[chipset].name, ioaddr, dev->dev_addr,
6963 		   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), pdev->irq);
6964 	if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
6965 		netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
6966 			   "tx checksumming: %s]\n",
6967 			   rtl_chip_infos[chipset].jumbo_max,
6968 			   rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
	/* Start DASH management firmware on the chips that carry it
	 * (mirrored by rtl8168_driver_stop() in rtl_remove_one()). */
6971 	if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
6972 	    tp->mac_version == RTL_GIGA_MAC_VER_28 ||
6973 	    tp->mac_version == RTL_GIGA_MAC_VER_31) {
6974 		rtl8168_driver_start(tp);
6977 	device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
6979 	if (pci_dev_run_wake(pdev))
6980 		pm_runtime_put_noidle(&pdev->dev);
6982 	netif_carrier_off(dev);
	/* ---- error unwind labels (partially elided in this listing) ---- */
6988 	netif_napi_del(&tp->napi);
6989 	rtl_disable_msi(pdev, tp);
6992 	pci_release_regions(pdev);
6994 	pci_clear_mwi(pdev);
6995 	pci_disable_device(pdev);
/*
 * PCI driver registration.  module_pci_driver() expands to the module
 * init/exit boilerplate that registers/unregisters this driver.
 */
7001 static struct pci_driver rtl8169_pci_driver = {
7003 	.id_table	= rtl8169_pci_tbl,
7004 	.probe		= rtl_init_one,
7005 	.remove		= rtl_remove_one,
7006 	.shutdown	= rtl_shutdown,
7007 	.driver.pm	= RTL8169_PM_OPS,
7010 module_pci_driver(rtl8169_pci_driver);