1 /* 8139cp.c: A Linux PCI Ethernet driver for the RealTek 8139C+ chips. */
3 Copyright 2001-2004 Jeff Garzik <jgarzik@pobox.com>
5 Copyright (C) 2001, 2002 David S. Miller (davem@redhat.com) [tg3.c]
6 Copyright (C) 2000, 2001 David S. Miller (davem@redhat.com) [sungem.c]
7 Copyright 2001 Manfred Spraul [natsemi.c]
8 Copyright 1999-2001 by Donald Becker. [natsemi.c]
9 Written 1997-2001 by Donald Becker. [8139too.c]
10 Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>. [acenic.c]
12 This software may be used and distributed according to the terms of
13 the GNU General Public License (GPL), incorporated herein by reference.
14 Drivers based on or derived from this code fall under the GPL and must
15 retain the authorship, copyright and license notice. This file is not
16 a complete program and may only be used when the entire operating
17 system is licensed under the GPL.
19 See the file COPYING in this distribution for more information.
23 Wake-on-LAN support - Felipe Damasio <felipewd@terra.com.br>
24 PCI suspend/resume - Felipe Damasio <felipewd@terra.com.br>
25 LinkChg interrupt - Felipe Damasio <felipewd@terra.com.br>
28 * Test Tx checksumming thoroughly
31 * Complete reset on PciErr
32 * Consider Rx interrupt mitigation using TimerIntr
33 * Investigate using skb->priority with h/w VLAN priority
34 * Investigate using High Priority Tx Queue with skb->priority
35 * Adjust Rx FIFO threshold and Max Rx DMA burst on Rx FIFO error
36 * Adjust Tx FIFO threshold and Max Tx DMA burst on Tx FIFO error
37 * Implement Tx software interrupt mitigation via
39 * The real minimum of CP_MIN_MTU is 4 bytes. However,
40 for this to be supported, one must(?) turn on packet padding.
41 * Support external MII transceivers (patch available)
44 * TX checksumming is considered experimental. It is off by
45 default; use ethtool to turn it on.
49 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
51 #define DRV_NAME "8139cp"
52 #define DRV_VERSION "1.3"
53 #define DRV_RELDATE "Mar 22, 2004"
56 #include <linux/module.h>
57 #include <linux/moduleparam.h>
58 #include <linux/kernel.h>
59 #include <linux/compiler.h>
60 #include <linux/netdevice.h>
61 #include <linux/etherdevice.h>
62 #include <linux/init.h>
63 #include <linux/interrupt.h>
64 #include <linux/pci.h>
65 #include <linux/dma-mapping.h>
66 #include <linux/delay.h>
67 #include <linux/ethtool.h>
68 #include <linux/gfp.h>
69 #include <linux/mii.h>
70 #include <linux/if_vlan.h>
71 #include <linux/crc32.h>
74 #include <linux/tcp.h>
75 #include <linux/udp.h>
76 #include <linux/cache.h>
79 #include <asm/uaccess.h>
81 /* These identify the driver base version and may not be removed. */
82 static char version[] =
83 DRV_NAME ": 10/100 PCI Ethernet driver v" DRV_VERSION " (" DRV_RELDATE ")\n";
85 MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
86 MODULE_DESCRIPTION("RealTek RTL-8139C+ series 10/100 PCI Ethernet driver");
87 MODULE_VERSION(DRV_VERSION);
88 MODULE_LICENSE("GPL");
90 static int debug = -1;
91 module_param(debug, int, 0);
92 MODULE_PARM_DESC (debug, "8139cp: bitmapped message enable number");
94 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
95 The RTL chips use a 64 element hash table based on the Ethernet CRC. */
96 static int multicast_filter_limit = 32;
97 module_param(multicast_filter_limit, int, 0);
98 MODULE_PARM_DESC (multicast_filter_limit, "8139cp: maximum number of filtered multicast addresses");
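/* Illustrative sketch, not part of the driver's control flow: the 64-entry
 * hash mentioned above takes the top six bits of the Ethernet CRC of each
 * destination address and sets the matching bit in the two 32-bit MAR
 * registers, roughly as __cp_set_rx_mode() does below:
 *
 *	bit_nr = ether_crc(ETH_ALEN, addr) >> 26;
 *	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
 */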
100 #define CP_DEF_MSG_ENABLE (NETIF_MSG_DRV | \
103 #define CP_NUM_STATS 14 /* struct cp_dma_stats, plus one */
104 #define CP_STATS_SIZE 64 /* size in bytes of DMA stats block */
105 #define CP_REGS_SIZE (0xff + 1)
106 #define CP_REGS_VER 1 /* version 1 */
107 #define CP_RX_RING_SIZE 64
108 #define CP_TX_RING_SIZE 64
109 #define CP_RING_BYTES \
110 ((sizeof(struct cp_desc) * CP_RX_RING_SIZE) + \
111 (sizeof(struct cp_desc) * CP_TX_RING_SIZE) + \
113 #define NEXT_TX(N) (((N) + 1) & (CP_TX_RING_SIZE - 1))
114 #define NEXT_RX(N) (((N) + 1) & (CP_RX_RING_SIZE - 1))
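/* The bitwise AND above works as a cheap modulo because both ring sizes
   are powers of two. */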
115 #define TX_BUFFS_AVAIL(CP) \
116 (((CP)->tx_tail <= (CP)->tx_head) ? \
117 (CP)->tx_tail + (CP_TX_RING_SIZE - 1) - (CP)->tx_head : \
118 (CP)->tx_tail - (CP)->tx_head - 1)
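/* One Tx slot is always left unused so a completely full ring can be told
   apart from an empty one; with tx_head == tx_tail the macro above therefore
   reports CP_TX_RING_SIZE - 1 free buffers. */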
120 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/
121 #define CP_INTERNAL_PHY 32
123 /* The following settings are log_2(bytes)-4: 0 == 16 bytes .. 6 == 1024 bytes, 7 == end of packet. */
124 #define RX_FIFO_THRESH 5 /* Rx buffer level before first PCI xfer. */
125 #define RX_DMA_BURST 4 /* Maximum PCI burst, '4' is 256 */
126 #define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
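/* Decoding the log_2(bytes)-4 values above: bytes = 16 << value, so
   RX_FIFO_THRESH 5 == 512 bytes, RX_DMA_BURST 4 == 256 bytes and
   TX_DMA_BURST 6 == 1024 bytes. */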
127 #define TX_EARLY_THRESH 256 /* Early Tx threshold, in bytes */
129 /* Time in jiffies before concluding the transmitter is hung. */
130 #define TX_TIMEOUT (6*HZ)
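/* HZ is one second's worth of jiffies, so the netdev watchdog declares the
   transmitter hung after six seconds without progress (see cp_tx_timeout). */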
132 /* hardware minimum and maximum for a single frame's data payload */
133 #define CP_MIN_MTU 60 /* TODO: allow lower, but pad */
134 #define CP_MAX_MTU 4096
137 /* NIC register offsets */
138 MAC0 = 0x00, /* Ethernet hardware address. */
139 MAR0 = 0x08, /* Multicast filter. */
140 StatsAddr = 0x10, /* 64-bit start addr of 64-byte DMA stats blk */
141 TxRingAddr = 0x20, /* 64-bit start addr of Tx ring */
142 HiTxRingAddr = 0x28, /* 64-bit start addr of high priority Tx ring */
143 Cmd = 0x37, /* Command register */
144 IntrMask = 0x3C, /* Interrupt mask */
145 IntrStatus = 0x3E, /* Interrupt status */
146 TxConfig = 0x40, /* Tx configuration */
147 ChipVersion = 0x43, /* 8-bit chip version, inside TxConfig */
148 RxConfig = 0x44, /* Rx configuration */
149 RxMissed = 0x4C, /* 24 bits valid, write clears */
150 Cfg9346 = 0x50, /* EEPROM select/control; Cfg reg [un]lock */
151 Config1 = 0x52, /* Config1 */
152 Config3 = 0x59, /* Config3 */
153 Config4 = 0x5A, /* Config4 */
154 MultiIntr = 0x5C, /* Multiple interrupt select */
155 BasicModeCtrl = 0x62, /* MII BMCR */
156 BasicModeStatus = 0x64, /* MII BMSR */
157 NWayAdvert = 0x66, /* MII ADVERTISE */
158 NWayLPAR = 0x68, /* MII LPA */
159 NWayExpansion = 0x6A, /* MII Expansion */
160 Config5 = 0xD8, /* Config5 */
161 TxPoll = 0xD9, /* Tell chip to check Tx descriptors for work */
162 RxMaxSize = 0xDA, /* Max size of an Rx packet (8169 only) */
163 CpCmd = 0xE0, /* C+ Command register (C+ mode only) */
164 IntrMitigate = 0xE2, /* rx/tx interrupt mitigation control */
165 RxRingAddr = 0xE4, /* 64-bit start addr of Rx ring */
166 TxThresh = 0xEC, /* Early Tx threshold */
167 OldRxBufAddr = 0x30, /* DMA address of Rx ring buffer (C mode) */
168 OldTSD0 = 0x10, /* DMA address of first Tx desc (C mode) */
170 /* Tx and Rx status descriptors */
171 DescOwn = (1 << 31), /* Descriptor is owned by NIC */
172 RingEnd = (1 << 30), /* End of descriptor ring */
173 FirstFrag = (1 << 29), /* First segment of a packet */
174 LastFrag = (1 << 28), /* Final segment of a packet */
175 LargeSend = (1 << 27), /* TCP Large Send Offload (TSO) */
176 MSSShift = 16, /* MSS value position */
177 MSSMask = 0xfff, /* MSS value: 11 bits */
178 TxError = (1 << 23), /* Tx error summary */
179 RxError = (1 << 20), /* Rx error summary */
180 IPCS = (1 << 18), /* Calculate IP checksum */
181 UDPCS = (1 << 17), /* Calculate UDP/IP checksum */
182 TCPCS = (1 << 16), /* Calculate TCP/IP checksum */
183 TxVlanTag = (1 << 17), /* Add VLAN tag */
184 RxVlanTagged = (1 << 16), /* Rx VLAN tag available */
185 IPFail = (1 << 15), /* IP checksum failed */
186 UDPFail = (1 << 14), /* UDP/IP checksum failed */
187 TCPFail = (1 << 13), /* TCP/IP checksum failed */
188 NormalTxPoll = (1 << 6), /* One or more normal Tx packets to send */
189 PID1 = (1 << 17), /* 2 protocol id bits: 0==non-IP, */
190 PID0 = (1 << 16), /* 1==UDP/IP, 2==TCP/IP, 3==IP */
194 TxFIFOUnder = (1 << 25), /* Tx FIFO underrun */
195 TxOWC = (1 << 22), /* Tx Out-of-window collision */
196 TxLinkFail = (1 << 21), /* Link failed during Tx of packet */
197 TxMaxCol = (1 << 20), /* Tx aborted due to excessive collisions */
198 TxColCntShift = 16, /* Shift, to get 4-bit Tx collision cnt */
199 TxColCntMask = 0x01 | 0x02 | 0x04 | 0x08, /* 4-bit collision count */
200 RxErrFrame = (1 << 27), /* Rx frame alignment error */
201 RxMcast = (1 << 26), /* Rx multicast packet rcv'd */
202 RxErrCRC = (1 << 18), /* Rx CRC error */
203 RxErrRunt = (1 << 19), /* Rx error, packet < 64 bytes */
204 RxErrLong = (1 << 21), /* Rx error, packet > 4096 bytes */
205 RxErrFIFO = (1 << 22), /* Rx error, FIFO overflowed, pkt bad */
207 /* StatsAddr register */
208 DumpStats = (1 << 3), /* Begin stats dump */
210 /* RxConfig register */
211 RxCfgFIFOShift = 13, /* Shift, to get Rx FIFO thresh value */
212 RxCfgDMAShift = 8, /* Shift, to get Rx Max DMA value */
213 AcceptErr = 0x20, /* Accept packets with CRC errors */
214 AcceptRunt = 0x10, /* Accept runt (<64 bytes) packets */
215 AcceptBroadcast = 0x08, /* Accept broadcast packets */
216 AcceptMulticast = 0x04, /* Accept multicast packets */
217 AcceptMyPhys = 0x02, /* Accept pkts with our MAC as dest */
218 AcceptAllPhys = 0x01, /* Accept all pkts w/ physical dest */
220 /* IntrMask / IntrStatus registers */
221 PciErr = (1 << 15), /* System error on the PCI bus */
222 TimerIntr = (1 << 14), /* Asserted when TCTR reaches TimerInt value */
223 LenChg = (1 << 13), /* Cable length change */
224 SWInt = (1 << 8), /* Software-requested interrupt */
225 TxEmpty = (1 << 7), /* No Tx descriptors available */
226 RxFIFOOvr = (1 << 6), /* Rx FIFO Overflow */
227 LinkChg = (1 << 5), /* Packet underrun, or link change */
228 RxEmpty = (1 << 4), /* No Rx descriptors available */
229 TxErr = (1 << 3), /* Tx error */
230 TxOK = (1 << 2), /* Tx packet sent */
231 RxErr = (1 << 1), /* Rx error */
232 RxOK = (1 << 0), /* Rx packet received */
233 IntrResvd = (1 << 10), /* reserved, according to RealTek engineers,
234 but hardware likes to raise it */
236 IntrAll = PciErr | TimerIntr | LenChg | SWInt | TxEmpty |
237 RxFIFOOvr | LinkChg | RxEmpty | TxErr | TxOK |
238 RxErr | RxOK | IntrResvd,
240 /* C mode command register */
241 CmdReset = (1 << 4), /* Enable to reset; self-clearing */
242 RxOn = (1 << 3), /* Rx mode enable */
243 TxOn = (1 << 2), /* Tx mode enable */
245 /* C+ mode command register */
246 RxVlanOn = (1 << 6), /* Rx VLAN de-tagging enable */
247 RxChkSum = (1 << 5), /* Rx checksum offload enable */
248 PCIDAC = (1 << 4), /* PCI Dual Address Cycle (64-bit PCI) */
249 PCIMulRW = (1 << 3), /* Enable PCI read/write multiple */
250 CpRxOn = (1 << 1), /* Rx mode enable */
251 CpTxOn = (1 << 0), /* Tx mode enable */
253 /* Cfg9346 EEPROM control register */
254 Cfg9346_Lock = 0x00, /* Lock ConfigX/MII register access */
255 Cfg9346_Unlock = 0xC0, /* Unlock ConfigX/MII register access */
257 /* TxConfig register */
258 IFG = (1 << 25) | (1 << 24), /* standard IEEE interframe gap */
259 TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */
261 /* Early Tx Threshold register */
262 TxThreshMask = 0x3f, /* Mask bits 5-0 */
263 TxThreshMax = 2048, /* Max early Tx threshold */
265 /* Config1 register */
266 DriverLoaded = (1 << 5), /* Software marker, driver is loaded */
267 LWACT = (1 << 4), /* LWAKE active mode */
268 PMEnable = (1 << 0), /* Enable various PM features of chip */
270 /* Config3 register */
271 PARMEnable = (1 << 6), /* Enable auto-loading of PHY parms */
272 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
273 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
275 /* Config4 register */
276 LWPTN = (1 << 1), /* LWAKE Pattern */
277 LWPME = (1 << 4), /* LANWAKE vs PMEB */
279 /* Config5 register */
280 BWF = (1 << 6), /* Accept Broadcast wakeup frame */
281 MWF = (1 << 5), /* Accept Multicast wakeup frame */
282 UWF = (1 << 4), /* Accept Unicast wakeup frame */
283 LANWake = (1 << 1), /* Enable LANWake signal */
284 PMEStatus = (1 << 0), /* PME status can be reset by PCI RST# */
286 cp_norx_intr_mask = PciErr | LinkChg | TxOK | TxErr | TxEmpty,
287 cp_rx_intr_mask = RxOK | RxErr | RxEmpty | RxFIFOOvr,
288 cp_intr_mask = cp_rx_intr_mask | cp_norx_intr_mask,
291 static const unsigned int cp_rx_config =
292 (RX_FIFO_THRESH << RxCfgFIFOShift) |
293 (RX_DMA_BURST << RxCfgDMAShift);
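/* With the defaults above this evaluates to (5 << 13) | (4 << 8) == 0xa400;
   the accept-mode bits chosen in __cp_set_rx_mode() are OR'd in at run time. */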
301 struct cp_dma_stats {
317 struct cp_extra_stats {
318 unsigned long rx_frags;
323 struct net_device *dev;
327 struct napi_struct napi;
329 struct pci_dev *pdev;
333 struct cp_extra_stats cp_stats;
335 unsigned rx_head ____cacheline_aligned;
337 struct cp_desc *rx_ring;
338 struct sk_buff *rx_skb[CP_RX_RING_SIZE];
340 unsigned tx_head ____cacheline_aligned;
342 struct cp_desc *tx_ring;
343 struct sk_buff *tx_skb[CP_TX_RING_SIZE];
346 unsigned wol_enabled : 1; /* Is Wake-on-LAN enabled? */
350 struct mii_if_info mii_if;
353 #define cpr8(reg) readb(cp->regs + (reg))
354 #define cpr16(reg) readw(cp->regs + (reg))
355 #define cpr32(reg) readl(cp->regs + (reg))
356 #define cpw8(reg,val) writeb((val), cp->regs + (reg))
357 #define cpw16(reg,val) writew((val), cp->regs + (reg))
358 #define cpw32(reg,val) writel((val), cp->regs + (reg))
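/* The *_f variants below read the register back after writing it; the
   readback forces the posted MMIO write out to the chip before we proceed. */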
359 #define cpw8_f(reg,val) do { \
360 writeb((val), cp->regs + (reg)); \
361 readb(cp->regs + (reg)); \
363 #define cpw16_f(reg,val) do { \
364 writew((val), cp->regs + (reg)); \
365 readw(cp->regs + (reg)); \
367 #define cpw32_f(reg,val) do { \
368 writel((val), cp->regs + (reg)); \
369 readl(cp->regs + (reg)); \
373 static void __cp_set_rx_mode (struct net_device *dev);
374 static void cp_tx (struct cp_private *cp);
375 static void cp_clean_rings (struct cp_private *cp);
376 #ifdef CONFIG_NET_POLL_CONTROLLER
377 static void cp_poll_controller(struct net_device *dev);
379 static int cp_get_eeprom_len(struct net_device *dev);
380 static int cp_get_eeprom(struct net_device *dev,
381 struct ethtool_eeprom *eeprom, u8 *data);
382 static int cp_set_eeprom(struct net_device *dev,
383 struct ethtool_eeprom *eeprom, u8 *data);
385 static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
386 { PCI_DEVICE(PCI_VENDOR_ID_REALTEK, PCI_DEVICE_ID_REALTEK_8139), },
387 { PCI_DEVICE(PCI_VENDOR_ID_TTTECH, PCI_DEVICE_ID_TTTECH_MC322), },
390 MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
393 const char str[ETH_GSTRING_LEN];
394 } ethtool_stats_keys[] = {
412 static inline void cp_set_rxbufsize (struct cp_private *cp)
414 unsigned int mtu = cp->dev->mtu;
416 if (mtu > ETH_DATA_LEN)
417 /* MTU + ethernet header + FCS + optional VLAN tag */
418 cp->rx_buf_sz = mtu + ETH_HLEN + 8;
420 cp->rx_buf_sz = PKT_BUF_SZ;
423 static inline void cp_rx_skb (struct cp_private *cp, struct sk_buff *skb,
424 struct cp_desc *desc)
426 u32 opts2 = le32_to_cpu(desc->opts2);
428 skb->protocol = eth_type_trans (skb, cp->dev);
430 cp->dev->stats.rx_packets++;
431 cp->dev->stats.rx_bytes += skb->len;
433 if (opts2 & RxVlanTagged)
434 __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff));
436 napi_gro_receive(&cp->napi, skb);
439 static void cp_rx_err_acct (struct cp_private *cp, unsigned rx_tail,
442 netif_dbg(cp, rx_err, cp->dev, "rx err, slot %d status 0x%x len %d\n",
443 rx_tail, status, len);
444 cp->dev->stats.rx_errors++;
445 if (status & RxErrFrame)
446 cp->dev->stats.rx_frame_errors++;
447 if (status & RxErrCRC)
448 cp->dev->stats.rx_crc_errors++;
449 if ((status & RxErrRunt) || (status & RxErrLong))
450 cp->dev->stats.rx_length_errors++;
451 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag))
452 cp->dev->stats.rx_length_errors++;
453 if (status & RxErrFIFO)
454 cp->dev->stats.rx_fifo_errors++;
457 static inline unsigned int cp_rx_csum_ok (u32 status)
459 unsigned int protocol = (status >> 16) & 0x3;
461 if (((protocol == RxProtoTCP) && !(status & TCPFail)) ||
462 ((protocol == RxProtoUDP) && !(status & UDPFail)))
468 static int cp_rx_poll(struct napi_struct *napi, int budget)
470 struct cp_private *cp = container_of(napi, struct cp_private, napi);
471 struct net_device *dev = cp->dev;
472 unsigned int rx_tail = cp->rx_tail;
477 cpw16(IntrStatus, cp_rx_intr_mask);
482 struct sk_buff *skb, *new_skb;
483 struct cp_desc *desc;
484 const unsigned buflen = cp->rx_buf_sz;
486 skb = cp->rx_skb[rx_tail];
489 desc = &cp->rx_ring[rx_tail];
490 status = le32_to_cpu(desc->opts1);
491 if (status & DescOwn)
494 len = (status & 0x1fff) - 4;
495 mapping = le64_to_cpu(desc->addr);
497 if ((status & (FirstFrag | LastFrag)) != (FirstFrag | LastFrag)) {
498 /* we don't support incoming fragmented frames.
499 * instead, we attempt to ensure that the
500 * pre-allocated RX skbs are properly sized such
501 * that RX fragments are never encountered
503 cp_rx_err_acct(cp, rx_tail, status, len);
504 dev->stats.rx_dropped++;
505 cp->cp_stats.rx_frags++;
509 if (status & (RxError | RxErrFIFO)) {
510 cp_rx_err_acct(cp, rx_tail, status, len);
514 netif_dbg(cp, rx_status, dev, "rx slot %d status 0x%x len %d\n",
515 rx_tail, status, len);
517 new_skb = netdev_alloc_skb_ip_align(dev, buflen);
519 dev->stats.rx_dropped++;
523 dma_unmap_single(&cp->pdev->dev, mapping,
524 buflen, PCI_DMA_FROMDEVICE);
526 /* Handle checksum offloading for incoming packets. */
527 if (cp_rx_csum_ok(status))
528 skb->ip_summed = CHECKSUM_UNNECESSARY;
530 skb_checksum_none_assert(skb);
534 mapping = dma_map_single(&cp->pdev->dev, new_skb->data, buflen,
536 cp->rx_skb[rx_tail] = new_skb;
538 cp_rx_skb(cp, skb, desc);
542 cp->rx_ring[rx_tail].opts2 = 0;
543 cp->rx_ring[rx_tail].addr = cpu_to_le64(mapping);
544 if (rx_tail == (CP_RX_RING_SIZE - 1))
545 desc->opts1 = cpu_to_le32(DescOwn | RingEnd |
548 desc->opts1 = cpu_to_le32(DescOwn | cp->rx_buf_sz);
549 rx_tail = NEXT_RX(rx_tail);
555 cp->rx_tail = rx_tail;
557 /* if we did not reach the work limit, then we're done with
558 * this round of polling
563 if (cpr16(IntrStatus) & cp_rx_intr_mask)
566 napi_gro_flush(napi);
567 spin_lock_irqsave(&cp->lock, flags);
568 __napi_complete(napi);
569 cpw16_f(IntrMask, cp_intr_mask);
570 spin_unlock_irqrestore(&cp->lock, flags);
576 static irqreturn_t cp_interrupt (int irq, void *dev_instance)
578 struct net_device *dev = dev_instance;
579 struct cp_private *cp;
582 if (unlikely(dev == NULL))
584 cp = netdev_priv(dev);
586 status = cpr16(IntrStatus);
587 if (!status || (status == 0xFFFF))
590 netif_dbg(cp, intr, dev, "intr, status %04x cmd %02x cpcmd %04x\n",
591 status, cpr8(Cmd), cpr16(CpCmd));
593 cpw16(IntrStatus, status & ~cp_rx_intr_mask);
595 spin_lock(&cp->lock);
597 /* close possible races with dev_close */
598 if (unlikely(!netif_running(dev))) {
600 spin_unlock(&cp->lock);
604 if (status & (RxOK | RxErr | RxEmpty | RxFIFOOvr))
605 if (napi_schedule_prep(&cp->napi)) {
606 cpw16_f(IntrMask, cp_norx_intr_mask);
607 __napi_schedule(&cp->napi);
610 if (status & (TxOK | TxErr | TxEmpty | SWInt))
612 if (status & LinkChg)
613 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
615 spin_unlock(&cp->lock);
617 if (status & PciErr) {
620 pci_read_config_word(cp->pdev, PCI_STATUS, &pci_status);
621 pci_write_config_word(cp->pdev, PCI_STATUS, pci_status);
622 netdev_err(dev, "PCI bus error, status=%04x, PCI status=%04x\n",
625 /* TODO: reset hardware */
631 #ifdef CONFIG_NET_POLL_CONTROLLER
633 * Polling receive - used by netconsole and other diagnostic tools
634 * to allow network i/o with interrupts disabled.
636 static void cp_poll_controller(struct net_device *dev)
638 disable_irq(dev->irq);
639 cp_interrupt(dev->irq, dev);
640 enable_irq(dev->irq);
644 static void cp_tx (struct cp_private *cp)
646 unsigned tx_head = cp->tx_head;
647 unsigned tx_tail = cp->tx_tail;
649 while (tx_tail != tx_head) {
650 struct cp_desc *txd = cp->tx_ring + tx_tail;
655 status = le32_to_cpu(txd->opts1);
656 if (status & DescOwn)
659 skb = cp->tx_skb[tx_tail];
662 dma_unmap_single(&cp->pdev->dev, le64_to_cpu(txd->addr),
663 le32_to_cpu(txd->opts1) & 0xffff,
666 if (status & LastFrag) {
667 if (status & (TxError | TxFIFOUnder)) {
668 netif_dbg(cp, tx_err, cp->dev,
669 "tx err, status 0x%x\n", status);
670 cp->dev->stats.tx_errors++;
672 cp->dev->stats.tx_window_errors++;
673 if (status & TxMaxCol)
674 cp->dev->stats.tx_aborted_errors++;
675 if (status & TxLinkFail)
676 cp->dev->stats.tx_carrier_errors++;
677 if (status & TxFIFOUnder)
678 cp->dev->stats.tx_fifo_errors++;
680 cp->dev->stats.collisions +=
681 ((status >> TxColCntShift) & TxColCntMask);
682 cp->dev->stats.tx_packets++;
683 cp->dev->stats.tx_bytes += skb->len;
684 netif_dbg(cp, tx_done, cp->dev,
685 "tx done, slot %d\n", tx_tail);
687 dev_kfree_skb_irq(skb);
690 cp->tx_skb[tx_tail] = NULL;
692 tx_tail = NEXT_TX(tx_tail);
695 cp->tx_tail = tx_tail;
697 if (TX_BUFFS_AVAIL(cp) > (MAX_SKB_FRAGS + 1))
698 netif_wake_queue(cp->dev);
701 static inline u32 cp_tx_vlan_tag(struct sk_buff *skb)
703 return vlan_tx_tag_present(skb) ?
704 TxVlanTag | swab16(vlan_tx_tag_get(skb)) : 0x00;
707 static netdev_tx_t cp_start_xmit (struct sk_buff *skb,
708 struct net_device *dev)
710 struct cp_private *cp = netdev_priv(dev);
713 unsigned long intr_flags;
717 spin_lock_irqsave(&cp->lock, intr_flags);
719 /* This is a hard error, log it. */
720 if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
721 netif_stop_queue(dev);
722 spin_unlock_irqrestore(&cp->lock, intr_flags);
723 netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
724 return NETDEV_TX_BUSY;
728 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
729 mss = skb_shinfo(skb)->gso_size;
731 opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
733 if (skb_shinfo(skb)->nr_frags == 0) {
734 struct cp_desc *txd = &cp->tx_ring[entry];
739 mapping = dma_map_single(&cp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
741 txd->addr = cpu_to_le64(mapping);
744 flags = eor | len | DescOwn | FirstFrag | LastFrag;
747 flags |= LargeSend | ((mss & MSSMask) << MSSShift);
748 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
749 const struct iphdr *ip = ip_hdr(skb);
750 if (ip->protocol == IPPROTO_TCP)
751 flags |= IPCS | TCPCS;
752 else if (ip->protocol == IPPROTO_UDP)
753 flags |= IPCS | UDPCS;
755 WARN_ON(1); /* we need a WARN() */
758 txd->opts1 = cpu_to_le32(flags);
761 cp->tx_skb[entry] = skb;
762 entry = NEXT_TX(entry);
765 u32 first_len, first_eor;
766 dma_addr_t first_mapping;
767 int frag, first_entry = entry;
768 const struct iphdr *ip = ip_hdr(skb);
770 /* We must give this initial chunk to the device last.
771 * Otherwise we could race with the device.
774 first_len = skb_headlen(skb);
775 first_mapping = dma_map_single(&cp->pdev->dev, skb->data,
776 first_len, PCI_DMA_TODEVICE);
777 cp->tx_skb[entry] = skb;
778 entry = NEXT_TX(entry);
780 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
781 const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
786 len = skb_frag_size(this_frag);
787 mapping = dma_map_single(&cp->pdev->dev,
788 skb_frag_address(this_frag),
789 len, PCI_DMA_TODEVICE);
790 eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
792 ctrl = eor | len | DescOwn;
796 ((mss & MSSMask) << MSSShift);
797 else if (skb->ip_summed == CHECKSUM_PARTIAL) {
798 if (ip->protocol == IPPROTO_TCP)
799 ctrl |= IPCS | TCPCS;
800 else if (ip->protocol == IPPROTO_UDP)
801 ctrl |= IPCS | UDPCS;
806 if (frag == skb_shinfo(skb)->nr_frags - 1)
809 txd = &cp->tx_ring[entry];
811 txd->addr = cpu_to_le64(mapping);
814 txd->opts1 = cpu_to_le32(ctrl);
817 cp->tx_skb[entry] = skb;
818 entry = NEXT_TX(entry);
821 txd = &cp->tx_ring[first_entry];
823 txd->addr = cpu_to_le64(first_mapping);
826 if (skb->ip_summed == CHECKSUM_PARTIAL) {
827 if (ip->protocol == IPPROTO_TCP)
828 txd->opts1 = cpu_to_le32(first_eor | first_len |
829 FirstFrag | DescOwn |
831 else if (ip->protocol == IPPROTO_UDP)
832 txd->opts1 = cpu_to_le32(first_eor | first_len |
833 FirstFrag | DescOwn |
838 txd->opts1 = cpu_to_le32(first_eor | first_len |
839 FirstFrag | DescOwn);
843 netif_dbg(cp, tx_queued, cp->dev, "tx queued, slot %d, skblen %d\n",
845 if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
846 netif_stop_queue(dev);
848 spin_unlock_irqrestore(&cp->lock, intr_flags);
850 cpw8(TxPoll, NormalTxPoll);
855 /* Set or clear the multicast filter for this adaptor.
856 This routine is not state sensitive and need not be SMP locked. */
858 static void __cp_set_rx_mode (struct net_device *dev)
860 struct cp_private *cp = netdev_priv(dev);
861 u32 mc_filter[2]; /* Multicast hash filter */
864 /* Note: do not reorder, GCC is clever about common statements. */
865 if (dev->flags & IFF_PROMISC) {
866 /* Unconditionally log net taps. */
868 AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
870 mc_filter[1] = mc_filter[0] = 0xffffffff;
871 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
872 (dev->flags & IFF_ALLMULTI)) {
873 /* Too many to filter perfectly -- accept all multicasts. */
874 rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
875 mc_filter[1] = mc_filter[0] = 0xffffffff;
877 struct netdev_hw_addr *ha;
878 rx_mode = AcceptBroadcast | AcceptMyPhys;
879 mc_filter[1] = mc_filter[0] = 0;
880 netdev_for_each_mc_addr(ha, dev) {
881 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
883 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
884 rx_mode |= AcceptMulticast;
888 /* We can safely update without stopping the chip. */
889 cp->rx_config = cp_rx_config | rx_mode;
890 cpw32_f(RxConfig, cp->rx_config);
892 cpw32_f (MAR0 + 0, mc_filter[0]);
893 cpw32_f (MAR0 + 4, mc_filter[1]);
896 static void cp_set_rx_mode (struct net_device *dev)
899 struct cp_private *cp = netdev_priv(dev);
901 spin_lock_irqsave (&cp->lock, flags);
902 __cp_set_rx_mode(dev);
903 spin_unlock_irqrestore (&cp->lock, flags);
906 static void __cp_get_stats(struct cp_private *cp)
908 /* only lower 24 bits valid; write any value to clear */
909 cp->dev->stats.rx_missed_errors += (cpr32 (RxMissed) & 0xffffff);
913 static struct net_device_stats *cp_get_stats(struct net_device *dev)
915 struct cp_private *cp = netdev_priv(dev);
918 /* The chip only needs to report frames it silently dropped. */
919 spin_lock_irqsave(&cp->lock, flags);
920 if (netif_running(dev) && netif_device_present(dev))
922 spin_unlock_irqrestore(&cp->lock, flags);
927 static void cp_stop_hw (struct cp_private *cp)
929 cpw16(IntrStatus, ~(cpr16(IntrStatus)));
930 cpw16_f(IntrMask, 0);
933 cpw16_f(IntrStatus, ~(cpr16(IntrStatus)));
936 cp->tx_head = cp->tx_tail = 0;
939 static void cp_reset_hw (struct cp_private *cp)
941 unsigned work = 1000;
946 if (!(cpr8(Cmd) & CmdReset))
949 schedule_timeout_uninterruptible(10);
952 netdev_err(cp->dev, "hardware reset timeout\n");
955 static inline void cp_start_hw (struct cp_private *cp)
957 cpw16(CpCmd, cp->cpcmd);
958 cpw8(Cmd, RxOn | TxOn);
961 static void cp_init_hw (struct cp_private *cp)
963 struct net_device *dev = cp->dev;
968 cpw8_f (Cfg9346, Cfg9346_Unlock);
970 /* Restore our idea of the MAC address. */
971 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
972 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
975 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
977 __cp_set_rx_mode(dev);
978 cpw32_f (TxConfig, IFG | (TX_DMA_BURST << TxDMAShift));
980 cpw8(Config1, cpr8(Config1) | DriverLoaded | PMEnable);
981 /* Disable Wake-on-LAN. Can be turned on with ETHTOOL_SWOL */
982 cpw8(Config3, PARMEnable);
985 cpw8(Config5, cpr8(Config5) & PMEStatus);
987 cpw32_f(HiTxRingAddr, 0);
988 cpw32_f(HiTxRingAddr + 4, 0);
990 ring_dma = cp->ring_dma;
991 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
992 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
994 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
995 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
996 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1000 cpw16_f(IntrMask, cp_intr_mask);
1002 cpw8_f(Cfg9346, Cfg9346_Lock);
1005 static int cp_refill_rx(struct cp_private *cp)
1007 struct net_device *dev = cp->dev;
1010 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1011 struct sk_buff *skb;
1014 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1018 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1019 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1020 cp->rx_skb[i] = skb;
1022 cp->rx_ring[i].opts2 = 0;
1023 cp->rx_ring[i].addr = cpu_to_le64(mapping);
1024 if (i == (CP_RX_RING_SIZE - 1))
1025 cp->rx_ring[i].opts1 =
1026 cpu_to_le32(DescOwn | RingEnd | cp->rx_buf_sz);
1028 cp->rx_ring[i].opts1 =
1029 cpu_to_le32(DescOwn | cp->rx_buf_sz);
1039 static void cp_init_rings_index (struct cp_private *cp)
1042 cp->tx_head = cp->tx_tail = 0;
1045 static int cp_init_rings (struct cp_private *cp)
1047 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1048 cp->tx_ring[CP_TX_RING_SIZE - 1].opts1 = cpu_to_le32(RingEnd);
1050 cp_init_rings_index(cp);
1052 return cp_refill_rx (cp);
1055 static int cp_alloc_rings (struct cp_private *cp)
1059 mem = dma_alloc_coherent(&cp->pdev->dev, CP_RING_BYTES,
1060 &cp->ring_dma, GFP_KERNEL);
1065 cp->tx_ring = &cp->rx_ring[CP_RX_RING_SIZE];
1067 return cp_init_rings(cp);
1070 static void cp_clean_rings (struct cp_private *cp)
1072 struct cp_desc *desc;
1075 for (i = 0; i < CP_RX_RING_SIZE; i++) {
1076 if (cp->rx_skb[i]) {
1077 desc = cp->rx_ring + i;
1078 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1079 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1080 dev_kfree_skb(cp->rx_skb[i]);
1084 for (i = 0; i < CP_TX_RING_SIZE; i++) {
1085 if (cp->tx_skb[i]) {
1086 struct sk_buff *skb = cp->tx_skb[i];
1088 desc = cp->tx_ring + i;
1089 dma_unmap_single(&cp->pdev->dev,le64_to_cpu(desc->addr),
1090 le32_to_cpu(desc->opts1) & 0xffff,
1092 if (le32_to_cpu(desc->opts1) & LastFrag)
1094 cp->dev->stats.tx_dropped++;
1098 memset(cp->rx_ring, 0, sizeof(struct cp_desc) * CP_RX_RING_SIZE);
1099 memset(cp->tx_ring, 0, sizeof(struct cp_desc) * CP_TX_RING_SIZE);
1101 memset(cp->rx_skb, 0, sizeof(struct sk_buff *) * CP_RX_RING_SIZE);
1102 memset(cp->tx_skb, 0, sizeof(struct sk_buff *) * CP_TX_RING_SIZE);
1105 static void cp_free_rings (struct cp_private *cp)
1108 dma_free_coherent(&cp->pdev->dev, CP_RING_BYTES, cp->rx_ring,
1114 static int cp_open (struct net_device *dev)
1116 struct cp_private *cp = netdev_priv(dev);
1119 netif_dbg(cp, ifup, dev, "enabling interface\n");
1121 rc = cp_alloc_rings(cp);
1125 napi_enable(&cp->napi);
1129 rc = request_irq(dev->irq, cp_interrupt, IRQF_SHARED, dev->name, dev);
1133 netif_carrier_off(dev);
1134 mii_check_media(&cp->mii_if, netif_msg_link(cp), true);
1135 netif_start_queue(dev);
1140 napi_disable(&cp->napi);
1146 static int cp_close (struct net_device *dev)
1148 struct cp_private *cp = netdev_priv(dev);
1149 unsigned long flags;
1151 napi_disable(&cp->napi);
1153 netif_dbg(cp, ifdown, dev, "disabling interface\n");
1155 spin_lock_irqsave(&cp->lock, flags);
1157 netif_stop_queue(dev);
1158 netif_carrier_off(dev);
1162 spin_unlock_irqrestore(&cp->lock, flags);
1164 free_irq(dev->irq, dev);
1170 static void cp_tx_timeout(struct net_device *dev)
1172 struct cp_private *cp = netdev_priv(dev);
1173 unsigned long flags;
1176 netdev_warn(dev, "Transmit timeout, status %2x %4x %4x %4x\n",
1177 cpr8(Cmd), cpr16(CpCmd),
1178 cpr16(IntrStatus), cpr16(IntrMask));
1180 spin_lock_irqsave(&cp->lock, flags);
1184 rc = cp_init_rings(cp);
1187 netif_wake_queue(dev);
1189 spin_unlock_irqrestore(&cp->lock, flags);
1193 static int cp_change_mtu(struct net_device *dev, int new_mtu)
1195 struct cp_private *cp = netdev_priv(dev);
1197 unsigned long flags;
1199 /* check for invalid MTU, according to hardware limits */
1200 if (new_mtu < CP_MIN_MTU || new_mtu > CP_MAX_MTU)
1203 /* if network interface not up, no need for complexity */
1204 if (!netif_running(dev)) {
1206 cp_set_rxbufsize(cp); /* set new rx buf size */
1210 spin_lock_irqsave(&cp->lock, flags);
1212 cp_stop_hw(cp); /* stop h/w and free rings */
1216 cp_set_rxbufsize(cp); /* set new rx buf size */
1218 rc = cp_init_rings(cp); /* realloc and restart h/w */
1221 spin_unlock_irqrestore(&cp->lock, flags);
1227 static const char mii_2_8139_map[8] = {
1238 static int mdio_read(struct net_device *dev, int phy_id, int location)
1240 struct cp_private *cp = netdev_priv(dev);
1242 return location < 8 && mii_2_8139_map[location] ?
1243 readw(cp->regs + mii_2_8139_map[location]) : 0;
1247 static void mdio_write(struct net_device *dev, int phy_id, int location,
1250 struct cp_private *cp = netdev_priv(dev);
1252 if (location == 0) {
1253 cpw8(Cfg9346, Cfg9346_Unlock);
1254 cpw16(BasicModeCtrl, value);
1255 cpw8(Cfg9346, Cfg9346_Lock);
1256 } else if (location < 8 && mii_2_8139_map[location])
1257 cpw16(mii_2_8139_map[location], value);
1260 /* Set the ethtool Wake-on-LAN settings */
1261 static int netdev_set_wol (struct cp_private *cp,
1262 const struct ethtool_wolinfo *wol)
1266 options = cpr8 (Config3) & ~(LinkUp | MagicPacket);
1267 /* If WOL is being disabled, no need for complexity */
1269 if (wol->wolopts & WAKE_PHY) options |= LinkUp;
1270 if (wol->wolopts & WAKE_MAGIC) options |= MagicPacket;
1273 cpw8 (Cfg9346, Cfg9346_Unlock);
1274 cpw8 (Config3, options);
1275 cpw8 (Cfg9346, Cfg9346_Lock);
1277 options = 0; /* Paranoia setting */
1278 options = cpr8 (Config5) & ~(UWF | MWF | BWF);
1279 /* If WOL is being disabled, no need for complexity */
1281 if (wol->wolopts & WAKE_UCAST) options |= UWF;
1282 if (wol->wolopts & WAKE_BCAST) options |= BWF;
1283 if (wol->wolopts & WAKE_MCAST) options |= MWF;
1286 cpw8 (Config5, options);
1288 cp->wol_enabled = (wol->wolopts) ? 1 : 0;
1293 /* Get the ethtool Wake-on-LAN settings */
1294 static void netdev_get_wol (struct cp_private *cp,
1295 struct ethtool_wolinfo *wol)
1299 wol->wolopts = 0; /* Start from scratch */
1300 wol->supported = WAKE_PHY | WAKE_BCAST | WAKE_MAGIC |
1301 WAKE_MCAST | WAKE_UCAST;
1302 /* We don't need to go on if WOL is disabled */
1303 if (!cp->wol_enabled) return;
1305 options = cpr8 (Config3);
1306 if (options & LinkUp) wol->wolopts |= WAKE_PHY;
1307 if (options & MagicPacket) wol->wolopts |= WAKE_MAGIC;
1309 options = 0; /* Paranoia setting */
1310 options = cpr8 (Config5);
1311 if (options & UWF) wol->wolopts |= WAKE_UCAST;
1312 if (options & BWF) wol->wolopts |= WAKE_BCAST;
1313 if (options & MWF) wol->wolopts |= WAKE_MCAST;
1316 static void cp_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info)
1318 struct cp_private *cp = netdev_priv(dev);
1320 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1321 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1322 strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info));
1325 static void cp_get_ringparam(struct net_device *dev,
1326 struct ethtool_ringparam *ring)
1328 ring->rx_max_pending = CP_RX_RING_SIZE;
1329 ring->tx_max_pending = CP_TX_RING_SIZE;
1330 ring->rx_pending = CP_RX_RING_SIZE;
1331 ring->tx_pending = CP_TX_RING_SIZE;
1334 static int cp_get_regs_len(struct net_device *dev)
1336 return CP_REGS_SIZE;
1339 static int cp_get_sset_count (struct net_device *dev, int sset)
1343 return CP_NUM_STATS;
1349 static int cp_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1351 struct cp_private *cp = netdev_priv(dev);
1353 unsigned long flags;
1355 spin_lock_irqsave(&cp->lock, flags);
1356 rc = mii_ethtool_gset(&cp->mii_if, cmd);
1357 spin_unlock_irqrestore(&cp->lock, flags);
1362 static int cp_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1364 struct cp_private *cp = netdev_priv(dev);
1366 unsigned long flags;
1368 spin_lock_irqsave(&cp->lock, flags);
1369 rc = mii_ethtool_sset(&cp->mii_if, cmd);
1370 spin_unlock_irqrestore(&cp->lock, flags);
1375 static int cp_nway_reset(struct net_device *dev)
1377 struct cp_private *cp = netdev_priv(dev);
1378 return mii_nway_restart(&cp->mii_if);
1381 static u32 cp_get_msglevel(struct net_device *dev)
1383 struct cp_private *cp = netdev_priv(dev);
1384 return cp->msg_enable;
1387 static void cp_set_msglevel(struct net_device *dev, u32 value)
1389 struct cp_private *cp = netdev_priv(dev);
1390 cp->msg_enable = value;
1393 static int cp_set_features(struct net_device *dev, netdev_features_t features)
1395 struct cp_private *cp = netdev_priv(dev);
1396 unsigned long flags;
1398 if (!((dev->features ^ features) & NETIF_F_RXCSUM))
1401 spin_lock_irqsave(&cp->lock, flags);
1403 if (features & NETIF_F_RXCSUM)
1404 cp->cpcmd |= RxChkSum;
1406 cp->cpcmd &= ~RxChkSum;
1408 if (features & NETIF_F_HW_VLAN_RX)
1409 cp->cpcmd |= RxVlanOn;
1411 cp->cpcmd &= ~RxVlanOn;
1413 cpw16_f(CpCmd, cp->cpcmd);
1414 spin_unlock_irqrestore(&cp->lock, flags);
1419 static void cp_get_regs(struct net_device *dev, struct ethtool_regs *regs,
1422 struct cp_private *cp = netdev_priv(dev);
1423 unsigned long flags;
1425 if (regs->len < CP_REGS_SIZE)
1426 return /* -EINVAL */;
1428 regs->version = CP_REGS_VER;
1430 spin_lock_irqsave(&cp->lock, flags);
1431 memcpy_fromio(p, cp->regs, CP_REGS_SIZE);
1432 spin_unlock_irqrestore(&cp->lock, flags);
1435 static void cp_get_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1437 struct cp_private *cp = netdev_priv(dev);
1438 unsigned long flags;
1440 spin_lock_irqsave (&cp->lock, flags);
1441 netdev_get_wol (cp, wol);
1442 spin_unlock_irqrestore (&cp->lock, flags);
1445 static int cp_set_wol (struct net_device *dev, struct ethtool_wolinfo *wol)
1447 struct cp_private *cp = netdev_priv(dev);
1448 unsigned long flags;
1451 spin_lock_irqsave (&cp->lock, flags);
1452 rc = netdev_set_wol (cp, wol);
1453 spin_unlock_irqrestore (&cp->lock, flags);
1458 static void cp_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
1460 switch (stringset) {
1462 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
1470 static void cp_get_ethtool_stats (struct net_device *dev,
1471 struct ethtool_stats *estats, u64 *tmp_stats)
1473 struct cp_private *cp = netdev_priv(dev);
1474 struct cp_dma_stats *nic_stats;
1478 nic_stats = dma_alloc_coherent(&cp->pdev->dev, sizeof(*nic_stats),
1483 /* begin NIC statistics dump */
1484 cpw32(StatsAddr + 4, (u64)dma >> 32);
1485 cpw32(StatsAddr, ((u64)dma & DMA_BIT_MASK(32)) | DumpStats);
1488 for (i = 0; i < 1000; i++) {
1489 if ((cpr32(StatsAddr) & DumpStats) == 0)
1493 cpw32(StatsAddr, 0);
1494 cpw32(StatsAddr + 4, 0);
1498 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_ok);
1499 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok);
1500 tmp_stats[i++] = le64_to_cpu(nic_stats->tx_err);
1501 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_err);
1502 tmp_stats[i++] = le16_to_cpu(nic_stats->rx_fifo);
1503 tmp_stats[i++] = le16_to_cpu(nic_stats->frame_align);
1504 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_1col);
1505 tmp_stats[i++] = le32_to_cpu(nic_stats->tx_ok_mcol);
1506 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_phys);
1507 tmp_stats[i++] = le64_to_cpu(nic_stats->rx_ok_bcast);
1508 tmp_stats[i++] = le32_to_cpu(nic_stats->rx_ok_mcast);
1509 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_abort);
1510 tmp_stats[i++] = le16_to_cpu(nic_stats->tx_underrun);
1511 tmp_stats[i++] = cp->cp_stats.rx_frags;
1512 BUG_ON(i != CP_NUM_STATS);
1514 dma_free_coherent(&cp->pdev->dev, sizeof(*nic_stats), nic_stats, dma);
1517 static const struct ethtool_ops cp_ethtool_ops = {
1518 .get_drvinfo = cp_get_drvinfo,
1519 .get_regs_len = cp_get_regs_len,
1520 .get_sset_count = cp_get_sset_count,
1521 .get_settings = cp_get_settings,
1522 .set_settings = cp_set_settings,
1523 .nway_reset = cp_nway_reset,
1524 .get_link = ethtool_op_get_link,
1525 .get_msglevel = cp_get_msglevel,
1526 .set_msglevel = cp_set_msglevel,
1527 .get_regs = cp_get_regs,
1528 .get_wol = cp_get_wol,
1529 .set_wol = cp_set_wol,
1530 .get_strings = cp_get_strings,
1531 .get_ethtool_stats = cp_get_ethtool_stats,
1532 .get_eeprom_len = cp_get_eeprom_len,
1533 .get_eeprom = cp_get_eeprom,
1534 .set_eeprom = cp_set_eeprom,
1535 .get_ringparam = cp_get_ringparam,
1538 static int cp_ioctl (struct net_device *dev, struct ifreq *rq, int cmd)
1540 struct cp_private *cp = netdev_priv(dev);
1542 unsigned long flags;
1544 if (!netif_running(dev))
1547 spin_lock_irqsave(&cp->lock, flags);
1548 rc = generic_mii_ioctl(&cp->mii_if, if_mii(rq), cmd, NULL);
1549 spin_unlock_irqrestore(&cp->lock, flags);
1553 static int cp_set_mac_address(struct net_device *dev, void *p)
1555 struct cp_private *cp = netdev_priv(dev);
1556 struct sockaddr *addr = p;
1558 if (!is_valid_ether_addr(addr->sa_data))
1559 return -EADDRNOTAVAIL;
1561 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1563 spin_lock_irq(&cp->lock);
1565 cpw8_f(Cfg9346, Cfg9346_Unlock);
1566 cpw32_f(MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
1567 cpw32_f(MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
1568 cpw8_f(Cfg9346, Cfg9346_Lock);
1570 spin_unlock_irq(&cp->lock);
1575 /* Serial EEPROM section. */
1577 /* EEPROM_Ctrl bits. */
1578 #define EE_SHIFT_CLK 0x04 /* EEPROM shift clock. */
1579 #define EE_CS 0x08 /* EEPROM chip select. */
1580 #define EE_DATA_WRITE 0x02 /* EEPROM chip data in. */
1581 #define EE_WRITE_0 0x00
1582 #define EE_WRITE_1 0x02
1583 #define EE_DATA_READ 0x01 /* EEPROM chip data out. */
1584 #define EE_ENB (0x80 | EE_CS)
1586 /* Delay between EEPROM clock transitions.
1587 No extra delay is needed with 33 MHz PCI, but 66 MHz may change this.
1590 #define eeprom_delay() readb(ee_addr)
1592 /* The EEPROM commands include the always-set leading bit. */
1593 #define EE_EXTEND_CMD (4)
1594 #define EE_WRITE_CMD (5)
1595 #define EE_READ_CMD (6)
1596 #define EE_ERASE_CMD (7)
1598 #define EE_EWDS_ADDR (0)
1599 #define EE_WRAL_ADDR (1)
1600 #define EE_ERAL_ADDR (2)
1601 #define EE_EWEN_ADDR (3)
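/* A full command is (opcode << addr_len) | address, clocked out MSB first;
   the always-set leading bit is part of the opcode values above, so e.g.
   reading word 0 of a 6-bit-address part shifts out (EE_READ_CMD << 6) | 0
   as 3 + 6 bits (see read_eeprom() below). */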
1603 #define CP_EEPROM_MAGIC PCI_DEVICE_ID_REALTEK_8139
1605 static void eeprom_cmd_start(void __iomem *ee_addr)
1607 writeb (EE_ENB & ~EE_CS, ee_addr);
1608 writeb (EE_ENB, ee_addr);
1612 static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1616 /* Shift the command bits out. */
1617 for (i = cmd_len - 1; i >= 0; i--) {
1618 int dataval = (cmd & (1 << i)) ? EE_DATA_WRITE : 0;
1619 writeb (EE_ENB | dataval, ee_addr);
1621 writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
1624 writeb (EE_ENB, ee_addr);
1628 static void eeprom_cmd_end(void __iomem *ee_addr)
1630 writeb (~EE_CS, ee_addr);
1634 static void eeprom_extend_cmd(void __iomem *ee_addr, int extend_cmd,
1637 int cmd = (EE_EXTEND_CMD << addr_len) | (extend_cmd << (addr_len - 2));
1639 eeprom_cmd_start(ee_addr);
1640 eeprom_cmd(ee_addr, cmd, 3 + addr_len);
1641 eeprom_cmd_end(ee_addr);
1644 static u16 read_eeprom (void __iomem *ioaddr, int location, int addr_len)
1648 void __iomem *ee_addr = ioaddr + Cfg9346;
1649 int read_cmd = location | (EE_READ_CMD << addr_len);
1651 eeprom_cmd_start(ee_addr);
1652 eeprom_cmd(ee_addr, read_cmd, 3 + addr_len);
1654 for (i = 16; i > 0; i--) {
1655 writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
1658 (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
1660 writeb (EE_ENB, ee_addr);
1664 eeprom_cmd_end(ee_addr);
1669 static void write_eeprom(void __iomem *ioaddr, int location, u16 val,
1673 void __iomem *ee_addr = ioaddr + Cfg9346;
1674 int write_cmd = location | (EE_WRITE_CMD << addr_len);
1676 eeprom_extend_cmd(ee_addr, EE_EWEN_ADDR, addr_len);
1678 eeprom_cmd_start(ee_addr);
1679 eeprom_cmd(ee_addr, write_cmd, 3 + addr_len);
1680 eeprom_cmd(ee_addr, val, 16);
1681 eeprom_cmd_end(ee_addr);
1683 eeprom_cmd_start(ee_addr);
1684 for (i = 0; i < 20000; i++)
1685 if (readb(ee_addr) & EE_DATA_READ)
1687 eeprom_cmd_end(ee_addr);
1689 eeprom_extend_cmd(ee_addr, EE_EWDS_ADDR, addr_len);
1692 static int cp_get_eeprom_len(struct net_device *dev)
1694 struct cp_private *cp = netdev_priv(dev);
1697 spin_lock_irq(&cp->lock);
1698 size = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 256 : 128;
1699 spin_unlock_irq(&cp->lock);
1704 static int cp_get_eeprom(struct net_device *dev,
1705 struct ethtool_eeprom *eeprom, u8 *data)
1707 struct cp_private *cp = netdev_priv(dev);
1708 unsigned int addr_len;
1710 u32 offset = eeprom->offset >> 1;
1711 u32 len = eeprom->len;
1714 eeprom->magic = CP_EEPROM_MAGIC;
1716 spin_lock_irq(&cp->lock);
1718 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1720 if (eeprom->offset & 1) {
1721 val = read_eeprom(cp->regs, offset, addr_len);
1722 data[i++] = (u8)(val >> 8);
1726 while (i < len - 1) {
1727 val = read_eeprom(cp->regs, offset, addr_len);
1728 data[i++] = (u8)val;
1729 data[i++] = (u8)(val >> 8);
1734 val = read_eeprom(cp->regs, offset, addr_len);
1738 spin_unlock_irq(&cp->lock);
1742 static int cp_set_eeprom(struct net_device *dev,
1743 struct ethtool_eeprom *eeprom, u8 *data)
1745 struct cp_private *cp = netdev_priv(dev);
1746 unsigned int addr_len;
1748 u32 offset = eeprom->offset >> 1;
1749 u32 len = eeprom->len;
1752 if (eeprom->magic != CP_EEPROM_MAGIC)
1755 spin_lock_irq(&cp->lock);
1757 addr_len = read_eeprom(cp->regs, 0, 8) == 0x8129 ? 8 : 6;
1759 if (eeprom->offset & 1) {
1760 val = read_eeprom(cp->regs, offset, addr_len) & 0xff;
1761 val |= (u16)data[i++] << 8;
1762 write_eeprom(cp->regs, offset, val, addr_len);
1766 while (i < len - 1) {
1767 val = (u16)data[i++];
1768 val |= (u16)data[i++] << 8;
1769 write_eeprom(cp->regs, offset, val, addr_len);
1774 val = read_eeprom(cp->regs, offset, addr_len) & 0xff00;
1775 val |= (u16)data[i];
1776 write_eeprom(cp->regs, offset, val, addr_len);
1779 spin_unlock_irq(&cp->lock);
1783 /* Put the board into a low-power D3 state and wait for the WakeUp signal */
1784 static void cp_set_d3_state (struct cp_private *cp)
1786 pci_enable_wake (cp->pdev, 0, 1); /* Enable PME# generation */
1787 pci_set_power_state (cp->pdev, PCI_D3hot);
1790 static const struct net_device_ops cp_netdev_ops = {
1791 .ndo_open = cp_open,
1792 .ndo_stop = cp_close,
1793 .ndo_validate_addr = eth_validate_addr,
1794 .ndo_set_mac_address = cp_set_mac_address,
1795 .ndo_set_rx_mode = cp_set_rx_mode,
1796 .ndo_get_stats = cp_get_stats,
1797 .ndo_do_ioctl = cp_ioctl,
1798 .ndo_start_xmit = cp_start_xmit,
1799 .ndo_tx_timeout = cp_tx_timeout,
1800 .ndo_set_features = cp_set_features,
1802 .ndo_change_mtu = cp_change_mtu,
1805 #ifdef CONFIG_NET_POLL_CONTROLLER
1806 .ndo_poll_controller = cp_poll_controller,
1810 static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
1812 struct net_device *dev;
1813 struct cp_private *cp;
1816 resource_size_t pciaddr;
1817 unsigned int addr_len, i, pci_using_dac;
1820 static int version_printed;
1821 if (version_printed++ == 0)
1822 pr_info("%s", version);
1825 if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
1826 pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
1827 dev_info(&pdev->dev,
1828 "This (id %04x:%04x rev %02x) is not an 8139C+ compatible chip, use 8139too\n",
1829 pdev->vendor, pdev->device, pdev->revision);
1833 dev = alloc_etherdev(sizeof(struct cp_private));
1836 SET_NETDEV_DEV(dev, &pdev->dev);
1838 cp = netdev_priv(dev);
1841 cp->msg_enable = (debug < 0 ? CP_DEF_MSG_ENABLE : debug);
1842 spin_lock_init (&cp->lock);
1843 cp->mii_if.dev = dev;
1844 cp->mii_if.mdio_read = mdio_read;
1845 cp->mii_if.mdio_write = mdio_write;
1846 cp->mii_if.phy_id = CP_INTERNAL_PHY;
1847 cp->mii_if.phy_id_mask = 0x1f;
1848 cp->mii_if.reg_num_mask = 0x1f;
1849 cp_set_rxbufsize(cp);
1851 rc = pci_enable_device(pdev);
1855 rc = pci_set_mwi(pdev);
1857 goto err_out_disable;
1859 rc = pci_request_regions(pdev, DRV_NAME);
1863 pciaddr = pci_resource_start(pdev, 1);
1866 dev_err(&pdev->dev, "no MMIO resource\n");
1869 if (pci_resource_len(pdev, 1) < CP_REGS_SIZE) {
1871 dev_err(&pdev->dev, "MMIO resource (%llx) too small\n",
1872 (unsigned long long)pci_resource_len(pdev, 1));
1876 /* Configure DMA attributes. */
1877 if ((sizeof(dma_addr_t) > 4) &&
1878 !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) &&
1879 !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
1884 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1887 "No usable DMA configuration, aborting\n");
1890 rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1893 "No usable consistent DMA configuration, aborting\n");
1898 cp->cpcmd = (pci_using_dac ? PCIDAC : 0) |
1899 PCIMulRW | RxChkSum | CpRxOn | CpTxOn;
1901 dev->features |= NETIF_F_RXCSUM;
1902 dev->hw_features |= NETIF_F_RXCSUM;
1904 regs = ioremap(pciaddr, CP_REGS_SIZE);
1907 dev_err(&pdev->dev, "Cannot map PCI MMIO (%Lx@%Lx)\n",
1908 (unsigned long long)pci_resource_len(pdev, 1),
1909 (unsigned long long)pciaddr);
1912 dev->base_addr = (unsigned long) regs;
1917 /* read MAC address from EEPROM */
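/* Word 0 of the EEPROM identifies the part: ID 0x8129 means 8 address bits
   (256 bytes reported by cp_get_eeprom_len()), otherwise 6 address bits and
   128 bytes. Words 7..9 hold the station address. */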
1918 addr_len = read_eeprom (regs, 0, 8) == 0x8129 ? 8 : 6;
1919 for (i = 0; i < 3; i++)
1920 ((__le16 *) (dev->dev_addr))[i] =
1921 cpu_to_le16(read_eeprom (regs, i + 7, addr_len));
1922 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
1924 dev->netdev_ops = &cp_netdev_ops;
1925 netif_napi_add(dev, &cp->napi, cp_rx_poll, 16);
1926 dev->ethtool_ops = &cp_ethtool_ops;
1927 dev->watchdog_timeo = TX_TIMEOUT;
1929 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1932 dev->features |= NETIF_F_HIGHDMA;
1934 /* disabled by default until verified */
1935 dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1936 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
1937 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
1940 dev->irq = pdev->irq;
1942 rc = register_netdev(dev);
1946 netdev_info(dev, "RTL-8139C+ at 0x%lx, %pM, IRQ %d\n",
1947 dev->base_addr, dev->dev_addr, dev->irq);
1949 pci_set_drvdata(pdev, dev);
1951 /* enable busmastering; MWI was already requested via pci_set_mwi() */
1952 pci_set_master(pdev);
1954 if (cp->wol_enabled)
1955 cp_set_d3_state (cp);
1962 pci_release_regions(pdev);
1964 pci_clear_mwi(pdev);
1966 pci_disable_device(pdev);
1972 static void cp_remove_one (struct pci_dev *pdev)
1974 struct net_device *dev = pci_get_drvdata(pdev);
1975 struct cp_private *cp = netdev_priv(dev);
1977 unregister_netdev(dev);
1979 if (cp->wol_enabled)
1980 pci_set_power_state (pdev, PCI_D0);
1981 pci_release_regions(pdev);
1982 pci_clear_mwi(pdev);
1983 pci_disable_device(pdev);
1984 pci_set_drvdata(pdev, NULL);
1989 static int cp_suspend (struct pci_dev *pdev, pm_message_t state)
1991 struct net_device *dev = pci_get_drvdata(pdev);
1992 struct cp_private *cp = netdev_priv(dev);
1993 unsigned long flags;
1995 if (!netif_running(dev))
1998 netif_device_detach (dev);
1999 netif_stop_queue (dev);
2001 spin_lock_irqsave (&cp->lock, flags);
2003 /* Disable Rx and Tx */
2004 cpw16 (IntrMask, 0);
2005 cpw8 (Cmd, cpr8 (Cmd) & ~(RxOn | TxOn));
2007 spin_unlock_irqrestore (&cp->lock, flags);
2009 pci_save_state(pdev);
2010 pci_enable_wake(pdev, pci_choose_state(pdev, state), cp->wol_enabled);
2011 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2016 static int cp_resume (struct pci_dev *pdev)
2018 struct net_device *dev = pci_get_drvdata (pdev);
2019 struct cp_private *cp = netdev_priv(dev);
2020 unsigned long flags;
2022 if (!netif_running(dev))
2025 netif_device_attach (dev);
2027 pci_set_power_state(pdev, PCI_D0);
2028 pci_restore_state(pdev);
2029 pci_enable_wake(pdev, PCI_D0, 0);
2031 /* FIXME: trouble may happen if the Rx ring buffer is depleted */
2032 cp_init_rings_index (cp);
2034 netif_start_queue (dev);
2036 spin_lock_irqsave (&cp->lock, flags);
2038 mii_check_media(&cp->mii_if, netif_msg_link(cp), false);
2040 spin_unlock_irqrestore (&cp->lock, flags);
2044 #endif /* CONFIG_PM */
2046 static struct pci_driver cp_driver = {
2048 .id_table = cp_pci_tbl,
2049 .probe = cp_init_one,
2050 .remove = cp_remove_one,
2052 .resume = cp_resume,
2053 .suspend = cp_suspend,
2057 static int __init cp_init (void)
2060 pr_info("%s", version);
2062 return pci_register_driver(&cp_driver);
2065 static void __exit cp_exit (void)
2067 pci_unregister_driver (&cp_driver);
2070 module_init(cp_init);
2071 module_exit(cp_exit);