/* Intel PRO/1000 Linux driver
 * Copyright(c) 1999 - 2014 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * Linux NICS <linux.nics@intel.com>
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/tcp.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/pm_qos.h>
#include <linux/pm_runtime.h>
#include <linux/aer.h>
#include <linux/prefetch.h>
#include "e1000.h"

#define DRV_EXTRAVERSION "-k"

#define DRV_VERSION "2.3.2" DRV_EXTRAVERSION
char e1000e_driver_name[] = "e1000e";
const char e1000e_driver_version[] = DRV_VERSION;
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
static const struct e1000_info *e1000_info_tbl[] = {
	[board_82571]		= &e1000_82571_info,
	[board_82572]		= &e1000_82572_info,
	[board_82573]		= &e1000_82573_info,
	[board_82574]		= &e1000_82574_info,
	[board_82583]		= &e1000_82583_info,
	[board_80003es2lan]	= &e1000_es2_info,
	[board_ich8lan]		= &e1000_ich8_info,
	[board_ich9lan]		= &e1000_ich9_info,
	[board_ich10lan]	= &e1000_ich10_info,
	[board_pchlan]		= &e1000_pch_info,
	[board_pch2lan]		= &e1000_pch2_info,
	[board_pch_lpt]		= &e1000_pch_lpt_info,
};
struct e1000_reg_info {
	u32 ofs;
	char *name;
};
static const struct e1000_reg_info e1000_reg_info_tbl[] = {
	/* General Registers */
	{E1000_CTRL, "CTRL"},
	{E1000_STATUS, "STATUS"},
	{E1000_CTRL_EXT, "CTRL_EXT"},

	/* Interrupt Registers */
	{E1000_ICR, "ICR"},

	/* Rx Registers */
	{E1000_RCTL, "RCTL"},
	{E1000_RDLEN(0), "RDLEN"},
	{E1000_RDH(0), "RDH"},
	{E1000_RDT(0), "RDT"},
	{E1000_RDTR, "RDTR"},
	{E1000_RXDCTL(0), "RXDCTL"},
	{E1000_ERT, "ERT"},
	{E1000_RDBAL(0), "RDBAL"},
	{E1000_RDBAH(0), "RDBAH"},
	{E1000_RDFH, "RDFH"},
	{E1000_RDFT, "RDFT"},
	{E1000_RDFHS, "RDFHS"},
	{E1000_RDFTS, "RDFTS"},
	{E1000_RDFPC, "RDFPC"},

	/* Tx Registers */
	{E1000_TCTL, "TCTL"},
	{E1000_TDBAL(0), "TDBAL"},
	{E1000_TDBAH(0), "TDBAH"},
	{E1000_TDLEN(0), "TDLEN"},
	{E1000_TDH(0), "TDH"},
	{E1000_TDT(0), "TDT"},
	{E1000_TIDV, "TIDV"},
	{E1000_TXDCTL(0), "TXDCTL"},
	{E1000_TADV, "TADV"},
	{E1000_TARC(0), "TARC"},
	{E1000_TDFH, "TDFH"},
	{E1000_TDFT, "TDFT"},
	{E1000_TDFHS, "TDFHS"},
	{E1000_TDFTS, "TDFTS"},
	{E1000_TDFPC, "TDFPC"},

	/* List Terminator */
	{0, NULL}
};
/**
 * __ew32_prepare - prepare to write to MAC CSR register on certain parts
 * @hw: pointer to the HW structure
 *
 * When updating the MAC CSR registers, the Manageability Engine (ME) could
 * be accessing the registers at the same time.  Normally, this is handled in
 * h/w by an arbiter, but on some parts there is a bug that acknowledges Host
 * accesses later than it should, which could result in the register having
 * an incorrect value.  Work around this by checking the FWSM register, which
 * has bit 24 set while ME is accessing MAC CSR registers; wait if it is set
 * and try again a number of times.
 **/
s32 __ew32_prepare(struct e1000_hw *hw)
{
	s32 i = E1000_ICH_FWSM_PCIM2PCI_COUNT;

	while ((er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI) && --i)
		udelay(50);

	return i;
}

void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
{
	if (hw->adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		__ew32_prepare(hw);

	writel(val, hw->hw_addr + reg);
}
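/* Illustrative usage sketch (not part of the driver itself): callers normally
 * go through the er32()/ew32() macros from e1000.h, which are assumed here to
 * expand to __er32()/__ew32() with the register offset filled in, so every MAC
 * CSR write picks up the ME-arbiter workaround automatically whenever
 * FLAG2_PCIM2PCI_ARBITER_WA is set on the adapter:
 *
 *	u32 rctl = er32(RCTL);              // read E1000_RCTL
 *	ew32(RCTL, rctl & ~E1000_RCTL_EN);  // write waits for ME if needed
 */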
/**
 * e1000_regdump - register printout routine
 * @hw: pointer to the HW structure
 * @reginfo: pointer to the register info table
 **/
static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
{
	int n = 0;
	char rname[16];
	u32 regs[8];

	switch (reginfo->ofs) {
	case E1000_RXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_RXDCTL(n));
		break;
	case E1000_TXDCTL(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TXDCTL(n));
		break;
	case E1000_TARC(0):
		for (n = 0; n < 2; n++)
			regs[n] = __er32(hw, E1000_TARC(n));
		break;
	default:
		pr_info("%-15s %08x\n",
			reginfo->name, __er32(hw, reginfo->ofs));
		return;
	}

	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
	pr_info("%-15s %08x %08x\n", rname, regs[0], regs[1]);
}
static void e1000e_dump_ps_pages(struct e1000_adapter *adapter,
				 struct e1000_buffer *bi)
{
	int i;
	struct e1000_ps_page *ps_page;

	for (i = 0; i < adapter->rx_ps_pages; i++) {
		ps_page = &bi->ps_pages[i];

		if (ps_page->page) {
			pr_info("packet dump for ps_page %d:\n", i);
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, page_address(ps_page->page),
				       PAGE_SIZE, true);
		}
	}
}
/**
 * e1000e_dump - Print registers, Tx-ring and Rx-ring
 * @adapter: board private structure
 **/
static void e1000e_dump(struct e1000_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_reg_info *reginfo;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	struct e1000_tx_desc *tx_desc;
	struct my_u0 {
		__le64 a;
		__le64 b;
	} *u0;
	struct e1000_buffer *buffer_info;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	union e1000_rx_desc_packet_split *rx_desc_ps;
	union e1000_rx_desc_extended *rx_desc;
	struct my_u1 {
		__le64 a;
		__le64 b;
		__le64 c;
		__le64 d;
	} *u1;
	u32 staterr;
	int i = 0;

	if (!netif_msg_hw(adapter))
		return;

	/* Print netdevice Info */
	if (netdev) {
		dev_info(&adapter->pdev->dev, "Net device Info\n");
		pr_info("Device Name     state            trans_start      last_rx\n");
		pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
			netdev->state, netdev->trans_start, netdev->last_rx);
	}
	/* Print Registers */
	dev_info(&adapter->pdev->dev, "Register Dump\n");
	pr_info(" Register Name   Value\n");
	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
	     reginfo->name; reginfo++) {
		e1000_regdump(hw, reginfo);
	}

	/* Print Tx Ring Summary */
	if (!netdev || !netif_running(netdev))
		goto exit;

	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
	pr_info(" %5d %5X %5X %016llX %04X %3X %016llX\n",
		0, tx_ring->next_to_use, tx_ring->next_to_clean,
		(unsigned long long)buffer_info->dma,
		buffer_info->length,
		buffer_info->next_to_watch,
		(unsigned long long)buffer_info->time_stamp);

	/* Print Tx Ring */
	if (!netif_msg_tx_done(adapter))
		goto rx_ring_summary;

	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
	 *
	 * Legacy Transmit Descriptor
	 *   +--------------------------------------------------------------+
	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
	 *   +--------------------------------------------------------------+
	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
	 *   +--------------------------------------------------------------+
	 *   63       48 47        36 35    32 31     24 23    16 15        0
	 *
	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
	 *   63      48 47    40 39       32 31             16 15    8 7      0
	 *   +----------------------------------------------------------------+
	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
	 *   +----------------------------------------------------------------+
	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
	 *   +----------------------------------------------------------------+
	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
	 *
	 * Extended Data Descriptor (DTYP=0x1)
	 *   +----------------------------------------------------------------+
	 * 0 |                     Buffer Address [63:0]                      |
	 *   +----------------------------------------------------------------+
	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
	 *   +----------------------------------------------------------------+
	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
	 */
	pr_info("Tl[desc] [address 63:0 ] [SpeCssSCmCsLen] [bi->dma ] leng ntw timestamp bi->skb <-- Legacy format\n");
	pr_info("Tc[desc] [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Context format\n");
	pr_info("Td[desc] [address 63:0 ] [VlaPoRSCm1Dlen] [bi->dma ] leng ntw timestamp bi->skb <-- Ext Data format\n");
	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
		const char *next_desc;
		tx_desc = E1000_TX_DESC(*tx_ring, i);
		buffer_info = &tx_ring->buffer_info[i];
		u0 = (struct my_u0 *)tx_desc;
		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
			next_desc = " NTC/U";
		else if (i == tx_ring->next_to_use)
			next_desc = " NTU";
		else if (i == tx_ring->next_to_clean)
			next_desc = " NTC";
		else
			next_desc = "";
		pr_info("T%c[0x%03X] %016llX %016llX %016llX %04X %3X %016llX %p%s\n",
			(!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
			 ((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')),
			i,
			(unsigned long long)le64_to_cpu(u0->a),
			(unsigned long long)le64_to_cpu(u0->b),
			(unsigned long long)buffer_info->dma,
			buffer_info->length, buffer_info->next_to_watch,
			(unsigned long long)buffer_info->time_stamp,
			buffer_info->skb, next_desc);

		if (netif_msg_pktdata(adapter) && buffer_info->skb)
			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
				       16, 1, buffer_info->skb->data,
				       buffer_info->skb->len, true);
	}
	/* Print Rx Ring Summary */
rx_ring_summary:
	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
	pr_info("Queue [NTU] [NTC]\n");
	pr_info(" %5d %5X %5X\n",
		0, rx_ring->next_to_use, rx_ring->next_to_clean);

	/* Print Rx Ring */
	if (!netif_msg_rx_status(adapter))
		goto exit;

	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
	switch (adapter->rx_ps_pages) {
	case 1:
	case 2:
	case 3:
		/* [Extended] Packet Split Receive Descriptor Format
		 *
		 *    +-----------------------------------------------------+
		 * 0  |                Buffer Address 0 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 8  |                Buffer Address 1 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 16 |                Buffer Address 2 [63:0]              |
		 *    +-----------------------------------------------------+
		 * 24 |                Buffer Address 3 [63:0]              |
		 *    +-----------------------------------------------------+
		 */
		pr_info("R  [desc] [buffer 0 63:0 ] [buffer 1 63:0 ] [buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma ] [bi->skb] <-- Ext Pkt Split format\n");
		/* [Extended] Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31     13 12    8 7    4 3        0
		 *   +------------------------------------------------------+
		 * 0 |  Packet   |  IP   |  Rsvd  |  MRQ  | Rsvd | MRQ RSS  |
		 *   |  Checksum | Ident |        | Queue |      |   Type   |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc] [ck ipid mrqhsh] [vl l0 ee es] [ l3 l2 l1 hs] [reserved ] ---------------- [bi->skb] <-- Ext Rx Write-Back format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;
			buffer_info = &rx_ring->buffer_info[i];
			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc_ps;
			staterr =
			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)le64_to_cpu(u1->c),
					(unsigned long long)le64_to_cpu(u1->d),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter))
					e1000e_dump_ps_pages(adapter,
							     buffer_info);
			}
		}
		break;
	default:
		/* Extended Receive Descriptor (Read) Format
		 *
		 *   +-----------------------------------------------------+
		 * 0 |                Buffer Address [63:0]                |
		 *   +-----------------------------------------------------+
		 * 8 |                      Reserved                       |
		 *   +-----------------------------------------------------+
		 */
		pr_info("R  [desc] [buf addr 63:0 ] [reserved 63:0 ] [bi->dma ] [bi->skb] <-- Ext (Read) format\n");
		/* Extended Receive Descriptor (Write-Back) Format
		 *
		 *   63       48 47    32 31    24 23            4 3        0
		 *   +------------------------------------------------------+
		 *   |     RSS Hash      |        |               |         |
		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
		 *   |  Packet   | IP    |        |               |  Type   |
		 *   |  Checksum | Ident |        |               |         |
		 *   +------------------------------------------------------+
		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
		 *   +------------------------------------------------------+
		 *   63       48 47    32 31            20 19               0
		 */
		pr_info("RWB[desc] [cs ipid    mrq] [vt ln xe xs] [bi->skb] <-- Ext (Write-Back) format\n");
		for (i = 0; i < rx_ring->count; i++) {
			const char *next_desc;

			buffer_info = &rx_ring->buffer_info[i];
			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
			u1 = (struct my_u1 *)rx_desc;
			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);

			if (i == rx_ring->next_to_use)
				next_desc = " NTU";
			else if (i == rx_ring->next_to_clean)
				next_desc = " NTC";
			else
				next_desc = "";

			if (staterr & E1000_RXD_STAT_DD) {
				/* Descriptor Done */
				pr_info("%s[0x%03X] %016llX %016llX ---------------- %p%s\n",
					"RWB", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					buffer_info->skb, next_desc);
			} else {
				pr_info("%s[0x%03X] %016llX %016llX %016llX %p%s\n",
					"R  ", i,
					(unsigned long long)le64_to_cpu(u1->a),
					(unsigned long long)le64_to_cpu(u1->b),
					(unsigned long long)buffer_info->dma,
					buffer_info->skb, next_desc);

				if (netif_msg_pktdata(adapter) &&
				    buffer_info->skb)
					print_hex_dump(KERN_INFO, "",
						       DUMP_PREFIX_ADDRESS, 16,
						       1,
						       buffer_info->skb->data,
						       adapter->rx_buffer_len,
						       true);
			}
		}
	}

exit:
	return;
}
/**
 * e1000_desc_unused - calculate if we have unused descriptors
 **/
static int e1000_desc_unused(struct e1000_ring *ring)
{
	if (ring->next_to_clean > ring->next_to_use)
		return ring->next_to_clean - ring->next_to_use - 1;

	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
}
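/* Worked example (illustrative): with count = 256, next_to_clean = 10 and
 * next_to_use = 250, the ring has wrapped, so the second branch applies and
 * unused = 256 + 10 - 250 - 1 = 15.  The "- 1" keeps one slot permanently
 * empty so a completely full ring can be told apart from an empty one
 * (both would otherwise have next_to_use == next_to_clean).
 */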
/**
 * e1000e_systim_to_hwtstamp - convert system time value to hw time stamp
 * @adapter: board private structure
 * @hwtstamps: time stamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * Convert the system time value stored in the RX/TXSTMP registers into a
 * hwtstamp which can be used by the upper level time stamping functions.
 *
 * The 'systim_lock' spinlock is used to protect the consistency of the
 * system time value.  This is needed because reading the 64 bit time
 * value involves reading two 32 bit registers.  The first read latches the
 * value.
 **/
static void e1000e_systim_to_hwtstamp(struct e1000_adapter *adapter,
				      struct skb_shared_hwtstamps *hwtstamps,
				      u64 systim)
{
	u64 ns;
	unsigned long flags;

	spin_lock_irqsave(&adapter->systim_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, systim);
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	memset(hwtstamps, 0, sizeof(*hwtstamps));
	hwtstamps->hwtstamp = ns_to_ktime(ns);
}
/**
 * e1000e_rx_hwtstamp - utility function which checks for Rx time stamp
 * @adapter: board private structure
 * @status: descriptor extended error and status field
 * @skb: particular skb to include time stamp
 *
 * If the time stamp is valid, convert it into the timecounter ns value
 * and store that result into the shhwtstamps structure which is passed
 * up the network stack.
 **/
static void e1000e_rx_hwtstamp(struct e1000_adapter *adapter, u32 status,
			       struct sk_buff *skb)
{
	struct e1000_hw *hw = &adapter->hw;
	u64 rxstmp;

	if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP) ||
	    !(status & E1000_RXDEXT_STATERR_TST) ||
	    !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
		return;

	/* The Rx time stamp registers contain the time stamp.  No other
	 * received packet will be time stamped until the Rx time stamp
	 * registers are read.  Because only one packet can be time stamped
	 * at a time, the register values must belong to this packet and
	 * therefore none of the other additional attributes need to be
	 * checked.
	 */
	rxstmp = (u64)er32(RXSTMPL);
	rxstmp |= (u64)er32(RXSTMPH) << 32;
	e1000e_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), rxstmp);

	adapter->flags2 &= ~FLAG2_CHECK_RX_HWTSTAMP;
}
/**
 * e1000_receive_skb - helper function to handle Rx indications
 * @adapter: board private structure
 * @netdev: pointer to netdev struct
 * @staterr: descriptor extended error and status field as written by hardware
 * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
 * @skb: pointer to sk_buff to be indicated to stack
 **/
static void e1000_receive_skb(struct e1000_adapter *adapter,
			      struct net_device *netdev, struct sk_buff *skb,
			      u32 staterr, __le16 vlan)
{
	u16 tag = le16_to_cpu(vlan);

	e1000e_rx_hwtstamp(adapter, staterr, skb);

	skb->protocol = eth_type_trans(skb, netdev);

	if (staterr & E1000_RXD_STAT_VP)
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tag);

	napi_gro_receive(&adapter->napi, skb);
}
/**
 * e1000_rx_checksum - Receive Checksum Offload
 * @adapter: board private structure
 * @status_err: receive descriptor status and error fields
 * @skb: socket buffer with received data
 **/
static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
			      struct sk_buff *skb)
{
	u16 status = (u16)status_err;
	u8 errors = (u8)(status_err >> 24);

	skb_checksum_none_assert(skb);

	/* Rx checksum disabled */
	if (!(adapter->netdev->features & NETIF_F_RXCSUM))
		return;

	/* Ignore Checksum bit is set */
	if (status & E1000_RXD_STAT_IXSM)
		return;

	/* TCP/UDP checksum error bit or IP checksum error bit is set */
	if (errors & (E1000_RXD_ERR_TCPE | E1000_RXD_ERR_IPE)) {
		/* let the stack verify checksum errors */
		adapter->hw_csum_err++;
		return;
	}

	/* TCP/UDP Checksum has not been calculated */
	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
		return;

	/* It must be a TCP or UDP packet with a valid checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	adapter->hw_csum_good++;
}
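/* Illustrative decode (follows from the casts above, not an extra code path):
 * the low 16 bits of status_err carry the descriptor status field and bits
 * 31:24 carry the error field.  A descriptor whose status has
 * E1000_RXD_STAT_DD, E1000_RXD_STAT_EOP and E1000_RXD_STAT_TCPCS set, with a
 * clear error byte and IXSM clear, falls through every early return and is
 * marked CHECKSUM_UNNECESSARY, bumping hw_csum_good.
 */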
static void e1000e_update_rdt_wa(struct e1000_ring *rx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, rx_ring->tail);

	if (unlikely(!ret_val && (i != readl(rx_ring->tail)))) {
		u32 rctl = er32(RCTL);

		ew32(RCTL, rctl & ~E1000_RCTL_EN);
		e_err("ME firmware caused invalid RDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}
static void e1000e_update_tdt_wa(struct e1000_ring *tx_ring, unsigned int i)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	s32 ret_val = __ew32_prepare(hw);

	writel(i, tx_ring->tail);

	if (unlikely(!ret_val && (i != readl(tx_ring->tail)))) {
		u32 tctl = er32(TCTL);

		ew32(TCTL, tctl & ~E1000_TCTL_EN);
		e_err("ME firmware caused invalid TDT - resetting\n");
		schedule_work(&adapter->reset_task);
	}
}
/**
 * e1000_alloc_rx_buffers - Replace used receive buffers
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers(struct e1000_ring *rx_ring,
				   int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = adapter->rx_buffer_len;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto map_skb;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (!skb) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
map_skb:
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_buffer_len,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			break;
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i);
			else
				writel(i, rx_ring->tail);
		}
		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_alloc_rx_buffers_ps(struct e1000_ring *rx_ring,
				      int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_packet_split *rx_desc;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		rx_desc = E1000_RX_DESC_PS(*rx_ring, i);

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (j >= adapter->rx_ps_pages) {
				/* all unused desc entries get hw null ptr */
				rx_desc->read.buffer_addr[j + 1] =
				    ~cpu_to_le64(0);
				continue;
			}
			if (!ps_page->page) {
				ps_page->page = alloc_page(gfp);
				if (!ps_page->page) {
					adapter->alloc_rx_buff_failed++;
					goto no_buffers;
				}
				ps_page->dma = dma_map_page(&pdev->dev,
							    ps_page->page,
							    0, PAGE_SIZE,
							    DMA_FROM_DEVICE);
				if (dma_mapping_error(&pdev->dev,
						      ps_page->dma)) {
					dev_err(&adapter->pdev->dev,
						"Rx DMA page map failed\n");
					adapter->rx_dma_failed++;
					goto no_buffers;
				}
			}
			/* Refresh the desc even if buffer_addrs
			 * didn't change because each write-back
			 * erases this info.
			 */
			rx_desc->read.buffer_addr[j + 1] =
			    cpu_to_le64(ps_page->dma);
		}

		skb = __netdev_alloc_skb_ip_align(netdev, adapter->rx_ps_bsize0,
						  gfp);

		if (!skb) {
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
		buffer_info->dma = dma_map_single(&pdev->dev, skb->data,
						  adapter->rx_ps_bsize0,
						  DMA_FROM_DEVICE);
		if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
			dev_err(&pdev->dev, "Rx DMA map failed\n");
			adapter->rx_dma_failed++;
			/* cleanup skb */
			dev_kfree_skb_any(skb);
			buffer_info->skb = NULL;
			break;
		}

		rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);

		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
			/* Force memory writes to complete before letting h/w
			 * know there are new descriptors to fetch.  (Only
			 * applicable for weak-ordered memory model archs,
			 * such as IA-64).
			 */
			wmb();
			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
				e1000e_update_rdt_wa(rx_ring, i << 1);
			else
				writel(i << 1, rx_ring->tail);
		}

		i++;
		if (i == rx_ring->count)
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

no_buffers:
	rx_ring->next_to_use = i;
}
/**
 * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
 * @rx_ring: Rx descriptor ring
 * @cleaned_count: number of buffers to allocate this pass
 **/
static void e1000_alloc_jumbo_rx_buffers(struct e1000_ring *rx_ring,
					 int cleaned_count, gfp_t gfp)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc;
	struct e1000_buffer *buffer_info;
	struct sk_buff *skb;
	unsigned int i;
	unsigned int bufsz = 256 - 16;	/* for skb_reserve */

	i = rx_ring->next_to_use;
	buffer_info = &rx_ring->buffer_info[i];

	while (cleaned_count--) {
		skb = buffer_info->skb;
		if (skb) {
			skb_trim(skb, 0);
			goto check_page;
		}

		skb = __netdev_alloc_skb_ip_align(netdev, bufsz, gfp);
		if (unlikely(!skb)) {
			/* Better luck next round */
			adapter->alloc_rx_buff_failed++;
			break;
		}

		buffer_info->skb = skb;
check_page:
		/* allocate a new page if necessary */
		if (!buffer_info->page) {
			buffer_info->page = alloc_page(gfp);
			if (unlikely(!buffer_info->page)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		if (!buffer_info->dma) {
			buffer_info->dma = dma_map_page(&pdev->dev,
							buffer_info->page, 0,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
			if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
				adapter->alloc_rx_buff_failed++;
				break;
			}
		}

		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);

		if (unlikely(++i == rx_ring->count))
			i = 0;
		buffer_info = &rx_ring->buffer_info[i];
	}

	if (likely(rx_ring->next_to_use != i)) {
		rx_ring->next_to_use = i;
		if (unlikely(i-- == 0))
			i = (rx_ring->count - 1);

		/* Force memory writes to complete before letting h/w
		 * know there are new descriptors to fetch.  (Only
		 * applicable for weak-ordered memory model archs,
		 * such as IA-64).
		 */
		wmb();
		if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
			e1000e_update_rdt_wa(rx_ring, i);
		else
			writel(i, rx_ring->tail);
	}
}
static inline void e1000_rx_hash(struct net_device *netdev, __le32 rss,
				 struct sk_buff *skb)
{
	if (netdev->features & NETIF_F_RXHASH)
		skb_set_hash(skb, le32_to_cpu(rss), PKT_HASH_TYPE_L3);
}
/**
 * e1000_clean_rx_irq - Send received data up the network stack
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq(struct e1000_ring *rx_ring, int *work_done,
			       int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_buffer_len, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* !EOP means multiple descriptors were used to store a single
		 * packet; if that's the case we need to toss it.  In fact, we
		 * need to toss every packet with the EOP bit clear and the
		 * next frame that _does_ have the EOP bit set, as it is by
		 * definition only a frame fragment.
		 */
		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			/* All receives must fit into a single buffer */
			e_dbg("Receive packet consumed multiple buffers\n");
			/* recycle */
			buffer_info->skb = skb;
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			/* recycle */
			buffer_info->skb = skb;
			goto next_desc;
		}

		/* adjust length to remove Ethernet CRC */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			/* If configured to store CRC, don't subtract FCS,
			 * but keep the FCS bytes out of the total_rx_bytes
			 * counter
			 */
			if (netdev->features & NETIF_F_RXFCS)
				total_rx_bytes -= 4;
			else
				length -= 4;
		}

		total_rx_bytes += length;
		total_rx_packets++;

		/* code added for copybreak; this should improve
		 * performance for small packets with large amounts
		 * of reassembly being done in the stack
		 */
		if (length < copybreak) {
			struct sk_buff *new_skb =
				napi_alloc_skb(&adapter->napi, length);

			if (new_skb) {
				skb_copy_to_linear_data_offset(new_skb,
							       -NET_IP_ALIGN,
							       (skb->data -
								NET_IP_ALIGN),
							       (length +
								NET_IP_ALIGN));
				/* save the skb in buffer_info as good */
				buffer_info->skb = skb;
				skb = new_skb;
			}
			/* else just continue with the old one */
		}
		/* end copybreak code */
		skb_put(skb, length);

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
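/* Copybreak sketch (illustrative): with the driver's default copybreak of 256
 * bytes, a 60-byte ARP reply above is memcpy'd into a fresh small skb and the
 * original full-size receive buffer is recycled in place for the next packet,
 * while a 1514-byte frame skips the copy and hands the original buffer up the
 * stack.  The trade is one memcpy versus re-allocating a large buffer.
 */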
static void e1000_put_txbuf(struct e1000_ring *tx_ring,
			    struct e1000_buffer *buffer_info)
{
	struct e1000_adapter *adapter = tx_ring->adapter;

	if (buffer_info->dma) {
		if (buffer_info->mapped_as_page)
			dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
				       buffer_info->length, DMA_TO_DEVICE);
		else
			dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
					 buffer_info->length, DMA_TO_DEVICE);
		buffer_info->dma = 0;
	}
	if (buffer_info->skb) {
		dev_kfree_skb_any(buffer_info->skb);
		buffer_info->skb = NULL;
	}
	buffer_info->time_stamp = 0;
}
static void e1000_print_hw_hang(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     print_hang_task);
	struct net_device *netdev = adapter->netdev;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	unsigned int i = tx_ring->next_to_clean;
	unsigned int eop = tx_ring->buffer_info[i].next_to_watch;
	struct e1000_tx_desc *eop_desc = E1000_TX_DESC(*tx_ring, eop);
	struct e1000_hw *hw = &adapter->hw;
	u16 phy_status, phy_1000t_status, phy_ext_status;
	u16 pci_status;

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	if (!adapter->tx_hang_recheck && (adapter->flags2 & FLAG2_DMA_BURST)) {
		/* May be blocked on write-back; flush pending descriptor
		 * writebacks to memory and detect again
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		/* Due to rare timing issues, write to TIDV again to ensure
		 * the write is successful
		 */
		ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
		/* execute the writes immediately */
		e1e_flush();
		adapter->tx_hang_recheck = true;
		return;
	}
	adapter->tx_hang_recheck = false;

	if (er32(TDH(0)) == er32(TDT(0))) {
		e_dbg("false hang detected, ignoring\n");
		return;
	}

	/* Real hang detected */
	netif_stop_queue(netdev);

	e1e_rphy(hw, MII_BMSR, &phy_status);
	e1e_rphy(hw, MII_STAT1000, &phy_1000t_status);
	e1e_rphy(hw, MII_ESTATUS, &phy_ext_status);

	pci_read_config_word(adapter->pdev, PCI_STATUS, &pci_status);

	/* detected Hardware unit hang */
	e_err("Detected Hardware Unit Hang:\n"
	      "  TDH                  <%x>\n"
	      "  TDT                  <%x>\n"
	      "  next_to_use          <%x>\n"
	      "  next_to_clean        <%x>\n"
	      "buffer_info[next_to_clean]:\n"
	      "  time_stamp           <%lx>\n"
	      "  next_to_watch        <%x>\n"
	      "  jiffies              <%lx>\n"
	      "  next_to_watch.status <%x>\n"
	      "MAC Status             <%x>\n"
	      "PHY Status             <%x>\n"
	      "PHY 1000BASE-T Status  <%x>\n"
	      "PHY Extended Status    <%x>\n"
	      "PCI Status             <%x>\n",
	      readl(tx_ring->head), readl(tx_ring->tail), tx_ring->next_to_use,
	      tx_ring->next_to_clean, tx_ring->buffer_info[eop].time_stamp,
	      eop, jiffies, eop_desc->upper.fields.status, er32(STATUS),
	      phy_status, phy_1000t_status, phy_ext_status, pci_status);

	e1000e_dump(adapter);

	/* Suggest workaround for known h/w issue */
	if ((hw->mac.type == e1000_pchlan) && (er32(CTRL) & E1000_CTRL_TFCE))
		e_err("Try turning off Tx pause (flow control) via ethtool\n");
}
/**
 * e1000e_tx_hwtstamp_work - check for Tx time stamp
 * @work: pointer to work struct
 *
 * This work function polls the TSYNCTXCTL valid bit to determine when a
 * timestamp has been taken for the current stored skb.  The timestamp must
 * be for this skb because only one such packet is allowed in the queue.
 **/
static void e1000e_tx_hwtstamp_work(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work, struct e1000_adapter,
						     tx_hwtstamp_work);
	struct e1000_hw *hw = &adapter->hw;

	if (er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID) {
		struct skb_shared_hwtstamps shhwtstamps;
		u64 txstmp;

		txstmp = er32(TXSTMPL);
		txstmp |= (u64)er32(TXSTMPH) << 32;

		e1000e_systim_to_hwtstamp(adapter, &shhwtstamps, txstmp);

		skb_tstamp_tx(adapter->tx_hwtstamp_skb, &shhwtstamps);
		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
		adapter->tx_hwtstamp_skb = NULL;
	} else if (time_after(jiffies, adapter->tx_hwtstamp_start
			      + adapter->tx_timeout_factor * HZ)) {
		dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
		adapter->tx_hwtstamp_skb = NULL;
		adapter->tx_hwtstamp_timeouts++;
		e_warn("clearing Tx timestamp hang\n");
	} else {
		/* reschedule to check later */
		schedule_work(&adapter->tx_hwtstamp_work);
	}
}
/**
 * e1000_clean_tx_irq - Reclaim resources after transmit completes
 * @tx_ring: Tx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_tx_irq(struct e1000_ring *tx_ring)
{
	struct e1000_adapter *adapter = tx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_tx_desc *tx_desc, *eop_desc;
	struct e1000_buffer *buffer_info;
	unsigned int i, eop;
	unsigned int count = 0;
	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
	unsigned int bytes_compl = 0, pkts_compl = 0;

	i = tx_ring->next_to_clean;
	eop = tx_ring->buffer_info[i].next_to_watch;
	eop_desc = E1000_TX_DESC(*tx_ring, eop);

	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
	       (count < tx_ring->count)) {
		bool cleaned = false;

		rmb();		/* read buffer_info after eop_desc */
		for (; !cleaned; count++) {
			tx_desc = E1000_TX_DESC(*tx_ring, i);
			buffer_info = &tx_ring->buffer_info[i];
			cleaned = (i == eop);

			if (cleaned) {
				total_tx_packets += buffer_info->segs;
				total_tx_bytes += buffer_info->bytecount;
				if (buffer_info->skb) {
					bytes_compl += buffer_info->skb->len;
					pkts_compl++;
				}
			}

			e1000_put_txbuf(tx_ring, buffer_info);
			tx_desc->upper.data = 0;

			i++;
			if (i == tx_ring->count)
				i = 0;
		}

		if (i == tx_ring->next_to_use)
			break;
		eop = tx_ring->buffer_info[i].next_to_watch;
		eop_desc = E1000_TX_DESC(*tx_ring, eop);
	}

	tx_ring->next_to_clean = i;

	netdev_completed_queue(netdev, pkts_compl, bytes_compl);

#define TX_WAKE_THRESHOLD 32
	if (count && netif_carrier_ok(netdev) &&
	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();

		if (netif_queue_stopped(netdev) &&
		    !(test_bit(__E1000_DOWN, &adapter->state))) {
			netif_wake_queue(netdev);
			++adapter->restart_queue;
		}
	}

	if (adapter->detect_tx_hung) {
		/* Detect a transmit hang in hardware, this serializes the
		 * check with the clearing of time_stamp and movement of i
		 */
		adapter->detect_tx_hung = false;
		if (tx_ring->buffer_info[i].time_stamp &&
		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
			       + (adapter->tx_timeout_factor * HZ)) &&
		    !(er32(STATUS) & E1000_STATUS_TXOFF))
			schedule_work(&adapter->print_hang_task);
		else
			adapter->tx_hang_recheck = false;
	}
	adapter->total_tx_bytes += total_tx_bytes;
	adapter->total_tx_packets += total_tx_packets;
	return count < tx_ring->count;
}
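/* Return-value note (illustrative): "count < tx_ring->count" is true when the
 * loop ran out of completed descriptors before walking the whole ring, i.e.
 * the ring was fully cleaned.  e1000_intr_msix_tx() below relies on a false
 * return to fire another Tx interrupt and finish the cleanup later.
 */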
/**
 * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
				  int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_hw *hw = &adapter->hw;
	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	struct e1000_buffer *buffer_info, *next_buffer;
	struct e1000_ps_page *ps_page;
	struct sk_buff *skb;
	unsigned int i, j;
	u32 length, staterr;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		skb = buffer_info->skb;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		/* in the packet split case this is header only */
		prefetch(skb->data - NET_IP_ALIGN);

		i++;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_single(&pdev->dev, buffer_info->dma,
				 adapter->rx_ps_bsize0, DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		/* see !EOP comment in other Rx routine */
		if (!(staterr & E1000_RXD_STAT_EOP))
			adapter->flags2 |= FLAG2_IS_DISCARDING;

		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
			e_dbg("Packet Split buffers didn't pick up the full packet\n");
			dev_kfree_skb_irq(skb);
			if (staterr & E1000_RXD_STAT_EOP)
				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
			goto next_desc;
		}

		if (unlikely((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			     !(netdev->features & NETIF_F_RXALL))) {
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		length = le16_to_cpu(rx_desc->wb.middle.length0);

		if (!length) {
			e_dbg("Last part of the packet spanning multiple descriptors\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		/* Good Receive */
		skb_put(skb, length);

		{
			/* this looks ugly, but it seems compiler issues make
			 * it more efficient than reusing j
			 */
			int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);

			/* page alloc/put takes too long and affects small
			 * packet throughput, so unsplit small packets and
			 * save the alloc/put; only valid in softirq (napi)
			 * context to call kmap_*
			 */
			if (l1 && (l1 <= copybreak) &&
			    ((length + l1) <= adapter->rx_ps_bsize0)) {
				u8 *vaddr;

				ps_page = &buffer_info->ps_pages[0];

				/* there is no documentation about how to call
				 * kmap_atomic, so we can't hold the mapping
				 * very long
				 */
				dma_sync_single_for_cpu(&pdev->dev,
							ps_page->dma,
							PAGE_SIZE,
							DMA_FROM_DEVICE);
				vaddr = kmap_atomic(ps_page->page);
				memcpy(skb_tail_pointer(skb), vaddr, l1);
				kunmap_atomic(vaddr);
				dma_sync_single_for_device(&pdev->dev,
							   ps_page->dma,
							   PAGE_SIZE,
							   DMA_FROM_DEVICE);

				/* remove the CRC */
				if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
					if (!(netdev->features & NETIF_F_RXFCS))
						l1 -= 4;
				}

				skb_put(skb, l1);
				goto copydone;
			}	/* if */
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			length = le16_to_cpu(rx_desc->wb.upper.length[j]);
			if (!length)
				break;

			ps_page = &buffer_info->ps_pages[j];
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			skb_fill_page_desc(skb, j, ps_page->page, 0, length);
			ps_page->page = NULL;
			skb->len += length;
			skb->data_len += length;
			skb->truesize += PAGE_SIZE;
		}

		/* strip the ethernet crc; problem is we're using pages now so
		 * this whole operation can get a little cpu intensive
		 */
		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING)) {
			if (!(netdev->features & NETIF_F_RXFCS))
				pskb_trim(skb, skb->len - 4);
		}

copydone:
		total_rx_bytes += skb->len;
		total_rx_packets++;

		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		if (rx_desc->wb.upper.header_status &
		    cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP))
			adapter->rx_hdr_split++;

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.middle.vlan);

next_desc:
		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
		buffer_info->skb = NULL;

		/* return some buffers to hardware, one at a time is too slow */
		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_consume_page - helper function
 **/
static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
			       u16 length)
{
	bi->page = NULL;
	skb->len += length;
	skb->data_len += length;
	skb->truesize += PAGE_SIZE;
}
/**
 * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
 * @rx_ring: Rx descriptor ring
 *
 * the return value indicates whether actual cleaning was done; there
 * is no guarantee that everything was cleaned
 **/
static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
				     int work_to_do)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct net_device *netdev = adapter->netdev;
	struct pci_dev *pdev = adapter->pdev;
	union e1000_rx_desc_extended *rx_desc, *next_rxd;
	struct e1000_buffer *buffer_info, *next_buffer;
	u32 length, staterr;
	unsigned int i;
	int cleaned_count = 0;
	bool cleaned = false;
	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
	struct skb_shared_info *shinfo;

	i = rx_ring->next_to_clean;
	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	buffer_info = &rx_ring->buffer_info[i];

	while (staterr & E1000_RXD_STAT_DD) {
		struct sk_buff *skb;

		if (*work_done >= work_to_do)
			break;
		(*work_done)++;
		rmb();	/* read descriptor and rx_buffer_info after status DD */

		skb = buffer_info->skb;
		buffer_info->skb = NULL;

		++i;
		if (i == rx_ring->count)
			i = 0;
		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
		prefetch(next_rxd);

		next_buffer = &rx_ring->buffer_info[i];

		cleaned = true;
		cleaned_count++;
		dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
			       DMA_FROM_DEVICE);
		buffer_info->dma = 0;

		length = le16_to_cpu(rx_desc->wb.upper.length);

		/* errors is only valid for DD + EOP descriptors */
		if (unlikely((staterr & E1000_RXD_STAT_EOP) &&
			     ((staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) &&
			      !(netdev->features & NETIF_F_RXALL)))) {
			/* recycle both page and skb */
			buffer_info->skb = skb;
			/* an error means any chain goes out the window too */
			if (rx_ring->rx_skb_top)
				dev_kfree_skb_irq(rx_ring->rx_skb_top);
			rx_ring->rx_skb_top = NULL;
			goto next_desc;
		}
#define rxtop (rx_ring->rx_skb_top)
		if (!(staterr & E1000_RXD_STAT_EOP)) {
			/* this descriptor is only the beginning (or middle) */
			if (!rxtop) {
				/* this is the beginning of a chain */
				rxtop = skb;
				skb_fill_page_desc(rxtop, 0, buffer_info->page,
						   0, length);
			} else {
				/* this is the middle of a chain */
				shinfo = skb_shinfo(rxtop);
				skb_fill_page_desc(rxtop, shinfo->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the skb, only consumed the page */
				buffer_info->skb = skb;
			}
			e1000_consume_page(buffer_info, rxtop, length);
			goto next_desc;
		} else {
			if (rxtop) {
				/* end of the chain */
				shinfo = skb_shinfo(rxtop);
				skb_fill_page_desc(rxtop, shinfo->nr_frags,
						   buffer_info->page, 0,
						   length);
				/* re-use the current skb, we only consumed the
				 * page
				 */
				buffer_info->skb = skb;
				skb = rxtop;
				rxtop = NULL;
				e1000_consume_page(buffer_info, skb, length);
			} else {
				/* no chain, got EOP, this buf is the packet
				 * copybreak to save the put_page/alloc_page
				 */
				if (length <= copybreak &&
				    skb_tailroom(skb) >= length) {
					u8 *vaddr;

					vaddr = kmap_atomic(buffer_info->page);
					memcpy(skb_tail_pointer(skb), vaddr,
					       length);
					kunmap_atomic(vaddr);
					/* re-use the page, so don't erase
					 * buffer_info->page
					 */
					skb_put(skb, length);
				} else {
					skb_fill_page_desc(skb, 0,
							   buffer_info->page, 0,
							   length);
					e1000_consume_page(buffer_info, skb,
							   length);
				}
			}
		}

		/* Receive Checksum Offload */
		e1000_rx_checksum(adapter, staterr, skb);

		e1000_rx_hash(netdev, rx_desc->wb.lower.hi_dword.rss, skb);

		/* probably a little skewed due to removing CRC */
		total_rx_bytes += skb->len;
		total_rx_packets++;

		/* eth type trans needs skb->data to point to something */
		if (!pskb_may_pull(skb, ETH_HLEN)) {
			e_err("pskb_may_pull failed.\n");
			dev_kfree_skb_irq(skb);
			goto next_desc;
		}

		e1000_receive_skb(adapter, netdev, skb, staterr,
				  rx_desc->wb.upper.vlan);

next_desc:
		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);

		/* return some buffers to hardware, one at a time is too slow */
		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
			adapter->alloc_rx_buf(rx_ring, cleaned_count,
					      GFP_ATOMIC);
			cleaned_count = 0;
		}

		/* use prefetched values */
		rx_desc = next_rxd;
		buffer_info = next_buffer;

		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	}
	rx_ring->next_to_clean = i;

	cleaned_count = e1000_desc_unused(rx_ring);
	if (cleaned_count)
		adapter->alloc_rx_buf(rx_ring, cleaned_count, GFP_ATOMIC);

	adapter->total_rx_bytes += total_rx_bytes;
	adapter->total_rx_packets += total_rx_packets;
	return cleaned;
}
/**
 * e1000_clean_rx_ring - Free Rx Buffers per Queue
 * @rx_ring: Rx descriptor ring
 **/
static void e1000_clean_rx_ring(struct e1000_ring *rx_ring)
{
	struct e1000_adapter *adapter = rx_ring->adapter;
	struct e1000_buffer *buffer_info;
	struct e1000_ps_page *ps_page;
	struct pci_dev *pdev = adapter->pdev;
	unsigned int i, j;

	/* Free all the Rx ring sk_buffs */
	for (i = 0; i < rx_ring->count; i++) {
		buffer_info = &rx_ring->buffer_info[i];
		if (buffer_info->dma) {
			if (adapter->clean_rx == e1000_clean_rx_irq)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_buffer_len,
						 DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_jumbo_rx_irq)
				dma_unmap_page(&pdev->dev, buffer_info->dma,
					       PAGE_SIZE, DMA_FROM_DEVICE);
			else if (adapter->clean_rx == e1000_clean_rx_irq_ps)
				dma_unmap_single(&pdev->dev, buffer_info->dma,
						 adapter->rx_ps_bsize0,
						 DMA_FROM_DEVICE);
			buffer_info->dma = 0;
		}

		if (buffer_info->page) {
			put_page(buffer_info->page);
			buffer_info->page = NULL;
		}

		if (buffer_info->skb) {
			dev_kfree_skb(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		for (j = 0; j < PS_PAGE_BUFFERS; j++) {
			ps_page = &buffer_info->ps_pages[j];
			if (!ps_page->page)
				continue;
			dma_unmap_page(&pdev->dev, ps_page->dma, PAGE_SIZE,
				       DMA_FROM_DEVICE);
			ps_page->dma = 0;
			put_page(ps_page->page);
			ps_page->page = NULL;
		}
	}

	/* there also may be some cached data from a chained receive */
	if (rx_ring->rx_skb_top) {
		dev_kfree_skb(rx_ring->rx_skb_top);
		rx_ring->rx_skb_top = NULL;
	}

	/* Zero out the descriptor ring */
	memset(rx_ring->desc, 0, rx_ring->size);

	rx_ring->next_to_clean = 0;
	rx_ring->next_to_use = 0;
	adapter->flags2 &= ~FLAG2_IS_DISCARDING;

	writel(0, rx_ring->head);
	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
		e1000e_update_rdt_wa(rx_ring, 0);
	else
		writel(0, rx_ring->tail);
}
static void e1000e_downshift_workaround(struct work_struct *work)
{
	struct e1000_adapter *adapter = container_of(work,
						     struct e1000_adapter,
						     downshift_task);

	if (test_bit(__E1000_DOWN, &adapter->state))
		return;

	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
}
/**
 * e1000_intr_msi - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr_msi(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	/* read ICR disables interrupts using IAM */
	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround -- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround -- For packet buffer work-around on
		 * link down event; disable receives here in the ISR and reset
		 * adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
			/* disable receives */
			u32 rctl = er32(RCTL);

			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Reset on uncorrectable ECC error */
	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
		u32 pbeccsts = er32(PBECCSTS);

		adapter->corr_errors +=
		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
		adapter->uncorr_errors +=
		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

		/* Do the reset outside of interrupt context */
		schedule_work(&adapter->reset_task);

		/* return immediately since reset is imminent */
		return IRQ_HANDLED;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 **/
static irqreturn_t e1000_intr(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 rctl, icr = er32(ICR);

	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
		return IRQ_NONE;	/* Not our interrupt */

	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
	 * not set, then the adapter didn't send an interrupt
	 */
	if (!(icr & E1000_ICR_INT_ASSERTED))
		return IRQ_NONE;

	/* Interrupt Auto-Mask...upon reading ICR,
	 * interrupts are masked.  No need for the
	 * IMC write
	 */

	if (icr & E1000_ICR_LSC) {
		hw->mac.get_link_status = true;
		/* ICH8 workaround -- Call gig speed drop workaround on cable
		 * disconnect (LSC) before accessing any PHY registers
		 */
		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
		    (!(er32(STATUS) & E1000_STATUS_LU)))
			schedule_work(&adapter->downshift_task);

		/* 80003ES2LAN workaround --
		 * For packet buffer work-around on link down event;
		 * disable receives here in the ISR and
		 * reset adapter in watchdog
		 */
		if (netif_carrier_ok(netdev) &&
		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
			/* disable receives */
			rctl = er32(RCTL);
			ew32(RCTL, rctl & ~E1000_RCTL_EN);
			adapter->flags |= FLAG_RESTART_NOW;
		}
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

	/* Reset on uncorrectable ECC error */
	if ((icr & E1000_ICR_ECCER) && (hw->mac.type == e1000_pch_lpt)) {
		u32 pbeccsts = er32(PBECCSTS);

		adapter->corr_errors +=
		    pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
		adapter->uncorr_errors +=
		    (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
		    E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;

		/* Do the reset outside of interrupt context */
		schedule_work(&adapter->reset_task);

		/* return immediately since reset is imminent */
		return IRQ_HANDLED;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_tx_bytes = 0;
		adapter->total_tx_packets = 0;
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
static irqreturn_t e1000_msix_other(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	u32 icr = er32(ICR);

	if (!(icr & E1000_ICR_INT_ASSERTED)) {
		if (!test_bit(__E1000_DOWN, &adapter->state))
			ew32(IMS, E1000_IMS_OTHER);
		return IRQ_NONE;
	}

	if (icr & adapter->eiac_mask)
		ew32(ICS, (icr & adapter->eiac_mask));

	if (icr & E1000_ICR_OTHER) {
		if (!(icr & E1000_ICR_LSC))
			goto no_link_interrupt;
		hw->mac.get_link_status = true;
		/* guard against interrupt when we're going down */
		if (!test_bit(__E1000_DOWN, &adapter->state))
			mod_timer(&adapter->watchdog_timer, jiffies + 1);
	}

no_link_interrupt:
	if (!test_bit(__E1000_DOWN, &adapter->state))
		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_tx(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *tx_ring = adapter->tx_ring;

	adapter->total_tx_bytes = 0;
	adapter->total_tx_packets = 0;

	if (!e1000_clean_tx_irq(tx_ring))
		/* Ring was not completely cleaned, so fire another interrupt */
		ew32(ICS, tx_ring->ims_val);

	return IRQ_HANDLED;
}
static irqreturn_t e1000_intr_msix_rx(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);
	struct e1000_ring *rx_ring = adapter->rx_ring;

	/* Write the ITR value calculated at the end of the
	 * previous interrupt.
	 */
	if (rx_ring->set_itr) {
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
		rx_ring->set_itr = 0;
	}

	if (napi_schedule_prep(&adapter->napi)) {
		adapter->total_rx_bytes = 0;
		adapter->total_rx_packets = 0;
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}
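/* ITR arithmetic sketch (illustrative, derived from the writel() above):
 * itr_val is a target interrupt rate in interrupts/sec and the 82574 EITR
 * register counts interrupt-gap time in 256 ns units, so for itr_val = 8000:
 *
 *	1000000000 / (8000 * 256) = 488     // 488 * 256 ns ~= 125 us gap
 */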
/**
 * e1000_configure_msix - Configure MSI-X hardware
 *
 * e1000_configure_msix sets up the hardware to properly
 * generate MSI-X interrupts.
 **/
static void e1000_configure_msix(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct e1000_ring *rx_ring = adapter->rx_ring;
	struct e1000_ring *tx_ring = adapter->tx_ring;
	int vector = 0;
	u32 ctrl_ext, ivar = 0;

	adapter->eiac_mask = 0;

	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
	if (hw->mac.type == e1000_82574) {
		u32 rfctl = er32(RFCTL);

		rfctl |= E1000_RFCTL_ACK_DIS;
		ew32(RFCTL, rfctl);
	}

	/* Configure Rx vector */
	rx_ring->ims_val = E1000_IMS_RXQ0;
	adapter->eiac_mask |= rx_ring->ims_val;
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       rx_ring->itr_register);
	else
		writel(1, rx_ring->itr_register);
	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;

	/* Configure Tx vector */
	tx_ring->ims_val = E1000_IMS_TXQ0;
	vector++;
	if (tx_ring->itr_val)
		writel(1000000000 / (tx_ring->itr_val * 256),
		       tx_ring->itr_register);
	else
		writel(1, tx_ring->itr_register);
	adapter->eiac_mask |= tx_ring->ims_val;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);

	/* set vector for Other Causes, e.g. link changes */
	vector++;
	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
	if (rx_ring->itr_val)
		writel(1000000000 / (rx_ring->itr_val * 256),
		       hw->hw_addr + E1000_EITR_82574(vector));
	else
		writel(1, hw->hw_addr + E1000_EITR_82574(vector));

	/* Cause Tx interrupts on every write back */
	ivar |= (1 << 31);

	ew32(IVAR, ivar);

	/* enable MSI-X PBA support */
	ctrl_ext = er32(CTRL_EXT);
	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;

	/* Auto-Mask Other interrupts upon ICR read */
	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
	ctrl_ext |= E1000_CTRL_EXT_EIAME;
	ew32(CTRL_EXT, ctrl_ext);
	e1e_flush();
}
void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
{
	if (adapter->msix_entries) {
		pci_disable_msix(adapter->pdev);
		kfree(adapter->msix_entries);
		adapter->msix_entries = NULL;
	} else if (adapter->flags & FLAG_MSI_ENABLED) {
		pci_disable_msi(adapter->pdev);
		adapter->flags &= ~FLAG_MSI_ENABLED;
	}
}
2061 * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
2063 * Attempt to configure interrupts using the best available
2064 * capabilities of the hardware and kernel.
2066 void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
2071 switch (adapter->int_mode) {
2072 case E1000E_INT_MODE_MSIX:
2073 if (adapter->flags & FLAG_HAS_MSIX) {
2074 adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
2075 adapter->msix_entries = kcalloc(adapter->num_vectors,
2079 if (adapter->msix_entries) {
2080 struct e1000_adapter *a = adapter;
2082 for (i = 0; i < adapter->num_vectors; i++)
2083 adapter->msix_entries[i].entry = i;
2085 err = pci_enable_msix_range(a->pdev,
2092 /* MSI-X failed, so fall through and try MSI */
2093 e_err("Failed to initialize MSI-X interrupts. Falling back to MSI interrupts.\n");
2094 e1000e_reset_interrupt_capability(adapter);
2096 adapter->int_mode = E1000E_INT_MODE_MSI;
2098 case E1000E_INT_MODE_MSI:
2099 if (!pci_enable_msi(adapter->pdev)) {
2100 adapter->flags |= FLAG_MSI_ENABLED;
2102 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2103 e_err("Failed to initialize MSI interrupts. Falling back to legacy interrupts.\n");
2106 case E1000E_INT_MODE_LEGACY:
2107 /* Don't do anything; this is the system default */
2111 /* store the number of vectors being used */
2112 adapter->num_vectors = 1;
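/* Editor's note: the elided pci_enable_msix_range() call above requests an
 * exact vector count by passing the same minimum and maximum; a sketch of
 * the usual pattern (argument list assumed, `a` is the local alias):
 */
#if 0	/* illustrative only */
	err = pci_enable_msix_range(a->pdev, a->msix_entries,
				    a->num_vectors, a->num_vectors);
	if (err > 0)
		return;		/* MSI-X successfully enabled */
#endif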
2116 * e1000_request_msix - Initialize MSI-X interrupts
2118 * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
2121 static int e1000_request_msix(struct e1000_adapter *adapter)
2123 struct net_device *netdev = adapter->netdev;
2124 int err = 0, vector = 0;
2126 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2127 snprintf(adapter->rx_ring->name,
2128 sizeof(adapter->rx_ring->name) - 1,
2129 "%s-rx-0", netdev->name);
2131 memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
2132 err = request_irq(adapter->msix_entries[vector].vector,
2133 e1000_intr_msix_rx, 0, adapter->rx_ring->name,
2137 adapter->rx_ring->itr_register = adapter->hw.hw_addr +
2138 E1000_EITR_82574(vector);
2139 adapter->rx_ring->itr_val = adapter->itr;
2142 if (strlen(netdev->name) < (IFNAMSIZ - 5))
2143 snprintf(adapter->tx_ring->name,
2144 sizeof(adapter->tx_ring->name) - 1,
2145 "%s-tx-0", netdev->name);
2147 memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
2148 err = request_irq(adapter->msix_entries[vector].vector,
2149 e1000_intr_msix_tx, 0, adapter->tx_ring->name,
2153 adapter->tx_ring->itr_register = adapter->hw.hw_addr +
2154 E1000_EITR_82574(vector);
2155 adapter->tx_ring->itr_val = adapter->itr;
2158 err = request_irq(adapter->msix_entries[vector].vector,
2159 e1000_msix_other, 0, netdev->name, netdev);
2163 e1000_configure_msix(adapter);
2169 * e1000_request_irq - initialize interrupts
2171 * Attempts to configure interrupts using the best available
2172 * capabilities of the hardware and kernel.
2174 static int e1000_request_irq(struct e1000_adapter *adapter)
2176 struct net_device *netdev = adapter->netdev;
2179 if (adapter->msix_entries) {
2180 err = e1000_request_msix(adapter);
2183 /* fall back to MSI */
2184 e1000e_reset_interrupt_capability(adapter);
2185 adapter->int_mode = E1000E_INT_MODE_MSI;
2186 e1000e_set_interrupt_capability(adapter);
2188 if (adapter->flags & FLAG_MSI_ENABLED) {
2189 err = request_irq(adapter->pdev->irq, e1000_intr_msi, 0,
2190 netdev->name, netdev);
2194 /* fall back to legacy interrupt */
2195 e1000e_reset_interrupt_capability(adapter);
2196 adapter->int_mode = E1000E_INT_MODE_LEGACY;
2199 err = request_irq(adapter->pdev->irq, e1000_intr, IRQF_SHARED,
2200 netdev->name, netdev);
2202 e_err("Unable to allocate interrupt, Error: %d\n", err);
2207 static void e1000_free_irq(struct e1000_adapter *adapter)
2209 struct net_device *netdev = adapter->netdev;
2211 if (adapter->msix_entries) {
2214 free_irq(adapter->msix_entries[vector].vector, netdev);
2217 free_irq(adapter->msix_entries[vector].vector, netdev);
2220 /* Other Causes interrupt vector */
2221 free_irq(adapter->msix_entries[vector].vector, netdev);
2225 free_irq(adapter->pdev->irq, netdev);
2229 * e1000_irq_disable - Mask off interrupt generation on the NIC
2231 static void e1000_irq_disable(struct e1000_adapter *adapter)
2233 struct e1000_hw *hw = &adapter->hw;
2236 if (adapter->msix_entries)
2237 ew32(EIAC_82574, 0);
2240 if (adapter->msix_entries) {
2243 for (i = 0; i < adapter->num_vectors; i++)
2244 synchronize_irq(adapter->msix_entries[i].vector);
2246 synchronize_irq(adapter->pdev->irq);
2251 * e1000_irq_enable - Enable default interrupt generation settings
2253 static void e1000_irq_enable(struct e1000_adapter *adapter)
2255 struct e1000_hw *hw = &adapter->hw;
2257 if (adapter->msix_entries) {
2258 ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
2259 ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
2260 } else if (hw->mac.type == e1000_pch_lpt) {
2261 ew32(IMS, IMS_ENABLE_MASK | E1000_IMS_ECCER);
2263 ew32(IMS, IMS_ENABLE_MASK);
2269 * e1000e_get_hw_control - get control of the h/w from f/w
2270 * @adapter: address of board private structure
2272 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2273 * For ASF and Pass Through versions of f/w this means that
2274 * the driver is loaded. For AMT version (only with 82573)
2275 * of the f/w this means that the network i/f is open.
2277 void e1000e_get_hw_control(struct e1000_adapter *adapter)
2279 struct e1000_hw *hw = &adapter->hw;
2283 /* Let firmware know the driver has taken over */
2284 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2286 ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
2287 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2288 ctrl_ext = er32(CTRL_EXT);
2289 ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
2294 * e1000e_release_hw_control - release control of the h/w to f/w
2295 * @adapter: address of board private structure
2297 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2298 * For ASF and Pass Through versions of f/w this means that the
2299 * driver is no longer loaded. For AMT version (only with 82573)
2300 * of the f/w this means that the network i/f is closed.
2303 void e1000e_release_hw_control(struct e1000_adapter *adapter)
2305 struct e1000_hw *hw = &adapter->hw;
2309 /* Let firmware take over control of h/w */
2310 if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
2312 ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
2313 } else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
2314 ctrl_ext = er32(CTRL_EXT);
2315 ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
2320 * e1000_alloc_ring_dma - allocate memory for a ring structure
2322 static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
2323 struct e1000_ring *ring)
2325 struct pci_dev *pdev = adapter->pdev;
2327 ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
2336 * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
2337 * @tx_ring: Tx descriptor ring
2339 * Return 0 on success, negative on failure
2341 int e1000e_setup_tx_resources(struct e1000_ring *tx_ring)
2343 struct e1000_adapter *adapter = tx_ring->adapter;
2344 int err = -ENOMEM, size;
2346 size = sizeof(struct e1000_buffer) * tx_ring->count;
2347 tx_ring->buffer_info = vzalloc(size);
2348 if (!tx_ring->buffer_info)
2351 /* round up to nearest 4K */
2352 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
2353 tx_ring->size = ALIGN(tx_ring->size, 4096);
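	/* Editor's worked example: with the default 256 descriptors,
	 * 256 * sizeof(struct e1000_tx_desc) = 256 * 16 = 4096 bytes, so
	 * ALIGN() is a no-op here; 320 descriptors (5120 bytes) would
	 * round up to 8192.
	 */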
2355 err = e1000_alloc_ring_dma(adapter, tx_ring);
2359 tx_ring->next_to_use = 0;
2360 tx_ring->next_to_clean = 0;
2364 vfree(tx_ring->buffer_info);
2365 e_err("Unable to allocate memory for the transmit descriptor ring\n");
2370 * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
2371 * @rx_ring: Rx descriptor ring
2373 * Returns 0 on success, negative on failure
2375 int e1000e_setup_rx_resources(struct e1000_ring *rx_ring)
2377 struct e1000_adapter *adapter = rx_ring->adapter;
2378 struct e1000_buffer *buffer_info;
2379 int i, size, desc_len, err = -ENOMEM;
2381 size = sizeof(struct e1000_buffer) * rx_ring->count;
2382 rx_ring->buffer_info = vzalloc(size);
2383 if (!rx_ring->buffer_info)
2386 for (i = 0; i < rx_ring->count; i++) {
2387 buffer_info = &rx_ring->buffer_info[i];
2388 buffer_info->ps_pages = kcalloc(PS_PAGE_BUFFERS,
2389 sizeof(struct e1000_ps_page),
2391 if (!buffer_info->ps_pages)
2395 desc_len = sizeof(union e1000_rx_desc_packet_split);
2397 /* Round up to nearest 4K */
2398 rx_ring->size = rx_ring->count * desc_len;
2399 rx_ring->size = ALIGN(rx_ring->size, 4096);
2401 err = e1000_alloc_ring_dma(adapter, rx_ring);
2405 rx_ring->next_to_clean = 0;
2406 rx_ring->next_to_use = 0;
2407 rx_ring->rx_skb_top = NULL;
2412 for (i = 0; i < rx_ring->count; i++) {
2413 buffer_info = &rx_ring->buffer_info[i];
2414 kfree(buffer_info->ps_pages);
2417 vfree(rx_ring->buffer_info);
2418 e_err("Unable to allocate memory for the receive descriptor ring\n");
2423 * e1000_clean_tx_ring - Free Tx Buffers
2424 * @tx_ring: Tx descriptor ring
2426 static void e1000_clean_tx_ring(struct e1000_ring *tx_ring)
2428 struct e1000_adapter *adapter = tx_ring->adapter;
2429 struct e1000_buffer *buffer_info;
2433 for (i = 0; i < tx_ring->count; i++) {
2434 buffer_info = &tx_ring->buffer_info[i];
2435 e1000_put_txbuf(tx_ring, buffer_info);
2438 netdev_reset_queue(adapter->netdev);
2439 size = sizeof(struct e1000_buffer) * tx_ring->count;
2440 memset(tx_ring->buffer_info, 0, size);
2442 memset(tx_ring->desc, 0, tx_ring->size);
2444 tx_ring->next_to_use = 0;
2445 tx_ring->next_to_clean = 0;
2447 writel(0, tx_ring->head);
2448 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
2449 e1000e_update_tdt_wa(tx_ring, 0);
2451 writel(0, tx_ring->tail);
2455 * e1000e_free_tx_resources - Free Tx Resources per Queue
2456 * @tx_ring: Tx descriptor ring
2458 * Free all transmit software resources
2460 void e1000e_free_tx_resources(struct e1000_ring *tx_ring)
2462 struct e1000_adapter *adapter = tx_ring->adapter;
2463 struct pci_dev *pdev = adapter->pdev;
2465 e1000_clean_tx_ring(tx_ring);
2467 vfree(tx_ring->buffer_info);
2468 tx_ring->buffer_info = NULL;
2470 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
2472 tx_ring->desc = NULL;
2476 * e1000e_free_rx_resources - Free Rx Resources
2477 * @rx_ring: Rx descriptor ring
2479 * Free all receive software resources
2481 void e1000e_free_rx_resources(struct e1000_ring *rx_ring)
2483 struct e1000_adapter *adapter = rx_ring->adapter;
2484 struct pci_dev *pdev = adapter->pdev;
2487 e1000_clean_rx_ring(rx_ring);
2489 for (i = 0; i < rx_ring->count; i++)
2490 kfree(rx_ring->buffer_info[i].ps_pages);
2492 vfree(rx_ring->buffer_info);
2493 rx_ring->buffer_info = NULL;
2495 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2497 rx_ring->desc = NULL;
2501 * e1000_update_itr - update the dynamic ITR value based on statistics
2502 * @adapter: pointer to adapter
2503 * @itr_setting: current adapter->itr
2504 * @packets: the number of packets during this measurement interval
2505 * @bytes: the number of bytes during this measurement interval
2507 * Stores a new ITR value based on packets and byte
2508 * counts during the last interrupt. The advantage of per interrupt
2509 * computation is faster updates and more accurate ITR for the current
2510 * traffic pattern. Constants in this function were computed
2511 * based on theoretical maximum wire speed and thresholds were set based
2512 * on testing data as well as attempting to minimize response time
2513 * while increasing bulk throughput. This functionality is controlled
2514 * by the InterruptThrottleRate module parameter.
2516 static unsigned int e1000_update_itr(u16 itr_setting, int packets, int bytes)
2518 unsigned int retval = itr_setting;
2523 switch (itr_setting) {
2524 case lowest_latency:
2525 /* handle TSO and jumbo frames */
2526 if (bytes / packets > 8000)
2527 retval = bulk_latency;
2528 else if ((packets < 5) && (bytes > 512))
2529 retval = low_latency;
2531 case low_latency: /* 50 usec aka 20000 ints/s */
2532 if (bytes > 10000) {
2533 /* this if handles the TSO accounting */
2534 if (bytes / packets > 8000)
2535 retval = bulk_latency;
2536 else if ((packets < 10) || ((bytes / packets) > 1200))
2537 retval = bulk_latency;
2538 else if ((packets > 35))
2539 retval = lowest_latency;
2540 } else if (bytes / packets > 2000) {
2541 retval = bulk_latency;
2542 } else if (packets <= 2 && bytes < 512) {
2543 retval = lowest_latency;
2546 case bulk_latency: /* 250 usec aka 4000 ints/s */
2547 if (bytes > 25000) {
2549 retval = low_latency;
2550 } else if (bytes < 6000) {
2551 retval = low_latency;
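	/* Editor's worked example of the heuristic above: a ring in
	 * low_latency that saw 12000 bytes / 8 packets in the last interval
	 * (bytes > 10000, packets < 10) is promoted to bulk_latency, while
	 * 300 bytes / 2 packets (packets <= 2, bytes < 512) drops it to
	 * lowest_latency.
	 */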
2559 static void e1000_set_itr(struct e1000_adapter *adapter)
2562 u32 new_itr = adapter->itr;
2564 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2565 if (adapter->link_speed != SPEED_1000) {
2571 if (adapter->flags2 & FLAG2_DISABLE_AIM) {
2576 adapter->tx_itr = e1000_update_itr(adapter->tx_itr,
2577 adapter->total_tx_packets,
2578 adapter->total_tx_bytes);
2579 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2580 if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2581 adapter->tx_itr = low_latency;
2583 adapter->rx_itr = e1000_update_itr(adapter->rx_itr,
2584 adapter->total_rx_packets,
2585 adapter->total_rx_bytes);
2586 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2587 if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2588 adapter->rx_itr = low_latency;
2590 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2592 /* counts and packets in update_itr are dependent on these numbers */
2593 switch (current_itr) {
2594 case lowest_latency:
2598 new_itr = 20000; /* aka hwitr = ~200 */
2608 if (new_itr != adapter->itr) {
2609 /* this attempts to bias the interrupt rate towards Bulk
2610 * by adding intermediate steps when interrupt rate is
2611 * increasing
2612 */
2613 new_itr = new_itr > adapter->itr ?
2614 min(adapter->itr + (new_itr >> 2), new_itr) : new_itr;
2615 adapter->itr = new_itr;
2616 adapter->rx_ring->itr_val = new_itr;
2617 if (adapter->msix_entries)
2618 adapter->rx_ring->set_itr = 1;
2620 e1000e_write_itr(adapter, new_itr);
2625 * e1000e_write_itr - write the ITR value to the appropriate registers
2626 * @adapter: address of board private structure
2627 * @itr: new ITR value to program
2629 * e1000e_write_itr determines if the adapter is in MSI-X mode
2630 * and, if so, writes the EITR registers with the ITR value.
2631 * Otherwise, it writes the ITR value into the ITR register.
2633 void e1000e_write_itr(struct e1000_adapter *adapter, u32 itr)
2635 struct e1000_hw *hw = &adapter->hw;
2636 u32 new_itr = itr ? 1000000000 / (itr * 256) : 0;
2638 if (adapter->msix_entries) {
2641 for (vector = 0; vector < adapter->num_vectors; vector++)
2642 writel(new_itr, hw->hw_addr + E1000_EITR_82574(vector));
2649 * e1000_alloc_queues - Allocate memory for all rings
2650 * @adapter: board private structure to initialize
2652 static int e1000_alloc_queues(struct e1000_adapter *adapter)
2654 int size = sizeof(struct e1000_ring);
2656 adapter->tx_ring = kzalloc(size, GFP_KERNEL);
2657 if (!adapter->tx_ring)
2659 adapter->tx_ring->count = adapter->tx_ring_count;
2660 adapter->tx_ring->adapter = adapter;
2662 adapter->rx_ring = kzalloc(size, GFP_KERNEL);
2663 if (!adapter->rx_ring)
2665 adapter->rx_ring->count = adapter->rx_ring_count;
2666 adapter->rx_ring->adapter = adapter;
2670 e_err("Unable to allocate memory for queues\n");
2671 kfree(adapter->rx_ring);
2672 kfree(adapter->tx_ring);
2677 * e1000e_poll - NAPI Rx polling callback
2678 * @napi: struct associated with this polling callback
2679 * @weight: number of packets driver is allowed to process this poll
2681 static int e1000e_poll(struct napi_struct *napi, int weight)
2683 struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter,
2685 struct e1000_hw *hw = &adapter->hw;
2686 struct net_device *poll_dev = adapter->netdev;
2687 int tx_cleaned = 1, work_done = 0;
2689 adapter = netdev_priv(poll_dev);
2691 if (!adapter->msix_entries ||
2692 (adapter->rx_ring->ims_val & adapter->tx_ring->ims_val))
2693 tx_cleaned = e1000_clean_tx_irq(adapter->tx_ring);
2695 adapter->clean_rx(adapter->rx_ring, &work_done, weight);
2700 /* If weight not fully consumed, exit the polling mode */
2701 if (work_done < weight) {
2702 if (adapter->itr_setting & 3)
2703 e1000_set_itr(adapter);
2704 napi_complete(napi);
2705 if (!test_bit(__E1000_DOWN, &adapter->state)) {
2706 if (adapter->msix_entries)
2707 ew32(IMS, adapter->rx_ring->ims_val);
2709 e1000_irq_enable(adapter);
2716 static int e1000_vlan_rx_add_vid(struct net_device *netdev,
2717 __always_unused __be16 proto, u16 vid)
2719 struct e1000_adapter *adapter = netdev_priv(netdev);
2720 struct e1000_hw *hw = &adapter->hw;
2723 /* don't update vlan cookie if already programmed */
2724 if ((adapter->hw.mng_cookie.status &
2725 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2726 (vid == adapter->mng_vlan_id))
2729 /* add VID to filter table */
2730 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2731 index = (vid >> 5) & 0x7F;
2732 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2733 vfta |= (1 << (vid & 0x1F));
2734 hw->mac.ops.write_vfta(hw, index, vfta);
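		/* Editor's worked example: VID 100 -> index = (100 >> 5) &
		 * 0x7F = 3 and bit = 100 & 0x1F = 4, so bit 4 of VFTA[3]
		 * is set; 128 registers * 32 bits cover all 4096 VLAN IDs.
		 */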
2737 set_bit(vid, adapter->active_vlans);
2742 static int e1000_vlan_rx_kill_vid(struct net_device *netdev,
2743 __always_unused __be16 proto, u16 vid)
2745 struct e1000_adapter *adapter = netdev_priv(netdev);
2746 struct e1000_hw *hw = &adapter->hw;
2749 if ((adapter->hw.mng_cookie.status &
2750 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2751 (vid == adapter->mng_vlan_id)) {
2752 /* release control to f/w */
2753 e1000e_release_hw_control(adapter);
2757 /* remove VID from filter table */
2758 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2759 index = (vid >> 5) & 0x7F;
2760 vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
2761 vfta &= ~(1 << (vid & 0x1F));
2762 hw->mac.ops.write_vfta(hw, index, vfta);
2765 clear_bit(vid, adapter->active_vlans);
2771 * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
2772 * @adapter: board private structure to initialize
2774 static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
2776 struct net_device *netdev = adapter->netdev;
2777 struct e1000_hw *hw = &adapter->hw;
2780 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2781 /* disable VLAN receive filtering */
2783 rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
2786 if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
2787 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
2788 adapter->mng_vlan_id);
2789 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
2795 * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
2796 * @adapter: board private structure to initialize
2798 static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
2800 struct e1000_hw *hw = &adapter->hw;
2803 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
2804 /* enable VLAN receive filtering */
2806 rctl |= E1000_RCTL_VFE;
2807 rctl &= ~E1000_RCTL_CFIEN;
2813 * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
2814 * @adapter: board private structure to initialize
2816 static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
2818 struct e1000_hw *hw = &adapter->hw;
2821 /* disable VLAN tag insert/strip */
2823 ctrl &= ~E1000_CTRL_VME;
2828 * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
2829 * @adapter: board private structure to initialize
2831 static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
2833 struct e1000_hw *hw = &adapter->hw;
2836 /* enable VLAN tag insert/strip */
2838 ctrl |= E1000_CTRL_VME;
2842 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
2844 struct net_device *netdev = adapter->netdev;
2845 u16 vid = adapter->hw.mng_cookie.vlan_id;
2846 u16 old_vid = adapter->mng_vlan_id;
2848 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
2849 e1000_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
2850 adapter->mng_vlan_id = vid;
2853 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
2854 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q), old_vid);
2857 static void e1000_restore_vlan(struct e1000_adapter *adapter)
2861 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0);
2863 for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
2864 e1000_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
2867 static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
2869 struct e1000_hw *hw = &adapter->hw;
2870 u32 manc, manc2h, mdef, i, j;
2872 if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
2877 /* enable receiving management packets to the host. this will probably
2878 * generate destination unreachable messages from the host OS, but
2879 * the packets will be handled on SMBUS
2881 manc |= E1000_MANC_EN_MNG2HOST;
2882 manc2h = er32(MANC2H);
2884 switch (hw->mac.type) {
2886 manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
2890 /* Check if IPMI pass-through decision filter already exists;
2891 * if so, enable it.
2892 */
2893 for (i = 0, j = 0; i < 8; i++) {
2894 mdef = er32(MDEF(i));
2896 /* Ignore filters with anything other than IPMI ports */
2897 if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2900 /* Enable this decision filter in MANC2H */
2907 if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
2910 /* Create new decision filter in an empty filter */
2911 for (i = 0, j = 0; i < 8; i++)
2912 if (er32(MDEF(i)) == 0) {
2913 ew32(MDEF(i), (E1000_MDEF_PORT_623 |
2914 E1000_MDEF_PORT_664));
2921 e_warn("Unable to create IPMI pass-through filter\n");
2925 ew32(MANC2H, manc2h);
2930 * e1000_configure_tx - Configure Transmit Unit after Reset
2931 * @adapter: board private structure
2933 * Configure the Tx unit of the MAC after a reset.
2935 static void e1000_configure_tx(struct e1000_adapter *adapter)
2937 struct e1000_hw *hw = &adapter->hw;
2938 struct e1000_ring *tx_ring = adapter->tx_ring;
2940 u32 tdlen, tctl, tarc;
2942 /* Setup the HW Tx Head and Tail descriptor pointers */
2943 tdba = tx_ring->dma;
2944 tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
2945 ew32(TDBAL(0), (tdba & DMA_BIT_MASK(32)));
2946 ew32(TDBAH(0), (tdba >> 32));
2947 ew32(TDLEN(0), tdlen);
2950 tx_ring->head = adapter->hw.hw_addr + E1000_TDH(0);
2951 tx_ring->tail = adapter->hw.hw_addr + E1000_TDT(0);
2953 /* Set the Tx Interrupt Delay register */
2954 ew32(TIDV, adapter->tx_int_delay);
2955 /* Tx irq moderation */
2956 ew32(TADV, adapter->tx_abs_int_delay);
2958 if (adapter->flags2 & FLAG2_DMA_BURST) {
2959 u32 txdctl = er32(TXDCTL(0));
2961 txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
2962 E1000_TXDCTL_WTHRESH);
2963 /* set up some performance related parameters to encourage the
2964 * hardware to use the bus more efficiently in bursts; this depends
2965 * on tx_int_delay being enabled,
2966 * wthresh = 1 ==> burst write is disabled to avoid Tx stalls
2967 * hthresh = 1 ==> prefetch when one or more available
2968 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
2969 * BEWARE: this seems to work but should be considered first if
2970 * there are Tx hangs or other Tx related bugs
2972 txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
2973 ew32(TXDCTL(0), txdctl);
2975 /* erratum work around: set txdctl the same for both queues */
2976 ew32(TXDCTL(1), er32(TXDCTL(0)));
2978 /* Program the Transmit Control Register */
2980 tctl &= ~E1000_TCTL_CT;
2981 tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
2982 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
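	/* Editor's note (constant values assumed from the e1000 register
	 * layout): E1000_COLLISION_THRESHOLD is 15 and E1000_CT_SHIFT is 4,
	 * so this programs TCTL's CT field to retry a half-duplex collision
	 * up to 15 times before aborting the transmit.
	 */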
2984 if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
2985 tarc = er32(TARC(0));
2986 /* set the speed mode bit, we'll clear it if we're not at
2987 * gigabit link later
2989 #define SPEED_MODE_BIT (1 << 21)
2990 tarc |= SPEED_MODE_BIT;
2991 ew32(TARC(0), tarc);
2994 /* errata: program both queues to unweighted RR */
2995 if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
2996 tarc = er32(TARC(0));
2998 ew32(TARC(0), tarc);
2999 tarc = er32(TARC(1));
3001 ew32(TARC(1), tarc);
3004 /* Setup Transmit Descriptor Settings for eop descriptor */
3005 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
3007 /* only set IDE if we are delaying interrupts using the timers */
3008 if (adapter->tx_int_delay)
3009 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
3011 /* enable Report Status bit */
3012 adapter->txd_cmd |= E1000_TXD_CMD_RS;
3016 hw->mac.ops.config_collision_dist(hw);
3020 * e1000_setup_rctl - configure the receive control registers
3021 * @adapter: Board private structure
3023 #define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
3024 (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
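/* Editor's worked example: with 4 KB pages, PAGE_USE_COUNT(9000) =
 * (9000 >> 12) + 1 = 3, i.e. a 9000-byte jumbo MTU needs three partial
 * pages, which is exactly the packet-split limit checked below.
 */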
3025 static void e1000_setup_rctl(struct e1000_adapter *adapter)
3027 struct e1000_hw *hw = &adapter->hw;
3031 /* Workaround Si errata on PCHx - configure jumbo frame flow.
3032 * If jumbo frames not set, program related MAC/PHY registers
3033 * to h/w defaults
3034 */
3035 if (hw->mac.type >= e1000_pch2lan) {
3038 if (adapter->netdev->mtu > ETH_DATA_LEN)
3039 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
3041 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
3044 e_dbg("failed to enable|disable jumbo frame workaround mode\n");
3047 /* Program MC offset vector base */
3049 rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
3050 rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
3051 E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
3052 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
3054 /* Do not Store bad packets */
3055 rctl &= ~E1000_RCTL_SBP;
3057 /* Enable Long Packet receive */
3058 if (adapter->netdev->mtu <= ETH_DATA_LEN)
3059 rctl &= ~E1000_RCTL_LPE;
3061 rctl |= E1000_RCTL_LPE;
3063 /* Some systems expect that the CRC is included in SMBUS traffic. The
3064 * hardware strips the CRC before sending to both SMBUS (BMC) and to
3065 * host memory when this is enabled
3067 if (adapter->flags2 & FLAG2_CRC_STRIPPING)
3068 rctl |= E1000_RCTL_SECRC;
3070 /* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
3071 if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
3074 e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
3076 phy_data |= (1 << 2);
3077 e1e_wphy(hw, PHY_REG(770, 26), phy_data);
3079 e1e_rphy(hw, 22, &phy_data);
3081 phy_data |= (1 << 14);
3082 e1e_wphy(hw, 0x10, 0x2823);
3083 e1e_wphy(hw, 0x11, 0x0003);
3084 e1e_wphy(hw, 22, phy_data);
3087 /* Setup buffer sizes */
3088 rctl &= ~E1000_RCTL_SZ_4096;
3089 rctl |= E1000_RCTL_BSEX;
3090 switch (adapter->rx_buffer_len) {
3093 rctl |= E1000_RCTL_SZ_2048;
3094 rctl &= ~E1000_RCTL_BSEX;
3097 rctl |= E1000_RCTL_SZ_4096;
3100 rctl |= E1000_RCTL_SZ_8192;
3103 rctl |= E1000_RCTL_SZ_16384;
3107 /* Enable Extended Status in all Receive Descriptors */
3108 rfctl = er32(RFCTL);
3109 rfctl |= E1000_RFCTL_EXTEN;
3112 /* 82571 and greater support packet-split where the protocol
3113 * header is placed in skb->data and the packet data is
3114 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
3115 * In the case of a non-split, skb->data is linearly filled,
3116 * followed by the page buffers. Therefore, skb->data is
3117 * sized to hold the largest protocol header.
3119 * allocations using alloc_page take too long for regular MTU
3120 * so only enable packet split for jumbo frames
3122 * Using pages when the page size is greater than 16k wastes
3123 * a lot of memory, since we allocate 3 pages at all times
3126 pages = PAGE_USE_COUNT(adapter->netdev->mtu);
3127 if ((pages <= 3) && (PAGE_SIZE <= 16384) && (rctl & E1000_RCTL_LPE))
3128 adapter->rx_ps_pages = pages;
3130 adapter->rx_ps_pages = 0;
3132 if (adapter->rx_ps_pages) {
3135 /* Enable Packet split descriptors */
3136 rctl |= E1000_RCTL_DTYP_PS;
3138 psrctl |= adapter->rx_ps_bsize0 >> E1000_PSRCTL_BSIZE0_SHIFT;
3140 switch (adapter->rx_ps_pages) {
3142 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE3_SHIFT;
3145 psrctl |= PAGE_SIZE << E1000_PSRCTL_BSIZE2_SHIFT;
3148 psrctl |= PAGE_SIZE >> E1000_PSRCTL_BSIZE1_SHIFT;
3152 ew32(PSRCTL, psrctl);
3155 /* This is useful for sniffing bad packets. */
3156 if (adapter->netdev->features & NETIF_F_RXALL) {
3157 /* UPE and MPE will be handled by normal PROMISC logic
3158 * in e1000e_set_rx_mode
3160 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
3161 E1000_RCTL_BAM | /* RX All Bcast Pkts */
3162 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
3164 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
3165 E1000_RCTL_DPF | /* Allow filtered pause */
3166 E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
3167 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
3168 * and that breaks VLANs.
3173 /* just started the receive unit, no need to restart */
3174 adapter->flags &= ~FLAG_RESTART_NOW;
3178 * e1000_configure_rx - Configure Receive Unit after Reset
3179 * @adapter: board private structure
3181 * Configure the Rx unit of the MAC after a reset.
3183 static void e1000_configure_rx(struct e1000_adapter *adapter)
3185 struct e1000_hw *hw = &adapter->hw;
3186 struct e1000_ring *rx_ring = adapter->rx_ring;
3188 u32 rdlen, rctl, rxcsum, ctrl_ext;
3190 if (adapter->rx_ps_pages) {
3191 /* this is a 32 byte descriptor */
3192 rdlen = rx_ring->count *
3193 sizeof(union e1000_rx_desc_packet_split);
3194 adapter->clean_rx = e1000_clean_rx_irq_ps;
3195 adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
3196 } else if (adapter->netdev->mtu > ETH_FRAME_LEN + ETH_FCS_LEN) {
3197 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3198 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
3199 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
3201 rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
3202 adapter->clean_rx = e1000_clean_rx_irq;
3203 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
3206 /* disable receives while setting up the descriptors */
3208 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
3209 ew32(RCTL, rctl & ~E1000_RCTL_EN);
3211 usleep_range(10000, 20000);
3213 if (adapter->flags2 & FLAG2_DMA_BURST) {
3214 /* set the writeback threshold (only takes effect if the RDTR
3215 * is set). set GRAN=1 and write back up to 0x4 worth, and
3216 * enable prefetching of 0x20 Rx descriptors
3222 ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
3223 ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
3225 /* override the delay timers for enabling bursting, only if
3226 * the value was not set by the user via module options
3228 if (adapter->rx_int_delay == DEFAULT_RDTR)
3229 adapter->rx_int_delay = BURST_RDTR;
3230 if (adapter->rx_abs_int_delay == DEFAULT_RADV)
3231 adapter->rx_abs_int_delay = BURST_RADV;
3234 /* set the Receive Delay Timer Register */
3235 ew32(RDTR, adapter->rx_int_delay);
3237 /* irq moderation */
3238 ew32(RADV, adapter->rx_abs_int_delay);
3239 if ((adapter->itr_setting != 0) && (adapter->itr != 0))
3240 e1000e_write_itr(adapter, adapter->itr);
3242 ctrl_ext = er32(CTRL_EXT);
3243 /* Auto-Mask interrupts upon ICR access */
3244 ctrl_ext |= E1000_CTRL_EXT_IAME;
3245 ew32(IAM, 0xffffffff);
3246 ew32(CTRL_EXT, ctrl_ext);
3249 /* Setup the HW Rx Head and Tail Descriptor Pointers and
3250 * the Base and Length of the Rx Descriptor Ring
3252 rdba = rx_ring->dma;
3253 ew32(RDBAL(0), (rdba & DMA_BIT_MASK(32)));
3254 ew32(RDBAH(0), (rdba >> 32));
3255 ew32(RDLEN(0), rdlen);
3258 rx_ring->head = adapter->hw.hw_addr + E1000_RDH(0);
3259 rx_ring->tail = adapter->hw.hw_addr + E1000_RDT(0);
3261 /* Enable Receive Checksum Offload for TCP and UDP */
3262 rxcsum = er32(RXCSUM);
3263 if (adapter->netdev->features & NETIF_F_RXCSUM)
3264 rxcsum |= E1000_RXCSUM_TUOFL;
3266 rxcsum &= ~E1000_RXCSUM_TUOFL;
3267 ew32(RXCSUM, rxcsum);
3269 /* With jumbo frames, excessive C-state transition latencies result
3270 * in dropped transactions.
3272 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3274 ((er32(PBA) & E1000_PBA_RXA_MASK) * 1024 -
3275 adapter->max_frame_size) * 8 / 1000;
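		/* Editor's worked example: a 24 KB Rx allocation and a
		 * 9018-byte jumbo frame give
		 * (24 * 1024 - 9018) * 8 / 1000 = 124, i.e. roughly 124 us
		 * of buffering headroom at 1 Gb/s (1 bit per nanosecond),
		 * used below as the PM QoS DMA latency bound.
		 */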
3277 if (adapter->flags & FLAG_IS_ICH) {
3278 u32 rxdctl = er32(RXDCTL(0));
3280 ew32(RXDCTL(0), rxdctl | 0x3);
3283 pm_qos_update_request(&adapter->netdev->pm_qos_req, lat);
3285 pm_qos_update_request(&adapter->netdev->pm_qos_req,
3286 PM_QOS_DEFAULT_VALUE);
3289 /* Enable Receives */
3294 * e1000e_write_mc_addr_list - write multicast addresses to MTA
3295 * @netdev: network interface device structure
3297 * Writes multicast address list to the MTA hash table.
3298 * Returns: -ENOMEM on failure
3299 * 0 on no addresses written
3300 * X on writing X addresses to MTA
3302 static int e1000e_write_mc_addr_list(struct net_device *netdev)
3304 struct e1000_adapter *adapter = netdev_priv(netdev);
3305 struct e1000_hw *hw = &adapter->hw;
3306 struct netdev_hw_addr *ha;
3310 if (netdev_mc_empty(netdev)) {
3311 /* nothing to program, so clear mc list */
3312 hw->mac.ops.update_mc_addr_list(hw, NULL, 0);
3316 mta_list = kzalloc(netdev_mc_count(netdev) * ETH_ALEN, GFP_ATOMIC);
3320 /* update_mc_addr_list expects a packed array of only addresses. */
3322 netdev_for_each_mc_addr(ha, netdev)
3323 memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
3325 hw->mac.ops.update_mc_addr_list(hw, mta_list, i);
3328 return netdev_mc_count(netdev);
3332 * e1000e_write_uc_addr_list - write unicast addresses to RAR table
3333 * @netdev: network interface device structure
3335 * Writes unicast address list to the RAR table.
3336 * Returns: -ENOMEM on failure/insufficient address space
3337 * 0 on no addresses written
3338 * X on writing X addresses to the RAR table
3340 static int e1000e_write_uc_addr_list(struct net_device *netdev)
3342 struct e1000_adapter *adapter = netdev_priv(netdev);
3343 struct e1000_hw *hw = &adapter->hw;
3344 unsigned int rar_entries;
3347 rar_entries = hw->mac.ops.rar_get_count(hw);
3349 /* save a rar entry for our hardware address */
3352 /* save a rar entry for the LAA workaround */
3353 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA)
3356 /* return ENOMEM indicating insufficient memory for addresses */
3357 if (netdev_uc_count(netdev) > rar_entries)
3360 if (!netdev_uc_empty(netdev) && rar_entries) {
3361 struct netdev_hw_addr *ha;
3363 /* write the addresses in reverse order to avoid write
3364 * combining
3365 */
3366 netdev_for_each_uc_addr(ha, netdev) {
3371 rval = hw->mac.ops.rar_set(hw, ha->addr, rar_entries--);
3378 /* zero out the remaining RAR entries not used above */
3379 for (; rar_entries > 0; rar_entries--) {
3380 ew32(RAH(rar_entries), 0);
3381 ew32(RAL(rar_entries), 0);
3389 * e1000e_set_rx_mode - secondary unicast, Multicast and Promiscuous mode set
3390 * @netdev: network interface device structure
3392 * The ndo_set_rx_mode entry point is called whenever the unicast or multicast
3393 * address list or the network interface flags are updated. This routine is
3394 * responsible for configuring the hardware for proper unicast, multicast,
3395 * promiscuous mode, and all-multi behavior.
3397 static void e1000e_set_rx_mode(struct net_device *netdev)
3399 struct e1000_adapter *adapter = netdev_priv(netdev);
3400 struct e1000_hw *hw = &adapter->hw;
3403 if (pm_runtime_suspended(netdev->dev.parent))
3406 /* Check for Promiscuous and All Multicast modes */
3409 /* clear the affected bits */
3410 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
3412 if (netdev->flags & IFF_PROMISC) {
3413 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
3414 /* Do not hardware filter VLANs in promisc mode */
3415 e1000e_vlan_filter_disable(adapter);
3419 if (netdev->flags & IFF_ALLMULTI) {
3420 rctl |= E1000_RCTL_MPE;
3422 /* Write addresses to the MTA, if the attempt fails
3423 * then we should just turn on promiscuous mode so
3424 * that we can at least receive multicast traffic
3426 count = e1000e_write_mc_addr_list(netdev);
3428 rctl |= E1000_RCTL_MPE;
3430 e1000e_vlan_filter_enable(adapter);
3431 /* Write addresses to available RAR registers, if there is not
3432 * sufficient space to store all the addresses then enable
3433 * unicast promiscuous mode
3435 count = e1000e_write_uc_addr_list(netdev);
3437 rctl |= E1000_RCTL_UPE;
3442 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
3443 e1000e_vlan_strip_enable(adapter);
3445 e1000e_vlan_strip_disable(adapter);
3448 static void e1000e_setup_rss_hash(struct e1000_adapter *adapter)
3450 struct e1000_hw *hw = &adapter->hw;
3455 netdev_rss_key_fill(rss_key, sizeof(rss_key));
3456 for (i = 0; i < 10; i++)
3457 ew32(RSSRK(i), rss_key[i]);
3459 /* Direct all traffic to queue 0 */
3460 for (i = 0; i < 32; i++)
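#if 0	/* editor's sketch of the elided loop body: upstream zeroes each
	 * redirection-table entry so every RSS hash maps to queue 0
	 */
		ew32(RETA(i), 0);
#endif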
3463 /* Disable raw packet checksumming so that RSS hash is placed in
3464 * descriptor on writeback.
3466 rxcsum = er32(RXCSUM);
3467 rxcsum |= E1000_RXCSUM_PCSD;
3469 ew32(RXCSUM, rxcsum);
3471 mrqc = (E1000_MRQC_RSS_FIELD_IPV4 |
3472 E1000_MRQC_RSS_FIELD_IPV4_TCP |
3473 E1000_MRQC_RSS_FIELD_IPV6 |
3474 E1000_MRQC_RSS_FIELD_IPV6_TCP |
3475 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
3481 * e1000e_get_base_timinca - get default SYSTIM time increment attributes
3482 * @adapter: board private structure
3483 * @timinca: pointer to returned time increment attributes
3485 * Get attributes for incrementing the System Time Register SYSTIML/H at
3486 * the default base frequency, and set the cyclecounter shift value.
3488 s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
3490 struct e1000_hw *hw = &adapter->hw;
3491 u32 incvalue, incperiod, shift;
3493 /* Make sure clock is enabled on I217 before checking the frequency */
3494 if ((hw->mac.type == e1000_pch_lpt) &&
3495 !(er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) &&
3496 !(er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_ENABLED)) {
3497 u32 fextnvm7 = er32(FEXTNVM7);
3499 if (!(fextnvm7 & (1 << 0))) {
3500 ew32(FEXTNVM7, fextnvm7 | (1 << 0));
3505 switch (hw->mac.type) {
3508 /* On I217, the clock frequency is 25MHz or 96MHz as
3509 * indicated by the System Clock Frequency Indication
3511 if ((hw->mac.type != e1000_pch_lpt) ||
3512 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
3513 /* Stable 96MHz frequency */
3514 incperiod = INCPERIOD_96MHz;
3515 incvalue = INCVALUE_96MHz;
3516 shift = INCVALUE_SHIFT_96MHz;
3517 adapter->cc.shift = shift + INCPERIOD_SHIFT_96MHz;
3523 /* Stable 25MHz frequency */
3524 incperiod = INCPERIOD_25MHz;
3525 incvalue = INCVALUE_25MHz;
3526 shift = INCVALUE_SHIFT_25MHz;
3527 adapter->cc.shift = shift;
3533 *timinca = ((incperiod << E1000_TIMINCA_INCPERIOD_SHIFT) |
3534 ((incvalue << shift) & E1000_TIMINCA_INCVALUE_MASK));
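	/* Editor's note: TIMINCA packs the increment period above the
	 * 24-bit increment value (mask assumed to be 0x00FFFFFF), so SYSTIM
	 * advances by (incvalue << shift) counts every incperiod system
	 * clock cycles; e.g. a 25 MHz clock (40 ns period) fits as
	 * incperiod = 1 with an incvalue representing 40 ns after the
	 * shift (illustrative numbers only).
	 */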
3540 * e1000e_config_hwtstamp - configure the hwtstamp registers and enable/disable
3541 * @adapter: board private structure
3543 * Outgoing time stamping can be enabled and disabled. Play nice and
3544 * disable it when requested, although it shouldn't cause any overhead
3545 * when no packet needs it. At most one packet in the queue may be
3546 * marked for time stamping, otherwise it would be impossible to tell
3547 * for sure to which packet the hardware time stamp belongs.
3549 * Incoming time stamping has to be configured via the hardware filters.
3550 * Not all combinations are supported, in particular event type has to be
3551 * specified. Matching the kind of event packet is not supported, with the
3552 * exception of "all V2 events regardless of level 2 or 4".
3554 static int e1000e_config_hwtstamp(struct e1000_adapter *adapter,
3555 struct hwtstamp_config *config)
3557 struct e1000_hw *hw = &adapter->hw;
3558 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
3559 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
3567 if (!(adapter->flags & FLAG_HAS_HW_TIMESTAMP))
3570 /* flags reserved for future extensions - must be zero */
3574 switch (config->tx_type) {
3575 case HWTSTAMP_TX_OFF:
3578 case HWTSTAMP_TX_ON:
3584 switch (config->rx_filter) {
3585 case HWTSTAMP_FILTER_NONE:
3588 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3589 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3590 rxmtrl = E1000_RXMTRL_PTP_V1_SYNC_MESSAGE;
3593 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3594 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
3595 rxmtrl = E1000_RXMTRL_PTP_V1_DELAY_REQ_MESSAGE;
3598 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3599 /* Also time stamps V2 L2 Path Delay Request/Response */
3600 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3601 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3604 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3605 /* Also time stamps V2 L2 Path Delay Request/Response. */
3606 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_V2;
3607 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3610 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3611 /* Hardware cannot filter just V2 L4 Sync messages;
3612 * fall-through to V2 (both L2 and L4) Sync.
3614 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3615 /* Also time stamps V2 Path Delay Request/Response. */
3616 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3617 rxmtrl = E1000_RXMTRL_PTP_V2_SYNC_MESSAGE;
3621 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3622 /* Hardware cannot filter just V2 L4 Delay Request messages;
3623 * fall-through to V2 (both L2 and L4) Delay Request.
3625 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3626 /* Also time stamps V2 Path Delay Request/Response. */
3627 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
3628 rxmtrl = E1000_RXMTRL_PTP_V2_DELAY_REQ_MESSAGE;
3632 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3633 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3634 /* Hardware cannot filter just V2 L4 or L2 Event messages;
3635 * fall-through to all V2 (both L2 and L4) Events.
3637 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3638 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
3639 config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
3643 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3644 /* For V1, the hardware can only filter Sync messages or
3645 * Delay Request messages but not both so fall-through to
3646 * time stamp all packets.
3648 case HWTSTAMP_FILTER_ALL:
3651 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
3652 config->rx_filter = HWTSTAMP_FILTER_ALL;
3658 adapter->hwtstamp_config = *config;
3660 /* enable/disable Tx h/w time stamping */
3661 regval = er32(TSYNCTXCTL);
3662 regval &= ~E1000_TSYNCTXCTL_ENABLED;
3663 regval |= tsync_tx_ctl;
3664 ew32(TSYNCTXCTL, regval);
3665 if ((er32(TSYNCTXCTL) & E1000_TSYNCTXCTL_ENABLED) !=
3666 (regval & E1000_TSYNCTXCTL_ENABLED)) {
3667 e_err("Timesync Tx Control register not set as expected\n");
3671 /* enable/disable Rx h/w time stamping */
3672 regval = er32(TSYNCRXCTL);
3673 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
3674 regval |= tsync_rx_ctl;
3675 ew32(TSYNCRXCTL, regval);
3676 if ((er32(TSYNCRXCTL) & (E1000_TSYNCRXCTL_ENABLED |
3677 E1000_TSYNCRXCTL_TYPE_MASK)) !=
3678 (regval & (E1000_TSYNCRXCTL_ENABLED |
3679 E1000_TSYNCRXCTL_TYPE_MASK))) {
3680 e_err("Timesync Rx Control register not set as expected\n");
3684 /* L2: define ethertype filter for time stamped packets */
3686 rxmtrl |= ETH_P_1588;
3688 /* define which PTP packets get time stamped */
3689 ew32(RXMTRL, rxmtrl);
3691 /* Filter by destination port */
3693 rxudp = PTP_EV_PORT;
3694 cpu_to_be16s(&rxudp);
3700 /* Clear TSYNCRXCTL_VALID & TSYNCTXCTL_VALID bit */
3704 /* Get and set the System Time Register SYSTIM base frequency */
3705 ret_val = e1000e_get_base_timinca(adapter, &regval);
3708 ew32(TIMINCA, regval);
3710 /* reset the ns time counter */
3711 timecounter_init(&adapter->tc, &adapter->cc,
3712 ktime_to_ns(ktime_get_real()));
3718 * e1000_configure - configure the hardware for Rx and Tx
3719 * @adapter: private board structure
3721 static void e1000_configure(struct e1000_adapter *adapter)
3723 struct e1000_ring *rx_ring = adapter->rx_ring;
3725 e1000e_set_rx_mode(adapter->netdev);
3727 e1000_restore_vlan(adapter);
3728 e1000_init_manageability_pt(adapter);
3730 e1000_configure_tx(adapter);
3732 if (adapter->netdev->features & NETIF_F_RXHASH)
3733 e1000e_setup_rss_hash(adapter);
3734 e1000_setup_rctl(adapter);
3735 e1000_configure_rx(adapter);
3736 adapter->alloc_rx_buf(rx_ring, e1000_desc_unused(rx_ring), GFP_KERNEL);
3740 * e1000e_power_up_phy - restore link in case the phy was powered down
3741 * @adapter: address of board private structure
3743 * The phy may be powered down to save power and turn off link when the
3744 * driver is unloaded and wake on lan is not enabled (among others)
3745 * *** this routine MUST be followed by a call to e1000e_reset ***
3747 void e1000e_power_up_phy(struct e1000_adapter *adapter)
3749 if (adapter->hw.phy.ops.power_up)
3750 adapter->hw.phy.ops.power_up(&adapter->hw);
3752 adapter->hw.mac.ops.setup_link(&adapter->hw);
3756 * e1000_power_down_phy - Power down the PHY
3758 * Power down the PHY so no link is implied when interface is down.
3759 * The PHY cannot be powered down if management or WoL is active.
3761 static void e1000_power_down_phy(struct e1000_adapter *adapter)
3763 if (adapter->hw.phy.ops.power_down)
3764 adapter->hw.phy.ops.power_down(&adapter->hw);
3768 * e1000e_reset - bring the hardware into a known good state
3770 * This function boots the hardware and enables some settings that
3771 * require a configuration cycle of the hardware - those cannot be
3772 * set/changed during runtime. After reset the device needs to be
3773 * properly configured for Rx, Tx etc.
3775 void e1000e_reset(struct e1000_adapter *adapter)
3777 struct e1000_mac_info *mac = &adapter->hw.mac;
3778 struct e1000_fc_info *fc = &adapter->hw.fc;
3779 struct e1000_hw *hw = &adapter->hw;
3780 u32 tx_space, min_tx_space, min_rx_space;
3781 u32 pba = adapter->pba;
3784 /* reset Packet Buffer Allocation to default */
3787 if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
3788 /* To maintain wire speed transmits, the Tx FIFO should be
3789 * large enough to accommodate two full transmit packets,
3790 * rounded up to the next 1KB and expressed in KB. Likewise,
3791 * the Rx FIFO should be large enough to accommodate at least
3792 * one full receive packet and is similarly rounded up and
3793 * expressed in KB.
3794 */
3796 /* upper 16 bits have Tx packet buffer allocation size in KB */
3797 tx_space = pba >> 16;
3798 /* lower 16 bits have Rx packet buffer allocation size in KB */
3800 /* the Tx fifo also stores 16 bytes of information about the Tx
3801 * but doesn't include the Ethernet FCS because hardware appends it
3802 */
3803 min_tx_space = (adapter->max_frame_size +
3804 sizeof(struct e1000_tx_desc) - ETH_FCS_LEN) * 2;
3805 min_tx_space = ALIGN(min_tx_space, 1024);
3806 min_tx_space >>= 10;
3807 /* software strips receive CRC, so leave room for it */
3808 min_rx_space = adapter->max_frame_size;
3809 min_rx_space = ALIGN(min_rx_space, 1024);
3810 min_rx_space >>= 10;
3812 /* If current Tx allocation is less than the min Tx FIFO size,
3813 * and the min Tx FIFO size is less than the current Rx FIFO
3814 * allocation, take space away from current Rx allocation
3816 if ((tx_space < min_tx_space) &&
3817 ((min_tx_space - tx_space) < pba)) {
3818 pba -= min_tx_space - tx_space;
3820 /* if short on Rx space, Rx wins and must trump Tx
3821 * adjustment
3822 */
3823 if (pba < min_rx_space)
3830 /* flow control settings
3832 * The high water mark must be low enough to fit one full frame
3833 * (or the size used for early receive) above it in the Rx FIFO.
3834 * Set it to the lower of:
3835 * - 90% of the Rx FIFO size, and
3836 * - the full Rx FIFO size minus one full frame
3838 if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
3839 fc->pause_time = 0xFFFF;
3841 fc->pause_time = E1000_FC_PAUSE_TIME;
3842 fc->send_xon = true;
3843 fc->current_mode = fc->requested_mode;
3845 switch (hw->mac.type) {
3847 case e1000_ich10lan:
3848 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3851 fc->high_water = 0x2800;
3852 fc->low_water = fc->high_water - 8;
3857 hwm = min(((pba << 10) * 9 / 10),
3858 ((pba << 10) - adapter->max_frame_size));
3860 fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
3861 fc->low_water = fc->high_water - 8;
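		/* Editor's worked example: pba = 20 (KB) and a 1522-byte
		 * max frame give hwm = min(20480 * 9 / 10, 20480 - 1522) =
		 * min(18432, 18958) = 18432; that value is already 8-byte
		 * aligned, so the E1000_FCRTH_RTH mask leaves it unchanged.
		 */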
3864 /* Workaround PCH LOM adapter hangs with certain network
3865 * loads. If hangs persist, try disabling Tx flow control.
3867 if (adapter->netdev->mtu > ETH_DATA_LEN) {
3868 fc->high_water = 0x3500;
3869 fc->low_water = 0x1500;
3871 fc->high_water = 0x5000;
3872 fc->low_water = 0x3000;
3874 fc->refresh_time = 0x1000;
3878 fc->refresh_time = 0x0400;
3880 if (adapter->netdev->mtu <= ETH_DATA_LEN) {
3881 fc->high_water = 0x05C20;
3882 fc->low_water = 0x05048;
3883 fc->pause_time = 0x0650;
3889 fc->high_water = ((pba << 10) * 9 / 10) & E1000_FCRTH_RTH;
3890 fc->low_water = ((pba << 10) * 8 / 10) & E1000_FCRTL_RTL;
3894 /* Alignment of Tx data is on an arbitrary byte boundary with the
3895 * maximum size per Tx descriptor limited only to the transmit
3896 * allocation of the packet buffer minus 96 bytes with an upper
3897 * limit of 24KB due to receive synchronization limitations.
3899 adapter->tx_fifo_limit = min_t(u32, ((er32(PBA) >> 16) << 10) - 96,
3902 /* Disable Adaptive Interrupt Moderation if 2 full packets cannot
3903 * fit in receive buffer.
3905 if (adapter->itr_setting & 0x3) {
3906 if ((adapter->max_frame_size * 2) > (pba << 10)) {
3907 if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
3908 dev_info(&adapter->pdev->dev,
3909 "Interrupt Throttle Rate off\n");
3910 adapter->flags2 |= FLAG2_DISABLE_AIM;
3911 e1000e_write_itr(adapter, 0);
3913 } else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
3914 dev_info(&adapter->pdev->dev,
3915 "Interrupt Throttle Rate on\n");
3916 adapter->flags2 &= ~FLAG2_DISABLE_AIM;
3917 adapter->itr = 20000;
3918 e1000e_write_itr(adapter, adapter->itr);
3922 /* Allow time for pending master requests to run */
3923 mac->ops.reset_hw(hw);
3925 /* For parts with AMT enabled, let the firmware know
3926 * that the network interface is in control
3928 if (adapter->flags & FLAG_HAS_AMT)
3929 e1000e_get_hw_control(adapter);
3933 if (mac->ops.init_hw(hw))
3934 e_err("Hardware Error\n");
3936 e1000_update_mng_vlan(adapter);
3938 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
3939 ew32(VET, ETH_P_8021Q);
3941 e1000e_reset_adaptive(hw);
3943 /* initialize systim and reset the ns time counter */
3944 e1000e_config_hwtstamp(adapter, &adapter->hwtstamp_config);
3946 /* Set EEE advertisement as appropriate */
3947 if (adapter->flags2 & FLAG2_HAS_EEE) {
3951 switch (hw->phy.type) {
3952 case e1000_phy_82579:
3953 adv_addr = I82579_EEE_ADVERTISEMENT;
3955 case e1000_phy_i217:
3956 adv_addr = I217_EEE_ADVERTISEMENT;
3959 dev_err(&adapter->pdev->dev,
3960 "Invalid PHY type setting EEE advertisement\n");
3964 ret_val = hw->phy.ops.acquire(hw);
3966 dev_err(&adapter->pdev->dev,
3967 "EEE advertisement - unable to acquire PHY\n");
3971 e1000_write_emi_reg_locked(hw, adv_addr,
3972 hw->dev_spec.ich8lan.eee_disable ?
3973 0 : adapter->eee_advert);
3975 hw->phy.ops.release(hw);
3978 if (!netif_running(adapter->netdev) &&
3979 !test_bit(__E1000_TESTING, &adapter->state))
3980 e1000_power_down_phy(adapter);
3982 e1000_get_phy_info(hw);
3984 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
3985 !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
3987 /* speed up time to link by disabling smart power down, ignore
3988 * the return value of this function because there is nothing
3989 * different we would do if it failed
3991 e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
3992 phy_data &= ~IGP02E1000_PM_SPD;
3993 e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
3997 int e1000e_up(struct e1000_adapter *adapter)
3999 struct e1000_hw *hw = &adapter->hw;
4001 /* hardware has been reset, we need to reload some things */
4002 e1000_configure(adapter);
4004 clear_bit(__E1000_DOWN, &adapter->state);
4006 if (adapter->msix_entries)
4007 e1000_configure_msix(adapter);
4008 e1000_irq_enable(adapter);
4010 netif_start_queue(adapter->netdev);
4012 /* fire a link change interrupt to start the watchdog */
4013 if (adapter->msix_entries)
4014 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
4016 ew32(ICS, E1000_ICS_LSC);
4021 static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
4023 struct e1000_hw *hw = &adapter->hw;
4025 if (!(adapter->flags2 & FLAG2_DMA_BURST))
4028 /* flush pending descriptor writebacks to memory */
4029 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4030 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4032 /* execute the writes immediately */
4035 /* due to rare timing issues, write to TIDV/RDTR again to ensure the
4036 * write is successful
4038 ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
4039 ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
4041 /* execute the writes immediately */
4045 static void e1000e_update_stats(struct e1000_adapter *adapter);
4048 * e1000e_down - quiesce the device and optionally reset the hardware
4049 * @adapter: board private structure
4050 * @reset: boolean flag to reset the hardware or not
4052 void e1000e_down(struct e1000_adapter *adapter, bool reset)
4054 struct net_device *netdev = adapter->netdev;
4055 struct e1000_hw *hw = &adapter->hw;
4058 /* signal that we're down so the interrupt handler does not
4059 * reschedule our watchdog timer
4061 set_bit(__E1000_DOWN, &adapter->state);
4063 /* disable receives in the hardware */
4065 if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
4066 ew32(RCTL, rctl & ~E1000_RCTL_EN);
4067 /* flush and sleep below */
4069 netif_stop_queue(netdev);
4071 /* disable transmits in the hardware */
4073 tctl &= ~E1000_TCTL_EN;
4076 /* flush both disables and wait for them to finish */
4078 usleep_range(10000, 20000);
4080 e1000_irq_disable(adapter);
4082 napi_synchronize(&adapter->napi);
4084 del_timer_sync(&adapter->watchdog_timer);
4085 del_timer_sync(&adapter->phy_info_timer);
4087 netif_carrier_off(netdev);
4089 spin_lock(&adapter->stats64_lock);
4090 e1000e_update_stats(adapter);
4091 spin_unlock(&adapter->stats64_lock);
4093 e1000e_flush_descriptors(adapter);
4094 e1000_clean_tx_ring(adapter->tx_ring);
4095 e1000_clean_rx_ring(adapter->rx_ring);
4097 adapter->link_speed = 0;
4098 adapter->link_duplex = 0;
4100 /* Disable Si errata workaround on PCHx for jumbo frame flow */
4101 if ((hw->mac.type >= e1000_pch2lan) &&
4102 (adapter->netdev->mtu > ETH_DATA_LEN) &&
4103 e1000_lv_jumbo_workaround_ich8lan(hw, false))
4104 e_dbg("failed to disable jumbo frame workaround mode\n");
4106 if (reset && !pci_channel_offline(adapter->pdev))
4107 e1000e_reset(adapter);
4110 void e1000e_reinit_locked(struct e1000_adapter *adapter)
4113 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
4114 usleep_range(1000, 2000);
4115 e1000e_down(adapter, true);
4117 clear_bit(__E1000_RESETTING, &adapter->state);
4121 * e1000e_cyclecounter_read - read raw cycle counter (used by time counter)
4122 * @cc: cyclecounter structure
4124 static cycle_t e1000e_cyclecounter_read(const struct cyclecounter *cc)
4126 struct e1000_adapter *adapter = container_of(cc, struct e1000_adapter,
4128 struct e1000_hw *hw = &adapter->hw;
4129 cycle_t systim, systim_next;
4131 /* latch SYSTIMH on read of SYSTIML */
4132 systim = (cycle_t)er32(SYSTIML);
4133 systim |= (cycle_t)er32(SYSTIMH) << 32;
4135 if ((hw->mac.type == e1000_82574) || (hw->mac.type == e1000_82583)) {
4136 u64 incvalue, time_delta, rem, temp;
4139 /* errata for 82574/82583 possible bad bits read from SYSTIMH/L
4140 * check to see that the time is incrementing at a reasonable
4141 * rate and is a multiple of incvalue
4143 incvalue = er32(TIMINCA) & E1000_TIMINCA_INCVALUE_MASK;
4144 for (i = 0; i < E1000_MAX_82574_SYSTIM_REREADS; i++) {
4145 /* latch SYSTIMH on read of SYSTIML */
4146 systim_next = (cycle_t)er32(SYSTIML);
4147 systim_next |= (cycle_t)er32(SYSTIMH) << 32;
4149 time_delta = systim_next - systim;
4151 rem = do_div(temp, incvalue);
4153 systim = systim_next;
4155 if ((time_delta < E1000_82574_SYSTIM_EPSILON) &&
4164 * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
4165 * @adapter: board private structure to initialize
4167 * e1000_sw_init initializes the Adapter private data structure.
4168 * Fields are initialized based on PCI device information and
4169 * OS network device settings (MTU size).
4171 static int e1000_sw_init(struct e1000_adapter *adapter)
4173 struct net_device *netdev = adapter->netdev;
4175 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
4176 adapter->rx_ps_bsize0 = 128;
4177 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
4178 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
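	/* Editor's worked example: for the default 1500-byte MTU,
	 * max_frame_size = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518
	 * and min_frame_size = ETH_ZLEN (60) + 4 = 64, the classic
	 * Ethernet frame-size bounds.
	 */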
4179 adapter->tx_ring_count = E1000_DEFAULT_TXD;
4180 adapter->rx_ring_count = E1000_DEFAULT_RXD;
4182 spin_lock_init(&adapter->stats64_lock);
4184 e1000e_set_interrupt_capability(adapter);
4186 if (e1000_alloc_queues(adapter))
4189 /* Setup hardware time stamping cyclecounter */
4190 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
4191 adapter->cc.read = e1000e_cyclecounter_read;
4192 adapter->cc.mask = CLOCKSOURCE_MASK(64);
4193 adapter->cc.mult = 1;
4194 /* cc.shift set in e1000e_get_base_timinca() */
4196 spin_lock_init(&adapter->systim_lock);
4197 INIT_WORK(&adapter->tx_hwtstamp_work, e1000e_tx_hwtstamp_work);
4200 /* Explicitly disable IRQ since the NIC can be in any state. */
4201 e1000_irq_disable(adapter);
4203 set_bit(__E1000_DOWN, &adapter->state);
4208 * e1000_intr_msi_test - Interrupt Handler
4209 * @irq: interrupt number
4210 * @data: pointer to a network interface device structure
4212 static irqreturn_t e1000_intr_msi_test(int __always_unused irq, void *data)
4214 struct net_device *netdev = data;
4215 struct e1000_adapter *adapter = netdev_priv(netdev);
4216 struct e1000_hw *hw = &adapter->hw;
4217 u32 icr = er32(ICR);
4219 e_dbg("icr is %08X\n", icr);
4220 if (icr & E1000_ICR_RXSEQ) {
4221 adapter->flags &= ~FLAG_MSI_TEST_FAILED;
4222 /* Force memory writes to complete before acknowledging the
4223 * interrupt is handled.
4232 * e1000_test_msi_interrupt - Returns 0 for successful test
4233 * @adapter: board private struct
4235 * code flow taken from tg3.c
4237 static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
4239 struct net_device *netdev = adapter->netdev;
4240 struct e1000_hw *hw = &adapter->hw;
4243 /* poll_enable hasn't been called yet, so don't need disable */
4244 /* clear any pending events */
4247 /* free the real vector and request a test handler */
4248 e1000_free_irq(adapter);
4249 e1000e_reset_interrupt_capability(adapter);
4251 /* Assume that the test fails; if it succeeds then the test
4252 * MSI irq handler will unset this flag
4254 adapter->flags |= FLAG_MSI_TEST_FAILED;
4256 err = pci_enable_msi(adapter->pdev);
4257 if (err)
4258 goto msi_test_failed;
4260 err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
4261 netdev->name, netdev);
4262 if (err) {
4263 pci_disable_msi(adapter->pdev);
4264 goto msi_test_failed;
4265 }
4267 /* Force memory writes to complete before enabling and firing an
4268 * interrupt.
4269 */
4270 wmb();
4272 e1000_irq_enable(adapter);
4274 /* fire an unusual interrupt on the test handler */
4275 ew32(ICS, E1000_ICS_RXSEQ);
4276 e1e_flush();
4277 msleep(100);
4279 e1000_irq_disable(adapter);
4281 rmb(); /* read flags after interrupt has been fired */
4283 if (adapter->flags & FLAG_MSI_TEST_FAILED) {
4284 adapter->int_mode = E1000E_INT_MODE_LEGACY;
4285 e_info("MSI interrupt test failed, using legacy interrupt.\n");
4286 } else {
4287 e_dbg("MSI interrupt test succeeded!\n");
4288 }
4290 free_irq(adapter->pdev->irq, netdev);
4291 pci_disable_msi(adapter->pdev);
4293 msi_test_failed:
4294 e1000e_set_interrupt_capability(adapter);
4295 return e1000_request_irq(adapter);
4296 }
4299 * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
4300 * @adapter: board private struct
4302 * code flow taken from tg3.c, called with e1000 interrupts disabled.
4304 static int e1000_test_msi(struct e1000_adapter *adapter)
4309 if (!(adapter->flags & FLAG_MSI_ENABLED))
4310 return 0;
4312 /* disable SERR in case the MSI write causes a master abort */
4313 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4314 if (pci_cmd & PCI_COMMAND_SERR)
4315 pci_write_config_word(adapter->pdev, PCI_COMMAND,
4316 pci_cmd & ~PCI_COMMAND_SERR);
4318 err = e1000_test_msi_interrupt(adapter);
4320 /* re-enable SERR */
4321 if (pci_cmd & PCI_COMMAND_SERR) {
4322 pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
4323 pci_cmd |= PCI_COMMAND_SERR;
4324 pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
4325 }
4327 return err;
4328 }
4331 * e1000_open - Called when a network interface is made active
4332 * @netdev: network interface device structure
4334 * Returns 0 on success, negative value on failure
4336 * The open entry point is called when a network interface is made
4337 * active by the system (IFF_UP). At this point all resources needed
4338 * for transmit and receive operations are allocated, the interrupt
4339 * handler is registered with the OS, the watchdog timer is started,
4340 * and the stack is notified that the interface is ready.
4342 static int e1000_open(struct net_device *netdev)
4344 struct e1000_adapter *adapter = netdev_priv(netdev);
4345 struct e1000_hw *hw = &adapter->hw;
4346 struct pci_dev *pdev = adapter->pdev;
4347 int err;
4349 /* disallow open during test */
4350 if (test_bit(__E1000_TESTING, &adapter->state))
4351 return -EBUSY;
4353 pm_runtime_get_sync(&pdev->dev);
4355 netif_carrier_off(netdev);
4357 /* allocate transmit descriptors */
4358 err = e1000e_setup_tx_resources(adapter->tx_ring);
4359 if (err)
4360 goto err_setup_tx;
4362 /* allocate receive descriptors */
4363 err = e1000e_setup_rx_resources(adapter->rx_ring);
4364 if (err)
4365 goto err_setup_rx;
4367 /* If AMT is enabled, let the firmware know that the network
4368 * interface is now open and reset the part to a known state.
4370 if (adapter->flags & FLAG_HAS_AMT) {
4371 e1000e_get_hw_control(adapter);
4372 e1000e_reset(adapter);
4375 e1000e_power_up_phy(adapter);
4377 adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
4378 if ((adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
4379 e1000_update_mng_vlan(adapter);
4381 /* DMA latency requirement to workaround jumbo issue */
4382 pm_qos_add_request(&adapter->netdev->pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
4383 PM_QOS_DEFAULT_VALUE);
4385 /* before we allocate an interrupt, we must be ready to handle it.
4386 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
4387 * as soon as we call pci_request_irq, so we have to setup our
4388 * clean_rx handler before we do so.
4390 e1000_configure(adapter);
4392 err = e1000_request_irq(adapter);
4393 if (err)
4394 goto err_req_irq;
4396 /* Work around PCIe errata with MSI interrupts causing some chipsets to
4397 * ignore e1000e MSI messages, which means we need to test our MSI
4398 * interrupt handling
4399 */
4400 if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
4401 err = e1000_test_msi(adapter);
4402 if (err) {
4403 e_err("Interrupt allocation failed\n")
4404 goto err_req_irq;
4405 }
4406 }
4408 /* From here on the code is the same as e1000e_up() */
4409 clear_bit(__E1000_DOWN, &adapter->state);
4411 napi_enable(&adapter->napi);
4413 e1000_irq_enable(adapter);
4415 adapter->tx_hang_recheck = false;
4416 netif_start_queue(netdev);
4418 hw->mac.get_link_status = true;
4419 pm_runtime_put(&pdev->dev);
4421 /* fire a link status change interrupt to start the watchdog */
4422 if (adapter->msix_entries)
4423 ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
4424 else
4425 ew32(ICS, E1000_ICS_LSC);
4427 return 0;
4429 err_req_irq:
4430 e1000e_release_hw_control(adapter);
4431 e1000_power_down_phy(adapter);
4432 e1000e_free_rx_resources(adapter->rx_ring);
4433 err_setup_rx:
4434 e1000e_free_tx_resources(adapter->tx_ring);
4435 err_setup_tx:
4436 e1000e_reset(adapter);
4437 pm_runtime_put_sync(&pdev->dev);
4439 return err;
4440 }
4443 * e1000_close - Disables a network interface
4444 * @netdev: network interface device structure
4446 * Returns 0, this is not allowed to fail
4448 * The close entry point is called when an interface is de-activated
4449 * by the OS. The hardware is still under the drivers control, but
4450 * needs to be disabled. A global MAC reset is issued to stop the
4451 * hardware, and all transmit and receive resources are freed.
4453 static int e1000_close(struct net_device *netdev)
4455 struct e1000_adapter *adapter = netdev_priv(netdev);
4456 struct pci_dev *pdev = adapter->pdev;
4457 int count = E1000_CHECK_RESET_COUNT;
4459 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
4460 usleep_range(10000, 20000);
4462 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
4464 pm_runtime_get_sync(&pdev->dev);
4466 if (!test_bit(__E1000_DOWN, &adapter->state)) {
4467 e1000e_down(adapter, true);
4468 e1000_free_irq(adapter);
4470 /* Link status message must follow this format */
4471 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
4474 napi_disable(&adapter->napi);
4476 e1000e_free_tx_resources(adapter->tx_ring);
4477 e1000e_free_rx_resources(adapter->rx_ring);
4479 /* kill manageability vlan ID if supported, but not if a vlan with
4480 * the same ID is registered on the host OS (let 8021q kill it)
4482 if (adapter->hw.mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
4483 e1000_vlan_rx_kill_vid(netdev, htons(ETH_P_8021Q),
4484 adapter->mng_vlan_id);
4486 /* If AMT is enabled, let the firmware know that the network
4487 * interface is now closed
4489 if ((adapter->flags & FLAG_HAS_AMT) &&
4490 !test_bit(__E1000_TESTING, &adapter->state))
4491 e1000e_release_hw_control(adapter);
4493 pm_qos_remove_request(&adapter->netdev->pm_qos_req);
4495 pm_runtime_put_sync(&pdev->dev);
4497 return 0;
4498 }
4501 * e1000_set_mac - Change the Ethernet Address of the NIC
4502 * @netdev: network interface device structure
4503 * @p: pointer to an address structure
4505 * Returns 0 on success, negative on failure
4507 static int e1000_set_mac(struct net_device *netdev, void *p)
4509 struct e1000_adapter *adapter = netdev_priv(netdev);
4510 struct e1000_hw *hw = &adapter->hw;
4511 struct sockaddr *addr = p;
4513 if (!is_valid_ether_addr(addr->sa_data))
4514 return -EADDRNOTAVAIL;
4516 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
4517 memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
4519 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
4521 if (adapter->flags & FLAG_RESET_OVERWRITES_LAA) {
4522 /* activate the work around */
4523 e1000e_set_laa_state_82571(&adapter->hw, 1);
4525 /* Hold a copy of the LAA in RAR[14] This is done so that
4526 * between the time RAR[0] gets clobbered and the time it
4527 * gets fixed (in e1000_watchdog), the actual LAA is in one
4528 * of the RARs and no incoming packets directed to this port
4529 * are dropped. Eventually the LAA will be in RAR[0] and
4530 * RAR[14].
4531 */
4532 hw->mac.ops.rar_set(&adapter->hw, adapter->hw.mac.addr,
4533 adapter->hw.mac.rar_entry_count - 1);
4534 }
4536 return 0;
4537 }
4540 * e1000e_update_phy_task - work thread to update phy
4541 * @work: pointer to our work struct
4543 * this worker thread exists because we must acquire a
4544 * semaphore to read the phy, which we could msleep while
4545 * waiting for it, and we can't msleep in a timer.
4547 static void e1000e_update_phy_task(struct work_struct *work)
4549 struct e1000_adapter *adapter = container_of(work,
4550 struct e1000_adapter,
4552 struct e1000_hw *hw = &adapter->hw;
4554 if (test_bit(__E1000_DOWN, &adapter->state))
4555 return;
4557 e1000_get_phy_info(hw);
4559 /* Enable EEE on 82579 after link up */
4560 if (hw->phy.type >= e1000_phy_82579)
4561 e1000_set_eee_pchlan(hw);
4565 * e1000_update_phy_info - timer call-back to update PHY info
4566 * @data: pointer to adapter cast into an unsigned long
4568 * Need to wait a few seconds after link up to get diagnostic information from
4569 * the phy
4570 **/
4571 static void e1000_update_phy_info(unsigned long data)
4573 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4575 if (test_bit(__E1000_DOWN, &adapter->state))
4576 return;
4578 schedule_work(&adapter->update_phy_task);
4582 * e1000e_update_phy_stats - Update the PHY statistics counters
4583 * @adapter: board private structure
4585 * Read/clear the upper 16-bit PHY registers and read/accumulate lower
4587 static void e1000e_update_phy_stats(struct e1000_adapter *adapter)
4589 struct e1000_hw *hw = &adapter->hw;
4590 s32 ret_val;
4591 u16 phy_data;
4593 ret_val = hw->phy.ops.acquire(hw);
4594 if (ret_val)
4595 return;
4597 /* A page set is expensive so check if already on desired page.
4598 * If not, set to the page with the PHY status registers.
4599 */
4600 hw->phy.addr = 1;
4601 ret_val = e1000e_read_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
4602 &phy_data);
4603 if (ret_val)
4604 goto release;
4605 if (phy_data != (HV_STATS_PAGE << IGP_PAGE_SHIFT)) {
4606 ret_val = hw->phy.ops.set_page(hw,
4607 HV_STATS_PAGE << IGP_PAGE_SHIFT);
4608 if (ret_val)
4609 goto release;
4610 }
4612 /* Single Collision Count */
4613 hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
4614 ret_val = hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
4615 if (!ret_val)
4616 adapter->stats.scc += phy_data;
4618 /* Excessive Collision Count */
4619 hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
4620 ret_val = hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
4621 if (!ret_val)
4622 adapter->stats.ecol += phy_data;
4624 /* Multiple Collision Count */
4625 hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
4626 ret_val = hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
4627 if (!ret_val)
4628 adapter->stats.mcc += phy_data;
4630 /* Late Collision Count */
4631 hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
4632 ret_val = hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
4633 if (!ret_val)
4634 adapter->stats.latecol += phy_data;
4636 /* Collision Count - also used for adaptive IFS */
4637 hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
4638 ret_val = hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
4639 if (!ret_val)
4640 hw->mac.collision_delta = phy_data;
4642 /* Defer Count */
4643 hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
4644 ret_val = hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
4645 if (!ret_val)
4646 adapter->stats.dc += phy_data;
4648 /* Transmit with no CRS */
4649 hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
4650 ret_val = hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
4651 if (!ret_val)
4652 adapter->stats.tncrs += phy_data;
4654 release:
4655 hw->phy.ops.release(hw);
4656 }
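/* The upper/lower read pattern repeated above could be captured in a
 * local helper macro. Sketch only (not part of the driver), relying on
 * read_reg_page() returning 0 on success and on the function comment's
 * statement that reading the upper half read/clears the counter:
 */
#if 0
#define HV_ACCUMULATE(hw, reg, dest)					\
	do {								\
		u16 __val;						\
		(hw)->phy.ops.read_reg_page((hw), HV_##reg##_UPPER, &__val); \
		if (!(hw)->phy.ops.read_reg_page((hw), HV_##reg##_LOWER, \
						 &__val))		\
			(dest) += __val;				\
	} while (0)
/* usage: HV_ACCUMULATE(hw, SCC, adapter->stats.scc); */
#endif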
4659 * e1000e_update_stats - Update the board statistics counters
4660 * @adapter: board private structure
4662 static void e1000e_update_stats(struct e1000_adapter *adapter)
4664 struct net_device *netdev = adapter->netdev;
4665 struct e1000_hw *hw = &adapter->hw;
4666 struct pci_dev *pdev = adapter->pdev;
4668 /* Prevent stats update while adapter is being reset, or if the pci
4669 * connection is down.
4671 if (adapter->link_speed == 0)
4672 return;
4673 if (pci_channel_offline(pdev))
4674 return;
4676 adapter->stats.crcerrs += er32(CRCERRS);
4677 adapter->stats.gprc += er32(GPRC);
4678 adapter->stats.gorc += er32(GORCL);
4679 er32(GORCH); /* Clear gorc */
4680 adapter->stats.bprc += er32(BPRC);
4681 adapter->stats.mprc += er32(MPRC);
4682 adapter->stats.roc += er32(ROC);
4684 adapter->stats.mpc += er32(MPC);
4686 /* Half-duplex statistics */
4687 if (adapter->link_duplex == HALF_DUPLEX) {
4688 if (adapter->flags2 & FLAG2_HAS_PHY_STATS) {
4689 e1000e_update_phy_stats(adapter);
4690 } else {
4691 adapter->stats.scc += er32(SCC);
4692 adapter->stats.ecol += er32(ECOL);
4693 adapter->stats.mcc += er32(MCC);
4694 adapter->stats.latecol += er32(LATECOL);
4695 adapter->stats.dc += er32(DC);
4697 hw->mac.collision_delta = er32(COLC);
4699 if ((hw->mac.type != e1000_82574) &&
4700 (hw->mac.type != e1000_82583))
4701 adapter->stats.tncrs += er32(TNCRS);
4702 }
4703 adapter->stats.colc += hw->mac.collision_delta;
4704 }
4706 adapter->stats.xonrxc += er32(XONRXC);
4707 adapter->stats.xontxc += er32(XONTXC);
4708 adapter->stats.xoffrxc += er32(XOFFRXC);
4709 adapter->stats.xofftxc += er32(XOFFTXC);
4710 adapter->stats.gptc += er32(GPTC);
4711 adapter->stats.gotc += er32(GOTCL);
4712 er32(GOTCH); /* Clear gotc */
4713 adapter->stats.rnbc += er32(RNBC);
4714 adapter->stats.ruc += er32(RUC);
4716 adapter->stats.mptc += er32(MPTC);
4717 adapter->stats.bptc += er32(BPTC);
4719 /* used for adaptive IFS */
4721 hw->mac.tx_packet_delta = er32(TPT);
4722 adapter->stats.tpt += hw->mac.tx_packet_delta;
4724 adapter->stats.algnerrc += er32(ALGNERRC);
4725 adapter->stats.rxerrc += er32(RXERRC);
4726 adapter->stats.cexterr += er32(CEXTERR);
4727 adapter->stats.tsctc += er32(TSCTC);
4728 adapter->stats.tsctfc += er32(TSCTFC);
4730 /* Fill out the OS statistics structure */
4731 netdev->stats.multicast = adapter->stats.mprc;
4732 netdev->stats.collisions = adapter->stats.colc;
4736 /* RLEC on some newer hardware can be incorrect so build
4737 * our own version based on RUC and ROC
4739 netdev->stats.rx_errors = adapter->stats.rxerrc +
4740 adapter->stats.crcerrs + adapter->stats.algnerrc +
4741 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
4742 netdev->stats.rx_length_errors = adapter->stats.ruc +
4743 adapter->stats.roc;
4744 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4745 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
4746 netdev->stats.rx_missed_errors = adapter->stats.mpc;
4749 netdev->stats.tx_errors = adapter->stats.ecol + adapter->stats.latecol;
4750 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
4751 netdev->stats.tx_window_errors = adapter->stats.latecol;
4752 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
4754 /* Tx Dropped needs to be maintained elsewhere */
4756 /* Management Stats */
4757 adapter->stats.mgptc += er32(MGTPTC);
4758 adapter->stats.mgprc += er32(MGTPRC);
4759 adapter->stats.mgpdc += er32(MGTPDC);
4761 /* Correctable ECC Errors */
4762 if (hw->mac.type == e1000_pch_lpt) {
4763 u32 pbeccsts = er32(PBECCSTS);
4765 adapter->corr_errors +=
4766 pbeccsts & E1000_PBECCSTS_CORR_ERR_CNT_MASK;
4767 adapter->uncorr_errors +=
4768 (pbeccsts & E1000_PBECCSTS_UNCORR_ERR_CNT_MASK) >>
4769 E1000_PBECCSTS_UNCORR_ERR_CNT_SHIFT;
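/* Field layout assumed here (per the ich8lan register definitions,
 * correctable count in bits 7:0, uncorrectable count in bits 15:8):
 * a PBECCSTS snapshot of 0x0000020A would add 0x0A correctable and
 * 0x02 uncorrectable errors to the running totals.
 */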
4774 * e1000_phy_read_status - Update the PHY register status snapshot
4775 * @adapter: board private structure
4777 static void e1000_phy_read_status(struct e1000_adapter *adapter)
4779 struct e1000_hw *hw = &adapter->hw;
4780 struct e1000_phy_regs *phy = &adapter->phy_regs;
4782 if (!pm_runtime_suspended((&adapter->pdev->dev)->parent) &&
4783 (er32(STATUS) & E1000_STATUS_LU) &&
4784 (adapter->hw.phy.media_type == e1000_media_type_copper)) {
4785 int ret_val;
4787 ret_val = e1e_rphy(hw, MII_BMCR, &phy->bmcr);
4788 ret_val |= e1e_rphy(hw, MII_BMSR, &phy->bmsr);
4789 ret_val |= e1e_rphy(hw, MII_ADVERTISE, &phy->advertise);
4790 ret_val |= e1e_rphy(hw, MII_LPA, &phy->lpa);
4791 ret_val |= e1e_rphy(hw, MII_EXPANSION, &phy->expansion);
4792 ret_val |= e1e_rphy(hw, MII_CTRL1000, &phy->ctrl1000);
4793 ret_val |= e1e_rphy(hw, MII_STAT1000, &phy->stat1000);
4794 ret_val |= e1e_rphy(hw, MII_ESTATUS, &phy->estatus);
4796 e_warn("Error reading PHY register\n");
4798 /* Do not read PHY registers if link is not up
4799 * Set values to typical power-on defaults
4801 phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
4802 phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
4803 BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
4804 BMSR_ERCAP);
4805 phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
4806 ADVERTISE_ALL | ADVERTISE_CSMA);
4807 phy->lpa = 0;
4808 phy->expansion = EXPANSION_ENABLENPAGE;
4809 phy->ctrl1000 = ADVERTISE_1000FULL;
4810 phy->stat1000 = 0;
4811 phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
4812 }
4813 }
4815 static void e1000_print_link_info(struct e1000_adapter *adapter)
4817 struct e1000_hw *hw = &adapter->hw;
4818 u32 ctrl = er32(CTRL);
4820 /* Link status message must follow this format for user tools */
4821 pr_info("%s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
4822 adapter->netdev->name, adapter->link_speed,
4823 adapter->link_duplex == FULL_DUPLEX ? "Full" : "Half",
4824 (ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE) ? "Rx/Tx" :
4825 (ctrl & E1000_CTRL_RFCE) ? "Rx" :
4826 (ctrl & E1000_CTRL_TFCE) ? "Tx" : "None");
4829 static bool e1000e_has_link(struct e1000_adapter *adapter)
4831 struct e1000_hw *hw = &adapter->hw;
4832 bool link_active = false;
4835 /* get_link_status is set on LSC (link status) interrupt or
4836 * Rx sequence error interrupt. get_link_status will stay
4837 * false until the check_for_link establishes link
4838 * for copper adapters ONLY
4840 switch (hw->phy.media_type) {
4841 case e1000_media_type_copper:
4842 if (hw->mac.get_link_status) {
4843 ret_val = hw->mac.ops.check_for_link(hw);
4844 link_active = !hw->mac.get_link_status;
4845 } else {
4846 link_active = true;
4847 }
4848 break;
4849 case e1000_media_type_fiber:
4850 ret_val = hw->mac.ops.check_for_link(hw);
4851 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
4852 break;
4853 case e1000_media_type_internal_serdes:
4854 ret_val = hw->mac.ops.check_for_link(hw);
4855 link_active = adapter->hw.mac.serdes_has_link;
4856 break;
4857 default:
4858 case e1000_media_type_unknown:
4859 break;
4860 }
4862 if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
4863 (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
4864 /* See e1000_kmrn_lock_loss_workaround_ich8lan() */
4865 e_info("Gigabit has been disabled, downgrading speed\n");
4871 static void e1000e_enable_receives(struct e1000_adapter *adapter)
4873 /* make sure the receive unit is started */
4874 if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
4875 (adapter->flags & FLAG_RESTART_NOW)) {
4876 struct e1000_hw *hw = &adapter->hw;
4877 u32 rctl = er32(RCTL);
4879 ew32(RCTL, rctl | E1000_RCTL_EN);
4880 adapter->flags &= ~FLAG_RESTART_NOW;
4884 static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
4886 struct e1000_hw *hw = &adapter->hw;
4888 /* With 82574 controllers, PHY needs to be checked periodically
4889 * for hung state and reset, if two calls return true
4891 if (e1000_check_phy_82574(hw))
4892 adapter->phy_hang_count++;
4893 else
4894 adapter->phy_hang_count = 0;
4896 if (adapter->phy_hang_count > 1) {
4897 adapter->phy_hang_count = 0;
4898 e_dbg("PHY appears hung - resetting\n");
4899 schedule_work(&adapter->reset_task);
4904 * e1000_watchdog - Timer Call-back
4905 * @data: pointer to adapter cast into an unsigned long
4907 static void e1000_watchdog(unsigned long data)
4909 struct e1000_adapter *adapter = (struct e1000_adapter *)data;
4911 /* Do the rest outside of interrupt context */
4912 schedule_work(&adapter->watchdog_task);
4914 /* TODO: make this use queue_delayed_work() */
4917 static void e1000_watchdog_task(struct work_struct *work)
4919 struct e1000_adapter *adapter = container_of(work,
4920 struct e1000_adapter,
4922 struct net_device *netdev = adapter->netdev;
4923 struct e1000_mac_info *mac = &adapter->hw.mac;
4924 struct e1000_phy_info *phy = &adapter->hw.phy;
4925 struct e1000_ring *tx_ring = adapter->tx_ring;
4926 struct e1000_hw *hw = &adapter->hw;
4927 u32 link, tctl;
4929 if (test_bit(__E1000_DOWN, &adapter->state))
4930 return;
4932 link = e1000e_has_link(adapter);
4933 if ((netif_carrier_ok(netdev)) && link) {
4934 /* Cancel scheduled suspend requests. */
4935 pm_runtime_resume(netdev->dev.parent);
4937 e1000e_enable_receives(adapter);
4938 goto link_up;
4939 }
4941 if ((e1000e_enable_tx_pkt_filtering(hw)) &&
4942 (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
4943 e1000_update_mng_vlan(adapter);
4945 if (link) {
4946 if (!netif_carrier_ok(netdev)) {
4947 bool txb2b = true;
4949 /* Cancel scheduled suspend requests. */
4950 pm_runtime_resume(netdev->dev.parent);
4952 /* update snapshot of PHY registers on LSC */
4953 e1000_phy_read_status(adapter);
4954 mac->ops.get_link_up_info(&adapter->hw,
4955 &adapter->link_speed,
4956 &adapter->link_duplex);
4957 e1000_print_link_info(adapter);
4959 /* check if SmartSpeed worked */
4960 e1000e_check_downshift(hw);
4961 if (phy->speed_downgraded)
4963 "Link Speed was downgraded by SmartSpeed\n");
4965 /* On supported PHYs, check for duplex mismatch only
4966 * if link has autonegotiated at 10/100 half
4968 if ((hw->phy.type == e1000_phy_igp_3 ||
4969 hw->phy.type == e1000_phy_bm) &&
4971 (adapter->link_speed == SPEED_10 ||
4972 adapter->link_speed == SPEED_100) &&
4973 (adapter->link_duplex == HALF_DUPLEX)) {
4974 u16 autoneg_exp;
4976 e1e_rphy(hw, MII_EXPANSION, &autoneg_exp);
4978 if (!(autoneg_exp & EXPANSION_NWAY))
4979 e_info("Autonegotiated half duplex but link partner cannot autoneg. Try forcing full duplex if link gets many collisions.\n");
4982 /* adjust timeout factor according to speed/duplex */
4983 adapter->tx_timeout_factor = 1;
4984 switch (adapter->link_speed) {
4985 case SPEED_10:
4986 txb2b = false;
4987 adapter->tx_timeout_factor = 16;
4988 break;
4989 case SPEED_100:
4990 txb2b = false;
4991 adapter->tx_timeout_factor = 10;
4992 break;
4993 }
4995 /* workaround: re-program speed mode bit after
4996 * link-up event
4997 */
4998 if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
4999 !txb2b) {
5000 u32 tarc0;
5002 tarc0 = er32(TARC(0));
5003 tarc0 &= ~SPEED_MODE_BIT;
5004 ew32(TARC(0), tarc0);
5005 }
5007 /* disable TSO for pcie and 10/100 speeds, to avoid
5008 * some hardware issues
5010 if (!(adapter->flags & FLAG_TSO_FORCE)) {
5011 switch (adapter->link_speed) {
5014 e_info("10/100 speed: disabling TSO\n");
5015 netdev->features &= ~NETIF_F_TSO;
5016 netdev->features &= ~NETIF_F_TSO6;
5017 break;
5018 default:
5019 netdev->features |= NETIF_F_TSO;
5020 netdev->features |= NETIF_F_TSO6;
5021 break;
5022 }
5023 }
5028 /* enable transmits in the hardware, need to do this
5029 * after setting TARC(0)
5030 */
5031 tctl = er32(TCTL);
5032 tctl |= E1000_TCTL_EN;
5033 ew32(TCTL, tctl);
5035 /* Perform any post-link-up configuration before
5036 * reporting link up.
5038 if (phy->ops.cfg_on_link_up)
5039 phy->ops.cfg_on_link_up(hw);
5041 netif_carrier_on(netdev);
5043 if (!test_bit(__E1000_DOWN, &adapter->state))
5044 mod_timer(&adapter->phy_info_timer,
5045 round_jiffies(jiffies + 2 * HZ));
5046 }
5047 } else {
5048 if (netif_carrier_ok(netdev)) {
5049 adapter->link_speed = 0;
5050 adapter->link_duplex = 0;
5051 /* Link status message must follow this format */
5052 pr_info("%s NIC Link is Down\n", adapter->netdev->name);
5053 netif_carrier_off(netdev);
5054 if (!test_bit(__E1000_DOWN, &adapter->state))
5055 mod_timer(&adapter->phy_info_timer,
5056 round_jiffies(jiffies + 2 * HZ));
5058 /* 8000ES2LAN requires a Rx packet buffer work-around
5059 * on link down event; reset the controller to flush
5060 * the Rx packet buffer.
5062 if (adapter->flags & FLAG_RX_NEEDS_RESTART)
5063 adapter->flags |= FLAG_RESTART_NOW;
5064 else
5065 pm_schedule_suspend(netdev->dev.parent,
5066 LINK_TIMEOUT);
5067 }
5068 }
5070 link_up:
5071 spin_lock(&adapter->stats64_lock);
5072 e1000e_update_stats(adapter);
5074 mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
5075 adapter->tpt_old = adapter->stats.tpt;
5076 mac->collision_delta = adapter->stats.colc - adapter->colc_old;
5077 adapter->colc_old = adapter->stats.colc;
5079 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
5080 adapter->gorc_old = adapter->stats.gorc;
5081 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
5082 adapter->gotc_old = adapter->stats.gotc;
5083 spin_unlock(&adapter->stats64_lock);
5085 /* If the link is lost the controller stops DMA, but
5086 * if there is queued Tx work it cannot be done. So
5087 * reset the controller to flush the Tx packet buffers.
5089 if (!netif_carrier_ok(netdev) &&
5090 (e1000_desc_unused(tx_ring) + 1 < tx_ring->count))
5091 adapter->flags |= FLAG_RESTART_NOW;
5093 /* If reset is necessary, do it outside of interrupt context. */
5094 if (adapter->flags & FLAG_RESTART_NOW) {
5095 schedule_work(&adapter->reset_task);
5096 /* return immediately since reset is imminent */
5097 return;
5098 }
5100 e1000e_update_adaptive(&adapter->hw);
5102 /* Simple mode for Interrupt Throttle Rate (ITR) */
5103 if (adapter->itr_setting == 4) {
5104 /* Symmetric Tx/Rx gets a reduced ITR=2000;
5105 * Total asymmetrical Tx or Rx gets ITR=8000;
5106 * everyone else is between 2000-8000.
5108 u32 goc = (adapter->gotc + adapter->gorc) / 10000;
5109 u32 dif = (adapter->gotc > adapter->gorc ?
5110 adapter->gotc - adapter->gorc :
5111 adapter->gorc - adapter->gotc) / 10000;
5112 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
5114 e1000e_write_itr(adapter, itr);
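/* Worked example: 60 MB transmitted and 20 MB received since the last
 * tick gives goc = 8000 and dif = 4000, so itr = 4000 * 6000 / 8000 +
 * 2000 = 5000, i.e. between the symmetric-load floor (2000) and the
 * fully asymmetric ceiling (8000).
 */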
5117 /* Cause software interrupt to ensure Rx ring is cleaned */
5118 if (adapter->msix_entries)
5119 ew32(ICS, adapter->rx_ring->ims_val);
5120 else
5121 ew32(ICS, E1000_ICS_RXDMT0);
5123 /* flush pending descriptors to memory before detecting Tx hang */
5124 e1000e_flush_descriptors(adapter);
5126 /* Force detection of hung controller every watchdog period */
5127 adapter->detect_tx_hung = true;
5129 /* With 82571 controllers, LAA may be overwritten due to controller
5130 * reset from the other port. Set the appropriate LAA in RAR[0]
5132 if (e1000e_get_laa_state_82571(hw))
5133 hw->mac.ops.rar_set(hw, adapter->hw.mac.addr, 0);
5135 if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
5136 e1000e_check_82574_phy_workaround(adapter);
5138 /* Clear valid timestamp stuck in RXSTMPL/H due to a Rx error */
5139 if (adapter->hwtstamp_config.rx_filter != HWTSTAMP_FILTER_NONE) {
5140 if ((adapter->flags2 & FLAG2_CHECK_RX_HWTSTAMP) &&
5141 (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID)) {
5142 er32(RXSTMPH);
5143 adapter->rx_hwtstamp_cleared++;
5144 } else {
5145 adapter->flags2 |= FLAG2_CHECK_RX_HWTSTAMP;
5146 }
5147 }
5149 /* Reset the timer */
5150 if (!test_bit(__E1000_DOWN, &adapter->state))
5151 mod_timer(&adapter->watchdog_timer,
5152 round_jiffies(jiffies + 2 * HZ));
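/* Sketch of the queue_delayed_work() conversion suggested by the TODO
 * above (compiled out; the delayed_work field shown here is
 * hypothetical, not part of this driver):
 */
#if 0
queue_delayed_work(system_long_wq, &adapter->watchdog_dwork,
		   round_jiffies_relative(2 * HZ));
#endif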
5155 #define E1000_TX_FLAGS_CSUM 0x00000001
5156 #define E1000_TX_FLAGS_VLAN 0x00000002
5157 #define E1000_TX_FLAGS_TSO 0x00000004
5158 #define E1000_TX_FLAGS_IPV4 0x00000008
5159 #define E1000_TX_FLAGS_NO_FCS 0x00000010
5160 #define E1000_TX_FLAGS_HWTSTAMP 0x00000020
5161 #define E1000_TX_FLAGS_VLAN_MASK 0xffff0000
5162 #define E1000_TX_FLAGS_VLAN_SHIFT 16
5164 static int e1000_tso(struct e1000_ring *tx_ring, struct sk_buff *skb,
5167 struct e1000_context_desc *context_desc;
5168 struct e1000_buffer *buffer_info;
5169 unsigned int i;
5170 u32 cmd_length = 0;
5171 u16 ipcse = 0, mss;
5172 u8 ipcss, ipcso, tucss, tucso, hdr_len;
5173 int err;
5175 if (!skb_is_gso(skb))
5176 return 0;
5178 err = skb_cow_head(skb, 0);
5179 if (err < 0)
5180 return err;
5182 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5183 mss = skb_shinfo(skb)->gso_size;
5184 if (protocol == htons(ETH_P_IP)) {
5185 struct iphdr *iph = ip_hdr(skb);
5186 iph->tot_len = 0;
5187 iph->check = 0;
5188 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
5189 0, IPPROTO_TCP, 0);
5190 cmd_length = E1000_TXD_CMD_IP;
5191 ipcse = skb_transport_offset(skb) - 1;
5192 } else if (skb_is_gso_v6(skb)) {
5193 ipv6_hdr(skb)->payload_len = 0;
5194 tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
5195 &ipv6_hdr(skb)->daddr,
5196 0, IPPROTO_TCP, 0);
5197 ipcse = 0;
5198 }
5199 ipcss = skb_network_offset(skb);
5200 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
5201 tucss = skb_transport_offset(skb);
5202 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
5204 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
5205 E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
5207 i = tx_ring->next_to_use;
5208 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5209 buffer_info = &tx_ring->buffer_info[i];
5211 context_desc->lower_setup.ip_fields.ipcss = ipcss;
5212 context_desc->lower_setup.ip_fields.ipcso = ipcso;
5213 context_desc->lower_setup.ip_fields.ipcse = cpu_to_le16(ipcse);
5214 context_desc->upper_setup.tcp_fields.tucss = tucss;
5215 context_desc->upper_setup.tcp_fields.tucso = tucso;
5216 context_desc->upper_setup.tcp_fields.tucse = 0;
5217 context_desc->tcp_seg_setup.fields.mss = cpu_to_le16(mss);
5218 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
5219 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
5221 buffer_info->time_stamp = jiffies;
5222 buffer_info->next_to_watch = i;
5224 i++;
5225 if (i == tx_ring->count)
5226 i = 0;
5227 tx_ring->next_to_use = i;
5229 return 1;
5230 }
5232 static bool e1000_tx_csum(struct e1000_ring *tx_ring, struct sk_buff *skb,
5235 struct e1000_adapter *adapter = tx_ring->adapter;
5236 struct e1000_context_desc *context_desc;
5237 struct e1000_buffer *buffer_info;
5238 unsigned int i;
5239 u8 css;
5240 u32 cmd_len = E1000_TXD_CMD_DEXT;
5242 if (skb->ip_summed != CHECKSUM_PARTIAL)
5243 return false;
5245 switch (protocol) {
5246 case cpu_to_be16(ETH_P_IP):
5247 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
5248 cmd_len |= E1000_TXD_CMD_TCP;
5249 break;
5250 case cpu_to_be16(ETH_P_IPV6):
5251 /* XXX not handling all IPV6 headers */
5252 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
5253 cmd_len |= E1000_TXD_CMD_TCP;
5254 break;
5255 default:
5256 if (unlikely(net_ratelimit()))
5257 e_warn("checksum_partial proto=%x!\n",
5258 be16_to_cpu(protocol));
5259 break;
5260 }
5262 css = skb_checksum_start_offset(skb);
5264 i = tx_ring->next_to_use;
5265 buffer_info = &tx_ring->buffer_info[i];
5266 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
5268 context_desc->lower_setup.ip_config = 0;
5269 context_desc->upper_setup.tcp_fields.tucss = css;
5270 context_desc->upper_setup.tcp_fields.tucso = css + skb->csum_offset;
5271 context_desc->upper_setup.tcp_fields.tucse = 0;
5272 context_desc->tcp_seg_setup.data = 0;
5273 context_desc->cmd_and_length = cpu_to_le32(cmd_len);
5275 buffer_info->time_stamp = jiffies;
5276 buffer_info->next_to_watch = i;
5278 i++;
5279 if (i == tx_ring->count)
5280 i = 0;
5281 tx_ring->next_to_use = i;
5283 return true;
5284 }
5286 static int e1000_tx_map(struct e1000_ring *tx_ring, struct sk_buff *skb,
5287 unsigned int first, unsigned int max_per_txd,
5288 unsigned int nr_frags)
5290 struct e1000_adapter *adapter = tx_ring->adapter;
5291 struct pci_dev *pdev = adapter->pdev;
5292 struct e1000_buffer *buffer_info;
5293 unsigned int len = skb_headlen(skb);
5294 unsigned int offset = 0, size, count = 0, i;
5295 unsigned int f, bytecount, segs;
5297 i = tx_ring->next_to_use;
5299 while (len) {
5300 buffer_info = &tx_ring->buffer_info[i];
5301 size = min(len, max_per_txd);
5303 buffer_info->length = size;
5304 buffer_info->time_stamp = jiffies;
5305 buffer_info->next_to_watch = i;
5306 buffer_info->dma = dma_map_single(&pdev->dev,
5307 skb->data + offset,
5308 size, DMA_TO_DEVICE);
5309 buffer_info->mapped_as_page = false;
5310 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5311 goto dma_error;
5313 len -= size;
5314 offset += size;
5315 count++;
5317 if (len) {
5318 i++;
5319 if (i == tx_ring->count)
5320 i = 0;
5321 }
5322 }
5324 for (f = 0; f < nr_frags; f++) {
5325 const struct skb_frag_struct *frag;
5327 frag = &skb_shinfo(skb)->frags[f];
5328 len = skb_frag_size(frag);
5329 offset = 0;
5331 while (len) {
5332 i++;
5333 if (i == tx_ring->count)
5334 i = 0;
5336 buffer_info = &tx_ring->buffer_info[i];
5337 size = min(len, max_per_txd);
5339 buffer_info->length = size;
5340 buffer_info->time_stamp = jiffies;
5341 buffer_info->next_to_watch = i;
5342 buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
5343 offset, size,
5344 DMA_TO_DEVICE);
5345 buffer_info->mapped_as_page = true;
5346 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
5347 goto dma_error;
5349 len -= size;
5350 offset += size;
5351 count++;
5352 }
5353 }
5355 segs = skb_shinfo(skb)->gso_segs ? : 1;
5356 /* multiply data chunks by size of headers */
5357 bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
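/* e.g. gso_segs == 4 with a 54-byte linear header and skb->len == 6000
 * yields (4 - 1) * 54 + 6000 = 6162 bytes, since the header bytes are
 * replicated in every segment on the wire.
 */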
5359 tx_ring->buffer_info[i].skb = skb;
5360 tx_ring->buffer_info[i].segs = segs;
5361 tx_ring->buffer_info[i].bytecount = bytecount;
5362 tx_ring->buffer_info[first].next_to_watch = i;
5364 return count;
5366 dma_error:
5367 dev_err(&pdev->dev, "Tx DMA map failed\n");
5368 buffer_info->dma = 0;
5369 if (count)
5370 count--;
5372 while (count--) {
5373 if (i == 0)
5374 i += tx_ring->count;
5375 i--;
5376 buffer_info = &tx_ring->buffer_info[i];
5377 e1000_put_txbuf(tx_ring, buffer_info);
5378 }
5380 return 0;
5381 }
5383 static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
5385 struct e1000_adapter *adapter = tx_ring->adapter;
5386 struct e1000_tx_desc *tx_desc = NULL;
5387 struct e1000_buffer *buffer_info;
5388 u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
5389 unsigned int i;
5391 if (tx_flags & E1000_TX_FLAGS_TSO) {
5392 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
5393 E1000_TXD_CMD_TSE;
5394 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5396 if (tx_flags & E1000_TX_FLAGS_IPV4)
5397 txd_upper |= E1000_TXD_POPTS_IXSM << 8;
5398 }
5400 if (tx_flags & E1000_TX_FLAGS_CSUM) {
5401 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5402 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
5403 }
5405 if (tx_flags & E1000_TX_FLAGS_VLAN) {
5406 txd_lower |= E1000_TXD_CMD_VLE;
5407 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
5408 }
5410 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5411 txd_lower &= ~(E1000_TXD_CMD_IFCS);
5413 if (unlikely(tx_flags & E1000_TX_FLAGS_HWTSTAMP)) {
5414 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
5415 txd_upper |= E1000_TXD_EXTCMD_TSTAMP;
5416 }
5418 i = tx_ring->next_to_use;
5420 do {
5421 buffer_info = &tx_ring->buffer_info[i];
5422 tx_desc = E1000_TX_DESC(*tx_ring, i);
5423 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
5424 tx_desc->lower.data = cpu_to_le32(txd_lower |
5425 buffer_info->length);
5426 tx_desc->upper.data = cpu_to_le32(txd_upper);
5428 i++;
5429 if (i == tx_ring->count)
5430 i = 0;
5431 } while (--count > 0);
5433 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
5435 /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
5436 if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
5437 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
5439 /* Force memory writes to complete before letting h/w
5440 * know there are new descriptors to fetch. (Only
5441 * applicable for weak-ordered memory model archs,
5442 * such as IA-64).
5443 */
5444 wmb();
5446 tx_ring->next_to_use = i;
5448 if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
5449 e1000e_update_tdt_wa(tx_ring, i);
5450 else
5451 writel(i, tx_ring->tail);
5453 /* we need this if more than one processor can write to our tail
5454 * at a time, it synchronizes IO on IA64/Altix systems
5455 */
5456 mmiowb();
5457 }
5459 #define MINIMUM_DHCP_PACKET_SIZE 282
5460 static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
5461 struct sk_buff *skb)
5463 struct e1000_hw *hw = &adapter->hw;
5464 u16 length, offset;
5466 if (vlan_tx_tag_present(skb) &&
5467 !((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id) &&
5468 (adapter->hw.mng_cookie.status &
5469 E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
5470 return 0;
5472 if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
5473 return 0;
5475 if (((struct ethhdr *)skb->data)->h_proto != htons(ETH_P_IP))
5476 return 0;
5478 {
5479 const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data + 14);
5480 struct udphdr *udp;
5482 if (ip->protocol != IPPROTO_UDP)
5483 return 0;
5485 udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
5486 if (ntohs(udp->dest) != 67)
5487 return 0;
5489 offset = (u8 *)udp + 8 - skb->data;
5490 length = skb->len - offset;
5491 return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
5492 }
5494 return 0;
5495 }
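/* Packet layout assumed by the parsing above: a 14-byte Ethernet
 * header, an IP header of ip->ihl * 4 bytes, then the 8-byte UDP
 * header; the DHCP payload handed to the firmware starts right after
 * those 8 bytes, hence the (u8 *)udp + 8 arithmetic.
 */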
5497 static int __e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5499 struct e1000_adapter *adapter = tx_ring->adapter;
5501 netif_stop_queue(adapter->netdev);
5502 /* Herbert's original patch had:
5503 * smp_mb__after_netif_stop_queue();
5504 * but since that doesn't exist yet, just open code it.
5505 */
5506 smp_mb();
5508 /* We need to check again in a case another CPU has just
5509 * made room available.
5511 if (e1000_desc_unused(tx_ring) < size)
5512 return -EBUSY;
5514 /* A reprieve! */
5515 netif_start_queue(adapter->netdev);
5516 ++adapter->restart_queue;
5517 return 0;
5518 }
5520 static int e1000_maybe_stop_tx(struct e1000_ring *tx_ring, int size)
5522 BUG_ON(size > tx_ring->count);
5524 if (e1000_desc_unused(tx_ring) >= size)
5525 return 0;
5526 return __e1000_maybe_stop_tx(tx_ring, size);
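/* Worked example, assuming the usual "unused = clean - use - 1 (mod
 * ring size)" accounting in e1000_desc_unused(): with count == 256,
 * next_to_use == 250 and next_to_clean == 10 there are 256 + 10 - 250 -
 * 1 = 15 free descriptors, so any request with size <= 15 returns
 * without stopping the queue.
 */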
5529 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
5530 struct net_device *netdev)
5532 struct e1000_adapter *adapter = netdev_priv(netdev);
5533 struct e1000_ring *tx_ring = adapter->tx_ring;
5534 unsigned int first;
5535 unsigned int tx_flags = 0;
5536 unsigned int len = skb_headlen(skb);
5537 unsigned int nr_frags;
5538 unsigned int mss;
5539 int count = 0;
5540 int tso;
5541 unsigned int f;
5542 __be16 protocol = vlan_get_protocol(skb);
5544 if (test_bit(__E1000_DOWN, &adapter->state)) {
5545 dev_kfree_skb_any(skb);
5546 return NETDEV_TX_OK;
5549 if (skb->len <= 0) {
5550 dev_kfree_skb_any(skb);
5551 return NETDEV_TX_OK;
5554 /* The minimum packet size with TCTL.PSP set is 17 bytes so
5555 * pad skb in order to meet this minimum size requirement
5557 if (skb_put_padto(skb, 17))
5558 return NETDEV_TX_OK;
5560 mss = skb_shinfo(skb)->gso_size;
5561 if (mss) {
5562 u8 hdr_len;
5564 /* TSO Workaround for 82571/2/3 Controllers -- if skb->data
5565 * points to just header, pull a few bytes of payload from
5566 * frags into skb->data
5568 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
5569 /* we do this workaround for ES2LAN, but it is un-necessary,
5570 * avoiding it could save a lot of cycles
5572 if (skb->data_len && (hdr_len == len)) {
5573 unsigned int pull_size;
5575 pull_size = min_t(unsigned int, 4, skb->data_len);
5576 if (!__pskb_pull_tail(skb, pull_size)) {
5577 e_err("__pskb_pull_tail failed.\n");
5578 dev_kfree_skb_any(skb);
5579 return NETDEV_TX_OK;
5580 }
5581 len = skb_headlen(skb);
5582 }
5583 }
5585 /* reserve a descriptor for the offload context */
5586 if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
5587 count++;
5588 count++;
5590 count += DIV_ROUND_UP(len, adapter->tx_fifo_limit);
5592 nr_frags = skb_shinfo(skb)->nr_frags;
5593 for (f = 0; f < nr_frags; f++)
5594 count += DIV_ROUND_UP(skb_frag_size(&skb_shinfo(skb)->frags[f]),
5595 adapter->tx_fifo_limit);
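/* Example: a 3000-byte linear area with tx_fifo_limit == 2048 costs
 * DIV_ROUND_UP(3000, 2048) == 2 data descriptors; each frag is counted
 * the same way, plus the one descriptor reserved above for the offload
 * context when TSO or checksum offload is in use.
 */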
5597 if (adapter->hw.mac.tx_pkt_filtering)
5598 e1000_transfer_dhcp_info(adapter, skb);
5600 /* need: count + 2 desc gap to keep tail from touching
5601 * head, otherwise try next time
5603 if (e1000_maybe_stop_tx(tx_ring, count + 2))
5604 return NETDEV_TX_BUSY;
5606 if (vlan_tx_tag_present(skb)) {
5607 tx_flags |= E1000_TX_FLAGS_VLAN;
5608 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
5611 first = tx_ring->next_to_use;
5613 tso = e1000_tso(tx_ring, skb, protocol);
5614 if (tso < 0) {
5615 dev_kfree_skb_any(skb);
5616 return NETDEV_TX_OK;
5617 }
5619 if (tso)
5620 tx_flags |= E1000_TX_FLAGS_TSO;
5621 else if (e1000_tx_csum(tx_ring, skb, protocol))
5622 tx_flags |= E1000_TX_FLAGS_CSUM;
5624 /* Old method was to assume IPv4 packet by default if TSO was enabled.
5625 * 82571 hardware supports TSO capabilities for IPv6 as well...
5626 * no longer assume, we must.
5628 if (protocol == htons(ETH_P_IP))
5629 tx_flags |= E1000_TX_FLAGS_IPV4;
5631 if (unlikely(skb->no_fcs))
5632 tx_flags |= E1000_TX_FLAGS_NO_FCS;
5634 /* if count is 0 then mapping error has occurred */
5635 count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
5636 nr_frags);
5637 if (count) {
5638 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
5639 !adapter->tx_hwtstamp_skb)) {
5640 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
5641 tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
5642 adapter->tx_hwtstamp_skb = skb_get(skb);
5643 adapter->tx_hwtstamp_start = jiffies;
5644 schedule_work(&adapter->tx_hwtstamp_work);
5645 } else {
5646 skb_tx_timestamp(skb);
5647 }
5649 netdev_sent_queue(netdev, skb->len);
5650 e1000_tx_queue(tx_ring, tx_flags, count);
5651 /* Make sure there is space in the ring for the next send. */
5652 e1000_maybe_stop_tx(tx_ring,
5653 (MAX_SKB_FRAGS *
5654 DIV_ROUND_UP(PAGE_SIZE,
5655 adapter->tx_fifo_limit) + 2));
5656 } else {
5657 dev_kfree_skb_any(skb);
5658 tx_ring->buffer_info[first].time_stamp = 0;
5659 tx_ring->next_to_use = first;
5660 }
5662 return NETDEV_TX_OK;
5666 * e1000_tx_timeout - Respond to a Tx Hang
5667 * @netdev: network interface device structure
5669 static void e1000_tx_timeout(struct net_device *netdev)
5671 struct e1000_adapter *adapter = netdev_priv(netdev);
5673 /* Do the reset outside of interrupt context */
5674 adapter->tx_timeout_count++;
5675 schedule_work(&adapter->reset_task);
5678 static void e1000_reset_task(struct work_struct *work)
5680 struct e1000_adapter *adapter;
5681 adapter = container_of(work, struct e1000_adapter, reset_task);
5683 /* don't run the task if already down */
5684 if (test_bit(__E1000_DOWN, &adapter->state))
5685 return;
5687 if (!(adapter->flags & FLAG_RESTART_NOW)) {
5688 e1000e_dump(adapter);
5689 e_err("Reset adapter unexpectedly\n");
5691 e1000e_reinit_locked(adapter);
5695 * e1000_get_stats64 - Get System Network Statistics
5696 * @netdev: network interface device structure
5697 * @stats: rtnl_link_stats64 pointer
5699 * Returns the address of the device statistics structure.
5701 struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
5702 struct rtnl_link_stats64 *stats)
5704 struct e1000_adapter *adapter = netdev_priv(netdev);
5706 memset(stats, 0, sizeof(struct rtnl_link_stats64));
5707 spin_lock(&adapter->stats64_lock);
5708 e1000e_update_stats(adapter);
5709 /* Fill out the OS statistics structure */
5710 stats->rx_bytes = adapter->stats.gorc;
5711 stats->rx_packets = adapter->stats.gprc;
5712 stats->tx_bytes = adapter->stats.gotc;
5713 stats->tx_packets = adapter->stats.gptc;
5714 stats->multicast = adapter->stats.mprc;
5715 stats->collisions = adapter->stats.colc;
5719 /* RLEC on some newer hardware can be incorrect so build
5720 * our own version based on RUC and ROC
5722 stats->rx_errors = adapter->stats.rxerrc +
5723 adapter->stats.crcerrs + adapter->stats.algnerrc +
5724 adapter->stats.ruc + adapter->stats.roc + adapter->stats.cexterr;
5725 stats->rx_length_errors = adapter->stats.ruc + adapter->stats.roc;
5726 stats->rx_crc_errors = adapter->stats.crcerrs;
5727 stats->rx_frame_errors = adapter->stats.algnerrc;
5728 stats->rx_missed_errors = adapter->stats.mpc;
5731 stats->tx_errors = adapter->stats.ecol + adapter->stats.latecol;
5732 stats->tx_aborted_errors = adapter->stats.ecol;
5733 stats->tx_window_errors = adapter->stats.latecol;
5734 stats->tx_carrier_errors = adapter->stats.tncrs;
5736 /* Tx Dropped needs to be maintained elsewhere */
5738 spin_unlock(&adapter->stats64_lock);
5740 return stats;
5741 }
5743 * e1000_change_mtu - Change the Maximum Transfer Unit
5744 * @netdev: network interface device structure
5745 * @new_mtu: new value for maximum frame size
5747 * Returns 0 on success, negative on failure
5749 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
5751 struct e1000_adapter *adapter = netdev_priv(netdev);
5752 int max_frame = new_mtu + VLAN_HLEN + ETH_HLEN + ETH_FCS_LEN;
5754 /* Jumbo frame support */
5755 if ((max_frame > ETH_FRAME_LEN + ETH_FCS_LEN) &&
5756 !(adapter->flags & FLAG_HAS_JUMBO_FRAMES)) {
5757 e_err("Jumbo Frames not supported.\n");
5761 /* Supported frame sizes */
5762 if ((new_mtu < ETH_ZLEN + ETH_FCS_LEN + VLAN_HLEN) ||
5763 (max_frame > adapter->max_hw_frame_size)) {
5764 e_err("Unsupported MTU setting\n");
5768 /* Jumbo frame workaround on 82579 and newer requires CRC be stripped */
5769 if ((adapter->hw.mac.type >= e1000_pch2lan) &&
5770 !(adapter->flags2 & FLAG2_CRC_STRIPPING) &&
5771 (new_mtu > ETH_DATA_LEN)) {
5772 e_err("Jumbo Frames not supported on this device when CRC stripping is disabled.\n");
5776 while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
5777 usleep_range(1000, 2000);
5778 /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
5779 adapter->max_frame_size = max_frame;
5780 e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
5781 netdev->mtu = new_mtu;
5783 pm_runtime_get_sync(netdev->dev.parent);
5785 if (netif_running(netdev))
5786 e1000e_down(adapter, true);
5788 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
5789 * means we reserve 2 more, this pushes us to allocate from the next
5790 * larger slab size.
5791 * i.e. RXBUFFER_2048 --> size-4096 slab
5792 * However with the new *_jumbo_rx* routines, jumbo receives will use
5793 * fragmented skbs
5794 */
5796 if (max_frame <= 2048)
5797 adapter->rx_buffer_len = 2048;
5798 else
5799 adapter->rx_buffer_len = 4096;
5801 /* adjust allocation if LPE protects us, and we aren't using SBP */
5802 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) ||
5803 (max_frame == ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN))
5804 adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
5805 + ETH_FCS_LEN;
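/* Example: the default MTU of 1500 gives max_frame = 1500 + 14 + 4 + 4
 * = 1522, which matches the second case above, so the buffer length is
 * trimmed from the 2048 slab size back to exactly 1522 bytes.
 */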
5807 if (netif_running(netdev))
5808 e1000e_up(adapter);
5809 else
5810 e1000e_reset(adapter);
5812 pm_runtime_put_sync(netdev->dev.parent);
5814 clear_bit(__E1000_RESETTING, &adapter->state);
5816 return 0;
5817 }
5819 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
5822 struct e1000_adapter *adapter = netdev_priv(netdev);
5823 struct mii_ioctl_data *data = if_mii(ifr);
5825 if (adapter->hw.phy.media_type != e1000_media_type_copper)
5826 return -EOPNOTSUPP;
5828 switch (cmd) {
5829 case SIOCGMIIPHY:
5830 data->phy_id = adapter->hw.phy.addr;
5831 break;
5832 case SIOCGMIIREG:
5833 e1000_phy_read_status(adapter);
5835 switch (data->reg_num & 0x1F) {
5836 case MII_BMCR:
5837 data->val_out = adapter->phy_regs.bmcr;
5838 break;
5839 case MII_BMSR:
5840 data->val_out = adapter->phy_regs.bmsr;
5841 break;
5842 case MII_PHYSID1:
5843 data->val_out = (adapter->hw.phy.id >> 16);
5844 break;
5845 case MII_PHYSID2:
5846 data->val_out = (adapter->hw.phy.id & 0xFFFF);
5847 break;
5848 case MII_ADVERTISE:
5849 data->val_out = adapter->phy_regs.advertise;
5850 break;
5851 case MII_LPA:
5852 data->val_out = adapter->phy_regs.lpa;
5853 break;
5854 case MII_EXPANSION:
5855 data->val_out = adapter->phy_regs.expansion;
5856 break;
5857 case MII_CTRL1000:
5858 data->val_out = adapter->phy_regs.ctrl1000;
5859 break;
5860 case MII_STAT1000:
5861 data->val_out = adapter->phy_regs.stat1000;
5862 break;
5863 case MII_ESTATUS:
5864 data->val_out = adapter->phy_regs.estatus;
5865 break;
5866 default:
5867 return -EIO;
5868 }
5869 break;
5870 case SIOCSMIIREG:
5871 default:
5872 return -EOPNOTSUPP;
5873 }
5874 return 0;
5875 }
5878 * e1000e_hwtstamp_ioctl - control hardware time stamping
5879 * @netdev: network interface device structure
5880 * @ifreq: interface request
5882 * Outgoing time stamping can be enabled and disabled. Play nice and
5883 * disable it when requested, although it shouldn't cause any overhead
5884 * when no packet needs it. At most one packet in the queue may be
5885 * marked for time stamping, otherwise it would be impossible to tell
5886 * for sure to which packet the hardware time stamp belongs.
5888 * Incoming time stamping has to be configured via the hardware filters.
5889 * Not all combinations are supported, in particular event type has to be
5890 * specified. Matching the kind of event packet is not supported, with the
5891 * exception of "all V2 events regardless of level 2 or 4".
5893 static int e1000e_hwtstamp_set(struct net_device *netdev, struct ifreq *ifr)
5895 struct e1000_adapter *adapter = netdev_priv(netdev);
5896 struct hwtstamp_config config;
5897 int ret_val;
5899 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
5900 return -EFAULT;
5902 ret_val = e1000e_config_hwtstamp(adapter, &config);
5903 if (ret_val)
5904 return ret_val;
5906 switch (config.rx_filter) {
5907 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
5908 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
5909 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5910 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5911 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5912 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5913 /* With V2 type filters which specify a Sync or Delay Request,
5914 * Path Delay Request/Response messages are also time stamped
5915 * by hardware so notify the caller the requested packets plus
5916 * some others are time stamped.
5918 config.rx_filter = HWTSTAMP_FILTER_SOME;
5919 break;
5920 default:
5921 break;
5922 }
5924 return copy_to_user(ifr->ifr_data, &config,
5925 sizeof(config)) ? -EFAULT : 0;
5926 }
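/* Userspace view of the ioctl path above (sketch only; headers, socket
 * setup and error handling omitted, interface name is illustrative):
 */
#if 0
	struct hwtstamp_config cfg = {
		.tx_type = HWTSTAMP_TX_ON,
		.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC,
	};
	struct ifreq ifr = { .ifr_data = (char *)&cfg };

	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
	/* on return cfg.rx_filter may read back as HWTSTAMP_FILTER_SOME,
	 * as described in the function comment above */
#endif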
5928 static int e1000e_hwtstamp_get(struct net_device *netdev, struct ifreq *ifr)
5930 struct e1000_adapter *adapter = netdev_priv(netdev);
5932 return copy_to_user(ifr->ifr_data, &adapter->hwtstamp_config,
5933 sizeof(adapter->hwtstamp_config)) ? -EFAULT : 0;
5934 }
5936 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
5937 {
5938 switch (cmd) {
5939 case SIOCGMIIPHY:
5940 case SIOCGMIIREG:
5941 case SIOCSMIIREG:
5942 return e1000_mii_ioctl(netdev, ifr, cmd);
5943 case SIOCSHWTSTAMP:
5944 return e1000e_hwtstamp_set(netdev, ifr);
5945 case SIOCGHWTSTAMP:
5946 return e1000e_hwtstamp_get(netdev, ifr);
5947 default:
5948 return -EOPNOTSUPP;
5949 }
5950 }
5952 static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
5954 struct e1000_hw *hw = &adapter->hw;
5955 u32 i, mac_reg, wuc;
5956 u16 phy_reg, wuc_enable;
5957 int retval;
5959 /* copy MAC RARs to PHY RARs */
5960 e1000_copy_rx_addrs_to_phy_ich8lan(hw);
5962 retval = hw->phy.ops.acquire(hw);
5964 e_err("Could not acquire PHY\n");
5968 /* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
5969 retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
5970 if (retval)
5971 goto release;
5973 /* copy MAC MTA to PHY MTA - only needed for pchlan */
5974 for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
5975 mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
5976 hw->phy.ops.write_reg_page(hw, BM_MTA(i),
5977 (u16)(mac_reg & 0xFFFF));
5978 hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
5979 (u16)((mac_reg >> 16) & 0xFFFF));
5980 }
5982 /* configure PHY Rx Control register */
5983 hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
5984 mac_reg = er32(RCTL);
5985 if (mac_reg & E1000_RCTL_UPE)
5986 phy_reg |= BM_RCTL_UPE;
5987 if (mac_reg & E1000_RCTL_MPE)
5988 phy_reg |= BM_RCTL_MPE;
5989 phy_reg &= ~(BM_RCTL_MO_MASK);
5990 if (mac_reg & E1000_RCTL_MO_3)
5991 phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
5992 << BM_RCTL_MO_SHIFT);
5993 if (mac_reg & E1000_RCTL_BAM)
5994 phy_reg |= BM_RCTL_BAM;
5995 if (mac_reg & E1000_RCTL_PMCF)
5996 phy_reg |= BM_RCTL_PMCF;
5997 mac_reg = er32(CTRL);
5998 if (mac_reg & E1000_CTRL_RFCE)
5999 phy_reg |= BM_RCTL_RFCE;
6000 hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
6002 wuc = E1000_WUC_PME_EN;
6003 if (wufc & (E1000_WUFC_MAG | E1000_WUFC_LNKC))
6004 wuc |= E1000_WUC_APME;
6006 /* enable PHY wakeup in MAC register */
6008 ew32(WUC, (E1000_WUC_PHY_WAKE | E1000_WUC_APMPME |
6009 E1000_WUC_PME_STATUS | wuc));
6011 /* configure and enable PHY wakeup in PHY registers */
6012 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
6013 hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, wuc);
6015 /* activate PHY wakeup */
6016 wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
6017 retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
6019 e_err("Could not set PHY Host Wakeup bit\n");
6021 hw->phy.ops.release(hw);
6023 return retval;
6024 }
6026 static void e1000e_flush_lpic(struct pci_dev *pdev)
6028 struct net_device *netdev = pci_get_drvdata(pdev);
6029 struct e1000_adapter *adapter = netdev_priv(netdev);
6030 struct e1000_hw *hw = &adapter->hw;
6031 u32 ret_val;
6033 pm_runtime_get_sync(netdev->dev.parent);
6035 ret_val = hw->phy.ops.acquire(hw);
6036 if (ret_val)
6037 goto fl_out;
6039 pr_info("EEE TX LPI TIMER: %08X\n",
6040 er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);
6042 hw->phy.ops.release(hw);
6044 fl_out:
6045 pm_runtime_put_sync(netdev->dev.parent);
6046 }
6048 static int e1000e_pm_freeze(struct device *dev)
6050 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
6051 struct e1000_adapter *adapter = netdev_priv(netdev);
6053 netif_device_detach(netdev);
6055 if (netif_running(netdev)) {
6056 int count = E1000_CHECK_RESET_COUNT;
6058 while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
6059 usleep_range(10000, 20000);
6061 WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
6063 /* Quiesce the device without resetting the hardware */
6064 e1000e_down(adapter, false);
6065 e1000_free_irq(adapter);
6066 }
6067 e1000e_reset_interrupt_capability(adapter);
6069 /* Allow time for pending master requests to run */
6070 e1000e_disable_pcie_master(&adapter->hw);
6072 return 0;
6073 }
6075 static int __e1000_shutdown(struct pci_dev *pdev, bool runtime)
6077 struct net_device *netdev = pci_get_drvdata(pdev);
6078 struct e1000_adapter *adapter = netdev_priv(netdev);
6079 struct e1000_hw *hw = &adapter->hw;
6080 u32 ctrl, ctrl_ext, rctl, status;
6081 /* Runtime suspend should only enable wakeup for link changes */
6082 u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
6083 int retval = 0;
6085 status = er32(STATUS);
6086 if (status & E1000_STATUS_LU)
6087 wufc &= ~E1000_WUFC_LNKC;
6089 if (wufc) {
6090 e1000_setup_rctl(adapter);
6091 e1000e_set_rx_mode(netdev);
6093 /* turn on all-multi mode if wake on multicast is enabled */
6094 if (wufc & E1000_WUFC_MC) {
6095 rctl = er32(RCTL);
6096 rctl |= E1000_RCTL_MPE;
6097 ew32(RCTL, rctl);
6098 }
6100 ctrl = er32(CTRL);
6101 ctrl |= E1000_CTRL_ADVD3WUC;
6102 if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
6103 ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
6104 ew32(CTRL, ctrl);
6106 if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
6107 adapter->hw.phy.media_type ==
6108 e1000_media_type_internal_serdes) {
6109 /* keep the laser running in D3 */
6110 ctrl_ext = er32(CTRL_EXT);
6111 ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
6112 ew32(CTRL_EXT, ctrl_ext);
6113 }
6115 if (!runtime)
6116 e1000e_power_up_phy(adapter);
6118 if (adapter->flags & FLAG_IS_ICH)
6119 e1000_suspend_workarounds_ich8lan(&adapter->hw);
6121 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6122 /* enable wakeup by the PHY */
6123 retval = e1000_init_phy_wakeup(adapter, wufc);
6124 if (retval)
6125 return retval;
6126 } else {
6127 /* enable wakeup by the MAC */
6128 ew32(WUFC, wufc);
6129 ew32(WUC, E1000_WUC_PME_EN);
6130 }
6131 } else {
6132 ew32(WUC, 0);
6133 ew32(WUFC, 0);
6135 e1000_power_down_phy(adapter);
6136 }
6138 if (adapter->hw.phy.type == e1000_phy_igp_3) {
6139 e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
6140 } else if (hw->mac.type == e1000_pch_lpt) {
6141 if (!(wufc & (E1000_WUFC_EX | E1000_WUFC_MC | E1000_WUFC_BC)))
6142 /* ULP does not support wake from unicast, multicast
6143 * or broadcast.
6144 */
6145 retval = e1000_enable_ulp_lpt_lp(hw, !runtime);
6147 if (retval)
6148 return retval;
6149 }
6152 /* Release control of h/w to f/w. If f/w is AMT enabled, this
6153 * would have already happened in close and is redundant.
6155 e1000e_release_hw_control(adapter);
6157 pci_clear_master(pdev);
6159 /* The pci-e switch on some quad port adapters will report a
6160 * correctable error when the MAC transitions from D0 to D3. To
6161 * prevent this we need to mask off the correctable errors on the
6162 * downstream port of the pci-e switch.
6164 * We don't have the associated upstream bridge while assigning
6165 * the PCI device into guest. For example, the KVM on power is
6166 * one of the cases.
6167 */
6168 if (adapter->flags & FLAG_IS_QUAD_PORT) {
6169 struct pci_dev *us_dev = pdev->bus->self;
6170 u16 devctl;
6172 if (!us_dev)
6173 return 0;
6175 pcie_capability_read_word(us_dev, PCI_EXP_DEVCTL, &devctl);
6176 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL,
6177 (devctl & ~PCI_EXP_DEVCTL_CERE));
6179 pci_save_state(pdev);
6180 pci_prepare_to_sleep(pdev);
6182 pcie_capability_write_word(us_dev, PCI_EXP_DEVCTL, devctl);
6183 }
6185 return 0;
6186 }
6189 * e1000e_disable_aspm - Disable ASPM states
6190 * @pdev: pointer to PCI device struct
6191 * @state: bit-mask of ASPM states to disable
6193 * Some devices *must* have certain ASPM states disabled per hardware errata.
6195 static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
6197 struct pci_dev *parent = pdev->bus->self;
6198 u16 aspm_dis_mask = 0;
6199 u16 pdev_aspmc, parent_aspmc;
6202 case PCIE_LINK_STATE_L0S:
6203 case PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1:
6204 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L0S;
6205 /* fall-through - can't have L1 without L0s */
6206 case PCIE_LINK_STATE_L1:
6207 aspm_dis_mask |= PCI_EXP_LNKCTL_ASPM_L1;
6208 break;
6209 default:
6210 return;
6211 }
6213 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6214 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6216 if (parent) {
6217 pcie_capability_read_word(parent, PCI_EXP_LNKCTL,
6218 &parent_aspmc);
6219 parent_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6220 }
6222 /* Nothing to do if the ASPM states to be disabled already are */
6223 if (!(pdev_aspmc & aspm_dis_mask) &&
6224 (!parent || !(parent_aspmc & aspm_dis_mask)))
6225 return;
6227 dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
6228 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L0S) ?
6229 "L0s" : "",
6230 (aspm_dis_mask & pdev_aspmc & PCI_EXP_LNKCTL_ASPM_L1) ?
6231 "L1" : "");
6233 #ifdef CONFIG_PCIEASPM
6234 pci_disable_link_state_locked(pdev, state);
6236 /* Double-check ASPM control. If not disabled by the above, the
6237 * BIOS is preventing that from happening (or CONFIG_PCIEASPM is
6238 * not enabled); override by writing PCI config space directly.
6240 pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &pdev_aspmc);
6241 pdev_aspmc &= PCI_EXP_LNKCTL_ASPMC;
6243 if (!(aspm_dis_mask & pdev_aspmc))
6244 return;
6245 #endif
6247 /* Both device and parent should have the same ASPM setting.
6248 * Disable ASPM in downstream component first and then upstream.
6250 pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL, aspm_dis_mask);
6252 if (parent)
6253 pcie_capability_clear_word(parent, PCI_EXP_LNKCTL,
6254 aspm_dis_mask);
6255 }
6258 static int __e1000_resume(struct pci_dev *pdev)
6260 struct net_device *netdev = pci_get_drvdata(pdev);
6261 struct e1000_adapter *adapter = netdev_priv(netdev);
6262 struct e1000_hw *hw = &adapter->hw;
6263 u16 aspm_disable_flag = 0;
6265 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6266 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6267 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6268 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6269 if (aspm_disable_flag)
6270 e1000e_disable_aspm(pdev, aspm_disable_flag);
6272 pci_set_master(pdev);
6274 if (hw->mac.type >= e1000_pch2lan)
6275 e1000_resume_workarounds_pchlan(&adapter->hw);
6277 e1000e_power_up_phy(adapter);
6279 /* report the system wakeup cause from S3/S4 */
6280 if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
6281 u16 phy_data;
6283 e1e_rphy(&adapter->hw, BM_WUS, &phy_data);
6284 if (phy_data) {
6285 e_info("PHY Wakeup cause - %s\n",
6286 phy_data & E1000_WUS_EX ? "Unicast Packet" :
6287 phy_data & E1000_WUS_MC ? "Multicast Packet" :
6288 phy_data & E1000_WUS_BC ? "Broadcast Packet" :
6289 phy_data & E1000_WUS_MAG ? "Magic Packet" :
6290 phy_data & E1000_WUS_LNKC ?
6291 "Link Status Change" : "other");
6293 e1e_wphy(&adapter->hw, BM_WUS, ~0);
6295 u32 wus = er32(WUS);
6297 if (wus) {
6298 e_info("MAC Wakeup cause - %s\n",
6299 wus & E1000_WUS_EX ? "Unicast Packet" :
6300 wus & E1000_WUS_MC ? "Multicast Packet" :
6301 wus & E1000_WUS_BC ? "Broadcast Packet" :
6302 wus & E1000_WUS_MAG ? "Magic Packet" :
6303 wus & E1000_WUS_LNKC ? "Link Status Change" :
6309 e1000e_reset(adapter);
6311 e1000_init_manageability_pt(adapter);
6313 /* If the controller has AMT, do not set DRV_LOAD until the interface
6314 * is up. For all other cases, let the f/w know that the h/w is now
6315 * under the control of the driver.
6317 if (!(adapter->flags & FLAG_HAS_AMT))
6318 e1000e_get_hw_control(adapter);
6320 return 0;
6321 }
6323 #ifdef CONFIG_PM_SLEEP
6324 static int e1000e_pm_thaw(struct device *dev)
6326 struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
6327 struct e1000_adapter *adapter = netdev_priv(netdev);
6329 e1000e_set_interrupt_capability(adapter);
6330 if (netif_running(netdev)) {
6331 u32 err = e1000_request_irq(adapter);
6333 if (err)
6334 return err;
6336 e1000e_up(adapter);
6337 }
6339 netif_device_attach(netdev);
6341 return 0;
6342 }
6344 static int e1000e_pm_suspend(struct device *dev)
6346 struct pci_dev *pdev = to_pci_dev(dev);
6348 e1000e_flush_lpic(pdev);
6350 e1000e_pm_freeze(dev);
6352 return __e1000_shutdown(pdev, false);
6353 }
6355 static int e1000e_pm_resume(struct device *dev)
6357 struct pci_dev *pdev = to_pci_dev(dev);
6358 int rc;
6360 rc = __e1000_resume(pdev);
6361 if (rc)
6362 return rc;
6364 return e1000e_pm_thaw(dev);
6365 }
6366 #endif /* CONFIG_PM_SLEEP */
static int e1000e_pm_runtime_idle(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	u16 eee_lp;

	eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;

	if (!e1000e_has_link(adapter)) {
		adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
		pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
	}

	return -EBUSY;
}
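
/* Returning -EBUSY tells the PM core not to runtime-suspend right away;
 * when the link is down, pm_schedule_suspend() above instead queues the
 * suspend 5 seconds out, giving the link a chance to come back first.
 */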
static int e1000e_pm_runtime_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);
	int rc;

	rc = __e1000_resume(pdev);
	if (rc)
		return rc;

	if (netdev->flags & IFF_UP)
		rc = e1000e_up(adapter);

	return rc;
}
static int e1000e_pm_runtime_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (netdev->flags & IFF_UP) {
		int count = E1000_CHECK_RESET_COUNT;

		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
			usleep_range(10000, 20000);

		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));

		/* Down the device without resetting the hardware */
		e1000e_down(adapter, false);
	}

	if (__e1000_shutdown(pdev, true)) {
		e1000e_pm_runtime_resume(dev);
		return -EBUSY;
	}

	return 0;
}
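
/* Note the asymmetry with e1000e_pm_suspend(): the runtime path passes
 * runtime == true to __e1000_shutdown() so that wake-on-link-change is
 * armed and a cable plug event can runtime-resume the device.
 */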
6427 #endif /* CONFIG_PM */
static void e1000_shutdown(struct pci_dev *pdev)
{
	e1000e_flush_lpic(pdev);

	e1000e_pm_freeze(&pdev->dev);

	__e1000_shutdown(pdev, false);
}
6438 #ifdef CONFIG_NET_POLL_CONTROLLER
static irqreturn_t e1000_intr_msix(int __always_unused irq, void *data)
{
	struct net_device *netdev = data;
	struct e1000_adapter *adapter = netdev_priv(netdev);

	if (adapter->msix_entries) {
		int vector, msix_irq;

		/* service the Rx, Tx, and "other" vectors in turn */
		vector = 0;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_intr_msix_rx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_intr_msix_tx(msix_irq, netdev);
		enable_irq(msix_irq);

		vector++;
		msix_irq = adapter->msix_entries[vector].vector;
		disable_irq(msix_irq);
		e1000_msix_other(msix_irq, netdev);
		enable_irq(msix_irq);
	}

	return IRQ_HANDLED;
}
/**
 * e1000_netpoll
 * @netdev: network interface device structure
 *
6474 * Polling 'interrupt' - used by things like netconsole to send skbs
6475 * without having to re-enable interrupts. It's not called while
 * the interrupt routine is executing.
 */
static void e1000_netpoll(struct net_device *netdev)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);

	switch (adapter->int_mode) {
	case E1000E_INT_MODE_MSIX:
		e1000_intr_msix(adapter->pdev->irq, netdev);
		break;
	case E1000E_INT_MODE_MSI:
		disable_irq(adapter->pdev->irq);
		e1000_intr_msi(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	default: /* E1000E_INT_MODE_LEGACY */
		disable_irq(adapter->pdev->irq);
		e1000_intr(adapter->pdev->irq, netdev);
		enable_irq(adapter->pdev->irq);
		break;
	}
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
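
/* The .ndo_poll_controller hook above is what netconsole relies on to
 * service the NIC without interrupts.  An illustrative invocation (all
 * addresses and the device name made up):
 *
 *	modprobe netconsole \
 *		netconsole=6665@10.0.0.2/eth0,6666@10.0.0.1/00:11:22:33:44:55
 */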
/**
 * e1000_io_error_detected - called when PCI error is detected
6502 * @pdev: Pointer to PCI device
6503 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
6511 struct net_device *netdev = pci_get_drvdata(pdev);
6512 struct e1000_adapter *adapter = netdev_priv(netdev);
6514 netif_device_detach(netdev);
6516 if (state == pci_channel_io_perm_failure)
6517 return PCI_ERS_RESULT_DISCONNECT;
6519 if (netif_running(netdev))
6520 e1000e_down(adapter, true);
6521 pci_disable_device(pdev);
	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
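
/* AER recovery sequence: the PCI core calls .error_detected (above) first,
 * then .slot_reset once the link has been reset, and finally .resume if
 * recovery succeeded -- see the e1000_err_handler table near the end of
 * this file.
 */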
/**
 * e1000_io_slot_reset - called after the pci bus has been reset.
6529 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the e1000e_pm_resume routine.
 */
static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
{
6536 struct net_device *netdev = pci_get_drvdata(pdev);
6537 struct e1000_adapter *adapter = netdev_priv(netdev);
6538 struct e1000_hw *hw = &adapter->hw;
	u16 aspm_disable_flag = 0;
	int err;
	pci_ers_result_t result;
6543 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
6544 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6545 if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
6546 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6547 if (aspm_disable_flag)
6548 e1000e_disable_aspm(pdev, aspm_disable_flag);
	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		result = PCI_ERS_RESULT_DISCONNECT;
	} else {
		pdev->state_saved = true;
		pci_restore_state(pdev);
		pci_set_master(pdev);

		pci_enable_wake(pdev, PCI_D3hot, 0);
		pci_enable_wake(pdev, PCI_D3cold, 0);

		e1000e_reset(adapter);
		ew32(WUS, ~0);
		result = PCI_ERS_RESULT_RECOVERED;
	}

	pci_cleanup_aer_uncorrect_error_status(pdev);

	return result;
}
/**
 * e1000_io_resume - called when traffic can start flowing again.
6575 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the e1000e_pm_resume routine.
 */
static void e1000_io_resume(struct pci_dev *pdev)
{
6583 struct net_device *netdev = pci_get_drvdata(pdev);
6584 struct e1000_adapter *adapter = netdev_priv(netdev);
6586 e1000_init_manageability_pt(adapter);
	if (netif_running(netdev)) {
		if (e1000e_up(adapter)) {
			dev_err(&pdev->dev,
				"can't bring device back up after reset\n");
			return;
		}
	}

	netif_device_attach(netdev);
6598 /* If the controller has AMT, do not set DRV_LOAD until the interface
6599 * is up. For all other cases, let the f/w know that the h/w is now
6600 * under the control of the driver.
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_get_hw_control(adapter);
}
static void e1000_print_device_info(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	struct net_device *netdev = adapter->netdev;
	u32 ret_val;
	u8 pba_str[E1000_PBANUM_LENGTH];

	/* print bus type/speed/width info */
	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
	       /* bus width */
	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
		"Width x1"),
	       /* MAC address */
	       netdev->dev_addr);
	e_info("Intel(R) PRO/%s Network Connection\n",
	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
	ret_val = e1000_read_pba_string_generic(hw, pba_str,
						E1000_PBANUM_LENGTH);
	if (ret_val)
		strlcpy((char *)pba_str, "Unknown", sizeof(pba_str));
	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
	       hw->mac.type, hw->phy.type, pba_str);
}
static void e1000_eeprom_checks(struct e1000_adapter *adapter)
{
	struct e1000_hw *hw = &adapter->hw;
	int ret_val;
	u16 buf = 0;

	if (hw->mac.type != e1000_82573)
		return;

	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
	le16_to_cpus(&buf);
	if (!ret_val && (!(buf & (1 << 0)))) {
		/* Deep Smart Power Down (DSPD) */
		dev_warn(&adapter->pdev->dev,
			 "Warning: detected DSPD enabled in EEPROM\n");
	}
}
static int e1000_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	struct e1000_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = features ^ netdev->features;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6))
		adapter->flags |= FLAG_TSO_FORCE;

	if (!(changed & (NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_TX |
			 NETIF_F_RXCSUM | NETIF_F_RXHASH | NETIF_F_RXFCS |
			 NETIF_F_RXALL)))
		return 0;

	if (changed & NETIF_F_RXFCS) {
		if (features & NETIF_F_RXFCS) {
			adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		} else {
			/* We need to take it back to defaults, which might mean
			 * stripping is still disabled at the adapter level.
			 */
			if (adapter->flags2 & FLAG2_DFLT_CRC_STRIPPING)
				adapter->flags2 |= FLAG2_CRC_STRIPPING;
			else
				adapter->flags2 &= ~FLAG2_CRC_STRIPPING;
		}
	}

	netdev->features = features;

	if (netif_running(netdev))
		e1000e_reinit_locked(adapter);
	else
		e1000e_reset(adapter);

	return 0;
}
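
/* These bits are flipped from userspace via ethtool; for instance (device
 * name illustrative):
 *
 *	ethtool -K eth0 rx-fcs on	toggles NETIF_F_RXFCS
 *	ethtool -K eth0 rx-all on	toggles NETIF_F_RXALL
 *
 * and the stack then invokes .ndo_set_features, which lands here.
 */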
6686 static const struct net_device_ops e1000e_netdev_ops = {
6687 .ndo_open = e1000_open,
6688 .ndo_stop = e1000_close,
6689 .ndo_start_xmit = e1000_xmit_frame,
6690 .ndo_get_stats64 = e1000e_get_stats64,
6691 .ndo_set_rx_mode = e1000e_set_rx_mode,
6692 .ndo_set_mac_address = e1000_set_mac,
6693 .ndo_change_mtu = e1000_change_mtu,
6694 .ndo_do_ioctl = e1000_ioctl,
6695 .ndo_tx_timeout = e1000_tx_timeout,
6696 .ndo_validate_addr = eth_validate_addr,
6698 .ndo_vlan_rx_add_vid = e1000_vlan_rx_add_vid,
6699 .ndo_vlan_rx_kill_vid = e1000_vlan_rx_kill_vid,
6700 #ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = e1000_netpoll,
#endif
	.ndo_set_features = e1000_set_features,
};
/**
 * e1000_probe - Device Initialization Routine
6708 * @pdev: PCI device information struct
 * @ent: entry in e1000_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
6713 * e1000_probe initializes an adapter identified by a pci_dev structure.
6714 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
6719 struct net_device *netdev;
6720 struct e1000_adapter *adapter;
6721 struct e1000_hw *hw;
6722 const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
6723 resource_size_t mmio_start, mmio_len;
6724 resource_size_t flash_start, flash_len;
6725 static int cards_found;
6726 u16 aspm_disable_flag = 0;
6727 int bars, i, err, pci_using_dac;
6728 u16 eeprom_data = 0;
	u16 eeprom_apme_mask = E1000_EEPROM_APME;
	s32 rval = 0;
6732 if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
6733 aspm_disable_flag = PCIE_LINK_STATE_L0S;
6734 if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
6735 aspm_disable_flag |= PCIE_LINK_STATE_L1;
6736 if (aspm_disable_flag)
6737 e1000e_disable_aspm(pdev, aspm_disable_flag);
	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	pci_using_dac = 0;
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (!err) {
		pci_using_dac = 1;
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"No usable DMA configuration, aborting\n");
			goto err_dma;
		}
	}
6756 bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions_exclusive(pdev, bars,
						     e1000e_driver_name);
	if (err)
		goto err_pci_reg;
6762 /* AER (Advanced Error Reporting) hooks */
6763 pci_enable_pcie_error_reporting(pdev);
6765 pci_set_master(pdev);
6766 /* PCI config space info */
	err = pci_save_state(pdev);
	if (err)
		goto err_alloc_etherdev;

	err = -ENOMEM;
	netdev = alloc_etherdev(sizeof(struct e1000_adapter));
	if (!netdev)
		goto err_alloc_etherdev;
6776 SET_NETDEV_DEV(netdev, &pdev->dev);
6778 netdev->irq = pdev->irq;
6780 pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	hw = &adapter->hw;
6783 adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->ei = ei;
6786 adapter->pba = ei->pba;
6787 adapter->flags = ei->flags;
6788 adapter->flags2 = ei->flags2;
6789 adapter->hw.adapter = adapter;
6790 adapter->hw.mac.type = ei->mac;
6791 adapter->max_hw_frame_size = ei->max_hw_frame_size;
6792 adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
6794 mmio_start = pci_resource_start(pdev, 0);
	mmio_len = pci_resource_len(pdev, 0);

	err = -EIO;
	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
	if (!adapter->hw.hw_addr)
		goto err_ioremap;
6802 if ((adapter->flags & FLAG_HAS_FLASH) &&
6803 (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
6804 flash_start = pci_resource_start(pdev, 1);
6805 flash_len = pci_resource_len(pdev, 1);
		adapter->hw.flash_address = ioremap(flash_start, flash_len);
		if (!adapter->hw.flash_address)
			goto err_flashmap;
	}
6811 /* Set default EEE advertisement */
6812 if (adapter->flags2 & FLAG2_HAS_EEE)
6813 adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
6815 /* construct the net_device struct */
6816 netdev->netdev_ops = &e1000e_netdev_ops;
6817 e1000e_set_ethtool_ops(netdev);
6818 netdev->watchdog_timeo = 5 * HZ;
6819 netif_napi_add(netdev, &adapter->napi, e1000e_poll, 64);
6820 strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
6822 netdev->mem_start = mmio_start;
6823 netdev->mem_end = mmio_start + mmio_len;
6825 adapter->bd_number = cards_found++;
6827 e1000e_check_options(adapter);
6829 /* setup adapter struct */
	err = e1000_sw_init(adapter);
	if (err)
		goto err_sw_init;
6834 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
6835 memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
6836 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
	err = ei->get_variants(adapter);
	if (err)
		goto err_hw_init;
6842 if ((adapter->flags & FLAG_IS_ICH) &&
6843 (adapter->flags & FLAG_READ_ONLY_NVM))
6844 e1000e_write_protect_nvm_ich8lan(&adapter->hw);
6846 hw->mac.ops.get_bus_info(&adapter->hw);
6848 adapter->hw.phy.autoneg_wait_to_complete = 0;
6850 /* Copper options */
6851 if (adapter->hw.phy.media_type == e1000_media_type_copper) {
6852 adapter->hw.phy.mdix = AUTO_ALL_MODES;
6853 adapter->hw.phy.disable_polarity_correction = 0;
		adapter->hw.phy.ms_type = e1000_ms_hw_default;
	}
6857 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6858 dev_info(&pdev->dev,
6859 "PHY reset is blocked due to SOL/IDER session.\n");
6861 /* Set initial default active device features */
6862 netdev->features = (NETIF_F_SG |
6863 NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_TX |
			    NETIF_F_TSO |
			    NETIF_F_TSO6 |
			    NETIF_F_RXHASH |
			    NETIF_F_RXCSUM |
			    NETIF_F_HW_CSUM);
6871 /* Set user-changeable features (subset of all device features) */
6872 netdev->hw_features = netdev->features;
6873 netdev->hw_features |= NETIF_F_RXFCS;
6874 netdev->priv_flags |= IFF_SUPP_NOFCS;
6875 netdev->hw_features |= NETIF_F_RXALL;
6877 if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
6878 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	netdev->vlan_features |= (NETIF_F_SG |
				  NETIF_F_TSO |
				  NETIF_F_TSO6 |
				  NETIF_F_HW_CSUM);
6885 netdev->priv_flags |= IFF_UNICAST_FLT;
6887 if (pci_using_dac) {
6888 netdev->features |= NETIF_F_HIGHDMA;
		netdev->vlan_features |= NETIF_F_HIGHDMA;
	}
6892 if (e1000e_enable_mng_pass_thru(&adapter->hw))
6893 adapter->flags |= FLAG_MNG_PT_ENABLED;
6895 /* before reading the NVM, reset the controller to
6896 * put the device in a known good starting state
6898 adapter->hw.mac.ops.reset_hw(&adapter->hw);
	/* systems with ASPM and others may see the checksum fail on the first
	 * attempt. Let's give it a few tries
	 */
	for (i = 0;; i++) {
		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
			break;
		if (i == 2) {
			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
			err = -EIO;
			goto err_eeprom;
		}
	}
6913 e1000_eeprom_checks(adapter);
6915 /* copy the MAC address */
	if (e1000e_read_mac_addr(&adapter->hw))
		dev_err(&pdev->dev,
			"NVM Read Error while reading MAC address\n");
6920 memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
6922 if (!is_valid_ether_addr(netdev->dev_addr)) {
		dev_err(&pdev->dev, "Invalid MAC Address: %pM\n",
			netdev->dev_addr);
		err = -EIO;
		goto err_eeprom;
	}
6929 init_timer(&adapter->watchdog_timer);
6930 adapter->watchdog_timer.function = e1000_watchdog;
6931 adapter->watchdog_timer.data = (unsigned long)adapter;
6933 init_timer(&adapter->phy_info_timer);
6934 adapter->phy_info_timer.function = e1000_update_phy_info;
6935 adapter->phy_info_timer.data = (unsigned long)adapter;
6937 INIT_WORK(&adapter->reset_task, e1000_reset_task);
6938 INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
6939 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
6940 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
6941 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
6943 /* Initialize link parameters. User can change them with ethtool */
6944 adapter->hw.mac.autoneg = 1;
6945 adapter->fc_autoneg = true;
6946 adapter->hw.fc.requested_mode = e1000_fc_default;
6947 adapter->hw.fc.current_mode = e1000_fc_default;
6948 adapter->hw.phy.autoneg_advertised = 0x2f;
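	/* The 0x2f above advertises 10 half/full, 100 half/full and 1000
	 * full duplex, i.e. ADVERTISE_10_HALF | ADVERTISE_10_FULL |
	 * ADVERTISE_100_HALF | ADVERTISE_100_FULL | ADVERTISE_1000_FULL.
	 */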
6950 /* Initial Wake on LAN setting - If APM wake is enabled in
6951 * the EEPROM, enable the ACPI Magic Packet filter
6953 if (adapter->flags & FLAG_APME_IN_WUC) {
6954 /* APME bit in EEPROM is mapped to WUC.APME */
6955 eeprom_data = er32(WUC);
6956 eeprom_apme_mask = E1000_WUC_APME;
6957 if ((hw->mac.type > e1000_ich10lan) &&
6958 (eeprom_data & E1000_WUC_PHY_WAKE))
6959 adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
6960 } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
6961 if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
6962 (adapter->hw.bus.func == 1))
6963 rval = e1000_read_nvm(&adapter->hw,
					      NVM_INIT_CONTROL3_PORT_B,
					      1, &eeprom_data);
		else
			rval = e1000_read_nvm(&adapter->hw,
					      NVM_INIT_CONTROL3_PORT_A,
					      1, &eeprom_data);
	}
6972 /* fetch WoL from EEPROM */
	if (rval)
		e_dbg("NVM read error getting WoL initial values: %d\n", rval);
6975 else if (eeprom_data & eeprom_apme_mask)
6976 adapter->eeprom_wol |= E1000_WUFC_MAG;
6978 /* now that we have the eeprom settings, apply the special cases
6979 * where the eeprom may be wrong or the board simply won't support
6980 * wake on lan on a particular port
6982 if (!(adapter->flags & FLAG_HAS_WOL))
6983 adapter->eeprom_wol = 0;
6985 /* initialize the wol settings based on the eeprom settings */
6986 adapter->wol = adapter->eeprom_wol;
6988 /* make sure adapter isn't asleep if manageability is enabled */
6989 if (adapter->wol || (adapter->flags & FLAG_MNG_PT_ENABLED) ||
6990 (hw->mac.ops.check_mng_mode(hw)))
6991 device_wakeup_enable(&pdev->dev);
6993 /* save off EEPROM version number */
6994 rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
	if (rval) {
		e_dbg("NVM read error getting EEPROM version: %d\n", rval);
		adapter->eeprom_vers = 0;
	}
7001 /* reset the hardware with the new settings */
7002 e1000e_reset(adapter);
7004 /* If the controller has AMT, do not set DRV_LOAD until the interface
7005 * is up. For all other cases, let the f/w know that the h/w is now
7006 * under the control of the driver.
7008 if (!(adapter->flags & FLAG_HAS_AMT))
7009 e1000e_get_hw_control(adapter);
7011 strlcpy(netdev->name, "eth%d", sizeof(netdev->name));
	err = register_netdev(netdev);
	if (err)
		goto err_register;
7016 /* carrier off reporting is important to ethtool even BEFORE open */
7017 netif_carrier_off(netdev);
7019 /* init PTP hardware clock */
7020 e1000e_ptp_init(adapter);
7022 e1000_print_device_info(adapter);
7024 if (pci_dev_run_wake(pdev))
		pm_runtime_put_noidle(&pdev->dev);

	return 0;

err_register:
	if (!(adapter->flags & FLAG_HAS_AMT))
		e1000e_release_hw_control(adapter);
err_eeprom:
	if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
		e1000_phy_hw_reset(&adapter->hw);
err_hw_init:
	kfree(adapter->tx_ring);
	kfree(adapter->rx_ring);
err_sw_init:
	if (adapter->hw.flash_address)
		iounmap(adapter->hw.flash_address);
	e1000e_reset_interrupt_capability(adapter);
err_flashmap:
	iounmap(adapter->hw.hw_addr);
err_ioremap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
/**
 * e1000_remove - Device Removal Routine
7057 * @pdev: PCI device information struct
 *
 * e1000_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void e1000_remove(struct pci_dev *pdev)
{
7066 struct net_device *netdev = pci_get_drvdata(pdev);
7067 struct e1000_adapter *adapter = netdev_priv(netdev);
7068 bool down = test_bit(__E1000_DOWN, &adapter->state);
7070 e1000e_ptp_remove(adapter);
7072 /* The timers may be rescheduled, so explicitly disable them
	 * from being rescheduled.
	 */
	if (!down)
		set_bit(__E1000_DOWN, &adapter->state);
7077 del_timer_sync(&adapter->watchdog_timer);
7078 del_timer_sync(&adapter->phy_info_timer);
7080 cancel_work_sync(&adapter->reset_task);
7081 cancel_work_sync(&adapter->watchdog_task);
7082 cancel_work_sync(&adapter->downshift_task);
7083 cancel_work_sync(&adapter->update_phy_task);
7084 cancel_work_sync(&adapter->print_hang_task);
7086 if (adapter->flags & FLAG_HAS_HW_TIMESTAMP) {
7087 cancel_work_sync(&adapter->tx_hwtstamp_work);
7088 if (adapter->tx_hwtstamp_skb) {
7089 dev_kfree_skb_any(adapter->tx_hwtstamp_skb);
			adapter->tx_hwtstamp_skb = NULL;
		}
	}
7094 /* Don't lie to e1000_close() down the road. */
	if (!down)
		clear_bit(__E1000_DOWN, &adapter->state);
7097 unregister_netdev(netdev);
7099 if (pci_dev_run_wake(pdev))
7100 pm_runtime_get_noresume(&pdev->dev);
7102 /* Release control of h/w to f/w. If f/w is AMT enabled, this
	 * would have already happened in close and is redundant.
	 */
7105 e1000e_release_hw_control(adapter);
7107 e1000e_reset_interrupt_capability(adapter);
7108 kfree(adapter->tx_ring);
7109 kfree(adapter->rx_ring);
7111 iounmap(adapter->hw.hw_addr);
7112 if (adapter->hw.flash_address)
7113 iounmap(adapter->hw.flash_address);
7114 pci_release_selected_regions(pdev,
7115 pci_select_bars(pdev, IORESOURCE_MEM));
7117 free_netdev(netdev);
	/* AER disable */
	pci_disable_pcie_error_reporting(pdev);

	pci_disable_device(pdev);
}
7125 /* PCI Error Recovery (ERS) */
7126 static const struct pci_error_handlers e1000_err_handler = {
7127 .error_detected = e1000_io_error_detected,
7128 .slot_reset = e1000_io_slot_reset,
7129 .resume = e1000_io_resume,
7132 static const struct pci_device_id e1000_pci_tbl[] = {
7133 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
7134 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
7135 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP),
	  board_82571 },
7138 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
7139 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
7140 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
7141 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
7142 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
7144 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
7145 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
7146 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
7147 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
7149 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
7150 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
7151 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
7153 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
7154 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
7155 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
7157 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
7158 board_80003es2lan },
7159 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
7160 board_80003es2lan },
7161 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
7162 board_80003es2lan },
7163 { PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
7164 board_80003es2lan },
7166 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
7167 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
7168 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
7169 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
7170 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
7171 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
7172 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
7173 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
7175 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
7176 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
7177 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
7178 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
7179 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
7180 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
7181 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
7182 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
7183 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
7185 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
7186 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
7187 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
7189 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
7190 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
7191 { PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
7193 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
7194 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
7195 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
7196 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
7198 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
7199 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
7201 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
7202 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
7203 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
7204 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
7205 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM2), board_pch_lpt },
7206 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V2), board_pch_lpt },
7207 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_LM3), board_pch_lpt },
7208 { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_I218_V3), board_pch_lpt },
7210 { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */
7212 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
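
/* MODULE_DEVICE_TABLE() exports the ID list above as module alias metadata,
 * so udev/modprobe can autoload e1000e when a matching device appears; an
 * 82571EB copper port, for example, matches an alias beginning with
 * pci:v00008086d0000105E (vendor 0x8086, device E1000_DEV_ID_82571EB_COPPER).
 */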
7214 static const struct dev_pm_ops e1000_pm_ops = {
7215 #ifdef CONFIG_PM_SLEEP
7216 .suspend = e1000e_pm_suspend,
7217 .resume = e1000e_pm_resume,
7218 .freeze = e1000e_pm_freeze,
7219 .thaw = e1000e_pm_thaw,
7220 .poweroff = e1000e_pm_suspend,
7221 .restore = e1000e_pm_resume,
#endif
	SET_RUNTIME_PM_OPS(e1000e_pm_runtime_suspend, e1000e_pm_runtime_resume,
			   e1000e_pm_runtime_idle)
};
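
/* Callback mapping: .suspend/.resume cover suspend-to-RAM, .freeze/.thaw the
 * creation of the hibernation image, and .poweroff/.restore the power-down
 * and boot-back halves of suspend-to-disk, while SET_RUNTIME_PM_OPS() wires
 * up autosuspend for a running system.
 */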
7227 /* PCI Device API Driver */
7228 static struct pci_driver e1000_driver = {
7229 .name = e1000e_driver_name,
7230 .id_table = e1000_pci_tbl,
7231 .probe = e1000_probe,
7232 .remove = e1000_remove,
	.driver = {
		.pm = &e1000_pm_ops,
	},
7236 .shutdown = e1000_shutdown,
	.err_handler = &e1000_err_handler
};
/**
 * e1000_init_module - Driver Registration Routine
 *
7243 * e1000_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init e1000_init_module(void)
{
	int ret;
7250 pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
7251 e1000e_driver_version);
7252 pr_info("Copyright(c) 1999 - 2014 Intel Corporation.\n");
	ret = pci_register_driver(&e1000_driver);

	return ret;
}
7257 module_init(e1000_init_module);
/**
 * e1000_exit_module - Driver Exit Cleanup Routine
 *
 * e1000_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit e1000_exit_module(void)
{
	pci_unregister_driver(&e1000_driver);
}
7269 module_exit(e1000_exit_module);
7271 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
7272 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
7273 MODULE_LICENSE("GPL");
7274 MODULE_VERSION(DRV_VERSION);