drivers/net/ethernet/intel/e1000/e1000_main.c
1 /*******************************************************************************
2
3   Intel PRO/1000 Linux driver
4   Copyright(c) 1999 - 2006 Intel Corporation.
5
6   This program is free software; you can redistribute it and/or modify it
7   under the terms and conditions of the GNU General Public License,
8   version 2, as published by the Free Software Foundation.
9
10   This program is distributed in the hope it will be useful, but WITHOUT
11   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13   more details.
14
15   You should have received a copy of the GNU General Public License along with
16   this program; if not, write to the Free Software Foundation, Inc.,
17   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19   The full GNU General Public License is included in this distribution in
20   the file called "COPYING".
21
22   Contact Information:
23   Linux NICS <linux.nics@intel.com>
24   e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
25   Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
26
27 *******************************************************************************/
28
29 #include "e1000.h"
30 #include <net/ip6_checksum.h>
31 #include <linux/io.h>
32 #include <linux/prefetch.h>
33 #include <linux/bitops.h>
34 #include <linux/if_vlan.h>
35
36 char e1000_driver_name[] = "e1000";
37 static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
38 #define DRV_VERSION "7.3.21-k8-NAPI"
39 const char e1000_driver_version[] = DRV_VERSION;
40 static const char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
41
42 /* e1000_pci_tbl - PCI Device ID Table
43  *
44  * Last entry must be all 0s
45  *
46  * Macro expands to...
47  *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
48  */
49 static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
50         INTEL_E1000_ETHERNET_DEVICE(0x1000),
51         INTEL_E1000_ETHERNET_DEVICE(0x1001),
52         INTEL_E1000_ETHERNET_DEVICE(0x1004),
53         INTEL_E1000_ETHERNET_DEVICE(0x1008),
54         INTEL_E1000_ETHERNET_DEVICE(0x1009),
55         INTEL_E1000_ETHERNET_DEVICE(0x100C),
56         INTEL_E1000_ETHERNET_DEVICE(0x100D),
57         INTEL_E1000_ETHERNET_DEVICE(0x100E),
58         INTEL_E1000_ETHERNET_DEVICE(0x100F),
59         INTEL_E1000_ETHERNET_DEVICE(0x1010),
60         INTEL_E1000_ETHERNET_DEVICE(0x1011),
61         INTEL_E1000_ETHERNET_DEVICE(0x1012),
62         INTEL_E1000_ETHERNET_DEVICE(0x1013),
63         INTEL_E1000_ETHERNET_DEVICE(0x1014),
64         INTEL_E1000_ETHERNET_DEVICE(0x1015),
65         INTEL_E1000_ETHERNET_DEVICE(0x1016),
66         INTEL_E1000_ETHERNET_DEVICE(0x1017),
67         INTEL_E1000_ETHERNET_DEVICE(0x1018),
68         INTEL_E1000_ETHERNET_DEVICE(0x1019),
69         INTEL_E1000_ETHERNET_DEVICE(0x101A),
70         INTEL_E1000_ETHERNET_DEVICE(0x101D),
71         INTEL_E1000_ETHERNET_DEVICE(0x101E),
72         INTEL_E1000_ETHERNET_DEVICE(0x1026),
73         INTEL_E1000_ETHERNET_DEVICE(0x1027),
74         INTEL_E1000_ETHERNET_DEVICE(0x1028),
75         INTEL_E1000_ETHERNET_DEVICE(0x1075),
76         INTEL_E1000_ETHERNET_DEVICE(0x1076),
77         INTEL_E1000_ETHERNET_DEVICE(0x1077),
78         INTEL_E1000_ETHERNET_DEVICE(0x1078),
79         INTEL_E1000_ETHERNET_DEVICE(0x1079),
80         INTEL_E1000_ETHERNET_DEVICE(0x107A),
81         INTEL_E1000_ETHERNET_DEVICE(0x107B),
82         INTEL_E1000_ETHERNET_DEVICE(0x107C),
83         INTEL_E1000_ETHERNET_DEVICE(0x108A),
84         INTEL_E1000_ETHERNET_DEVICE(0x1099),
85         INTEL_E1000_ETHERNET_DEVICE(0x10B5),
86         INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
87         /* required last entry */
88         {0,}
89 };
90
91 MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
92
93 int e1000_up(struct e1000_adapter *adapter);
94 void e1000_down(struct e1000_adapter *adapter);
95 void e1000_reinit_locked(struct e1000_adapter *adapter);
96 void e1000_reset(struct e1000_adapter *adapter);
97 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
98 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
99 void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
100 void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
101 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
102                              struct e1000_tx_ring *txdr);
103 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
104                              struct e1000_rx_ring *rxdr);
105 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
106                              struct e1000_tx_ring *tx_ring);
107 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
108                              struct e1000_rx_ring *rx_ring);
109 void e1000_update_stats(struct e1000_adapter *adapter);
110
111 static int e1000_init_module(void);
112 static void e1000_exit_module(void);
113 static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
114 static void __devexit e1000_remove(struct pci_dev *pdev);
115 static int e1000_alloc_queues(struct e1000_adapter *adapter);
116 static int e1000_sw_init(struct e1000_adapter *adapter);
117 static int e1000_open(struct net_device *netdev);
118 static int e1000_close(struct net_device *netdev);
119 static void e1000_configure_tx(struct e1000_adapter *adapter);
120 static void e1000_configure_rx(struct e1000_adapter *adapter);
121 static void e1000_setup_rctl(struct e1000_adapter *adapter);
122 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
123 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
124 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
125                                 struct e1000_tx_ring *tx_ring);
126 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
127                                 struct e1000_rx_ring *rx_ring);
128 static void e1000_set_rx_mode(struct net_device *netdev);
129 static void e1000_update_phy_info_task(struct work_struct *work);
130 static void e1000_watchdog(struct work_struct *work);
131 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
132 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
133                                     struct net_device *netdev);
134 static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
135 static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
136 static int e1000_set_mac(struct net_device *netdev, void *p);
137 static irqreturn_t e1000_intr(int irq, void *data);
138 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
139                                struct e1000_tx_ring *tx_ring);
140 static int e1000_clean(struct napi_struct *napi, int budget);
141 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
142                                struct e1000_rx_ring *rx_ring,
143                                int *work_done, int work_to_do);
144 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
145                                      struct e1000_rx_ring *rx_ring,
146                                      int *work_done, int work_to_do);
147 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
148                                    struct e1000_rx_ring *rx_ring,
149                                    int cleaned_count);
150 static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
151                                          struct e1000_rx_ring *rx_ring,
152                                          int cleaned_count);
153 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
154 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
155                            int cmd);
156 static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
157 static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
158 static void e1000_tx_timeout(struct net_device *dev);
159 static void e1000_reset_task(struct work_struct *work);
160 static void e1000_smartspeed(struct e1000_adapter *adapter);
161 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
162                                        struct sk_buff *skb);
163
164 static bool e1000_vlan_used(struct e1000_adapter *adapter);
165 static void e1000_vlan_mode(struct net_device *netdev,
166                             netdev_features_t features);
167 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
168                                      bool filter_on);
169 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
170 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
171 static void e1000_restore_vlan(struct e1000_adapter *adapter);
172
173 #ifdef CONFIG_PM
174 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
175 static int e1000_resume(struct pci_dev *pdev);
176 #endif
177 static void e1000_shutdown(struct pci_dev *pdev);
178
179 #ifdef CONFIG_NET_POLL_CONTROLLER
180 /* for netdump / net console */
181 static void e1000_netpoll (struct net_device *netdev);
182 #endif
183
184 #define COPYBREAK_DEFAULT 256
185 static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
186 module_param(copybreak, uint, 0644);
187 MODULE_PARM_DESC(copybreak,
188         "Maximum size of packet that is copied to a new buffer on receive");
189
190 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
191                      pci_channel_state_t state);
192 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
193 static void e1000_io_resume(struct pci_dev *pdev);
194
195 static struct pci_error_handlers e1000_err_handler = {
196         .error_detected = e1000_io_error_detected,
197         .slot_reset = e1000_io_slot_reset,
198         .resume = e1000_io_resume,
199 };
200
201 static struct pci_driver e1000_driver = {
202         .name     = e1000_driver_name,
203         .id_table = e1000_pci_tbl,
204         .probe    = e1000_probe,
205         .remove   = __devexit_p(e1000_remove),
206 #ifdef CONFIG_PM
207         /* Power Management Hooks */
208         .suspend  = e1000_suspend,
209         .resume   = e1000_resume,
210 #endif
211         .shutdown = e1000_shutdown,
212         .err_handler = &e1000_err_handler
213 };
214
215 MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
216 MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
217 MODULE_LICENSE("GPL");
218 MODULE_VERSION(DRV_VERSION);
219
220 #define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
221 static int debug = -1;
222 module_param(debug, int, 0);
223 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
224
225 /**
226  * e1000_get_hw_dev - return the net_device for this hardware
227  * @hw: pointer to the HW struct
228  * used by the hardware layer to print debugging information
229  **/
230 struct net_device *e1000_get_hw_dev(struct e1000_hw *hw)
231 {
232         struct e1000_adapter *adapter = hw->back;
233         return adapter->netdev;
234 }
235
236 /**
237  * e1000_init_module - Driver Registration Routine
238  *
239  * e1000_init_module is the first routine called when the driver is
240  * loaded. All it does is register with the PCI subsystem.
241  **/
242
243 static int __init e1000_init_module(void)
244 {
245         int ret;
246         pr_info("%s - version %s\n", e1000_driver_string, e1000_driver_version);
247
248         pr_info("%s\n", e1000_copyright);
249
250         ret = pci_register_driver(&e1000_driver);
251         if (copybreak != COPYBREAK_DEFAULT) {
252                 if (copybreak == 0)
253                         pr_info("copybreak disabled\n");
254                 else
255                         pr_info("copybreak enabled for "
256                                    "packets <= %u bytes\n", copybreak);
257         }
258         return ret;
259 }
260
261 module_init(e1000_init_module);
262
263 /**
264  * e1000_exit_module - Driver Exit Cleanup Routine
265  *
266  * e1000_exit_module is called just before the driver is removed
267  * from memory.
268  **/
269
270 static void __exit e1000_exit_module(void)
271 {
272         pci_unregister_driver(&e1000_driver);
273 }
274
275 module_exit(e1000_exit_module);
276
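/**
 * e1000_request_irq - register the interrupt handler
 * @adapter: board private structure
 *
 * Installs e1000_intr as a shared (IRQF_SHARED) handler on the device's
 * legacy PCI interrupt line.
 **/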
277 static int e1000_request_irq(struct e1000_adapter *adapter)
278 {
279         struct net_device *netdev = adapter->netdev;
280         irq_handler_t handler = e1000_intr;
281         int irq_flags = IRQF_SHARED;
282         int err;
283
284         err = request_irq(adapter->pdev->irq, handler, irq_flags, netdev->name,
285                           netdev);
286         if (err) {
287                 e_err(probe, "Unable to allocate interrupt, Error: %d\n", err);
288         }
289
290         return err;
291 }
292
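/**
 * e1000_free_irq - release the interrupt line
 * @adapter: board private structure
 **/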
293 static void e1000_free_irq(struct e1000_adapter *adapter)
294 {
295         struct net_device *netdev = adapter->netdev;
296
297         free_irq(adapter->pdev->irq, netdev);
298 }
299
300 /**
301  * e1000_irq_disable - Mask off interrupt generation on the NIC
302  * @adapter: board private structure
303  **/
304
305 static void e1000_irq_disable(struct e1000_adapter *adapter)
306 {
307         struct e1000_hw *hw = &adapter->hw;
308
309         ew32(IMC, ~0);
310         E1000_WRITE_FLUSH();
311         synchronize_irq(adapter->pdev->irq);
312 }
313
314 /**
315  * e1000_irq_enable - Enable default interrupt generation settings
316  * @adapter: board private structure
317  **/
318
319 static void e1000_irq_enable(struct e1000_adapter *adapter)
320 {
321         struct e1000_hw *hw = &adapter->hw;
322
323         ew32(IMS, IMS_ENABLE_MASK);
324         E1000_WRITE_FLUSH();
325 }
326
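/**
 * e1000_update_mng_vlan - keep the manageability VLAN registered
 * @adapter: board private structure
 *
 * Makes sure the VLAN ID carried in the management firmware cookie stays
 * in the VLAN filter table, and removes the previous ID once it changes
 * and is no longer in use by the stack.
 **/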
327 static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
328 {
329         struct e1000_hw *hw = &adapter->hw;
330         struct net_device *netdev = adapter->netdev;
331         u16 vid = hw->mng_cookie.vlan_id;
332         u16 old_vid = adapter->mng_vlan_id;
333
334         if (!e1000_vlan_used(adapter))
335                 return;
336
337         if (!test_bit(vid, adapter->active_vlans)) {
338                 if (hw->mng_cookie.status &
339                     E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) {
340                         e1000_vlan_rx_add_vid(netdev, vid);
341                         adapter->mng_vlan_id = vid;
342                 } else {
343                         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
344                 }
345                 if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
346                     (vid != old_vid) &&
347                     !test_bit(old_vid, adapter->active_vlans))
348                         e1000_vlan_rx_kill_vid(netdev, old_vid);
349         } else {
350                 adapter->mng_vlan_id = vid;
351         }
352 }
353
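/**
 * e1000_init_manageability - hand ARP handling back to the host
 * @adapter: board private structure
 *
 * When management pass-through is enabled, clear E1000_MANC_ARP_EN so the
 * firmware no longer intercepts ARP packets while the driver is active.
 **/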
354 static void e1000_init_manageability(struct e1000_adapter *adapter)
355 {
356         struct e1000_hw *hw = &adapter->hw;
357
358         if (adapter->en_mng_pt) {
359                 u32 manc = er32(MANC);
360
361                 /* disable hardware interception of ARP */
362                 manc &= ~(E1000_MANC_ARP_EN);
363
364                 ew32(MANC, manc);
365         }
366 }
367
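/**
 * e1000_release_manageability - return ARP handling to the firmware
 * @adapter: board private structure
 *
 * Re-enable E1000_MANC_ARP_EN so the management firmware can intercept
 * ARP packets again once the driver stops using the device.
 **/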
368 static void e1000_release_manageability(struct e1000_adapter *adapter)
369 {
370         struct e1000_hw *hw = &adapter->hw;
371
372         if (adapter->en_mng_pt) {
373                 u32 manc = er32(MANC);
374
375                 /* re-enable hardware interception of ARP */
376                 manc |= E1000_MANC_ARP_EN;
377
378                 ew32(MANC, manc);
379         }
380 }
381
382 /**
383  * e1000_configure - configure the hardware for RX and TX
384  * @adapter: board private structure
385  **/
386 static void e1000_configure(struct e1000_adapter *adapter)
387 {
388         struct net_device *netdev = adapter->netdev;
389         int i;
390
391         e1000_set_rx_mode(netdev);
392
393         e1000_restore_vlan(adapter);
394         e1000_init_manageability(adapter);
395
396         e1000_configure_tx(adapter);
397         e1000_setup_rctl(adapter);
398         e1000_configure_rx(adapter);
399         /* call E1000_DESC_UNUSED which always leaves
400          * at least 1 descriptor unused to make sure
401          * next_to_use != next_to_clean */
402         for (i = 0; i < adapter->num_rx_queues; i++) {
403                 struct e1000_rx_ring *ring = &adapter->rx_ring[i];
404                 adapter->alloc_rx_buf(adapter, ring,
405                                       E1000_DESC_UNUSED(ring));
406         }
407 }
408
409 int e1000_up(struct e1000_adapter *adapter)
410 {
411         struct e1000_hw *hw = &adapter->hw;
412
413         /* hardware has been reset, we need to reload some things */
414         e1000_configure(adapter);
415
416         clear_bit(__E1000_DOWN, &adapter->flags);
417
418         napi_enable(&adapter->napi);
419
420         e1000_irq_enable(adapter);
421
422         netif_wake_queue(adapter->netdev);
423
424         /* fire a link change interrupt to start the watchdog */
425         ew32(ICS, E1000_ICS_LSC);
426         return 0;
427 }
428
429 /**
430  * e1000_power_up_phy - restore link in case the phy was powered down
431  * @adapter: address of board private structure
432  *
433  * The phy may be powered down to save power and turn off link when the
434  * driver is unloaded and wake on lan is not enabled (among others)
435  * *** this routine MUST be followed by a call to e1000_reset ***
436  *
437  **/
438
439 void e1000_power_up_phy(struct e1000_adapter *adapter)
440 {
441         struct e1000_hw *hw = &adapter->hw;
442         u16 mii_reg = 0;
443
444         /* Just clear the power down bit to wake the phy back up */
445         if (hw->media_type == e1000_media_type_copper) {
446                 /* according to the manual, the phy will retain its
447                  * settings across a power-down/up cycle */
448                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
449                 mii_reg &= ~MII_CR_POWER_DOWN;
450                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
451         }
452 }
453
454 static void e1000_power_down_phy(struct e1000_adapter *adapter)
455 {
456         struct e1000_hw *hw = &adapter->hw;
457
458         /* Power down the PHY so no link is implied when interface is down.
459          * The PHY cannot be powered down if any of the following is true:
460          * (a) WoL is enabled
461          * (b) AMT is active
462          * (c) SoL/IDER session is active */
463         if (!adapter->wol && hw->mac_type >= e1000_82540 &&
464            hw->media_type == e1000_media_type_copper) {
465                 u16 mii_reg = 0;
466
467                 switch (hw->mac_type) {
468                 case e1000_82540:
469                 case e1000_82545:
470                 case e1000_82545_rev_3:
471                 case e1000_82546:
472                 case e1000_ce4100:
473                 case e1000_82546_rev_3:
474                 case e1000_82541:
475                 case e1000_82541_rev_2:
476                 case e1000_82547:
477                 case e1000_82547_rev_2:
478                         if (er32(MANC) & E1000_MANC_SMBUS_EN)
479                                 goto out;
480                         break;
481                 default:
482                         goto out;
483                 }
484                 e1000_read_phy_reg(hw, PHY_CTRL, &mii_reg);
485                 mii_reg |= MII_CR_POWER_DOWN;
486                 e1000_write_phy_reg(hw, PHY_CTRL, mii_reg);
487                 msleep(1);
488         }
489 out:
490         return;
491 }
492
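/**
 * e1000_down_and_stop - mark the adapter down and cancel deferred work
 * @adapter: board private structure
 *
 * Sets __E1000_DOWN and synchronously cancels the watchdog, PHY-info and
 * FIFO-stall work items.  The reset task is only cancelled when the
 * adapter is not already in the middle of a reset.
 **/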
493 static void e1000_down_and_stop(struct e1000_adapter *adapter)
494 {
495         set_bit(__E1000_DOWN, &adapter->flags);
496
497         /* Only kill reset task if adapter is not resetting */
498         if (!test_bit(__E1000_RESETTING, &adapter->flags))
499                 cancel_work_sync(&adapter->reset_task);
500
501         cancel_delayed_work_sync(&adapter->watchdog_task);
502         cancel_delayed_work_sync(&adapter->phy_info_task);
503         cancel_delayed_work_sync(&adapter->fifo_stall_task);
504 }
505
506 void e1000_down(struct e1000_adapter *adapter)
507 {
508         struct e1000_hw *hw = &adapter->hw;
509         struct net_device *netdev = adapter->netdev;
510         u32 rctl, tctl;
511
512
513         /* disable receives in the hardware */
514         rctl = er32(RCTL);
515         ew32(RCTL, rctl & ~E1000_RCTL_EN);
516         /* flush and sleep below */
517
518         netif_tx_disable(netdev);
519
520         /* disable transmits in the hardware */
521         tctl = er32(TCTL);
522         tctl &= ~E1000_TCTL_EN;
523         ew32(TCTL, tctl);
524         /* flush both disables and wait for them to finish */
525         E1000_WRITE_FLUSH();
526         msleep(10);
527
528         napi_disable(&adapter->napi);
529
530         e1000_irq_disable(adapter);
531
532         /*
533          * Setting DOWN must be after irq_disable to prevent
534          * a screaming interrupt.  Setting DOWN also prevents
535          * tasks from rescheduling.
536          */
537         e1000_down_and_stop(adapter);
538
539         adapter->link_speed = 0;
540         adapter->link_duplex = 0;
541         netif_carrier_off(netdev);
542
543         e1000_reset(adapter);
544         e1000_clean_all_tx_rings(adapter);
545         e1000_clean_all_rx_rings(adapter);
546 }
547
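/**
 * e1000_reinit_safe - bring the interface down and back up under the mutex
 * @adapter: board private structure
 *
 * Serializes against other resets via the __E1000_RESETTING flag and holds
 * adapter->mutex around the down/up sequence.
 **/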
548 static void e1000_reinit_safe(struct e1000_adapter *adapter)
549 {
550         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
551                 msleep(1);
552         mutex_lock(&adapter->mutex);
553         e1000_down(adapter);
554         e1000_up(adapter);
555         mutex_unlock(&adapter->mutex);
556         clear_bit(__E1000_RESETTING, &adapter->flags);
557 }
558
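/**
 * e1000_reinit_locked - restart the interface; caller must hold the RTNL lock
 * @adapter: board private structure
 **/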
559 void e1000_reinit_locked(struct e1000_adapter *adapter)
560 {
561         /* if rtnl_lock is not held the call path is bogus */
562         ASSERT_RTNL();
563         WARN_ON(in_interrupt());
564         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
565                 msleep(1);
566         e1000_down(adapter);
567         e1000_up(adapter);
568         clear_bit(__E1000_RESETTING, &adapter->flags);
569 }
570
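/**
 * e1000_reset - bring the hardware into a known good state
 * @adapter: board private structure
 *
 * Repartitions the packet buffer allocation (PBA) between the Tx and Rx
 * FIFOs, programs the flow control thresholds, and performs a full
 * hardware reset and re-initialization.
 **/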
571 void e1000_reset(struct e1000_adapter *adapter)
572 {
573         struct e1000_hw *hw = &adapter->hw;
574         u32 pba = 0, tx_space, min_tx_space, min_rx_space;
575         bool legacy_pba_adjust = false;
576         u16 hwm;
577
578         /* Repartition the PBA for MTUs greater than 9k.
579          * CTRL.RST is required for the change to take effect.
580          */
581
582         switch (hw->mac_type) {
583         case e1000_82542_rev2_0:
584         case e1000_82542_rev2_1:
585         case e1000_82543:
586         case e1000_82544:
587         case e1000_82540:
588         case e1000_82541:
589         case e1000_82541_rev_2:
590                 legacy_pba_adjust = true;
591                 pba = E1000_PBA_48K;
592                 break;
593         case e1000_82545:
594         case e1000_82545_rev_3:
595         case e1000_82546:
596         case e1000_ce4100:
597         case e1000_82546_rev_3:
598                 pba = E1000_PBA_48K;
599                 break;
600         case e1000_82547:
601         case e1000_82547_rev_2:
602                 legacy_pba_adjust = true;
603                 pba = E1000_PBA_30K;
604                 break;
605         case e1000_undefined:
606         case e1000_num_macs:
607                 break;
608         }
609
610         if (legacy_pba_adjust) {
611                 if (hw->max_frame_size > E1000_RXBUFFER_8192)
612                         pba -= 8; /* allocate more FIFO for Tx */
613
614                 if (hw->mac_type == e1000_82547) {
615                         adapter->tx_fifo_head = 0;
616                         adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
617                         adapter->tx_fifo_size =
618                                 (E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
619                         atomic_set(&adapter->tx_fifo_stall, 0);
620                 }
621         } else if (hw->max_frame_size >  ETH_FRAME_LEN + ETH_FCS_LEN) {
622                 /* adjust PBA for jumbo frames */
623                 ew32(PBA, pba);
624
625                 /* To maintain wire speed transmits, the Tx FIFO should be
626                  * large enough to accommodate two full transmit packets,
627                  * rounded up to the next 1KB and expressed in KB.  Likewise,
628                  * the Rx FIFO should be large enough to accommodate at least
629                  * one full receive packet and is similarly rounded up and
630                  * expressed in KB. */
631                 pba = er32(PBA);
632                 /* upper 16 bits has Tx packet buffer allocation size in KB */
633                 tx_space = pba >> 16;
634                 /* lower 16 bits has Rx packet buffer allocation size in KB */
635                 pba &= 0xffff;
636                 /*
637                  * the Tx FIFO also stores 16 bytes of information per packet,
638                  * but doesn't include the Ethernet FCS because hardware appends it
639                  */
640                 min_tx_space = (hw->max_frame_size +
641                                 sizeof(struct e1000_tx_desc) -
642                                 ETH_FCS_LEN) * 2;
643                 min_tx_space = ALIGN(min_tx_space, 1024);
644                 min_tx_space >>= 10;
645                 /* software strips receive CRC, so leave room for it */
646                 min_rx_space = hw->max_frame_size;
647                 min_rx_space = ALIGN(min_rx_space, 1024);
648                 min_rx_space >>= 10;
649
650                 /* If current Tx allocation is less than the min Tx FIFO size,
651                  * and the min Tx FIFO size is less than the current Rx FIFO
652                  * allocation, take space away from current Rx allocation */
653                 if (tx_space < min_tx_space &&
654                     ((min_tx_space - tx_space) < pba)) {
655                         pba = pba - (min_tx_space - tx_space);
656
657                         /* PCI/PCIx hardware has PBA alignment constraints */
658                         switch (hw->mac_type) {
659                         case e1000_82545 ... e1000_82546_rev_3:
660                                 pba &= ~(E1000_PBA_8K - 1);
661                                 break;
662                         default:
663                                 break;
664                         }
665
666                         /* if short on rx space, rx wins and must trump tx
667                          * adjustment or use Early Receive if available */
668                         if (pba < min_rx_space)
669                                 pba = min_rx_space;
670                 }
671         }
672
673         ew32(PBA, pba);
674
675         /*
676          * flow control settings:
677          * The high water mark must be low enough to fit one full frame
678          * (or the size used for early receive) above it in the Rx FIFO.
679          * Set it to the lower of:
680          * - 90% of the Rx FIFO size, and
681          * - the full Rx FIFO size minus the early receive size (for parts
682          *   with ERT support assuming ERT set to E1000_ERT_2048), or
683          * - the full Rx FIFO size minus one full frame
684          */
685         hwm = min(((pba << 10) * 9 / 10),
686                   ((pba << 10) - hw->max_frame_size));
687
688         hw->fc_high_water = hwm & 0xFFF8;       /* 8-byte granularity */
689         hw->fc_low_water = hw->fc_high_water - 8;
690         hw->fc_pause_time = E1000_FC_PAUSE_TIME;
691         hw->fc_send_xon = 1;
692         hw->fc = hw->original_fc;
693
694         /* Allow time for pending master requests to run */
695         e1000_reset_hw(hw);
696         if (hw->mac_type >= e1000_82544)
697                 ew32(WUC, 0);
698
699         if (e1000_init_hw(hw))
700                 e_dev_err("Hardware Error\n");
701         e1000_update_mng_vlan(adapter);
702
703         /* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
704         if (hw->mac_type >= e1000_82544 &&
705             hw->autoneg == 1 &&
706             hw->autoneg_advertised == ADVERTISE_1000_FULL) {
707                 u32 ctrl = er32(CTRL);
708                 /* clear phy power management bit if we are in gig only mode,
709                  * which if enabled will attempt negotiation to 100Mb, which
710                  * can cause a loss of link at power off or driver unload */
711                 ctrl &= ~E1000_CTRL_SWDPIN3;
712                 ew32(CTRL, ctrl);
713         }
714
715         /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
716         ew32(VET, ETHERNET_IEEE_VLAN_TYPE);
717
718         e1000_reset_adaptive(hw);
719         e1000_phy_get_info(hw, &adapter->phy_info);
720
721         e1000_release_manageability(adapter);
722 }
723
724 /**
725  * e1000_dump_eeprom - dump the EEPROM for users having checksum issues
726  **/
727 static void e1000_dump_eeprom(struct e1000_adapter *adapter)
728 {
729         struct net_device *netdev = adapter->netdev;
730         struct ethtool_eeprom eeprom;
731         const struct ethtool_ops *ops = netdev->ethtool_ops;
732         u8 *data;
733         int i;
734         u16 csum_old, csum_new = 0;
735
736         eeprom.len = ops->get_eeprom_len(netdev);
737         eeprom.offset = 0;
738
739         data = kmalloc(eeprom.len, GFP_KERNEL);
740         if (!data)
741                 return;
742
743         ops->get_eeprom(netdev, &eeprom, data);
744
745         csum_old = (data[EEPROM_CHECKSUM_REG * 2]) +
746                    (data[EEPROM_CHECKSUM_REG * 2 + 1] << 8);
747         for (i = 0; i < EEPROM_CHECKSUM_REG * 2; i += 2)
748                 csum_new += data[i] + (data[i + 1] << 8);
749         csum_new = EEPROM_SUM - csum_new;
750
751         pr_err("/*********************/\n");
752         pr_err("Current EEPROM Checksum : 0x%04x\n", csum_old);
753         pr_err("Calculated              : 0x%04x\n", csum_new);
754
755         pr_err("Offset    Values\n");
756         pr_err("========  ======\n");
757         print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, data, 128, 0);
758
759         pr_err("Include this output when contacting your support provider.\n");
760         pr_err("This is not a software error! Something bad happened to\n");
761         pr_err("your hardware or EEPROM image. Ignoring this problem could\n");
762         pr_err("result in further problems, possibly loss of data,\n");
763         pr_err("corruption or system hangs!\n");
764         pr_err("The MAC Address will be reset to 00:00:00:00:00:00,\n");
765         pr_err("which is invalid and requires you to set the proper MAC\n");
766         pr_err("address manually before continuing to enable this network\n");
767         pr_err("device. Please inspect the EEPROM dump and report the\n");
768         pr_err("issue to your hardware vendor or Intel Customer Support.\n");
769         pr_err("/*********************/\n");
770
771         kfree(data);
772 }
773
774 /**
775  * e1000_is_need_ioport - determine if an adapter needs ioport resources or not
776  * @pdev: PCI device information struct
777  *
778  * Return true if an adapter needs ioport resources
779  **/
780 static int e1000_is_need_ioport(struct pci_dev *pdev)
781 {
782         switch (pdev->device) {
783         case E1000_DEV_ID_82540EM:
784         case E1000_DEV_ID_82540EM_LOM:
785         case E1000_DEV_ID_82540EP:
786         case E1000_DEV_ID_82540EP_LOM:
787         case E1000_DEV_ID_82540EP_LP:
788         case E1000_DEV_ID_82541EI:
789         case E1000_DEV_ID_82541EI_MOBILE:
790         case E1000_DEV_ID_82541ER:
791         case E1000_DEV_ID_82541ER_LOM:
792         case E1000_DEV_ID_82541GI:
793         case E1000_DEV_ID_82541GI_LF:
794         case E1000_DEV_ID_82541GI_MOBILE:
795         case E1000_DEV_ID_82544EI_COPPER:
796         case E1000_DEV_ID_82544EI_FIBER:
797         case E1000_DEV_ID_82544GC_COPPER:
798         case E1000_DEV_ID_82544GC_LOM:
799         case E1000_DEV_ID_82545EM_COPPER:
800         case E1000_DEV_ID_82545EM_FIBER:
801         case E1000_DEV_ID_82546EB_COPPER:
802         case E1000_DEV_ID_82546EB_FIBER:
803         case E1000_DEV_ID_82546EB_QUAD_COPPER:
804                 return true;
805         default:
806                 return false;
807         }
808 }
809
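/**
 * e1000_fix_features - sanitize requested netdev feature flags
 * @netdev: network interface device structure
 * @features: features the stack is asking for
 *
 * Rx and Tx VLAN acceleration cannot be toggled independently on this
 * hardware, so the Tx flag is forced to follow the Rx flag.
 **/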
810 static netdev_features_t e1000_fix_features(struct net_device *netdev,
811         netdev_features_t features)
812 {
813         /*
814          * Since there is no support for separate rx/tx vlan accel
815          * enable/disable make sure tx flag is always in same state as rx.
816          */
817         if (features & NETIF_F_HW_VLAN_RX)
818                 features |= NETIF_F_HW_VLAN_TX;
819         else
820                 features &= ~NETIF_F_HW_VLAN_TX;
821
822         return features;
823 }
824
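/**
 * e1000_set_features - apply changed netdev feature flags
 * @netdev: network interface device structure
 * @features: the new feature set
 *
 * Updates VLAN acceleration immediately; changes to RXCSUM or RXALL
 * trigger a reset (or a full reinit if the interface is running).
 **/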
825 static int e1000_set_features(struct net_device *netdev,
826         netdev_features_t features)
827 {
828         struct e1000_adapter *adapter = netdev_priv(netdev);
829         netdev_features_t changed = features ^ netdev->features;
830
831         if (changed & NETIF_F_HW_VLAN_RX)
832                 e1000_vlan_mode(netdev, features);
833
834         if (!(changed & (NETIF_F_RXCSUM | NETIF_F_RXALL)))
835                 return 0;
836
837         netdev->features = features;
838         adapter->rx_csum = !!(features & NETIF_F_RXCSUM);
839
840         if (netif_running(netdev))
841                 e1000_reinit_locked(adapter);
842         else
843                 e1000_reset(adapter);
844
845         return 0;
846 }
847
848 static const struct net_device_ops e1000_netdev_ops = {
849         .ndo_open               = e1000_open,
850         .ndo_stop               = e1000_close,
851         .ndo_start_xmit         = e1000_xmit_frame,
852         .ndo_get_stats          = e1000_get_stats,
853         .ndo_set_rx_mode        = e1000_set_rx_mode,
854         .ndo_set_mac_address    = e1000_set_mac,
855         .ndo_tx_timeout         = e1000_tx_timeout,
856         .ndo_change_mtu         = e1000_change_mtu,
857         .ndo_do_ioctl           = e1000_ioctl,
858         .ndo_validate_addr      = eth_validate_addr,
859         .ndo_vlan_rx_add_vid    = e1000_vlan_rx_add_vid,
860         .ndo_vlan_rx_kill_vid   = e1000_vlan_rx_kill_vid,
861 #ifdef CONFIG_NET_POLL_CONTROLLER
862         .ndo_poll_controller    = e1000_netpoll,
863 #endif
864         .ndo_fix_features       = e1000_fix_features,
865         .ndo_set_features       = e1000_set_features,
866 };
867
868 /**
869  * e1000_init_hw_struct - initialize members of hw struct
870  * @adapter: board private struct
871  * @hw: structure used by e1000_hw.c
872  *
873  * Factors out initialization of the e1000_hw struct to its own function
874  * that can be called very early at init (just after struct allocation).
875  * Fields are initialized based on PCI device information and
876  * OS network device settings (MTU size).
877  * Returns negative error codes if MAC type setup fails.
878  */
879 static int e1000_init_hw_struct(struct e1000_adapter *adapter,
880                                 struct e1000_hw *hw)
881 {
882         struct pci_dev *pdev = adapter->pdev;
883
884         /* PCI config space info */
885         hw->vendor_id = pdev->vendor;
886         hw->device_id = pdev->device;
887         hw->subsystem_vendor_id = pdev->subsystem_vendor;
888         hw->subsystem_id = pdev->subsystem_device;
889         hw->revision_id = pdev->revision;
890
891         pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
892
893         hw->max_frame_size = adapter->netdev->mtu +
894                              ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
895         hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
896
897         /* identify the MAC */
898         if (e1000_set_mac_type(hw)) {
899                 e_err(probe, "Unknown MAC Type\n");
900                 return -EIO;
901         }
902
903         switch (hw->mac_type) {
904         default:
905                 break;
906         case e1000_82541:
907         case e1000_82547:
908         case e1000_82541_rev_2:
909         case e1000_82547_rev_2:
910                 hw->phy_init_script = 1;
911                 break;
912         }
913
914         e1000_set_media_type(hw);
915         e1000_get_bus_info(hw);
916
917         hw->wait_autoneg_complete = false;
918         hw->tbi_compatibility_en = true;
919         hw->adaptive_ifs = true;
920
921         /* Copper options */
922
923         if (hw->media_type == e1000_media_type_copper) {
924                 hw->mdix = AUTO_ALL_MODES;
925                 hw->disable_polarity_correction = false;
926                 hw->master_slave = E1000_MASTER_SLAVE;
927         }
928
929         return 0;
930 }
931
932 /**
933  * e1000_probe - Device Initialization Routine
934  * @pdev: PCI device information struct
935  * @ent: entry in e1000_pci_tbl
936  *
937  * Returns 0 on success, negative on failure
938  *
939  * e1000_probe initializes an adapter identified by a pci_dev structure.
940  * The OS initialization, configuring of the adapter private structure,
941  * and a hardware reset occur.
942  **/
943 static int __devinit e1000_probe(struct pci_dev *pdev,
944                                  const struct pci_device_id *ent)
945 {
946         struct net_device *netdev;
947         struct e1000_adapter *adapter;
948         struct e1000_hw *hw;
949
950         static int cards_found = 0;
951         static int global_quad_port_a = 0; /* global ksp3 port a indication */
952         int i, err, pci_using_dac;
953         u16 eeprom_data = 0;
954         u16 tmp = 0;
955         u16 eeprom_apme_mask = E1000_EEPROM_APME;
956         int bars, need_ioport;
957
958         /* do not allocate ioport bars when not needed */
959         need_ioport = e1000_is_need_ioport(pdev);
960         if (need_ioport) {
961                 bars = pci_select_bars(pdev, IORESOURCE_MEM | IORESOURCE_IO);
962                 err = pci_enable_device(pdev);
963         } else {
964                 bars = pci_select_bars(pdev, IORESOURCE_MEM);
965                 err = pci_enable_device_mem(pdev);
966         }
967         if (err)
968                 return err;
969
970         err = pci_request_selected_regions(pdev, bars, e1000_driver_name);
971         if (err)
972                 goto err_pci_reg;
973
974         pci_set_master(pdev);
975         err = pci_save_state(pdev);
976         if (err)
977                 goto err_alloc_etherdev;
978
979         err = -ENOMEM;
980         netdev = alloc_etherdev(sizeof(struct e1000_adapter));
981         if (!netdev)
982                 goto err_alloc_etherdev;
983
984         SET_NETDEV_DEV(netdev, &pdev->dev);
985
986         pci_set_drvdata(pdev, netdev);
987         adapter = netdev_priv(netdev);
988         adapter->netdev = netdev;
989         adapter->pdev = pdev;
990         adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
991         adapter->bars = bars;
992         adapter->need_ioport = need_ioport;
993
994         hw = &adapter->hw;
995         hw->back = adapter;
996
997         err = -EIO;
998         hw->hw_addr = pci_ioremap_bar(pdev, BAR_0);
999         if (!hw->hw_addr)
1000                 goto err_ioremap;
1001
1002         if (adapter->need_ioport) {
1003                 for (i = BAR_1; i <= BAR_5; i++) {
1004                         if (pci_resource_len(pdev, i) == 0)
1005                                 continue;
1006                         if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
1007                                 hw->io_base = pci_resource_start(pdev, i);
1008                                 break;
1009                         }
1010                 }
1011         }
1012
1013         /* make ready for any if (hw->...) below */
1014         err = e1000_init_hw_struct(adapter, hw);
1015         if (err)
1016                 goto err_sw_init;
1017
1018         /*
1019          * there is a workaround being applied below that limits
1020          * 64-bit DMA addresses to 64-bit hardware.  There are some
1021          * 32-bit adapters that hang on Tx when given 64-bit DMA addresses
1022          */
1023         pci_using_dac = 0;
1024         if ((hw->bus_type == e1000_bus_type_pcix) &&
1025             !dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
1026                 /*
1027                  * according to DMA-API-HOWTO, coherent calls will always
1028                  * succeed if the set call did
1029                  */
1030                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
1031                 pci_using_dac = 1;
1032         } else {
1033                 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
1034                 if (err) {
1035                         pr_err("No usable DMA config, aborting\n");
1036                         goto err_dma;
1037                 }
1038                 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
1039         }
1040
1041         netdev->netdev_ops = &e1000_netdev_ops;
1042         e1000_set_ethtool_ops(netdev);
1043         netdev->watchdog_timeo = 5 * HZ;
1044         netif_napi_add(netdev, &adapter->napi, e1000_clean, 64);
1045
1046         strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
1047
1048         adapter->bd_number = cards_found;
1049
1050         /* setup the private structure */
1051
1052         err = e1000_sw_init(adapter);
1053         if (err)
1054                 goto err_sw_init;
1055
1056         err = -EIO;
1057         if (hw->mac_type == e1000_ce4100) {
1058                 hw->ce4100_gbe_mdio_base_virt =
1059                                         ioremap(pci_resource_start(pdev, BAR_1),
1060                                                 pci_resource_len(pdev, BAR_1));
1061
1062                 if (!hw->ce4100_gbe_mdio_base_virt)
1063                         goto err_mdio_ioremap;
1064         }
1065
1066         if (hw->mac_type >= e1000_82543) {
1067                 netdev->hw_features = NETIF_F_SG |
1068                                    NETIF_F_HW_CSUM |
1069                                    NETIF_F_HW_VLAN_RX;
1070                 netdev->features = NETIF_F_HW_VLAN_TX |
1071                                    NETIF_F_HW_VLAN_FILTER;
1072         }
1073
1074         if ((hw->mac_type >= e1000_82544) &&
1075            (hw->mac_type != e1000_82547))
1076                 netdev->hw_features |= NETIF_F_TSO;
1077
1078         netdev->priv_flags |= IFF_SUPP_NOFCS;
1079
1080         netdev->features |= netdev->hw_features;
1081         netdev->hw_features |= NETIF_F_RXCSUM;
1082         netdev->hw_features |= NETIF_F_RXALL;
1083         netdev->hw_features |= NETIF_F_RXFCS;
1084
1085         if (pci_using_dac) {
1086                 netdev->features |= NETIF_F_HIGHDMA;
1087                 netdev->vlan_features |= NETIF_F_HIGHDMA;
1088         }
1089
1090         netdev->vlan_features |= NETIF_F_TSO;
1091         netdev->vlan_features |= NETIF_F_HW_CSUM;
1092         netdev->vlan_features |= NETIF_F_SG;
1093
1094         netdev->priv_flags |= IFF_UNICAST_FLT;
1095
1096         adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1097
1098         /* initialize eeprom parameters */
1099         if (e1000_init_eeprom_params(hw)) {
1100                 e_err(probe, "EEPROM initialization failed\n");
1101                 goto err_eeprom;
1102         }
1103
1104         /* before reading the EEPROM, reset the controller to
1105          * put the device in a known good starting state */
1106
1107         e1000_reset_hw(hw);
1108
1109         /* make sure the EEPROM is good */
1110         if (e1000_validate_eeprom_checksum(hw) < 0) {
1111                 e_err(probe, "The EEPROM Checksum Is Not Valid\n");
1112                 e1000_dump_eeprom(adapter);
1113                 /*
1114                  * set MAC address to all zeroes to invalidate and temporarily
1115                  * disable this device for the user. This blocks regular
1116                  * traffic while still permitting ethtool ioctls from reaching
1117                  * the hardware as well as allowing the user to run the
1118                  * interface after manually setting a hw addr using
1119                  * `ip link set address`
1120                  */
1121                 memset(hw->mac_addr, 0, netdev->addr_len);
1122         } else {
1123                 /* copy the MAC address out of the EEPROM */
1124                 if (e1000_read_mac_addr(hw))
1125                         e_err(probe, "EEPROM Read Error\n");
1126         }
1127         /* don't block initialization here due to bad MAC address */
1128         memcpy(netdev->dev_addr, hw->mac_addr, netdev->addr_len);
1129         memcpy(netdev->perm_addr, hw->mac_addr, netdev->addr_len);
1130
1131         if (!is_valid_ether_addr(netdev->perm_addr))
1132                 e_err(probe, "Invalid MAC Address\n");
1133
1134
1135         INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
1136         INIT_DELAYED_WORK(&adapter->fifo_stall_task,
1137                           e1000_82547_tx_fifo_stall_task);
1138         INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
1139         INIT_WORK(&adapter->reset_task, e1000_reset_task);
1140
1141         e1000_check_options(adapter);
1142
1143         /* Initial Wake on LAN setting
1144          * If APM wake is enabled in the EEPROM,
1145          * enable the ACPI Magic Packet filter
1146          */
1147
1148         switch (hw->mac_type) {
1149         case e1000_82542_rev2_0:
1150         case e1000_82542_rev2_1:
1151         case e1000_82543:
1152                 break;
1153         case e1000_82544:
1154                 e1000_read_eeprom(hw,
1155                         EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
1156                 eeprom_apme_mask = E1000_EEPROM_82544_APM;
1157                 break;
1158         case e1000_82546:
1159         case e1000_82546_rev_3:
1160                 if (er32(STATUS) & E1000_STATUS_FUNC_1){
1161                         e1000_read_eeprom(hw,
1162                                 EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
1163                         break;
1164                 }
1165                 /* Fall Through */
1166         default:
1167                 e1000_read_eeprom(hw,
1168                         EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
1169                 break;
1170         }
1171         if (eeprom_data & eeprom_apme_mask)
1172                 adapter->eeprom_wol |= E1000_WUFC_MAG;
1173
1174         /* now that we have the eeprom settings, apply the special cases
1175          * where the eeprom may be wrong or the board simply won't support
1176          * wake on lan on a particular port */
1177         switch (pdev->device) {
1178         case E1000_DEV_ID_82546GB_PCIE:
1179                 adapter->eeprom_wol = 0;
1180                 break;
1181         case E1000_DEV_ID_82546EB_FIBER:
1182         case E1000_DEV_ID_82546GB_FIBER:
1183                 /* Wake events only supported on port A for dual fiber
1184                  * regardless of eeprom setting */
1185                 if (er32(STATUS) & E1000_STATUS_FUNC_1)
1186                         adapter->eeprom_wol = 0;
1187                 break;
1188         case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
1189                 /* if quad port adapter, disable WoL on all but port A */
1190                 if (global_quad_port_a != 0)
1191                         adapter->eeprom_wol = 0;
1192                 else
1193                         adapter->quad_port_a = true;
1194                 /* Reset for multiple quad port adapters */
1195                 if (++global_quad_port_a == 4)
1196                         global_quad_port_a = 0;
1197                 break;
1198         }
1199
1200         /* initialize the wol settings based on the eeprom settings */
1201         adapter->wol = adapter->eeprom_wol;
1202         device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1203
1204         /* Auto detect PHY address */
1205         if (hw->mac_type == e1000_ce4100) {
1206                 for (i = 0; i < 32; i++) {
1207                         hw->phy_addr = i;
1208                         e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1209                         if (tmp == 0 || tmp == 0xFF) {
1210                                 if (i == 31)
1211                                         goto err_eeprom;
1212                                 continue;
1213                         } else
1214                                 break;
1215                 }
1216         }
1217
1218         /* reset the hardware with the new settings */
1219         e1000_reset(adapter);
1220
1221         strcpy(netdev->name, "eth%d");
1222         err = register_netdev(netdev);
1223         if (err)
1224                 goto err_register;
1225
1226         e1000_vlan_filter_on_off(adapter, false);
1227
1228         /* print bus type/speed/width info */
1229         e_info(probe, "(PCI%s:%dMHz:%d-bit) %pM\n",
1230                ((hw->bus_type == e1000_bus_type_pcix) ? "-X" : ""),
1231                ((hw->bus_speed == e1000_bus_speed_133) ? 133 :
1232                 (hw->bus_speed == e1000_bus_speed_120) ? 120 :
1233                 (hw->bus_speed == e1000_bus_speed_100) ? 100 :
1234                 (hw->bus_speed == e1000_bus_speed_66) ? 66 : 33),
1235                ((hw->bus_width == e1000_bus_width_64) ? 64 : 32),
1236                netdev->dev_addr);
1237
1238         /* carrier off reporting is important to ethtool even BEFORE open */
1239         netif_carrier_off(netdev);
1240
1241         e_info(probe, "Intel(R) PRO/1000 Network Connection\n");
1242
1243         cards_found++;
1244         return 0;
1245
1246 err_register:
1247 err_eeprom:
1248         e1000_phy_hw_reset(hw);
1249
1250         if (hw->flash_address)
1251                 iounmap(hw->flash_address);
1252         kfree(adapter->tx_ring);
1253         kfree(adapter->rx_ring);
1254 err_dma:
1255 err_sw_init:
1256 err_mdio_ioremap:
1257         iounmap(hw->ce4100_gbe_mdio_base_virt);
1258         iounmap(hw->hw_addr);
1259 err_ioremap:
1260         free_netdev(netdev);
1261 err_alloc_etherdev:
1262         pci_release_selected_regions(pdev, bars);
1263 err_pci_reg:
1264         pci_disable_device(pdev);
1265         return err;
1266 }
1267
1268 /**
1269  * e1000_remove - Device Removal Routine
1270  * @pdev: PCI device information struct
1271  *
1272  * e1000_remove is called by the PCI subsystem to alert the driver
1273  * that it should release a PCI device.  This could be caused by a
1274  * Hot-Plug event, or because the driver is going to be removed from
1275  * memory.
1276  **/
1277
1278 static void __devexit e1000_remove(struct pci_dev *pdev)
1279 {
1280         struct net_device *netdev = pci_get_drvdata(pdev);
1281         struct e1000_adapter *adapter = netdev_priv(netdev);
1282         struct e1000_hw *hw = &adapter->hw;
1283
1284         e1000_down_and_stop(adapter);
1285         e1000_release_manageability(adapter);
1286
1287         unregister_netdev(netdev);
1288
1289         e1000_phy_hw_reset(hw);
1290
1291         kfree(adapter->tx_ring);
1292         kfree(adapter->rx_ring);
1293
1294         if (hw->mac_type == e1000_ce4100)
1295                 iounmap(hw->ce4100_gbe_mdio_base_virt);
1296         iounmap(hw->hw_addr);
1297         if (hw->flash_address)
1298                 iounmap(hw->flash_address);
1299         pci_release_selected_regions(pdev, adapter->bars);
1300
1301         free_netdev(netdev);
1302
1303         pci_disable_device(pdev);
1304 }
1305
1306 /**
1307  * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
1308  * @adapter: board private structure to initialize
1309  *
1310  * e1000_sw_init initializes the Adapter private data structure.
1311  * e1000_init_hw_struct MUST be called before this function
1312  **/
1313
1314 static int __devinit e1000_sw_init(struct e1000_adapter *adapter)
1315 {
1316         adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
1317
1318         adapter->num_tx_queues = 1;
1319         adapter->num_rx_queues = 1;
1320
1321         if (e1000_alloc_queues(adapter)) {
1322                 e_err(probe, "Unable to allocate memory for queues\n");
1323                 return -ENOMEM;
1324         }
1325
1326         /* Explicitly disable IRQ since the NIC can be in any state. */
1327         e1000_irq_disable(adapter);
1328
1329         spin_lock_init(&adapter->stats_lock);
1330         mutex_init(&adapter->mutex);
1331
1332         set_bit(__E1000_DOWN, &adapter->flags);
1333
1334         return 0;
1335 }
1336
1337 /**
1338  * e1000_alloc_queues - Allocate memory for all rings
1339  * @adapter: board private structure to initialize
1340  *
1341  * We allocate one ring per queue at run-time since we don't know the
1342  * number of queues at compile-time.
1343  **/
1344
1345 static int __devinit e1000_alloc_queues(struct e1000_adapter *adapter)
1346 {
1347         adapter->tx_ring = kcalloc(adapter->num_tx_queues,
1348                                    sizeof(struct e1000_tx_ring), GFP_KERNEL);
1349         if (!adapter->tx_ring)
1350                 return -ENOMEM;
1351
1352         adapter->rx_ring = kcalloc(adapter->num_rx_queues,
1353                                    sizeof(struct e1000_rx_ring), GFP_KERNEL);
1354         if (!adapter->rx_ring) {
1355                 kfree(adapter->tx_ring);
1356                 return -ENOMEM;
1357         }
1358
1359         return E1000_SUCCESS;
1360 }
1361
1362 /**
1363  * e1000_open - Called when a network interface is made active
1364  * @netdev: network interface device structure
1365  *
1366  * Returns 0 on success, negative value on failure
1367  *
1368  * The open entry point is called when a network interface is made
1369  * active by the system (IFF_UP).  At this point all resources needed
1370  * for transmit and receive operations are allocated, the interrupt
1371  * handler is registered with the OS, the watchdog task is started,
1372  * and the stack is notified that the interface is ready.
1373  **/
1374
1375 static int e1000_open(struct net_device *netdev)
1376 {
1377         struct e1000_adapter *adapter = netdev_priv(netdev);
1378         struct e1000_hw *hw = &adapter->hw;
1379         int err;
1380
1381         /* disallow open during test */
1382         if (test_bit(__E1000_TESTING, &adapter->flags))
1383                 return -EBUSY;
1384
1385         netif_carrier_off(netdev);
1386
1387         /* allocate transmit descriptors */
1388         err = e1000_setup_all_tx_resources(adapter);
1389         if (err)
1390                 goto err_setup_tx;
1391
1392         /* allocate receive descriptors */
1393         err = e1000_setup_all_rx_resources(adapter);
1394         if (err)
1395                 goto err_setup_rx;
1396
1397         e1000_power_up_phy(adapter);
1398
1399         adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
1400         if ((hw->mng_cookie.status &
1401                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT)) {
1402                 e1000_update_mng_vlan(adapter);
1403         }
1404
1405         /* before we allocate an interrupt, we must be ready to handle it.
1406          * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1407          * as soon as we call request_irq, so we have to set up our
1408          * clean_rx handler before we do so.  */
1409         e1000_configure(adapter);
1410
1411         err = e1000_request_irq(adapter);
1412         if (err)
1413                 goto err_req_irq;
1414
1415         /* From here on the code is the same as e1000_up() */
1416         clear_bit(__E1000_DOWN, &adapter->flags);
1417
1418         napi_enable(&adapter->napi);
1419
1420         e1000_irq_enable(adapter);
1421
1422         netif_start_queue(netdev);
1423
1424         /* fire a link status change interrupt to start the watchdog */
1425         ew32(ICS, E1000_ICS_LSC);
1426
1427         return E1000_SUCCESS;
1428
1429 err_req_irq:
1430         e1000_power_down_phy(adapter);
1431         e1000_free_all_rx_resources(adapter);
1432 err_setup_rx:
1433         e1000_free_all_tx_resources(adapter);
1434 err_setup_tx:
1435         e1000_reset(adapter);
1436
1437         return err;
1438 }
1439
1440 /**
1441  * e1000_close - Disables a network interface
1442  * @netdev: network interface device structure
1443  *
1444  * Returns 0, this is not allowed to fail
1445  *
1446  * The close entry point is called when an interface is de-activated
1447  * by the OS.  The hardware is still under the drivers control, but
1448  * needs to be disabled.  A global MAC reset is issued to stop the
1449  * hardware, and all transmit and receive resources are freed.
1450  **/
1451
1452 static int e1000_close(struct net_device *netdev)
1453 {
1454         struct e1000_adapter *adapter = netdev_priv(netdev);
1455         struct e1000_hw *hw = &adapter->hw;
1456
1457         WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
1458         e1000_down(adapter);
1459         e1000_power_down_phy(adapter);
1460         e1000_free_irq(adapter);
1461
1462         e1000_free_all_tx_resources(adapter);
1463         e1000_free_all_rx_resources(adapter);
1464
1465         /* kill manageability vlan ID if supported, but not if a vlan with
1466          * the same ID is registered on the host OS (let 8021q kill it) */
1467         if ((hw->mng_cookie.status &
1468                           E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
1469              !test_bit(adapter->mng_vlan_id, adapter->active_vlans)) {
1470                 e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1471         }
1472
1473         return 0;
1474 }
1475
1476 /**
1477  * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
1478  * @adapter: address of board private structure
1479  * @start: address of beginning of memory
1480  * @len: length of memory
1481  **/
1482 static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1483                                   unsigned long len)
1484 {
1485         struct e1000_hw *hw = &adapter->hw;
1486         unsigned long begin = (unsigned long)start;
1487         unsigned long end = begin + len;
1488
1489         /* First revisions of the 82545 and 82546 must not allow any memory
1490          * write location to cross a 64 KiB boundary due to errata 23 */
1491         if (hw->mac_type == e1000_82545 ||
1492             hw->mac_type == e1000_ce4100 ||
1493             hw->mac_type == e1000_82546) {
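                /* begin and (end - 1) share the same upper address bits
                 * (>> 16) only when the buffer does not cross a 64 KiB
                 * boundary */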
1494                 return ((begin ^ (end - 1)) >> 16) == 0;
1495         }
1496
1497         return true;
1498 }
1499
1500 /**
1501  * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
1502  * @adapter: board private structure
1503  * @txdr:    tx descriptor ring (for a specific queue) to setup
1504  *
1505  * Return 0 on success, negative on failure
1506  **/
1507
1508 static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1509                                     struct e1000_tx_ring *txdr)
1510 {
1511         struct pci_dev *pdev = adapter->pdev;
1512         int size;
1513
1514         size = sizeof(struct e1000_buffer) * txdr->count;
1515         txdr->buffer_info = vzalloc(size);
1516         if (!txdr->buffer_info) {
1517                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1518                       "ring\n");
1519                 return -ENOMEM;
1520         }
1521
1522         /* round up to nearest 4K */
1523
1524         txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
1525         txdr->size = ALIGN(txdr->size, 4096);
1526
1527         txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
1528                                         GFP_KERNEL);
1529         if (!txdr->desc) {
1530 setup_tx_desc_die:
1531                 vfree(txdr->buffer_info);
1532                 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1533                       "ring\n");
1534                 return -ENOMEM;
1535         }
1536
1537         /* Fix for errata 23, can't cross 64kB boundary */
1538         if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1539                 void *olddesc = txdr->desc;
1540                 dma_addr_t olddma = txdr->dma;
1541                 e_err(tx_err, "txdr align check failed: %u bytes at %p\n",
1542                       txdr->size, txdr->desc);
1543                 /* Try again, without freeing the previous */
1544                 txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
1545                                                 &txdr->dma, GFP_KERNEL);
1546                 /* Failed allocation, critical failure */
1547                 if (!txdr->desc) {
1548                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1549                                           olddma);
1550                         goto setup_tx_desc_die;
1551                 }
1552
1553                 if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
1554                         /* give up */
1555                         dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
1556                                           txdr->dma);
1557                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1558                                           olddma);
1559                         e_err(probe, "Unable to allocate aligned memory "
1560                               "for the transmit descriptor ring\n");
1561                         vfree(txdr->buffer_info);
1562                         return -ENOMEM;
1563                 } else {
1564                         /* Free old allocation, new allocation was successful */
1565                         dma_free_coherent(&pdev->dev, txdr->size, olddesc,
1566                                           olddma);
1567                 }
1568         }
1569         memset(txdr->desc, 0, txdr->size);
1570
1571         txdr->next_to_use = 0;
1572         txdr->next_to_clean = 0;
1573
1574         return 0;
1575 }
1576
1577 /**
1578  * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
1579  *                                (Descriptors) for all queues
1580  * @adapter: board private structure
1581  *
1582  * Return 0 on success, negative on failure
1583  **/
1584
1585 int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
1586 {
1587         int i, err = 0;
1588
1589         for (i = 0; i < adapter->num_tx_queues; i++) {
1590                 err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
1591                 if (err) {
1592                         e_err(probe, "Allocation for Tx Queue %u failed\n", i);
1593                         for (i--; i >= 0; i--)
1594                                 e1000_free_tx_resources(adapter,
1595                                                         &adapter->tx_ring[i]);
1596                         break;
1597                 }
1598         }
1599
1600         return err;
1601 }
1602
1603 /**
1604  * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
1605  * @adapter: board private structure
1606  *
1607  * Configure the Tx unit of the MAC after a reset.
1608  **/
1609
1610 static void e1000_configure_tx(struct e1000_adapter *adapter)
1611 {
1612         u64 tdba;
1613         struct e1000_hw *hw = &adapter->hw;
1614         u32 tdlen, tctl, tipg;
1615         u32 ipgr1, ipgr2;
1616
1617         /* Setup the HW Tx Head and Tail descriptor pointers */
1618
1619         switch (adapter->num_tx_queues) {
1620         case 1:
1621         default:
1622                 tdba = adapter->tx_ring[0].dma;
1623                 tdlen = adapter->tx_ring[0].count *
1624                         sizeof(struct e1000_tx_desc);
1625                 ew32(TDLEN, tdlen);
1626                 ew32(TDBAH, (tdba >> 32));
1627                 ew32(TDBAL, (tdba & 0x00000000ffffffffULL));
1628                 ew32(TDT, 0);
1629                 ew32(TDH, 0);
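                /* cache the TDH/TDT register offsets (the 82542 uses the
                 * legacy offsets) so the transmit path can write the ring
                 * pointers directly */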
1630                 adapter->tx_ring[0].tdh = ((hw->mac_type >= e1000_82543) ?
                                            E1000_TDH : E1000_82542_TDH);
1631                 adapter->tx_ring[0].tdt = ((hw->mac_type >= e1000_82543) ?
                                            E1000_TDT : E1000_82542_TDT);
1632                 break;
1633         }
1634
1635         /* Set the default values for the Tx Inter Packet Gap timer */
1636         if ((hw->media_type == e1000_media_type_fiber ||
1637              hw->media_type == e1000_media_type_internal_serdes))
1638                 tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
1639         else
1640                 tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
1641
1642         switch (hw->mac_type) {
1643         case e1000_82542_rev2_0:
1644         case e1000_82542_rev2_1:
1645                 tipg = DEFAULT_82542_TIPG_IPGT;
1646                 ipgr1 = DEFAULT_82542_TIPG_IPGR1;
1647                 ipgr2 = DEFAULT_82542_TIPG_IPGR2;
1648                 break;
1649         default:
1650                 ipgr1 = DEFAULT_82543_TIPG_IPGR1;
1651                 ipgr2 = DEFAULT_82543_TIPG_IPGR2;
1652                 break;
1653         }
1654         tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
1655         tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
1656         ew32(TIPG, tipg);
1657
1658         /* Set the Tx Interrupt Delay register */
1659
1660         ew32(TIDV, adapter->tx_int_delay);
1661         if (hw->mac_type >= e1000_82540)
1662                 ew32(TADV, adapter->tx_abs_int_delay);
1663
1664         /* Program the Transmit Control Register */
1665
1666         tctl = er32(TCTL);
1667         tctl &= ~E1000_TCTL_CT;
1668         tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
1669                 (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
1670
1671         e1000_config_collision_dist(hw);
1672
1673         /* Setup Transmit Descriptor Settings for eop descriptor */
1674         adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
1675
1676         /* only set IDE if we are delaying interrupts using the timers */
1677         if (adapter->tx_int_delay)
1678                 adapter->txd_cmd |= E1000_TXD_CMD_IDE;
1679
1680         if (hw->mac_type < e1000_82543)
1681                 adapter->txd_cmd |= E1000_TXD_CMD_RPS;
1682         else
1683                 adapter->txd_cmd |= E1000_TXD_CMD_RS;
1684
1685         /* Cache if we're 82544 running in PCI-X because we'll
1686          * need this to apply a workaround later in the send path. */
1687         if (hw->mac_type == e1000_82544 &&
1688             hw->bus_type == e1000_bus_type_pcix)
1689                 adapter->pcix_82544 = true;
1690
1691         ew32(TCTL, tctl);
1692
1693 }
1694
1695 /**
1696  * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
1697  * @adapter: board private structure
1698  * @rxdr:    rx descriptor ring (for a specific queue) to setup
1699  *
1700  * Returns 0 on success, negative on failure
1701  **/
1702
1703 static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1704                                     struct e1000_rx_ring *rxdr)
1705 {
1706         struct pci_dev *pdev = adapter->pdev;
1707         int size, desc_len;
1708
1709         size = sizeof(struct e1000_buffer) * rxdr->count;
1710         rxdr->buffer_info = vzalloc(size);
1711         if (!rxdr->buffer_info) {
1712                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1713                       "ring\n");
1714                 return -ENOMEM;
1715         }
1716
1717         desc_len = sizeof(struct e1000_rx_desc);
1718
1719         /* Round up to nearest 4K */
1720
1721         rxdr->size = rxdr->count * desc_len;
1722         rxdr->size = ALIGN(rxdr->size, 4096);
1723
1724         rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
1725                                         GFP_KERNEL);
1726
1727         if (!rxdr->desc) {
1728                 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1729                       "ring\n");
1730 setup_rx_desc_die:
1731                 vfree(rxdr->buffer_info);
1732                 return -ENOMEM;
1733         }
1734
1735         /* Fix for errata 23, can't cross 64kB boundary */
1736         if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1737                 void *olddesc = rxdr->desc;
1738                 dma_addr_t olddma = rxdr->dma;
1739                 e_err(rx_err, "rxdr align check failed: %u bytes at %p\n",
1740                       rxdr->size, rxdr->desc);
1741                 /* Try again, without freeing the previous */
1742                 rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
1743                                                 &rxdr->dma, GFP_KERNEL);
1744                 /* Failed allocation, critical failure */
1745                 if (!rxdr->desc) {
1746                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1747                                           olddma);
1748                         e_err(probe, "Unable to allocate memory for the Rx "
1749                               "descriptor ring\n");
1750                         goto setup_rx_desc_die;
1751                 }
1752
1753                 if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
1754                         /* give up */
1755                         dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
1756                                           rxdr->dma);
1757                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1758                                           olddma);
1759                         e_err(probe, "Unable to allocate aligned memory for "
1760                               "the Rx descriptor ring\n");
1761                         goto setup_rx_desc_die;
1762                 } else {
1763                         /* Free old allocation, new allocation was successful */
1764                         dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
1765                                           olddma);
1766                 }
1767         }
1768         memset(rxdr->desc, 0, rxdr->size);
1769
1770         rxdr->next_to_clean = 0;
1771         rxdr->next_to_use = 0;
1772         rxdr->rx_skb_top = NULL;
1773
1774         return 0;
1775 }
1776
1777 /**
1778  * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
1779  *                                (Descriptors) for all queues
1780  * @adapter: board private structure
1781  *
1782  * Return 0 on success, negative on failure
1783  **/
1784
1785 int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
1786 {
1787         int i, err = 0;
1788
1789         for (i = 0; i < adapter->num_rx_queues; i++) {
1790                 err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
1791                 if (err) {
1792                         e_err(probe, "Allocation for Rx Queue %u failed\n", i);
1793                         for (i--; i >= 0; i--)
1794                                 e1000_free_rx_resources(adapter,
1795                                                         &adapter->rx_ring[i]);
1796                         break;
1797                 }
1798         }
1799
1800         return err;
1801 }
1802
1803 /**
1804  * e1000_setup_rctl - configure the receive control registers
1805  * @adapter: Board private structure
1806  **/
1807 static void e1000_setup_rctl(struct e1000_adapter *adapter)
1808 {
1809         struct e1000_hw *hw = &adapter->hw;
1810         u32 rctl;
1811
1812         rctl = er32(RCTL);
1813
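        /* clear the two multicast-offset bits before programming
         * mc_filter_type below */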
1814         rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
1815
1816         rctl |= E1000_RCTL_BAM | E1000_RCTL_LBM_NO |
1817                 E1000_RCTL_RDMTS_HALF |
1818                 (hw->mc_filter_type << E1000_RCTL_MO_SHIFT);
1819
1820         if (hw->tbi_compatibility_on == 1)
1821                 rctl |= E1000_RCTL_SBP;
1822         else
1823                 rctl &= ~E1000_RCTL_SBP;
1824
1825         if (adapter->netdev->mtu <= ETH_DATA_LEN)
1826                 rctl &= ~E1000_RCTL_LPE;
1827         else
1828                 rctl |= E1000_RCTL_LPE;
1829
1830         /* Setup buffer sizes */
1831         rctl &= ~E1000_RCTL_SZ_4096;
1832         rctl |= E1000_RCTL_BSEX;
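        /* BSEX scales the BSIZE encodings by 16; it is cleared again below
         * for the default 2048 byte buffers */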
1833         switch (adapter->rx_buffer_len) {
1834                 case E1000_RXBUFFER_2048:
1835                 default:
1836                         rctl |= E1000_RCTL_SZ_2048;
1837                         rctl &= ~E1000_RCTL_BSEX;
1838                         break;
1839                 case E1000_RXBUFFER_4096:
1840                         rctl |= E1000_RCTL_SZ_4096;
1841                         break;
1842                 case E1000_RXBUFFER_8192:
1843                         rctl |= E1000_RCTL_SZ_8192;
1844                         break;
1845                 case E1000_RXBUFFER_16384:
1846                         rctl |= E1000_RCTL_SZ_16384;
1847                         break;
1848         }
1849
1850         /* This is useful for sniffing bad packets. */
1851         if (adapter->netdev->features & NETIF_F_RXALL) {
1852                 /* UPE and MPE will be handled by normal PROMISC logic
1853                  * in e1000_set_rx_mode */
1854                 rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
1855                          E1000_RCTL_BAM | /* RX All Bcast Pkts */
1856                          E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
1857
1858                 rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
1859                           E1000_RCTL_DPF | /* Allow filtered pause */
1860                           E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
1861                 /* Do not mess with E1000_CTRL_VME, it affects transmit as well,
1862                  * and that breaks VLANs.
1863                  */
1864         }
1865
1866         ew32(RCTL, rctl);
1867 }
1868
1869 /**
1870  * e1000_configure_rx - Configure 8254x Receive Unit after Reset
1871  * @adapter: board private structure
1872  *
1873  * Configure the Rx unit of the MAC after a reset.
1874  **/
1875
1876 static void e1000_configure_rx(struct e1000_adapter *adapter)
1877 {
1878         u64 rdba;
1879         struct e1000_hw *hw = &adapter->hw;
1880         u32 rdlen, rctl, rxcsum;
1881
1882         if (adapter->netdev->mtu > ETH_DATA_LEN) {
1883                 rdlen = adapter->rx_ring[0].count *
1884                         sizeof(struct e1000_rx_desc);
1885                 adapter->clean_rx = e1000_clean_jumbo_rx_irq;
1886                 adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
1887         } else {
1888                 rdlen = adapter->rx_ring[0].count *
1889                         sizeof(struct e1000_rx_desc);
1890                 adapter->clean_rx = e1000_clean_rx_irq;
1891                 adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
1892         }
1893
1894         /* disable receives while setting up the descriptors */
1895         rctl = er32(RCTL);
1896         ew32(RCTL, rctl & ~E1000_RCTL_EN);
1897
1898         /* set the Receive Delay Timer Register */
1899         ew32(RDTR, adapter->rx_int_delay);
1900
1901         if (hw->mac_type >= e1000_82540) {
1902                 ew32(RADV, adapter->rx_abs_int_delay);
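                /* ITR counts in 256 ns increments, so convert the requested
                 * interrupts/sec into an interval of 256 ns ticks */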
1903                 if (adapter->itr_setting != 0)
1904                         ew32(ITR, 1000000000 / (adapter->itr * 256));
1905         }
1906
1907         /* Setup the HW Rx Head and Tail Descriptor Pointers and
1908          * the Base and Length of the Rx Descriptor Ring */
1909         switch (adapter->num_rx_queues) {
1910         case 1:
1911         default:
1912                 rdba = adapter->rx_ring[0].dma;
1913                 ew32(RDLEN, rdlen);
1914                 ew32(RDBAH, (rdba >> 32));
1915                 ew32(RDBAL, (rdba & 0x00000000ffffffffULL));
1916                 ew32(RDT, 0);
1917                 ew32(RDH, 0);
1918                 adapter->rx_ring[0].rdh = ((hw->mac_type >= e1000_82543) ?
                                            E1000_RDH : E1000_82542_RDH);
1919                 adapter->rx_ring[0].rdt = ((hw->mac_type >= e1000_82543) ?
                                            E1000_RDT : E1000_82542_RDT);
1920                 break;
1921         }
1922
1923         /* Enable 82543 Receive Checksum Offload for TCP and UDP */
1924         if (hw->mac_type >= e1000_82543) {
1925                 rxcsum = er32(RXCSUM);
1926                 if (adapter->rx_csum)
1927                         rxcsum |= E1000_RXCSUM_TUOFL;
1928                 else
1929                         /* don't need to clear IPPCSE as it defaults to 0 */
1930                         rxcsum &= ~E1000_RXCSUM_TUOFL;
1931                 ew32(RXCSUM, rxcsum);
1932         }
1933
1934         /* Enable Receives */
1935         ew32(RCTL, rctl | E1000_RCTL_EN);
1936 }
1937
1938 /**
1939  * e1000_free_tx_resources - Free Tx Resources per Queue
1940  * @adapter: board private structure
1941  * @tx_ring: Tx descriptor ring for a specific queue
1942  *
1943  * Free all transmit software resources
1944  **/
1945
1946 static void e1000_free_tx_resources(struct e1000_adapter *adapter,
1947                                     struct e1000_tx_ring *tx_ring)
1948 {
1949         struct pci_dev *pdev = adapter->pdev;
1950
1951         e1000_clean_tx_ring(adapter, tx_ring);
1952
1953         vfree(tx_ring->buffer_info);
1954         tx_ring->buffer_info = NULL;
1955
1956         dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
1957                           tx_ring->dma);
1958
1959         tx_ring->desc = NULL;
1960 }
1961
1962 /**
1963  * e1000_free_all_tx_resources - Free Tx Resources for All Queues
1964  * @adapter: board private structure
1965  *
1966  * Free all transmit software resources
1967  **/
1968
1969 void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
1970 {
1971         int i;
1972
1973         for (i = 0; i < adapter->num_tx_queues; i++)
1974                 e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
1975 }
1976
1977 static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
1978                                              struct e1000_buffer *buffer_info)
1979 {
1980         if (buffer_info->dma) {
1981                 if (buffer_info->mapped_as_page)
1982                         dma_unmap_page(&adapter->pdev->dev, buffer_info->dma,
1983                                        buffer_info->length, DMA_TO_DEVICE);
1984                 else
1985                         dma_unmap_single(&adapter->pdev->dev, buffer_info->dma,
1986                                          buffer_info->length,
1987                                          DMA_TO_DEVICE);
1988                 buffer_info->dma = 0;
1989         }
1990         if (buffer_info->skb) {
1991                 dev_kfree_skb_any(buffer_info->skb);
1992                 buffer_info->skb = NULL;
1993         }
1994         buffer_info->time_stamp = 0;
1995         /* buffer_info must be completely set up in the transmit path */
1996 }
1997
1998 /**
1999  * e1000_clean_tx_ring - Free Tx Buffers
2000  * @adapter: board private structure
2001  * @tx_ring: ring to be cleaned
2002  **/
2003
2004 static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
2005                                 struct e1000_tx_ring *tx_ring)
2006 {
2007         struct e1000_hw *hw = &adapter->hw;
2008         struct e1000_buffer *buffer_info;
2009         unsigned long size;
2010         unsigned int i;
2011
2012         /* Free all the Tx ring sk_buffs */
2013
2014         for (i = 0; i < tx_ring->count; i++) {
2015                 buffer_info = &tx_ring->buffer_info[i];
2016                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2017         }
2018
2019         size = sizeof(struct e1000_buffer) * tx_ring->count;
2020         memset(tx_ring->buffer_info, 0, size);
2021
2022         /* Zero out the descriptor ring */
2023
2024         memset(tx_ring->desc, 0, tx_ring->size);
2025
2026         tx_ring->next_to_use = 0;
2027         tx_ring->next_to_clean = 0;
2028         tx_ring->last_tx_tso = false;
2029
2030         writel(0, hw->hw_addr + tx_ring->tdh);
2031         writel(0, hw->hw_addr + tx_ring->tdt);
2032 }
2033
2034 /**
2035  * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
2036  * @adapter: board private structure
2037  **/
2038
2039 static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
2040 {
2041         int i;
2042
2043         for (i = 0; i < adapter->num_tx_queues; i++)
2044                 e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
2045 }
2046
2047 /**
2048  * e1000_free_rx_resources - Free Rx Resources
2049  * @adapter: board private structure
2050  * @rx_ring: ring to clean the resources from
2051  *
2052  * Free all receive software resources
2053  **/
2054
2055 static void e1000_free_rx_resources(struct e1000_adapter *adapter,
2056                                     struct e1000_rx_ring *rx_ring)
2057 {
2058         struct pci_dev *pdev = adapter->pdev;
2059
2060         e1000_clean_rx_ring(adapter, rx_ring);
2061
2062         vfree(rx_ring->buffer_info);
2063         rx_ring->buffer_info = NULL;
2064
2065         dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
2066                           rx_ring->dma);
2067
2068         rx_ring->desc = NULL;
2069 }
2070
2071 /**
2072  * e1000_free_all_rx_resources - Free Rx Resources for All Queues
2073  * @adapter: board private structure
2074  *
2075  * Free all receive software resources
2076  **/
2077
2078 void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
2079 {
2080         int i;
2081
2082         for (i = 0; i < adapter->num_rx_queues; i++)
2083                 e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
2084 }
2085
2086 /**
2087  * e1000_clean_rx_ring - Free Rx Buffers per Queue
2088  * @adapter: board private structure
2089  * @rx_ring: ring to free buffers from
2090  **/
2091
2092 static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
2093                                 struct e1000_rx_ring *rx_ring)
2094 {
2095         struct e1000_hw *hw = &adapter->hw;
2096         struct e1000_buffer *buffer_info;
2097         struct pci_dev *pdev = adapter->pdev;
2098         unsigned long size;
2099         unsigned int i;
2100
2101         /* Free all the Rx ring sk_buffs */
2102         for (i = 0; i < rx_ring->count; i++) {
2103                 buffer_info = &rx_ring->buffer_info[i];
2104                 if (buffer_info->dma &&
2105                     adapter->clean_rx == e1000_clean_rx_irq) {
2106                         dma_unmap_single(&pdev->dev, buffer_info->dma,
2107                                          buffer_info->length,
2108                                          DMA_FROM_DEVICE);
2109                 } else if (buffer_info->dma &&
2110                            adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
2111                         dma_unmap_page(&pdev->dev, buffer_info->dma,
2112                                        buffer_info->length,
2113                                        DMA_FROM_DEVICE);
2114                 }
2115
2116                 buffer_info->dma = 0;
2117                 if (buffer_info->page) {
2118                         put_page(buffer_info->page);
2119                         buffer_info->page = NULL;
2120                 }
2121                 if (buffer_info->skb) {
2122                         dev_kfree_skb(buffer_info->skb);
2123                         buffer_info->skb = NULL;
2124                 }
2125         }
2126
2127         /* there also may be some cached data from a chained receive */
2128         if (rx_ring->rx_skb_top) {
2129                 dev_kfree_skb(rx_ring->rx_skb_top);
2130                 rx_ring->rx_skb_top = NULL;
2131         }
2132
2133         size = sizeof(struct e1000_buffer) * rx_ring->count;
2134         memset(rx_ring->buffer_info, 0, size);
2135
2136         /* Zero out the descriptor ring */
2137         memset(rx_ring->desc, 0, rx_ring->size);
2138
2139         rx_ring->next_to_clean = 0;
2140         rx_ring->next_to_use = 0;
2141
2142         writel(0, hw->hw_addr + rx_ring->rdh);
2143         writel(0, hw->hw_addr + rx_ring->rdt);
2144 }
2145
2146 /**
2147  * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
2148  * @adapter: board private structure
2149  **/
2150
2151 static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
2152 {
2153         int i;
2154
2155         for (i = 0; i < adapter->num_rx_queues; i++)
2156                 e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
2157 }
2158
2159 /* The 82542 2.0 (revision 2) needs to have the receive unit in reset
2160  * and memory write and invalidate disabled for certain operations
2161  */
2162 static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
2163 {
2164         struct e1000_hw *hw = &adapter->hw;
2165         struct net_device *netdev = adapter->netdev;
2166         u32 rctl;
2167
2168         e1000_pci_clear_mwi(hw);
2169
2170         rctl = er32(RCTL);
2171         rctl |= E1000_RCTL_RST;
2172         ew32(RCTL, rctl);
2173         E1000_WRITE_FLUSH();
2174         mdelay(5);
2175
2176         if (netif_running(netdev))
2177                 e1000_clean_all_rx_rings(adapter);
2178 }
2179
2180 static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
2181 {
2182         struct e1000_hw *hw = &adapter->hw;
2183         struct net_device *netdev = adapter->netdev;
2184         u32 rctl;
2185
2186         rctl = er32(RCTL);
2187         rctl &= ~E1000_RCTL_RST;
2188         ew32(RCTL, rctl);
2189         E1000_WRITE_FLUSH();
2190         mdelay(5);
2191
2192         if (hw->pci_cmd_word & PCI_COMMAND_INVALIDATE)
2193                 e1000_pci_set_mwi(hw);
2194
2195         if (netif_running(netdev)) {
2196                 /* No need to loop, because 82542 supports only 1 queue */
2197                 struct e1000_rx_ring *ring = &adapter->rx_ring[0];
2198                 e1000_configure_rx(adapter);
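                /* refill the Rx ring that was emptied while the receiver
                 * was held in reset */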
2199                 adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
2200         }
2201 }
2202
2203 /**
2204  * e1000_set_mac - Change the Ethernet Address of the NIC
2205  * @netdev: network interface device structure
2206  * @p: pointer to an address structure
2207  *
2208  * Returns 0 on success, negative on failure
2209  **/
2210
2211 static int e1000_set_mac(struct net_device *netdev, void *p)
2212 {
2213         struct e1000_adapter *adapter = netdev_priv(netdev);
2214         struct e1000_hw *hw = &adapter->hw;
2215         struct sockaddr *addr = p;
2216
2217         if (!is_valid_ether_addr(addr->sa_data))
2218                 return -EADDRNOTAVAIL;
2219
2220         /* 82542 2.0 needs to be in reset to write receive address registers */
2221
2222         if (hw->mac_type == e1000_82542_rev2_0)
2223                 e1000_enter_82542_rst(adapter);
2224
2225         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2226         memcpy(hw->mac_addr, addr->sa_data, netdev->addr_len);
2227
2228         e1000_rar_set(hw, hw->mac_addr, 0);
2229
2230         if (hw->mac_type == e1000_82542_rev2_0)
2231                 e1000_leave_82542_rst(adapter);
2232
2233         return 0;
2234 }
2235
2236 /**
2237  * e1000_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2238  * @netdev: network interface device structure
2239  *
2240  * The set_rx_mode entry point is called whenever the unicast or multicast
2241  * address lists or the network interface flags are updated. This routine is
2242  * responsible for configuring the hardware for proper unicast, multicast,
2243  * promiscuous mode, and all-multi behavior.
2244  **/
2245
2246 static void e1000_set_rx_mode(struct net_device *netdev)
2247 {
2248         struct e1000_adapter *adapter = netdev_priv(netdev);
2249         struct e1000_hw *hw = &adapter->hw;
2250         struct netdev_hw_addr *ha;
2251         bool use_uc = false;
2252         u32 rctl;
2253         u32 hash_value;
2254         int i, rar_entries = E1000_RAR_ENTRIES;
2255         int mta_reg_count = E1000_NUM_MTA_REGISTERS;
2256         u32 *mcarray = kcalloc(mta_reg_count, sizeof(u32), GFP_ATOMIC);
2257
2258         if (!mcarray) {
2259                 e_err(probe, "memory allocation failed\n");
2260                 return;
2261         }
2262
2263         /* Check for Promiscuous and All Multicast modes */
2264
2265         rctl = er32(RCTL);
2266
2267         if (netdev->flags & IFF_PROMISC) {
2268                 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2269                 rctl &= ~E1000_RCTL_VFE;
2270         } else {
2271                 if (netdev->flags & IFF_ALLMULTI)
2272                         rctl |= E1000_RCTL_MPE;
2273                 else
2274                         rctl &= ~E1000_RCTL_MPE;
2275                 /* Enable VLAN filter if there is a VLAN */
2276                 if (e1000_vlan_used(adapter))
2277                         rctl |= E1000_RCTL_VFE;
2278         }
2279
2280         if (netdev_uc_count(netdev) > rar_entries - 1) {
2281                 rctl |= E1000_RCTL_UPE;
2282         } else if (!(netdev->flags & IFF_PROMISC)) {
2283                 rctl &= ~E1000_RCTL_UPE;
2284                 use_uc = true;
2285         }
2286
2287         ew32(RCTL, rctl);
2288
2289         /* 82542 2.0 needs to be in reset to write receive address registers */
2290
2291         if (hw->mac_type == e1000_82542_rev2_0)
2292                 e1000_enter_82542_rst(adapter);
2293
2294         /* load the first 14 addresses into the exact filters 1-14. Unicast
2295          * addresses take precedence to avoid disabling unicast filtering
2296          * when possible.
2297          *
2298          * RAR 0 is used for the station MAC address; if there are fewer
2299          * than 14 addresses, go ahead and clear the remaining filters
2300          */
2301         i = 1;
2302         if (use_uc)
2303                 netdev_for_each_uc_addr(ha, netdev) {
2304                         if (i == rar_entries)
2305                                 break;
2306                         e1000_rar_set(hw, ha->addr, i++);
2307                 }
2308
2309         netdev_for_each_mc_addr(ha, netdev) {
2310                 if (i == rar_entries) {
2311                         /* load any remaining addresses into the hash table */
2312                         u32 hash_reg, hash_bit, mta;
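                        /* bits [11:5] of the hash select one of the 128 MTA
                         * registers, bits [4:0] select the bit within it */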
2313                         hash_value = e1000_hash_mc_addr(hw, ha->addr);
2314                         hash_reg = (hash_value >> 5) & 0x7F;
2315                         hash_bit = hash_value & 0x1F;
2316                         mta = (1 << hash_bit);
2317                         mcarray[hash_reg] |= mta;
2318                 } else {
2319                         e1000_rar_set(hw, ha->addr, i++);
2320                 }
2321         }
2322
2323         for (; i < rar_entries; i++) {
2324                 E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
2325                 E1000_WRITE_FLUSH();
2326                 E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
2327                 E1000_WRITE_FLUSH();
2328         }
2329
2330         /* write the hash table completely, from the bottom up, both to work
2331          * around write-combining chipsets and to avoid flushing each write */
2332         for (i = mta_reg_count - 1; i >= 0; i--) {
2333                 /*
2334                  * The 82544 has an erratum where writing an odd offset
2335                  * overwrites the previous even offset, but writing the range
2336                  * backwards avoids the issue by always writing the odd
2337                  * offset first
2338                  */
2339                 E1000_WRITE_REG_ARRAY(hw, MTA, i, mcarray[i]);
2340         }
2341         E1000_WRITE_FLUSH();
2342
2343         if (hw->mac_type == e1000_82542_rev2_0)
2344                 e1000_leave_82542_rst(adapter);
2345
2346         kfree(mcarray);
2347 }
2348
2349 /**
2350  * e1000_update_phy_info_task - get phy info
2351  * @work: work struct contained inside adapter struct
2352  *
2353  * Need to wait a few seconds after link up to get diagnostic information from
2354  * the phy
2355  */
2356 static void e1000_update_phy_info_task(struct work_struct *work)
2357 {
2358         struct e1000_adapter *adapter = container_of(work,
2359                                                      struct e1000_adapter,
2360                                                      phy_info_task.work);
2361         if (test_bit(__E1000_DOWN, &adapter->flags))
2362                 return;
2363         mutex_lock(&adapter->mutex);
2364         e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
2365         mutex_unlock(&adapter->mutex);
2366 }
2367
2368 /**
2369  * e1000_82547_tx_fifo_stall_task - task to complete work
2370  * @work: work struct contained inside adapter struct
2371  **/
2372 static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
2373 {
2374         struct e1000_adapter *adapter = container_of(work,
2375                                                      struct e1000_adapter,
2376                                                      fifo_stall_task.work);
2377         struct e1000_hw *hw = &adapter->hw;
2378         struct net_device *netdev = adapter->netdev;
2379         u32 tctl;
2380
2381         if (test_bit(__E1000_DOWN, &adapter->flags))
2382                 return;
2383         mutex_lock(&adapter->mutex);
2384         if (atomic_read(&adapter->tx_fifo_stall)) {
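                /* only restore the FIFO pointers once both the descriptor
                 * ring (TDT == TDH) and the on-chip Tx FIFO have completely
                 * drained */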
2385                 if ((er32(TDT) == er32(TDH)) &&
2386                    (er32(TDFT) == er32(TDFH)) &&
2387                    (er32(TDFTS) == er32(TDFHS))) {
2388                         tctl = er32(TCTL);
2389                         ew32(TCTL, tctl & ~E1000_TCTL_EN);
2390                         ew32(TDFT, adapter->tx_head_addr);
2391                         ew32(TDFH, adapter->tx_head_addr);
2392                         ew32(TDFTS, adapter->tx_head_addr);
2393                         ew32(TDFHS, adapter->tx_head_addr);
2394                         ew32(TCTL, tctl);
2395                         E1000_WRITE_FLUSH();
2396
2397                         adapter->tx_fifo_head = 0;
2398                         atomic_set(&adapter->tx_fifo_stall, 0);
2399                         netif_wake_queue(netdev);
2400                 } else if (!test_bit(__E1000_DOWN, &adapter->flags)) {
2401                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
2402                 }
2403         }
2404         mutex_unlock(&adapter->mutex);
2405 }
2406
2407 bool e1000_has_link(struct e1000_adapter *adapter)
2408 {
2409         struct e1000_hw *hw = &adapter->hw;
2410         bool link_active = false;
2411
2412         /* get_link_status is set on LSC (link status) interrupt or on an rx
2413          * sequence error interrupt (except on intel ce4100).
2414          * get_link_status will stay false until e1000_check_for_link
2415          * establishes link, and that applies to copper adapters
2416          * ONLY
2417          */
2418         switch (hw->media_type) {
2419         case e1000_media_type_copper:
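                /* the ce4100 does not get get_link_status set by the LSC
                 * interrupt (see the comment above), so force a PHY check
                 * on every call */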
2420                 if (hw->mac_type == e1000_ce4100)
2421                         hw->get_link_status = 1;
2422                 if (hw->get_link_status) {
2423                         e1000_check_for_link(hw);
2424                         link_active = !hw->get_link_status;
2425                 } else {
2426                         link_active = true;
2427                 }
2428                 break;
2429         case e1000_media_type_fiber:
2430                 e1000_check_for_link(hw);
2431                 link_active = !!(er32(STATUS) & E1000_STATUS_LU);
2432                 break;
2433         case e1000_media_type_internal_serdes:
2434                 e1000_check_for_link(hw);
2435                 link_active = hw->serdes_has_link;
2436                 break;
2437         default:
2438                 break;
2439         }
2440
2441         return link_active;
2442 }
2443
2444 /**
2445  * e1000_watchdog - work function
2446  * @work: work struct contained inside adapter struct
2447  **/
2448 static void e1000_watchdog(struct work_struct *work)
2449 {
2450         struct e1000_adapter *adapter = container_of(work,
2451                                                      struct e1000_adapter,
2452                                                      watchdog_task.work);
2453         struct e1000_hw *hw = &adapter->hw;
2454         struct net_device *netdev = adapter->netdev;
2455         struct e1000_tx_ring *txdr = adapter->tx_ring;
2456         u32 link, tctl;
2457
2458         if (test_bit(__E1000_DOWN, &adapter->flags))
2459                 return;
2460
2461         mutex_lock(&adapter->mutex);
2462         link = e1000_has_link(adapter);
2463         if ((netif_carrier_ok(netdev)) && link)
2464                 goto link_up;
2465
2466         if (link) {
2467                 if (!netif_carrier_ok(netdev)) {
2468                         u32 ctrl;
2469                         bool txb2b = true;
2470                         /* update snapshot of PHY registers on LSC */
2471                         e1000_get_speed_and_duplex(hw,
2472                                                    &adapter->link_speed,
2473                                                    &adapter->link_duplex);
2474
2475                         ctrl = er32(CTRL);
2476                         pr_info("%s NIC Link is Up %d Mbps %s, "
2477                                 "Flow Control: %s\n",
2478                                 netdev->name,
2479                                 adapter->link_speed,
2480                                 adapter->link_duplex == FULL_DUPLEX ?
2481                                 "Full Duplex" : "Half Duplex",
2482                                 ((ctrl & E1000_CTRL_TFCE) && (ctrl &
2483                                 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
2484                                 E1000_CTRL_RFCE) ? "RX" : ((ctrl &
2485                                 E1000_CTRL_TFCE) ? "TX" : "None")));
2486
2487                         /* adjust timeout factor according to speed/duplex */
2488                         adapter->tx_timeout_factor = 1;
2489                         switch (adapter->link_speed) {
2490                         case SPEED_10:
2491                                 txb2b = false;
2492                                 adapter->tx_timeout_factor = 16;
2493                                 break;
2494                         case SPEED_100:
2495                                 txb2b = false;
2496                                 /* maybe add some timeout factor ? */
2497                                 break;
2498                         }
2499
2500                         /* enable transmits in the hardware */
2501                         tctl = er32(TCTL);
2502                         tctl |= E1000_TCTL_EN;
2503                         ew32(TCTL, tctl);
2504
2505                         netif_carrier_on(netdev);
2506                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2507                                 schedule_delayed_work(&adapter->phy_info_task,
2508                                                       2 * HZ);
2509                         adapter->smartspeed = 0;
2510                 }
2511         } else {
2512                 if (netif_carrier_ok(netdev)) {
2513                         adapter->link_speed = 0;
2514                         adapter->link_duplex = 0;
2515                         pr_info("%s NIC Link is Down\n",
2516                                 netdev->name);
2517                         netif_carrier_off(netdev);
2518
2519                         if (!test_bit(__E1000_DOWN, &adapter->flags))
2520                                 schedule_delayed_work(&adapter->phy_info_task,
2521                                                       2 * HZ);
2522                 }
2523
2524                 e1000_smartspeed(adapter);
2525         }
2526
2527 link_up:
2528         e1000_update_stats(adapter);
2529
2530         hw->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
2531         adapter->tpt_old = adapter->stats.tpt;
2532         hw->collision_delta = adapter->stats.colc - adapter->colc_old;
2533         adapter->colc_old = adapter->stats.colc;
2534
2535         adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
2536         adapter->gorcl_old = adapter->stats.gorcl;
2537         adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
2538         adapter->gotcl_old = adapter->stats.gotcl;
2539
2540         e1000_update_adaptive(hw);
2541
2542         if (!netif_carrier_ok(netdev)) {
2543                 if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
2544                         /* We've lost link, so the controller stops DMA,
2545                          * but we've got queued Tx work that's never going
2546                          * to get done, so reset controller to flush Tx.
2547                          * (Do the reset outside of interrupt context). */
2548                         adapter->tx_timeout_count++;
2549                         schedule_work(&adapter->reset_task);
2550                         /* exit immediately since reset is imminent */
2551                         goto unlock;
2552                 }
2553         }
2554
2555         /* Simple mode for Interrupt Throttle Rate (ITR) */
2556         if (hw->mac_type >= e1000_82540 && adapter->itr_setting == 4) {
2557                 /*
2558                  * Symmetric Tx/Rx gets a reduced ITR=2000;
2559                  * Total asymmetrical Tx or Rx gets ITR=8000;
2560                  * everyone else is between 2000-8000.
2561                  */
2562                 u32 goc = (adapter->gotcl + adapter->gorcl) / 10000;
2563                 u32 dif = (adapter->gotcl > adapter->gorcl ?
2564                             adapter->gotcl - adapter->gorcl :
2565                             adapter->gorcl - adapter->gotcl) / 10000;
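                /* dif/goc ranges from 0 (symmetric) to 1 (fully asymmetric),
                 * scaling the target rate linearly from 2000 to 8000 ints/s */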
2566                 u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
2567
2568                 ew32(ITR, 1000000000 / (itr * 256));
2569         }
2570
2571         /* Cause software interrupt to ensure rx ring is cleaned */
2572         ew32(ICS, E1000_ICS_RXDMT0);
2573
2574         /* Force detection of hung controller every watchdog period */
2575         adapter->detect_tx_hung = true;
2576
2577         /* Reschedule the task */
2578         if (!test_bit(__E1000_DOWN, &adapter->flags))
2579                 schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
2580
2581 unlock:
2582         mutex_unlock(&adapter->mutex);
2583 }
2584
2585 enum latency_range {
2586         lowest_latency = 0,
2587         low_latency = 1,
2588         bulk_latency = 2,
2589         latency_invalid = 255
2590 };
2591
2592 /**
2593  * e1000_update_itr - update the dynamic ITR value based on statistics
2594  * @adapter: pointer to adapter
2595  * @itr_setting: current adapter->itr
2596  * @packets: the number of packets during this measurement interval
2597  * @bytes: the number of bytes during this measurement interval
2598  *
2599  *      Stores a new ITR value based on packets and byte
2600  *      counts during the last interrupt.  The advantage of per interrupt
2601  *      computation is faster updates and more accurate ITR for the current
2602  *      traffic pattern.  Constants in this function were computed
2603  *      based on theoretical maximum wire speed and thresholds were set based
2604  *      on testing data as well as attempting to minimize response time
2605  *      while increasing bulk throughput.
2606  *      this functionality is controlled by the InterruptThrottleRate module
2607  *      parameter (see e1000_param.c)
2608  **/
2609 static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2610                                      u16 itr_setting, int packets, int bytes)
2611 {
2612         unsigned int retval = itr_setting;
2613         struct e1000_hw *hw = &adapter->hw;
2614
2615         if (unlikely(hw->mac_type < e1000_82540))
2616                 goto update_itr_done;
2617
2618         if (packets == 0)
2619                 goto update_itr_done;
2620
2621         switch (itr_setting) {
2622         case lowest_latency:
2623                 /* jumbo frames get bulk treatment */
2624                 if (bytes/packets > 8000)
2625                         retval = bulk_latency;
2626                 else if ((packets < 5) && (bytes > 512))
2627                         retval = low_latency;
2628                 break;
2629         case low_latency:  /* 50 usec aka 20000 ints/s */
2630                 if (bytes > 10000) {
2631                         /* jumbo frames need bulk latency setting */
2632                         if (bytes/packets > 8000)
2633                                 retval = bulk_latency;
2634                         else if ((packets < 10) || ((bytes/packets) > 1200))
2635                                 retval = bulk_latency;
2636                         else if ((packets > 35))
2637                                 retval = lowest_latency;
2638                 } else if (bytes/packets > 2000)
2639                         retval = bulk_latency;
2640                 else if (packets <= 2 && bytes < 512)
2641                         retval = lowest_latency;
2642                 break;
2643         case bulk_latency: /* 250 usec aka 4000 ints/s */
2644                 if (bytes > 25000) {
2645                         if (packets > 35)
2646                                 retval = low_latency;
2647                 } else if (bytes < 6000) {
2648                         retval = low_latency;
2649                 }
2650                 break;
2651         }
2652
2653 update_itr_done:
2654         return retval;
2655 }
2656
2657 static void e1000_set_itr(struct e1000_adapter *adapter)
2658 {
2659         struct e1000_hw *hw = &adapter->hw;
2660         u16 current_itr;
2661         u32 new_itr = adapter->itr;
2662
2663         if (unlikely(hw->mac_type < e1000_82540))
2664                 return;
2665
2666         /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2667         if (unlikely(adapter->link_speed != SPEED_1000)) {
2668                 current_itr = 0;
2669                 new_itr = 4000;
2670                 goto set_itr_now;
2671         }
2672
2673         adapter->tx_itr = e1000_update_itr(adapter,
2674                                     adapter->tx_itr,
2675                                     adapter->total_tx_packets,
2676                                     adapter->total_tx_bytes);
2677         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2678         if (adapter->itr_setting == 3 && adapter->tx_itr == lowest_latency)
2679                 adapter->tx_itr = low_latency;
2680
2681         adapter->rx_itr = e1000_update_itr(adapter,
2682                                     adapter->rx_itr,
2683                                     adapter->total_rx_packets,
2684                                     adapter->total_rx_bytes);
2685         /* conservative mode (itr 3) eliminates the lowest_latency setting */
2686         if (adapter->itr_setting == 3 && adapter->rx_itr == lowest_latency)
2687                 adapter->rx_itr = low_latency;
2688
2689         current_itr = max(adapter->rx_itr, adapter->tx_itr);
2690
2691         switch (current_itr) {
2692         /* counts and packets in update_itr are dependent on these numbers */
2693         case lowest_latency:
2694                 new_itr = 70000;
2695                 break;
2696         case low_latency:
2697                 new_itr = 20000; /* aka hwitr = ~200 */
2698                 break;
2699         case bulk_latency:
2700                 new_itr = 4000;
2701                 break;
2702         default:
2703                 break;
2704         }
2705
2706 set_itr_now:
2707         if (new_itr != adapter->itr) {
2708                 /* this attempts to bias the interrupt rate towards Bulk
2709                  * by adding intermediate steps when interrupt rate is
2710                  * increasing */
2711                 new_itr = new_itr > adapter->itr ?
2712                              min(adapter->itr + (new_itr >> 2), new_itr) :
2713                              new_itr;
2714                 adapter->itr = new_itr;
2715                 ew32(ITR, 1000000000 / (new_itr * 256));
2716         }
2717 }
2718
2719 #define E1000_TX_FLAGS_CSUM             0x00000001
2720 #define E1000_TX_FLAGS_VLAN             0x00000002
2721 #define E1000_TX_FLAGS_TSO              0x00000004
2722 #define E1000_TX_FLAGS_IPV4             0x00000008
2723 #define E1000_TX_FLAGS_NO_FCS           0x00000010
2724 #define E1000_TX_FLAGS_VLAN_MASK        0xffff0000
2725 #define E1000_TX_FLAGS_VLAN_SHIFT       16
2726
2727 static int e1000_tso(struct e1000_adapter *adapter,
2728                      struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2729 {
2730         struct e1000_context_desc *context_desc;
2731         struct e1000_buffer *buffer_info;
2732         unsigned int i;
2733         u32 cmd_length = 0;
2734         u16 ipcse = 0, tucse, mss;
2735         u8 ipcss, ipcso, tucss, tucso, hdr_len;
2736         int err;
2737
2738         if (skb_is_gso(skb)) {
2739                 if (skb_header_cloned(skb)) {
2740                         err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2741                         if (err)
2742                                 return err;
2743                 }
2744
2745                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
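                /* hdr_len spans the MAC, IP and TCP headers; everything
                 * beyond it is payload to be segmented */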
2746                 mss = skb_shinfo(skb)->gso_size;
2747                 if (skb->protocol == htons(ETH_P_IP)) {
2748                         struct iphdr *iph = ip_hdr(skb);
2749                         iph->tot_len = 0;
2750                         iph->check = 0;
2751                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2752                                                                  iph->daddr, 0,
2753                                                                  IPPROTO_TCP,
2754                                                                  0);
2755                         cmd_length = E1000_TXD_CMD_IP;
2756                         ipcse = skb_transport_offset(skb) - 1;
2757                 } else if (skb->protocol == htons(ETH_P_IPV6)) {
2758                         ipv6_hdr(skb)->payload_len = 0;
2759                         tcp_hdr(skb)->check =
2760                                 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2761                                                  &ipv6_hdr(skb)->daddr,
2762                                                  0, IPPROTO_TCP, 0);
2763                         ipcse = 0;
2764                 }
2765                 ipcss = skb_network_offset(skb);
2766                 ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
2767                 tucss = skb_transport_offset(skb);
2768                 tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
2769                 tucse = 0;
2770
2771                 cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
2772                                E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
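                /* the length portion of cmd_length is the TCP payload size,
                 * i.e. the total frame length minus the headers */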
2773
2774                 i = tx_ring->next_to_use;
2775                 context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2776                 buffer_info = &tx_ring->buffer_info[i];
2777
2778                 context_desc->lower_setup.ip_fields.ipcss  = ipcss;
2779                 context_desc->lower_setup.ip_fields.ipcso  = ipcso;
2780                 context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
2781                 context_desc->upper_setup.tcp_fields.tucss = tucss;
2782                 context_desc->upper_setup.tcp_fields.tucso = tucso;
2783                 context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
2784                 context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
2785                 context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
2786                 context_desc->cmd_and_length = cpu_to_le32(cmd_length);
2787
2788                 buffer_info->time_stamp = jiffies;
2789                 buffer_info->next_to_watch = i;
2790
2791                 if (++i == tx_ring->count) i = 0;
2792                 tx_ring->next_to_use = i;
2793
2794                 return true;
2795         }
2796         return false;
2797 }
2798
2799 static bool e1000_tx_csum(struct e1000_adapter *adapter,
2800                           struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
2801 {
2802         struct e1000_context_desc *context_desc;
2803         struct e1000_buffer *buffer_info;
2804         unsigned int i;
2805         u8 css;
2806         u32 cmd_len = E1000_TXD_CMD_DEXT;
2807
2808         if (skb->ip_summed != CHECKSUM_PARTIAL)
2809                 return false;
2810
2811         switch (skb->protocol) {
2812         case cpu_to_be16(ETH_P_IP):
2813                 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
2814                         cmd_len |= E1000_TXD_CMD_TCP;
2815                 break;
2816         case cpu_to_be16(ETH_P_IPV6):
2817                 /* XXX not handling all IPV6 headers */
2818                 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
2819                         cmd_len |= E1000_TXD_CMD_TCP;
2820                 break;
2821         default:
2822                 if (unlikely(net_ratelimit()))
2823                         e_warn(drv, "checksum_partial proto=%x!\n",
2824                                skb->protocol);
2825                 break;
2826         }
2827
2828         css = skb_checksum_start_offset(skb);
2829
2830         i = tx_ring->next_to_use;
2831         buffer_info = &tx_ring->buffer_info[i];
2832         context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
2833
2834         context_desc->lower_setup.ip_config = 0;
2835         context_desc->upper_setup.tcp_fields.tucss = css;
2836         context_desc->upper_setup.tcp_fields.tucso =
2837                 css + skb->csum_offset;
2838         context_desc->upper_setup.tcp_fields.tucse = 0;
2839         context_desc->tcp_seg_setup.data = 0;
2840         context_desc->cmd_and_length = cpu_to_le32(cmd_len);
2841
2842         buffer_info->time_stamp = jiffies;
2843         buffer_info->next_to_watch = i;
2844
2845         if (unlikely(++i == tx_ring->count))
2845                 i = 0;
2846         tx_ring->next_to_use = i;
2847
2848         return true;
2849 }
2850
2851 #define E1000_MAX_TXD_PWR       12
2852 #define E1000_MAX_DATA_PER_TXD  (1<<E1000_MAX_TXD_PWR)
2853
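/**
 * e1000_tx_map - DMA-map an skb's head and fragments into Tx buffers
 * @adapter: board private structure
 * @tx_ring: ring the buffers are tracked on
 * @skb: packet being transmitted
 * @first: index of the first buffer_info used for this packet
 * @max_per_txd: largest number of bytes a single descriptor may carry
 * @nr_frags: number of paged fragments in @skb
 * @mss: TSO segment size, or 0 for non-TSO packets
 *
 * Returns the number of buffers mapped, or 0 if a DMA mapping fails
 * (in which case the partially-built mappings are unwound).
 **/
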
2854 static int e1000_tx_map(struct e1000_adapter *adapter,
2855                         struct e1000_tx_ring *tx_ring,
2856                         struct sk_buff *skb, unsigned int first,
2857                         unsigned int max_per_txd, unsigned int nr_frags,
2858                         unsigned int mss)
2859 {
2860         struct e1000_hw *hw = &adapter->hw;
2861         struct pci_dev *pdev = adapter->pdev;
2862         struct e1000_buffer *buffer_info;
2863         unsigned int len = skb_headlen(skb);
2864         unsigned int offset = 0, size, count = 0, i;
2865         unsigned int f, bytecount, segs;
2866
2867         i = tx_ring->next_to_use;
2868
2869         while (len) {
2870                 buffer_info = &tx_ring->buffer_info[i];
2871                 size = min(len, max_per_txd);
2872                 /* Workaround for Controller erratum --
2873                  * descriptor for non-tso packet in a linear SKB that follows a
2874                  * tso gets written back prematurely before the data is fully
2875                  * DMA'd to the controller */
2876                 if (!skb->data_len && tx_ring->last_tx_tso &&
2877                     !skb_is_gso(skb)) {
2878                         tx_ring->last_tx_tso = false;
2879                         size -= 4;
2880                 }
2881
2882                 /* Workaround for premature desc write-backs
2883                  * in TSO mode.  Append 4-byte sentinel desc */
2884                 if (unlikely(mss && !nr_frags && size == len && size > 8))
2885                         size -= 4;
2886                 /* Work-around for errata 10, which applies to all
2887                  * controllers in PCI-X mode.  The fix is to make sure
2888                  * that the first descriptor of a packet is smaller
2889                  * than 2048 - 16 - 16 (i.e. 2016) bytes.
2890                  */
2891                 if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
2892                                 (size > 2015) && count == 0))
2893                         size = 2015;
2894
2895                 /* Workaround for potential 82544 hang in PCI-X.  Avoid
2896                  * terminating buffers within evenly-aligned dwords. */
2897                 if (unlikely(adapter->pcix_82544 &&
2898                    !((unsigned long)(skb->data + offset + size - 1) & 4) &&
2899                    size > 4))
2900                         size -= 4;
2901
2902                 buffer_info->length = size;
2903                 /* set time_stamp *before* dma to help avoid a possible race */
2904                 buffer_info->time_stamp = jiffies;
2905                 buffer_info->mapped_as_page = false;
2906                 buffer_info->dma = dma_map_single(&pdev->dev,
2907                                                   skb->data + offset,
2908                                                   size, DMA_TO_DEVICE);
2909                 if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2910                         goto dma_error;
2911                 buffer_info->next_to_watch = i;
2912
2913                 len -= size;
2914                 offset += size;
2915                 count++;
2916                 if (len) {
2917                         i++;
2918                         if (unlikely(i == tx_ring->count))
2919                                 i = 0;
2920                 }
2921         }
2922
2923         for (f = 0; f < nr_frags; f++) {
2924                 const struct skb_frag_struct *frag;
2925
2926                 frag = &skb_shinfo(skb)->frags[f];
2927                 len = skb_frag_size(frag);
2928                 offset = 0;
2929
2930                 while (len) {
2931                         unsigned long bufend;
2932                         i++;
2933                         if (unlikely(i == tx_ring->count))
2934                                 i = 0;
2935
2936                         buffer_info = &tx_ring->buffer_info[i];
2937                         size = min(len, max_per_txd);
2938                         /* Workaround for premature desc write-backs
2939                          * in TSO mode.  Append 4-byte sentinel desc */
2940                         if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
2941                                 size -= 4;
2942                         /* Workaround for potential 82544 hang in PCI-X.
2943                          * Avoid terminating buffers within evenly-aligned
2944                          * dwords. */
2945                         bufend = (unsigned long)
2946                                 page_to_phys(skb_frag_page(frag));
2947                         bufend += offset + size - 1;
2948                         if (unlikely(adapter->pcix_82544 &&
2949                                      !(bufend & 4) &&
2950                                      size > 4))
2951                                 size -= 4;
2952
2953                         buffer_info->length = size;
2954                         buffer_info->time_stamp = jiffies;
2955                         buffer_info->mapped_as_page = true;
2956                         buffer_info->dma = skb_frag_dma_map(&pdev->dev, frag,
2957                                                 offset, size, DMA_TO_DEVICE);
2958                         if (dma_mapping_error(&pdev->dev, buffer_info->dma))
2959                                 goto dma_error;
2960                         buffer_info->next_to_watch = i;
2961
2962                         len -= size;
2963                         offset += size;
2964                         count++;
2965                 }
2966         }
2967
2968         segs = skb_shinfo(skb)->gso_segs ?: 1;
2969         /* multiply data chunks by size of headers */
2970         bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
2971
2972         tx_ring->buffer_info[i].skb = skb;
2973         tx_ring->buffer_info[i].segs = segs;
2974         tx_ring->buffer_info[i].bytecount = bytecount;
2975         tx_ring->buffer_info[first].next_to_watch = i;
2976
2977         return count;
2978
2979 dma_error:
2980         dev_err(&pdev->dev, "TX DMA map failed\n");
2981         buffer_info->dma = 0;
2982         if (count)
2983                 count--;
2984
2985         while (count--) {
2986                 if (i == 0)
2987                         i += tx_ring->count;
2988                 i--;
2989                 buffer_info = &tx_ring->buffer_info[i];
2990                 e1000_unmap_and_free_tx_resource(adapter, buffer_info);
2991         }
2992
2993         return 0;
2994 }
2995
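/**
 * e1000_tx_queue - turn mapped buffers into Tx descriptors and start DMA
 * @adapter: board private structure
 * @tx_ring: ring the descriptors are written to
 * @tx_flags: E1000_TX_FLAGS_* bits describing offloads/VLAN for this packet
 * @count: number of buffers (as returned by e1000_tx_map) to post
 *
 * Writes @count data descriptors, ORs adapter->txd_cmd into the last
 * one, and then advances the tail register (TDT) so the hardware
 * begins fetching them.
 **/
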
2996 static void e1000_tx_queue(struct e1000_adapter *adapter,
2997                            struct e1000_tx_ring *tx_ring, int tx_flags,
2998                            int count)
2999 {
3000         struct e1000_hw *hw = &adapter->hw;
3001         struct e1000_tx_desc *tx_desc = NULL;
3002         struct e1000_buffer *buffer_info;
3003         u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
3004         unsigned int i;
3005
3006         if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
3007                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
3008                              E1000_TXD_CMD_TSE;
3009                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3010
3011                 if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
3012                         txd_upper |= E1000_TXD_POPTS_IXSM << 8;
3013         }
3014
3015         if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
3016                 txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
3017                 txd_upper |= E1000_TXD_POPTS_TXSM << 8;
3018         }
3019
3020         if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
3021                 txd_lower |= E1000_TXD_CMD_VLE;
3022                 txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
3023         }
3024
3025         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3026                 txd_lower &= ~(E1000_TXD_CMD_IFCS);
3027
3028         i = tx_ring->next_to_use;
3029
3030         while (count--) {
3031                 buffer_info = &tx_ring->buffer_info[i];
3032                 tx_desc = E1000_TX_DESC(*tx_ring, i);
3033                 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
3034                 tx_desc->lower.data =
3035                         cpu_to_le32(txd_lower | buffer_info->length);
3036                 tx_desc->upper.data = cpu_to_le32(txd_upper);
3037                 if (unlikely(++i == tx_ring->count))
3037                         i = 0;
3038         }
3039
3040         tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
3041
3042         /* txd_cmd re-enables FCS, so we'll re-disable it here as desired. */
3043         if (unlikely(tx_flags & E1000_TX_FLAGS_NO_FCS))
3044                 tx_desc->lower.data &= ~(cpu_to_le32(E1000_TXD_CMD_IFCS));
3045
3046         /* Force memory writes to complete before letting h/w
3047          * know there are new descriptors to fetch.  (Only
3048          * applicable for weak-ordered memory model archs,
3049          * such as IA-64). */
3050         wmb();
3051
3052         tx_ring->next_to_use = i;
3053         writel(i, hw->hw_addr + tx_ring->tdt);
3054         /* We need this if more than one processor can write to our tail
3055          * at a time; it synchronizes I/O on IA64/Altix systems. */
3056         mmiowb();
3057 }
3058
3059 /**
3060  * e1000_82547_fifo_workaround - avoid 82547 controller hang in half-duplex environments.
3061  * The workaround is to avoid queuing a large packet that would span
3062  * the internal Tx FIFO ring boundary by notifying the stack to resend
3063  * the packet at a later time.  This gives the Tx FIFO an opportunity to
3064  * flush all packets.  When that occurs, we reset the Tx FIFO pointers
3065  * to the beginning of the Tx FIFO.
3066  **/
3067
3068 #define E1000_FIFO_HDR                  0x10
3069 #define E1000_82547_PAD_LEN             0x3E0
3070
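/* skb_fifo_len is the frame length plus the 16-byte FIFO header, rounded
 * up to a 16-byte multiple.  The function returns 1 when transmission must
 * be deferred (a stall is already pending, or the packet would cross the
 * FIFO wrap point) and 0 when the packet may be queued now; full-duplex
 * links never defer.
 */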
3071 static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
3072                                        struct sk_buff *skb)
3073 {
3074         u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
3075         u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
3076
3077         skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
3078
3079         if (adapter->link_duplex != HALF_DUPLEX)
3080                 goto no_fifo_stall_required;
3081
3082         if (atomic_read(&adapter->tx_fifo_stall))
3083                 return 1;
3084
3085         if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
3086                 atomic_set(&adapter->tx_fifo_stall, 1);
3087                 return 1;
3088         }
3089
3090 no_fifo_stall_required:
3091         adapter->tx_fifo_head += skb_fifo_len;
3092         if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
3093                 adapter->tx_fifo_head -= adapter->tx_fifo_size;
3094         return 0;
3095 }
3096
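/**
 * __e1000_maybe_stop_tx - stop the Tx queue until descriptors free up
 * @netdev: network interface device structure
 * @size: number of descriptors the pending transmit needs
 *
 * Stops the queue and then, after a memory barrier, re-checks the free
 * descriptor count: if the cleanup path has freed enough descriptors in
 * the meantime the queue is restarted immediately and 0 is returned,
 * otherwise -EBUSY is returned and the queue stays stopped.
 **/
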
3097 static int __e1000_maybe_stop_tx(struct net_device *netdev, int size)
3098 {
3099         struct e1000_adapter *adapter = netdev_priv(netdev);
3100         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3101
3102         netif_stop_queue(netdev);
3103         /* Herbert's original patch had:
3104          *  smp_mb__after_netif_stop_queue();
3105          * but since that doesn't exist yet, just open code it. */
3106         smp_mb();
3107
3108         /* We need to check again in a case another CPU has just
3109          * made room available. */
3110         if (likely(E1000_DESC_UNUSED(tx_ring) < size))
3111                 return -EBUSY;
3112
3113         /* A reprieve! */
3114         netif_start_queue(netdev);
3115         ++adapter->restart_queue;
3116         return 0;
3117 }
3118
3119 static int e1000_maybe_stop_tx(struct net_device *netdev,
3120                                struct e1000_tx_ring *tx_ring, int size)
3121 {
3122         if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
3123                 return 0;
3124         return __e1000_maybe_stop_tx(netdev, size);
3125 }
3126
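/* TXD_USE_COUNT(S, X) is an upper bound on the number of descriptors
 * needed for a buffer of S bytes when one descriptor carries at most
 * 2^X bytes.  For example, with X = E1000_MAX_TXD_PWR (12, i.e. 4096
 * bytes per descriptor) a 9000-byte buffer counts as
 * (9000 >> 12) + 1 = 3 descriptors.  The "+ 1" keeps the estimate
 * conservative when S is an exact multiple of 2^X.
 */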
3127 #define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
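/**
 * e1000_xmit_frame - ndo_start_xmit handler
 * @skb: packet to transmit
 * @netdev: network interface device structure
 *
 * Estimates the worst-case descriptor usage for @skb, applies the
 * various controller errata work-arounds, sets up any TSO/checksum
 * offload context, maps the buffers and hands them to e1000_tx_queue().
 **/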
3128 static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
3129                                     struct net_device *netdev)
3130 {
3131         struct e1000_adapter *adapter = netdev_priv(netdev);
3132         struct e1000_hw *hw = &adapter->hw;
3133         struct e1000_tx_ring *tx_ring;
3134         unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
3135         unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
3136         unsigned int tx_flags = 0;
3137         unsigned int len = skb_headlen(skb);
3138         unsigned int nr_frags;
3139         unsigned int mss;
3140         int count = 0;
3141         int tso;
3142         unsigned int f;
3143
3144         /* This goes back to the question of how to logically map a tx queue
3145          * to a flow.  Right now, performance is impacted slightly negatively
3146          * if using multiple tx queues.  If the stack breaks away from a
3147          * single qdisc implementation, we can look at this again. */
3148         tx_ring = adapter->tx_ring;
3149
3150         if (unlikely(skb->len <= 0)) {
3151                 dev_kfree_skb_any(skb);
3152                 return NETDEV_TX_OK;
3153         }
3154
3155         mss = skb_shinfo(skb)->gso_size;
3156         /* The controller does a simple calculation to
3157          * make sure there is enough room in the FIFO before
3158          * initiating the DMA for each buffer.  That calculation
3159          * assumes ceil(buffer len / mss) <= 4, so to make sure we
3160          * don't overrun the FIFO, cap the max buffer len at 4 * mss
3161          * if mss drops. */
3162         if (mss) {
3163                 u8 hdr_len;
3164                 max_per_txd = min(mss << 2, max_per_txd);
3165                 max_txd_pwr = fls(max_per_txd) - 1;
3166
3167                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
3168                 if (skb->data_len && hdr_len == len) {
3169                         switch (hw->mac_type) {
3170                                 unsigned int pull_size;
3171                         case e1000_82544:
3172                                 /* Make sure we have room to chop off 4 bytes,
3173                                  * and that the end alignment will work out to
3174                                  * this hardware's requirements
3175                                  * NOTE: this is a TSO only workaround
3176                                  * if end byte alignment not correct move us
3177                                  * into the next dword */
3178                                 if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
3179                                         break;
3180                                 /* fall through */
3181                                 pull_size = min((unsigned int)4, skb->data_len);
3182                                 if (!__pskb_pull_tail(skb, pull_size)) {
3183                                         e_err(drv, "__pskb_pull_tail "
3184                                               "failed.\n");
3185                                         dev_kfree_skb_any(skb);
3186                                         return NETDEV_TX_OK;
3187                                 }
3188                                 len = skb_headlen(skb);
3189                                 break;
3190                         default:
3191                                 /* do nothing */
3192                                 break;
3193                         }
3194                 }
3195         }
3196
3197         /* reserve a descriptor for the offload context */
3198         if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
3199                 count++;
3200         count++;
3201
3202         /* Controller Erratum workaround */
3203         if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
3204                 count++;
3205
3206         count += TXD_USE_COUNT(len, max_txd_pwr);
3207
3208         if (adapter->pcix_82544)
3209                 count++;
3210
3211         /* Work-around for errata 10, which applies to all controllers
3212          * in PCI-X mode, so add one more descriptor to the count.
3213          */
3214         if (unlikely((hw->bus_type == e1000_bus_type_pcix) &&
3215                         (len > 2015)))
3216                 count++;
3217
3218         nr_frags = skb_shinfo(skb)->nr_frags;
3219         for (f = 0; f < nr_frags; f++)
3220                 count += TXD_USE_COUNT(skb_frag_size(&skb_shinfo(skb)->frags[f]),
3221                                        max_txd_pwr);
3222         if (adapter->pcix_82544)
3223                 count += nr_frags;
3224
3225         /* need: count + 2 desc gap to keep tail from touching
3226          * head, otherwise try next time */
3227         if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2)))
3228                 return NETDEV_TX_BUSY;
3229
3230         if (unlikely((hw->mac_type == e1000_82547) &&
3231                      (e1000_82547_fifo_workaround(adapter, skb)))) {
3232                 netif_stop_queue(netdev);
3233                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3234                         schedule_delayed_work(&adapter->fifo_stall_task, 1);
3235                 return NETDEV_TX_BUSY;
3236         }
3237
3238         if (vlan_tx_tag_present(skb)) {
3239                 tx_flags |= E1000_TX_FLAGS_VLAN;
3240                 tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
3241         }
3242
3243         first = tx_ring->next_to_use;
3244
3245         tso = e1000_tso(adapter, tx_ring, skb);
3246         if (tso < 0) {
3247                 dev_kfree_skb_any(skb);
3248                 return NETDEV_TX_OK;
3249         }
3250
3251         if (likely(tso)) {
3252                 if (likely(hw->mac_type != e1000_82544))
3253                         tx_ring->last_tx_tso = true;
3254                 tx_flags |= E1000_TX_FLAGS_TSO;
3255         } else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
3256                 tx_flags |= E1000_TX_FLAGS_CSUM;
3257
3258         if (likely(skb->protocol == htons(ETH_P_IP)))
3259                 tx_flags |= E1000_TX_FLAGS_IPV4;
3260
3261         if (unlikely(skb->no_fcs))
3262                 tx_flags |= E1000_TX_FLAGS_NO_FCS;
3263
3264         count = e1000_tx_map(adapter, tx_ring, skb, first, max_per_txd,
3265                              nr_frags, mss);
3266
3267         if (count) {
3268                 skb_tx_timestamp(skb);
3269
3270                 e1000_tx_queue(adapter, tx_ring, tx_flags, count);
3271                 /* Make sure there is space in the ring for the next send. */
3272                 e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
3273
3274         } else {
3275                 dev_kfree_skb_any(skb);
3276                 tx_ring->buffer_info[first].time_stamp = 0;
3277                 tx_ring->next_to_use = first;
3278         }
3279
3280         return NETDEV_TX_OK;
3281 }
3282
3283 #define NUM_REGS 38 /* 1 based count */
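/**
 * e1000_regdump - dump a fixed set of MAC registers to the kernel log
 * @adapter: board private structure
 *
 * Reads the NUM_REGS control, receive, transmit and FIFO registers and
 * prints each one by name; called from e1000_dump() when a Tx hang is
 * being diagnosed.
 **/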
3284 static void e1000_regdump(struct e1000_adapter *adapter)
3285 {
3286         struct e1000_hw *hw = &adapter->hw;
3287         u32 regs[NUM_REGS];
3288         u32 *regs_buff = regs;
3289         int i = 0;
3290
3291         static const char * const reg_name[] = {
3292                 "CTRL",  "STATUS",
3293                 "RCTL", "RDLEN", "RDH", "RDT", "RDTR",
3294                 "TCTL", "TDBAL", "TDBAH", "TDLEN", "TDH", "TDT",
3295                 "TIDV", "TXDCTL", "TADV", "TARC0",
3296                 "TDBAL1", "TDBAH1", "TDLEN1", "TDH1", "TDT1",
3297                 "TXDCTL1", "TARC1",
3298                 "CTRL_EXT", "ERT", "RDBAL", "RDBAH",
3299                 "TDFH", "TDFT", "TDFHS", "TDFTS", "TDFPC",
3300                 "RDFH", "RDFT", "RDFHS", "RDFTS", "RDFPC"
3301         };
3302
3303         regs_buff[0]  = er32(CTRL);
3304         regs_buff[1]  = er32(STATUS);
3305
3306         regs_buff[2]  = er32(RCTL);
3307         regs_buff[3]  = er32(RDLEN);
3308         regs_buff[4]  = er32(RDH);
3309         regs_buff[5]  = er32(RDT);
3310         regs_buff[6]  = er32(RDTR);
3311
3312         regs_buff[7]  = er32(TCTL);
3313         regs_buff[8]  = er32(TDBAL);
3314         regs_buff[9]  = er32(TDBAH);
3315         regs_buff[10] = er32(TDLEN);
3316         regs_buff[11] = er32(TDH);
3317         regs_buff[12] = er32(TDT);
3318         regs_buff[13] = er32(TIDV);
3319         regs_buff[14] = er32(TXDCTL);
3320         regs_buff[15] = er32(TADV);
3321         regs_buff[16] = er32(TARC0);
3322
3323         regs_buff[17] = er32(TDBAL1);
3324         regs_buff[18] = er32(TDBAH1);
3325         regs_buff[19] = er32(TDLEN1);
3326         regs_buff[20] = er32(TDH1);
3327         regs_buff[21] = er32(TDT1);
3328         regs_buff[22] = er32(TXDCTL1);
3329         regs_buff[23] = er32(TARC1);
3330         regs_buff[24] = er32(CTRL_EXT);
3331         regs_buff[25] = er32(ERT);
3332         regs_buff[26] = er32(RDBAL0);
3333         regs_buff[27] = er32(RDBAH0);
3334         regs_buff[28] = er32(TDFH);
3335         regs_buff[29] = er32(TDFT);
3336         regs_buff[30] = er32(TDFHS);
3337         regs_buff[31] = er32(TDFTS);
3338         regs_buff[32] = er32(TDFPC);
3339         regs_buff[33] = er32(RDFH);
3340         regs_buff[34] = er32(RDFT);
3341         regs_buff[35] = er32(RDFHS);
3342         regs_buff[36] = er32(RDFTS);
3343         regs_buff[37] = er32(RDFPC);
3344
3345         pr_info("Register dump\n");
3346         for (i = 0; i < NUM_REGS; i++)
3347                 pr_info("%-15s  %08x\n", reg_name[i], regs_buff[i]);
3348 }
3349
3350 /*
3351  * e1000_dump: Print registers, tx ring and rx ring
3352  */
3353 static void e1000_dump(struct e1000_adapter *adapter)
3354 {
3355         /* this code doesn't handle multiple rings */
3356         struct e1000_tx_ring *tx_ring = adapter->tx_ring;
3357         struct e1000_rx_ring *rx_ring = adapter->rx_ring;
3358         int i;
3359
3360         if (!netif_msg_hw(adapter))
3361                 return;
3362
3363         /* Print Registers */
3364         e1000_regdump(adapter);
3365
3366         /*
3367          * transmit dump
3368          */
3369         pr_info("TX Desc ring0 dump\n");
3370
3371         /* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
3372          *
3373          * Legacy Transmit Descriptor
3374          *   +--------------------------------------------------------------+
3375          * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
3376          *   +--------------------------------------------------------------+
3377          * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
3378          *   +--------------------------------------------------------------+
3379          *   63       48 47        36 35    32 31     24 23    16 15        0
3380          *
3381          * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
3382          *   63      48 47    40 39       32 31             16 15    8 7      0
3383          *   +----------------------------------------------------------------+
3384          * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
3385          *   +----------------------------------------------------------------+
3386          * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
3387          *   +----------------------------------------------------------------+
3388          *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
3389          *
3390          * Extended Data Descriptor (DTYP=0x1)
3391          *   +----------------------------------------------------------------+
3392          * 0 |                     Buffer Address [63:0]                      |
3393          *   +----------------------------------------------------------------+
3394          * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
3395          *   +----------------------------------------------------------------+
3396          *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
3397          */
3398         pr_info("Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3399         pr_info("Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen] [bi->dma       ] leng  ntw timestmp         bi->skb\n");
3400
3401         if (!netif_msg_tx_done(adapter))
3402                 goto rx_ring_summary;
3403
3404         for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
3405                 struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
3406                 struct e1000_buffer *buffer_info = &tx_ring->buffer_info[i];
3407                 struct my_u { __le64 a; __le64 b; };
3408                 struct my_u *u = (struct my_u *)tx_desc;
3409                 const char *type;
3410
3411                 if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
3412                         type = "NTC/U";
3413                 else if (i == tx_ring->next_to_use)
3414                         type = "NTU";
3415                 else if (i == tx_ring->next_to_clean)
3416                         type = "NTC";
3417                 else
3418                         type = "";
3419
3420                 pr_info("T%c[0x%03X]    %016llX %016llX %016llX %04X  %3X %016llX %p %s\n",
3421                         ((le64_to_cpu(u->b) & (1<<20)) ? 'd' : 'c'), i,
3422                         le64_to_cpu(u->a), le64_to_cpu(u->b),
3423                         (u64)buffer_info->dma, buffer_info->length,
3424                         buffer_info->next_to_watch,
3425                         (u64)buffer_info->time_stamp, buffer_info->skb, type);
3426         }
3427
3428 rx_ring_summary:
3429         /*
3430          * receive dump
3431          */
3432         pr_info("\nRX Desc ring dump\n");
3433
3434         /* Legacy Receive Descriptor Format
3435          *
3436          * +-----------------------------------------------------+
3437          * |                Buffer Address [63:0]                |
3438          * +-----------------------------------------------------+
3439          * | VLAN Tag | Errors | Status 0 | Packet csum | Length |
3440          * +-----------------------------------------------------+
3441          * 63       48 47    40 39      32 31         16 15      0
3442          */
3443         pr_info("R[desc]      [address 63:0  ] [vl er S cks ln] [bi->dma       ] [bi->skb]\n");
3444
3445         if (!netif_msg_rx_status(adapter))
3446                 goto exit;
3447
3448         for (i = 0; rx_ring->desc && (i < rx_ring->count); i++) {
3449                 struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
3450                 struct e1000_buffer *buffer_info = &rx_ring->buffer_info[i];
3451                 struct my_u { __le64 a; __le64 b; };
3452                 struct my_u *u = (struct my_u *)rx_desc;
3453                 const char *type;
3454
3455                 if (i == rx_ring->next_to_use)
3456                         type = "NTU";
3457                 else if (i == rx_ring->next_to_clean)
3458                         type = "NTC";
3459                 else
3460                         type = "";
3461
3462                 pr_info("R[0x%03X]     %016llX %016llX %016llX %p %s\n",
3463                         i, le64_to_cpu(u->a), le64_to_cpu(u->b),
3464                         (u64)buffer_info->dma, buffer_info->skb, type);
3465         } /* for */
3466
3467         /* dump the descriptor caches */
3468         /* rx */
3469         pr_info("Rx descriptor cache in 64bit format\n");
3470         for (i = 0x6000; i <= 0x63FF ; i += 0x10) {
3471                 pr_info("R%04X: %08X|%08X %08X|%08X\n",
3472                         i,
3473                         readl(adapter->hw.hw_addr + i+4),
3474                         readl(adapter->hw.hw_addr + i),
3475                         readl(adapter->hw.hw_addr + i+12),
3476                         readl(adapter->hw.hw_addr + i+8));
3477         }
3478         /* tx */
3479         pr_info("Tx descriptor cache in 64bit format\n");
3480         for (i = 0x7000; i <= 0x73FF ; i += 0x10) {
3481                 pr_info("T%04X: %08X|%08X %08X|%08X\n",
3482                         i,
3483                         readl(adapter->hw.hw_addr + i+4),
3484                         readl(adapter->hw.hw_addr + i),
3485                         readl(adapter->hw.hw_addr + i+12),
3486                         readl(adapter->hw.hw_addr + i+8));
3487         }
3488 exit:
3489         return;
3490 }
3491
3492 /**
3493  * e1000_tx_timeout - Respond to a Tx Hang
3494  * @netdev: network interface device structure
3495  **/
3496
3497 static void e1000_tx_timeout(struct net_device *netdev)
3498 {
3499         struct e1000_adapter *adapter = netdev_priv(netdev);
3500
3501         /* Do the reset outside of interrupt context */
3502         adapter->tx_timeout_count++;
3503         schedule_work(&adapter->reset_task);
3504 }
3505
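/**
 * e1000_reset_task - reset the adapter from process context
 * @work: work_struct embedded in the adapter private structure
 *
 * Runs from the shared workqueue (scheduled, for example, by
 * e1000_tx_timeout()) so the reset happens outside interrupt context.
 **/
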
3506 static void e1000_reset_task(struct work_struct *work)
3507 {
3508         struct e1000_adapter *adapter =
3509                 container_of(work, struct e1000_adapter, reset_task);
3510
3511         if (test_bit(__E1000_DOWN, &adapter->flags))
3512                 return;
3513         e_err(drv, "Reset adapter\n");
3514         e1000_reinit_safe(adapter);
3515 }
3516
3517 /**
3518  * e1000_get_stats - Get System Network Statistics
3519  * @netdev: network interface device structure
3520  *
3521  * Returns the address of the device statistics structure.
3522  * The statistics are actually updated from the watchdog.
3523  **/
3524
3525 static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3526 {
3527         /* only return the current stats */
3528         return &netdev->stats;
3529 }
3530
3531 /**
3532  * e1000_change_mtu - Change the Maximum Transfer Unit
3533  * @netdev: network interface device structure
3534  * @new_mtu: new value for maximum frame size
3535  *
3536  * Returns 0 on success, negative on failure
3537  **/
3538
3539 static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3540 {
3541         struct e1000_adapter *adapter = netdev_priv(netdev);
3542         struct e1000_hw *hw = &adapter->hw;
3543         int max_frame = new_mtu + ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
3544
3545         if ((max_frame < MINIMUM_ETHERNET_FRAME_SIZE) ||
3546             (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3547                 e_err(probe, "Invalid MTU setting\n");
3548                 return -EINVAL;
3549         }
3550
3551         /* Adapter-specific max frame size limits. */
3552         switch (hw->mac_type) {
3553         case e1000_undefined ... e1000_82542_rev2_1:
3554                 if (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN)) {
3555                         e_err(probe, "Jumbo Frames not supported.\n");
3556                         return -EINVAL;
3557                 }
3558                 break;
3559         default:
3560                 /* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
3561                 break;
3562         }
3563
3564         while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
3565                 msleep(1);
3566         /* e1000_down has a dependency on max_frame_size */
3567         hw->max_frame_size = max_frame;
3568         if (netif_running(netdev))
3569                 e1000_down(adapter);
3570
3571         /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3572          * means we reserve 2 more, this pushes us to allocate from the next
3573          * larger slab size.
3574          * i.e. RXBUFFER_2048 --> size-4096 slab
3575          *  however with the new *_jumbo_rx* routines, jumbo receives will use
3576          *  fragmented skbs */
3577
3578         if (max_frame <= E1000_RXBUFFER_2048)
3579                 adapter->rx_buffer_len = E1000_RXBUFFER_2048;
3580         else
3581 #if (PAGE_SIZE >= E1000_RXBUFFER_16384)
3582                 adapter->rx_buffer_len = E1000_RXBUFFER_16384;
3583 #elif (PAGE_SIZE >= E1000_RXBUFFER_4096)
3584                 adapter->rx_buffer_len = PAGE_SIZE;
3585 #endif
3586
3587         /* adjust allocation if LPE protects us, and we aren't using SBP */
3588         if (!hw->tbi_compatibility_on &&
3589             ((max_frame == (ETH_FRAME_LEN + ETH_FCS_LEN)) ||
3590              (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
3591                 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3592
3593         pr_info("%s changing MTU from %d to %d\n",
3594                 netdev->name, netdev->mtu, new_mtu);
3595         netdev->mtu = new_mtu;
3596
3597         if (netif_running(netdev))
3598                 e1000_up(adapter);
3599         else
3600                 e1000_reset(adapter);
3601
3602         clear_bit(__E1000_RESETTING, &adapter->flags);
3603
3604         return 0;
3605 }
3606
3607 /**
3608  * e1000_update_stats - Update the board statistics counters
3609  * @adapter: board private structure
3610  **/
3611
3612 void e1000_update_stats(struct e1000_adapter *adapter)
3613 {
3614         struct net_device *netdev = adapter->netdev;
3615         struct e1000_hw *hw = &adapter->hw;
3616         struct pci_dev *pdev = adapter->pdev;
3617         unsigned long flags;
3618         u16 phy_tmp;
3619
3620 #define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3621
3622         /*
3623          * Prevent stats update while adapter is being reset, or if the pci
3624          * connection is down.
3625          */
3626         if (adapter->link_speed == 0)
3627                 return;
3628         if (pci_channel_offline(pdev))
3629                 return;
3630
3631         spin_lock_irqsave(&adapter->stats_lock, flags);
3632
3633         /* these counters are modified from e1000_tbi_adjust_stats,
3634          * called from the interrupt context, so they must only
3635          * be written while holding adapter->stats_lock
3636          */
3637
3638         adapter->stats.crcerrs += er32(CRCERRS);
3639         adapter->stats.gprc += er32(GPRC);
3640         adapter->stats.gorcl += er32(GORCL);
3641         adapter->stats.gorch += er32(GORCH);
3642         adapter->stats.bprc += er32(BPRC);
3643         adapter->stats.mprc += er32(MPRC);
3644         adapter->stats.roc += er32(ROC);
3645
3646         adapter->stats.prc64 += er32(PRC64);
3647         adapter->stats.prc127 += er32(PRC127);
3648         adapter->stats.prc255 += er32(PRC255);
3649         adapter->stats.prc511 += er32(PRC511);
3650         adapter->stats.prc1023 += er32(PRC1023);
3651         adapter->stats.prc1522 += er32(PRC1522);
3652
3653         adapter->stats.symerrs += er32(SYMERRS);
3654         adapter->stats.mpc += er32(MPC);
3655         adapter->stats.scc += er32(SCC);
3656         adapter->stats.ecol += er32(ECOL);
3657         adapter->stats.mcc += er32(MCC);
3658         adapter->stats.latecol += er32(LATECOL);
3659         adapter->stats.dc += er32(DC);
3660         adapter->stats.sec += er32(SEC);
3661         adapter->stats.rlec += er32(RLEC);
3662         adapter->stats.xonrxc += er32(XONRXC);
3663         adapter->stats.xontxc += er32(XONTXC);
3664         adapter->stats.xoffrxc += er32(XOFFRXC);
3665         adapter->stats.xofftxc += er32(XOFFTXC);
3666         adapter->stats.fcruc += er32(FCRUC);
3667         adapter->stats.gptc += er32(GPTC);
3668         adapter->stats.gotcl += er32(GOTCL);
3669         adapter->stats.gotch += er32(GOTCH);
3670         adapter->stats.rnbc += er32(RNBC);
3671         adapter->stats.ruc += er32(RUC);
3672         adapter->stats.rfc += er32(RFC);
3673         adapter->stats.rjc += er32(RJC);
3674         adapter->stats.torl += er32(TORL);
3675         adapter->stats.torh += er32(TORH);
3676         adapter->stats.totl += er32(TOTL);
3677         adapter->stats.toth += er32(TOTH);
3678         adapter->stats.tpr += er32(TPR);
3679
3680         adapter->stats.ptc64 += er32(PTC64);
3681         adapter->stats.ptc127 += er32(PTC127);
3682         adapter->stats.ptc255 += er32(PTC255);
3683         adapter->stats.ptc511 += er32(PTC511);
3684         adapter->stats.ptc1023 += er32(PTC1023);
3685         adapter->stats.ptc1522 += er32(PTC1522);
3686
3687         adapter->stats.mptc += er32(MPTC);
3688         adapter->stats.bptc += er32(BPTC);
3689
3690         /* used for adaptive IFS */
3691
3692         hw->tx_packet_delta = er32(TPT);
3693         adapter->stats.tpt += hw->tx_packet_delta;
3694         hw->collision_delta = er32(COLC);
3695         adapter->stats.colc += hw->collision_delta;
3696
3697         if (hw->mac_type >= e1000_82543) {
3698                 adapter->stats.algnerrc += er32(ALGNERRC);
3699                 adapter->stats.rxerrc += er32(RXERRC);
3700                 adapter->stats.tncrs += er32(TNCRS);
3701                 adapter->stats.cexterr += er32(CEXTERR);
3702                 adapter->stats.tsctc += er32(TSCTC);
3703                 adapter->stats.tsctfc += er32(TSCTFC);
3704         }
3705
3706         /* Fill out the OS statistics structure */
3707         netdev->stats.multicast = adapter->stats.mprc;
3708         netdev->stats.collisions = adapter->stats.colc;
3709
3710         /* Rx Errors */
3711
3712         /* RLEC on some newer hardware can be incorrect so build
3713          * our own version based on RUC and ROC */
3714         netdev->stats.rx_errors = adapter->stats.rxerrc +
3715                 adapter->stats.crcerrs + adapter->stats.algnerrc +
3716                 adapter->stats.ruc + adapter->stats.roc +
3717                 adapter->stats.cexterr;
3718         adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3719         netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3720         netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3721         netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3722         netdev->stats.rx_missed_errors = adapter->stats.mpc;
3723
3724         /* Tx Errors */
3725         adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3726         netdev->stats.tx_errors = adapter->stats.txerrc;
3727         netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3728         netdev->stats.tx_window_errors = adapter->stats.latecol;
3729         netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3730         if (hw->bad_tx_carr_stats_fd &&
3731             adapter->link_duplex == FULL_DUPLEX) {
3732                 netdev->stats.tx_carrier_errors = 0;
3733                 adapter->stats.tncrs = 0;
3734         }
3735
3736         /* Tx Dropped needs to be maintained elsewhere */
3737
3738         /* Phy Stats */
3739         if (hw->media_type == e1000_media_type_copper) {
3740                 if ((adapter->link_speed == SPEED_1000) &&
3741                    (!e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_tmp))) {
3742                         phy_tmp &= PHY_IDLE_ERROR_COUNT_MASK;
3743                         adapter->phy_stats.idle_errors += phy_tmp;
3744                 }
3745
3746                 if ((hw->mac_type <= e1000_82546) &&
3747                    (hw->phy_type == e1000_phy_m88) &&
3748                    !e1000_read_phy_reg(hw, M88E1000_RX_ERR_CNTR, &phy_tmp))
3749                         adapter->phy_stats.receive_errors += phy_tmp;
3750         }
3751
3752         /* Management Stats */
3753         if (hw->has_smbus) {
3754                 adapter->stats.mgptc += er32(MGTPTC);
3755                 adapter->stats.mgprc += er32(MGTPRC);
3756                 adapter->stats.mgpdc += er32(MGTPDC);
3757         }
3758
3759         spin_unlock_irqrestore(&adapter->stats_lock, flags);
3760 }
3761
3762 /**
3763  * e1000_intr - Interrupt Handler
3764  * @irq: interrupt number
3765  * @data: pointer to a network interface device structure
3766  **/
3767
3768 static irqreturn_t e1000_intr(int irq, void *data)
3769 {
3770         struct net_device *netdev = data;
3771         struct e1000_adapter *adapter = netdev_priv(netdev);
3772         struct e1000_hw *hw = &adapter->hw;
3773         u32 icr = er32(ICR);
3774
3775         if (unlikely((!icr)))
3776                 return IRQ_NONE;  /* Not our interrupt */
3777
3778         /*
3779          * we might have caused the interrupt, but the above
3780          * read cleared it, and just in case the driver is
3781          * down there is nothing to do so return handled
3782          */
3783         if (unlikely(test_bit(__E1000_DOWN, &adapter->flags)))
3784                 return IRQ_HANDLED;
3785
3786         if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
3787                 hw->get_link_status = 1;
3788                 /* guard against interrupt when we're going down */
3789                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3790                         schedule_delayed_work(&adapter->watchdog_task, 1);
3791         }
3792
3793         /* disable interrupts, without the synchronize_irq bit */
3794         ew32(IMC, ~0);
3795         E1000_WRITE_FLUSH();
3796
3797         if (likely(napi_schedule_prep(&adapter->napi))) {
3798                 adapter->total_tx_bytes = 0;
3799                 adapter->total_tx_packets = 0;
3800                 adapter->total_rx_bytes = 0;
3801                 adapter->total_rx_packets = 0;
3802                 __napi_schedule(&adapter->napi);
3803         } else {
3804                 /* this really should not happen! if it does it is basically a
3805                  * bug, but not a hard error, so enable ints and continue */
3806                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3807                         e1000_irq_enable(adapter);
3808         }
3809
3810         return IRQ_HANDLED;
3811 }
3812
3813 /**
3814  * e1000_clean - NAPI Rx polling callback
3815  * @napi: napi struct embedded in the board private structure
3815  * @budget: maximum number of packets to process in this call
3816  **/
3817 static int e1000_clean(struct napi_struct *napi, int budget)
3818 {
3819         struct e1000_adapter *adapter = container_of(napi, struct e1000_adapter, napi);
3820         int tx_clean_complete = 0, work_done = 0;
3821
3822         tx_clean_complete = e1000_clean_tx_irq(adapter, &adapter->tx_ring[0]);
3823
3824         adapter->clean_rx(adapter, &adapter->rx_ring[0], &work_done, budget);
3825
3826         if (!tx_clean_complete)
3827                 work_done = budget;
3828
3829         /* If budget not fully consumed, exit the polling mode */
3830         if (work_done < budget) {
3831                 if (likely(adapter->itr_setting & 3))
3832                         e1000_set_itr(adapter);
3833                 napi_complete(napi);
3834                 if (!test_bit(__E1000_DOWN, &adapter->flags))
3835                         e1000_irq_enable(adapter);
3836         }
3837
3838         return work_done;
3839 }
3840
3841 /**
3842  * e1000_clean_tx_irq - Reclaim resources after transmit completes
3843  * @adapter: board private structure
3843  * @tx_ring: ring to reclaim descriptors from
3844  **/
3845 static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3846                                struct e1000_tx_ring *tx_ring)
3847 {
3848         struct e1000_hw *hw = &adapter->hw;
3849         struct net_device *netdev = adapter->netdev;
3850         struct e1000_tx_desc *tx_desc, *eop_desc;
3851         struct e1000_buffer *buffer_info;
3852         unsigned int i, eop;
3853         unsigned int count = 0;
3854         unsigned int total_tx_bytes = 0, total_tx_packets = 0;
3855
3856         i = tx_ring->next_to_clean;
3857         eop = tx_ring->buffer_info[i].next_to_watch;
3858         eop_desc = E1000_TX_DESC(*tx_ring, eop);
3859
3860         while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
3861                (count < tx_ring->count)) {
3862                 bool cleaned = false;
3863                 rmb();  /* read buffer_info after eop_desc */
3864                 for ( ; !cleaned; count++) {
3865                         tx_desc = E1000_TX_DESC(*tx_ring, i);
3866                         buffer_info = &tx_ring->buffer_info[i];
3867                         cleaned = (i == eop);
3868
3869                         if (cleaned) {
3870                                 total_tx_packets += buffer_info->segs;
3871                                 total_tx_bytes += buffer_info->bytecount;
3872                         }
3873                         e1000_unmap_and_free_tx_resource(adapter, buffer_info);
3874                         tx_desc->upper.data = 0;
3875
3876                         if (unlikely(++i == tx_ring->count))
3876                                 i = 0;
3877                 }
3878
3879                 eop = tx_ring->buffer_info[i].next_to_watch;
3880                 eop_desc = E1000_TX_DESC(*tx_ring, eop);
3881         }
3882
3883         tx_ring->next_to_clean = i;
3884
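/* Only wake the queue once a reasonable number of descriptors is free
 * again; this hysteresis avoids toggling the queue state for every
 * single reclaimed descriptor.
 */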
3885 #define TX_WAKE_THRESHOLD 32
3886         if (unlikely(count && netif_carrier_ok(netdev) &&
3887                      E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
3888                 /* Make sure that anybody stopping the queue after this
3889                  * sees the new next_to_clean.
3890                  */
3891                 smp_mb();
3892
3893                 if (netif_queue_stopped(netdev) &&
3894                     !(test_bit(__E1000_DOWN, &adapter->flags))) {
3895                         netif_wake_queue(netdev);
3896                         ++adapter->restart_queue;
3897                 }
3898         }
3899
3900         if (adapter->detect_tx_hung) {
3901                 /* Detect a transmit hang in hardware, this serializes the
3902                  * check with the clearing of time_stamp and movement of i */
3903                 adapter->detect_tx_hung = false;
3904                 if (tx_ring->buffer_info[eop].time_stamp &&
3905                     time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
3906                                (adapter->tx_timeout_factor * HZ)) &&
3907                     !(er32(STATUS) & E1000_STATUS_TXOFF)) {
3908
3909                         /* detected Tx unit hang */
3910                         e_err(drv, "Detected Tx Unit Hang\n"
3911                               "  Tx Queue             <%lu>\n"
3912                               "  TDH                  <%x>\n"
3913                               "  TDT                  <%x>\n"
3914                               "  next_to_use          <%x>\n"
3915                               "  next_to_clean        <%x>\n"
3916                               "buffer_info[next_to_clean]\n"
3917                               "  time_stamp           <%lx>\n"
3918                               "  next_to_watch        <%x>\n"
3919                               "  jiffies              <%lx>\n"
3920                               "  next_to_watch.status <%x>\n",
3921                                 (unsigned long)(tx_ring - adapter->tx_ring),
3923                                 readl(hw->hw_addr + tx_ring->tdh),
3924                                 readl(hw->hw_addr + tx_ring->tdt),
3925                                 tx_ring->next_to_use,
3926                                 tx_ring->next_to_clean,
3927                                 tx_ring->buffer_info[eop].time_stamp,
3928                                 eop,
3929                                 jiffies,
3930                                 eop_desc->upper.fields.status);
3931                         e1000_dump(adapter);
3932                         netif_stop_queue(netdev);
3933                 }
3934         }
3935         adapter->total_tx_bytes += total_tx_bytes;
3936         adapter->total_tx_packets += total_tx_packets;
3937         netdev->stats.tx_bytes += total_tx_bytes;
3938         netdev->stats.tx_packets += total_tx_packets;
3939         return count < tx_ring->count;
3940 }
3941
3942 /**
3943  * e1000_rx_checksum - Receive Checksum Offload for 82543
3944  * @adapter:     board private structure
3945  * @status_err:  receive descriptor status and error fields
3946  * @csum:        receive descriptor csum field
3947  * @skb:         socket buffer with received data
3948  **/
3949
3950 static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
3951                               u32 csum, struct sk_buff *skb)
3952 {
3953         struct e1000_hw *hw = &adapter->hw;
3954         u16 status = (u16)status_err;
3955         u8 errors = (u8)(status_err >> 24);
3956
3957         skb_checksum_none_assert(skb);
3958
3959         /* 82543 or newer only */
3960         if (unlikely(hw->mac_type < e1000_82543))
3960                 return;
3961         /* Ignore Checksum bit is set */
3962         if (unlikely(status & E1000_RXD_STAT_IXSM))
3962                 return;
3963         /* TCP/UDP checksum error bit is set */
3964         if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
3965                 /* let the stack verify checksum errors */
3966                 adapter->hw_csum_err++;
3967                 return;
3968         }
3969         /* TCP/UDP Checksum has not been calculated */
3970         if (!(status & E1000_RXD_STAT_TCPCS))
3971                 return;
3972
3973         /* It must be a TCP or UDP packet with a valid checksum */
3974         if (likely(status & E1000_RXD_STAT_TCPCS)) {
3975                 /* TCP checksum is good */
3976                 skb->ip_summed = CHECKSUM_UNNECESSARY;
3977         }
3978         adapter->hw_csum_good++;
3979 }
3980
3981 /**
3982  * e1000_consume_page - account a fully-consumed rx page's bytes to the skb
3983  **/
3984 static void e1000_consume_page(struct e1000_buffer *bi, struct sk_buff *skb,
3985                                u16 length)
3986 {
3987         bi->page = NULL;
3988         skb->len += length;
3989         skb->data_len += length;
3990         skb->truesize += PAGE_SIZE;
3991 }
3992
3993 /**
3994  * e1000_receive_skb - helper function to handle rx indications
3995  * @adapter: board private structure
3996  * @status: descriptor status field as written by hardware
3997  * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
3998  * @skb: pointer to sk_buff to be indicated to stack
3999  */
4000 static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
4001                               __le16 vlan, struct sk_buff *skb)
4002 {
4003         skb->protocol = eth_type_trans(skb, adapter->netdev);
4004
4005         if (status & E1000_RXD_STAT_VP) {
4006                 u16 vid = le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK;
4007
4008                 __vlan_hwaccel_put_tag(skb, vid);
4009         }
4010         napi_gro_receive(&adapter->napi, skb);
4011 }
4012
4013 /**
4014  * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
4015  * @adapter: board private structure
4016  * @rx_ring: ring to clean
4017  * @work_done: amount of napi work completed this call
4018  * @work_to_do: max amount of work allowed for this call to do
4019  *
4020  * the return value indicates whether actual cleaning was done, there
4021  * is no guarantee that everything was cleaned
4022  */
4023 static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4024                                      struct e1000_rx_ring *rx_ring,
4025                                      int *work_done, int work_to_do)
4026 {
4027         struct e1000_hw *hw = &adapter->hw;
4028         struct net_device *netdev = adapter->netdev;
4029         struct pci_dev *pdev = adapter->pdev;
4030         struct e1000_rx_desc *rx_desc, *next_rxd;
4031         struct e1000_buffer *buffer_info, *next_buffer;
4032         unsigned long irq_flags;
4033         u32 length;
4034         unsigned int i;
4035         int cleaned_count = 0;
4036         bool cleaned = false;
4037         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4038
4039         i = rx_ring->next_to_clean;
4040         rx_desc = E1000_RX_DESC(*rx_ring, i);
4041         buffer_info = &rx_ring->buffer_info[i];
4042
4043         while (rx_desc->status & E1000_RXD_STAT_DD) {
4044                 struct sk_buff *skb;
4045                 u8 status;
4046
4047                 if (*work_done >= work_to_do)
4048                         break;
4049                 (*work_done)++;
4050                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4051
4052                 status = rx_desc->status;
4053                 skb = buffer_info->skb;
4054                 buffer_info->skb = NULL;
4055
4056                 if (++i == rx_ring->count)
4056                         i = 0;
4057                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4058                 prefetch(next_rxd);
4059
4060                 next_buffer = &rx_ring->buffer_info[i];
4061
4062                 cleaned = true;
4063                 cleaned_count++;
4064                 dma_unmap_page(&pdev->dev, buffer_info->dma,
4065                                buffer_info->length, DMA_FROM_DEVICE);
4066                 buffer_info->dma = 0;
4067
4068                 length = le16_to_cpu(rx_desc->length);
4069
4070                 /* errors is only valid for DD + EOP descriptors */
4071                 if (unlikely((status & E1000_RXD_STAT_EOP) &&
4072                     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
4073                         u8 *mapped;
4074                         u8 last_byte;
4075
4076                         mapped = page_address(buffer_info->page);
4077                         last_byte = *(mapped + length - 1);
4078                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4079                                        last_byte)) {
4080                                 spin_lock_irqsave(&adapter->stats_lock,
4081                                                   irq_flags);
4082                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4083                                                        length, mapped);
4084                                 spin_unlock_irqrestore(&adapter->stats_lock,
4085                                                        irq_flags);
4086                                 length--;
4087                         } else {
4088                                 if (netdev->features & NETIF_F_RXALL)
4089                                         goto process_skb;
4090                                 /* recycle both page and skb */
4091                                 buffer_info->skb = skb;
4092                                 /* an error means any chain goes out the window
4093                                  * too */
4094                                 if (rx_ring->rx_skb_top)
4095                                         dev_kfree_skb(rx_ring->rx_skb_top);
4096                                 rx_ring->rx_skb_top = NULL;
4097                                 goto next_desc;
4098                         }
4099                 }
4100
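/* Shorthand for the per-ring pointer that carries an in-progress
 * multi-descriptor (jumbo) receive while its page fragments are being
 * chained together below.
 */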
4101 #define rxtop rx_ring->rx_skb_top
4102 process_skb:
4103                 if (!(status & E1000_RXD_STAT_EOP)) {
4104                         /* this descriptor is only the beginning (or middle) */
4105                         if (!rxtop) {
4106                                 /* this is the beginning of a chain */
4107                                 rxtop = skb;
4108                                 skb_fill_page_desc(rxtop, 0, buffer_info->page,
4109                                                    0, length);
4110                         } else {
4111                                 /* this is the middle of a chain */
4112                                 skb_fill_page_desc(rxtop,
4113                                     skb_shinfo(rxtop)->nr_frags,
4114                                     buffer_info->page, 0, length);
4115                                 /* re-use the skb, only consumed the page */
4116                                 buffer_info->skb = skb;
4117                         }
4118                         e1000_consume_page(buffer_info, rxtop, length);
4119                         goto next_desc;
4120                 } else {
4121                         if (rxtop) {
4122                                 /* end of the chain */
4123                                 skb_fill_page_desc(rxtop,
4124                                     skb_shinfo(rxtop)->nr_frags,
4125                                     buffer_info->page, 0, length);
4126                                 /* re-use the current skb, we only consumed the
4127                                  * page */
4128                                 buffer_info->skb = skb;
4129                                 skb = rxtop;
4130                                 rxtop = NULL;
4131                                 e1000_consume_page(buffer_info, skb, length);
4132                         } else {
4133                                 /* no chain, got EOP: this buf is the whole packet;
4134                                  * copy small packets (copybreak) to save the put_page/alloc_page */
4135                                 if (length <= copybreak &&
4136                                     skb_tailroom(skb) >= length) {
4137                                         u8 *vaddr;
4138                                         vaddr = kmap_atomic(buffer_info->page);
4139                                         memcpy(skb_tail_pointer(skb), vaddr, length);
4140                                         kunmap_atomic(vaddr);
4141                                         /* re-use the page, so don't erase
4142                                          * buffer_info->page */
4143                                         skb_put(skb, length);
4144                                 } else {
4145                                         skb_fill_page_desc(skb, 0,
4146                                                            buffer_info->page, 0,
4147                                                            length);
4148                                         e1000_consume_page(buffer_info, skb,
4149                                                            length);
4150                                 }
4151                         }
4152                 }
4153
4154                 /* Receive Checksum Offload XXX recompute due to CRC strip? */
4155                 e1000_rx_checksum(adapter,
4156                                   (u32)(status) |
4157                                   ((u32)(rx_desc->errors) << 24),
4158                                   le16_to_cpu(rx_desc->csum), skb);
4159
4160                 total_rx_bytes += (skb->len - 4); /* don't count FCS */
4161                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4162                         pskb_trim(skb, skb->len - 4);
4163                 total_rx_packets++;
4164
4165                 /* eth type trans needs skb->data to point to something */
4166                 if (!pskb_may_pull(skb, ETH_HLEN)) {
4167                         e_err(drv, "pskb_may_pull failed.\n");
4168                         dev_kfree_skb(skb);
4169                         goto next_desc;
4170                 }
4171
4172                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4173
4174 next_desc:
4175                 rx_desc->status = 0;
4176
4177                 /* return some buffers to hardware, one at a time is too slow */
4178                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4179                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4180                         cleaned_count = 0;
4181                 }
4182
4183                 /* use prefetched values */
4184                 rx_desc = next_rxd;
4185                 buffer_info = next_buffer;
4186         }
4187         rx_ring->next_to_clean = i;
4188
4189         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4190         if (cleaned_count)
4191                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4192
4193         adapter->total_rx_packets += total_rx_packets;
4194         adapter->total_rx_bytes += total_rx_bytes;
4195         netdev->stats.rx_bytes += total_rx_bytes;
4196         netdev->stats.rx_packets += total_rx_packets;
4197         return cleaned;
4198 }
4199
4200 /*
4201  * copy small received packets into a new skb and recycle the original buffer;
4202  * this should help when the stack is doing lots of small-packet reassembly
4203  */
4204 static void e1000_check_copybreak(struct net_device *netdev,
4205                                  struct e1000_buffer *buffer_info,
4206                                  u32 length, struct sk_buff **skb)
4207 {
4208         struct sk_buff *new_skb;
4209
4210         if (length > copybreak)
4211                 return;
4212
4213         new_skb = netdev_alloc_skb_ip_align(netdev, length);
4214         if (!new_skb)
4215                 return;
4216
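             /* copy starting NET_IP_ALIGN bytes ahead of the data so the
              * source stays aligned; the new skb was allocated with the same
              * amount of headroom padding */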
4217         skb_copy_to_linear_data_offset(new_skb, -NET_IP_ALIGN,
4218                                        (*skb)->data - NET_IP_ALIGN,
4219                                        length + NET_IP_ALIGN);
4220         /* save the original skb in buffer_info so it can be recycled */
4221         buffer_info->skb = *skb;
4222         *skb = new_skb;
4223 }
4224
4225 /**
4226  * e1000_clean_rx_irq - Send received data up the network stack; legacy
4227  * @adapter: board private structure
4228  * @rx_ring: ring to clean
4229  * @work_done: amount of napi work completed this call
4230  * @work_to_do: max amount of work allowed for this call to do
4231  */
4232 static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
4233                                struct e1000_rx_ring *rx_ring,
4234                                int *work_done, int work_to_do)
4235 {
4236         struct e1000_hw *hw = &adapter->hw;
4237         struct net_device *netdev = adapter->netdev;
4238         struct pci_dev *pdev = adapter->pdev;
4239         struct e1000_rx_desc *rx_desc, *next_rxd;
4240         struct e1000_buffer *buffer_info, *next_buffer;
4241         unsigned long flags;
4242         u32 length;
4243         unsigned int i;
4244         int cleaned_count = 0;
4245         bool cleaned = false;
4246         unsigned int total_rx_bytes = 0, total_rx_packets = 0;
4247
4248         i = rx_ring->next_to_clean;
4249         rx_desc = E1000_RX_DESC(*rx_ring, i);
4250         buffer_info = &rx_ring->buffer_info[i];
4251
4252         while (rx_desc->status & E1000_RXD_STAT_DD) {
4253                 struct sk_buff *skb;
4254                 u8 status;
4255
4256                 if (*work_done >= work_to_do)
4257                         break;
4258                 (*work_done)++;
4259                 rmb(); /* read descriptor and rx_buffer_info after status DD */
4260
4261                 status = rx_desc->status;
4262                 skb = buffer_info->skb;
4263                 buffer_info->skb = NULL;
4264
4265                 prefetch(skb->data - NET_IP_ALIGN);
4266
4267                 if (++i == rx_ring->count) i = 0;
4268                 next_rxd = E1000_RX_DESC(*rx_ring, i);
4269                 prefetch(next_rxd);
4270
4271                 next_buffer = &rx_ring->buffer_info[i];
4272
4273                 cleaned = true;
4274                 cleaned_count++;
4275                 dma_unmap_single(&pdev->dev, buffer_info->dma,
4276                                  buffer_info->length, DMA_FROM_DEVICE);
4277                 buffer_info->dma = 0;
4278
4279                 length = le16_to_cpu(rx_desc->length);
4280                 /* !EOP means multiple descriptors were used to store a single
4281                  * packet; if that's the case we need to toss it.  In fact, we
4282                  * need to toss every packet with the EOP bit clear and the next
4283                  * frame that _does_ have the EOP bit set, as it is by
4284                  * definition only a frame fragment
4285                  */
4286                 if (unlikely(!(status & E1000_RXD_STAT_EOP)))
4287                         adapter->discarding = true;
4288
4289                 if (adapter->discarding) {
4290                         /* All receives must fit into a single buffer */
4291                         e_dbg("Receive packet consumed multiple buffers\n");
4292                         /* recycle */
4293                         buffer_info->skb = skb;
4294                         if (status & E1000_RXD_STAT_EOP)
4295                                 adapter->discarding = false;
4296                         goto next_desc;
4297                 }
4298
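                     /* frame error: the TBI compatibility workaround may still
                      * accept the frame (the last byte is then dropped);
                      * otherwise recycle the buffer unless RXALL is set */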
4299                 if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
4300                         u8 last_byte = *(skb->data + length - 1);
4301                         if (TBI_ACCEPT(hw, status, rx_desc->errors, length,
4302                                        last_byte)) {
4303                                 spin_lock_irqsave(&adapter->stats_lock, flags);
4304                                 e1000_tbi_adjust_stats(hw, &adapter->stats,
4305                                                        length, skb->data);
4306                                 spin_unlock_irqrestore(&adapter->stats_lock,
4307                                                        flags);
4308                                 length--;
4309                         } else {
4310                                 if (netdev->features & NETIF_F_RXALL)
4311                                         goto process_skb;
4312                                 /* recycle */
4313                                 buffer_info->skb = skb;
4314                                 goto next_desc;
4315                         }
4316                 }
4317
4318 process_skb:
4319                 total_rx_bytes += (length - 4); /* don't count FCS */
4320                 total_rx_packets++;
4321
4322                 if (likely(!(netdev->features & NETIF_F_RXFCS)))
4323                         /* adjust length to remove Ethernet CRC, this must be
4324                          * done after the TBI_ACCEPT workaround above
4325                          */
4326                         length -= 4;
4327
4328                 e1000_check_copybreak(netdev, buffer_info, length, &skb);
4329
4330                 skb_put(skb, length);
4331
4332                 /* Receive Checksum Offload */
4333                 e1000_rx_checksum(adapter,
4334                                   (u32)(status) |
4335                                   ((u32)(rx_desc->errors) << 24),
4336                                   le16_to_cpu(rx_desc->csum), skb);
4337
4338                 e1000_receive_skb(adapter, status, rx_desc->special, skb);
4339
4340 next_desc:
4341                 rx_desc->status = 0;
4342
4343                 /* return some buffers to hardware, one at a time is too slow */
4344                 if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
4345                         adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4346                         cleaned_count = 0;
4347                 }
4348
4349                 /* use prefetched values */
4350                 rx_desc = next_rxd;
4351                 buffer_info = next_buffer;
4352         }
4353         rx_ring->next_to_clean = i;
4354
4355         cleaned_count = E1000_DESC_UNUSED(rx_ring);
4356         if (cleaned_count)
4357                 adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
4358
4359         adapter->total_rx_packets += total_rx_packets;
4360         adapter->total_rx_bytes += total_rx_bytes;
4361         netdev->stats.rx_bytes += total_rx_bytes;
4362         netdev->stats.rx_packets += total_rx_packets;
4363         return cleaned;
4364 }
4365
4366 /**
4367  * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
4368  * @adapter: address of board private structure
4369  * @rx_ring: pointer to receive ring structure
4370  * @cleaned_count: number of buffers to allocate this pass
4371  **/
4372
4373 static void
4374 e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
4375                              struct e1000_rx_ring *rx_ring, int cleaned_count)
4376 {
4377         struct net_device *netdev = adapter->netdev;
4378         struct pci_dev *pdev = adapter->pdev;
4379         struct e1000_rx_desc *rx_desc;
4380         struct e1000_buffer *buffer_info;
4381         struct sk_buff *skb;
4382         unsigned int i;
4383         unsigned int bufsz = 256 - 16; /* room for skb_reserve */
4384
4385         i = rx_ring->next_to_use;
4386         buffer_info = &rx_ring->buffer_info[i];
4387
4388         while (cleaned_count--) {
4389                 skb = buffer_info->skb;
4390                 if (skb) {
4391                         skb_trim(skb, 0);
4392                         goto check_page;
4393                 }
4394
4395                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4396                 if (unlikely(!skb)) {
4397                         /* Better luck next round */
4398                         adapter->alloc_rx_buff_failed++;
4399                         break;
4400                 }
4401
4402                 buffer_info->skb = skb;
4403                 buffer_info->length = adapter->rx_buffer_len;
4404 check_page:
4405                 /* allocate a new page if necessary */
4406                 if (!buffer_info->page) {
4407                         buffer_info->page = alloc_page(GFP_ATOMIC);
4408                         if (unlikely(!buffer_info->page)) {
4409                                 adapter->alloc_rx_buff_failed++;
4410                                 break;
4411                         }
4412                 }
4413
4414                 if (!buffer_info->dma) {
4415                         buffer_info->dma = dma_map_page(&pdev->dev,
4416                                                         buffer_info->page, 0,
4417                                                         buffer_info->length,
4418                                                         DMA_FROM_DEVICE);
4419                         if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4420                                 put_page(buffer_info->page);
4421                                 dev_kfree_skb(skb);
4422                                 buffer_info->page = NULL;
4423                                 buffer_info->skb = NULL;
4424                                 buffer_info->dma = 0;
4425                                 adapter->alloc_rx_buff_failed++;
4426                                 break; /* while !buffer_info->skb */
4427                         }
4428                 }
4429
4430                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4431                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4432
4433                 if (unlikely(++i == rx_ring->count))
4434                         i = 0;
4435                 buffer_info = &rx_ring->buffer_info[i];
4436         }
4437
4438         if (likely(rx_ring->next_to_use != i)) {
4439                 rx_ring->next_to_use = i;
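                     /* i points one past the last descriptor we initialized;
                      * back up so the value written to the tail register is the
                      * index of the last entry actually filled in */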
4440                 if (unlikely(i-- == 0))
4441                         i = (rx_ring->count - 1);
4442
4443                 /* Force memory writes to complete before letting h/w
4444                  * know there are new descriptors to fetch.  (Only
4445                  * applicable for weak-ordered memory model archs,
4446                  * such as IA-64). */
4447                 wmb();
4448                 writel(i, adapter->hw.hw_addr + rx_ring->rdt);
4449         }
4450 }
4451
4452 /**
4453  * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
4454  * @adapter: address of board private structure
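      * @rx_ring: pointer to receive ring structure
      * @cleaned_count: number of buffers to allocate this pass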
4455  **/
4456
4457 static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4458                                    struct e1000_rx_ring *rx_ring,
4459                                    int cleaned_count)
4460 {
4461         struct e1000_hw *hw = &adapter->hw;
4462         struct net_device *netdev = adapter->netdev;
4463         struct pci_dev *pdev = adapter->pdev;
4464         struct e1000_rx_desc *rx_desc;
4465         struct e1000_buffer *buffer_info;
4466         struct sk_buff *skb;
4467         unsigned int i;
4468         unsigned int bufsz = adapter->rx_buffer_len;
4469
4470         i = rx_ring->next_to_use;
4471         buffer_info = &rx_ring->buffer_info[i];
4472
4473         while (cleaned_count--) {
4474                 skb = buffer_info->skb;
4475                 if (skb) {
4476                         skb_trim(skb, 0);
4477                         goto map_skb;
4478                 }
4479
4480                 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4481                 if (unlikely(!skb)) {
4482                         /* Better luck next round */
4483                         adapter->alloc_rx_buff_failed++;
4484                         break;
4485                 }
4486
4487                 /* Fix for errata 23, can't cross 64kB boundary */
4488                 if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4489                         struct sk_buff *oldskb = skb;
4490                         e_err(rx_err, "skb align check failed: %u bytes at "
4491                               "%p\n", bufsz, skb->data);
4492                         /* Try again, without freeing the previous */
4493                         skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4494                         /* Failed allocation, critical failure */
4495                         if (!skb) {
4496                                 dev_kfree_skb(oldskb);
4497                                 adapter->alloc_rx_buff_failed++;
4498                                 break;
4499                         }
4500
4501                         if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
4502                                 /* give up */
4503                                 dev_kfree_skb(skb);
4504                                 dev_kfree_skb(oldskb);
4505                                 adapter->alloc_rx_buff_failed++;
4506                                 break; /* while !buffer_info->skb */
4507                         }
4508
4509                         /* Use new allocation */
4510                         dev_kfree_skb(oldskb);
4511                 }
4512                 buffer_info->skb = skb;
4513                 buffer_info->length = adapter->rx_buffer_len;
4514 map_skb:
4515                 buffer_info->dma = dma_map_single(&pdev->dev,
4516                                                   skb->data,
4517                                                   buffer_info->length,
4518                                                   DMA_FROM_DEVICE);
4519                 if (dma_mapping_error(&pdev->dev, buffer_info->dma)) {
4520                         dev_kfree_skb(skb);
4521                         buffer_info->skb = NULL;
4522                         buffer_info->dma = 0;
4523                         adapter->alloc_rx_buff_failed++;
4524                         break; /* while !buffer_info->skb */
4525                 }
4526
4527                 /*
4528                  * XXX if it was allocated cleanly it will never map to a
4529                  * boundary crossing
4530                  */
4531
4532                 /* Fix for errata 23, can't cross 64kB boundary */
4533                 if (!e1000_check_64k_bound(adapter,
4534                                         (void *)(unsigned long)buffer_info->dma,
4535                                         adapter->rx_buffer_len)) {
4536                         e_err(rx_err, "dma align check failed: %u bytes at "
4537                               "%p\n", adapter->rx_buffer_len,
4538                               (void *)(unsigned long)buffer_info->dma);
4539                         dev_kfree_skb(skb);
4540                         buffer_info->skb = NULL;
4541
4542                         dma_unmap_single(&pdev->dev, buffer_info->dma,
4543                                          adapter->rx_buffer_len,
4544                                          DMA_FROM_DEVICE);
4545                         buffer_info->dma = 0;
4546
4547                         adapter->alloc_rx_buff_failed++;
4548                         break; /* while !buffer_info->skb */
4549                 }
4550                 rx_desc = E1000_RX_DESC(*rx_ring, i);
4551                 rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
4552
4553                 if (unlikely(++i == rx_ring->count))
4554                         i = 0;
4555                 buffer_info = &rx_ring->buffer_info[i];
4556         }
4557
4558         if (likely(rx_ring->next_to_use != i)) {
4559                 rx_ring->next_to_use = i;
4560                 if (unlikely(i-- == 0))
4561                         i = (rx_ring->count - 1);
4562
4563                 /* Force memory writes to complete before letting h/w
4564                  * know there are new descriptors to fetch.  (Only
4565                  * applicable for weak-ordered memory model archs,
4566                  * such as IA-64). */
4567                 wmb();
4568                 writel(i, hw->hw_addr + rx_ring->rdt);
4569         }
4570 }
4571
4572 /**
4573  * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
4574  * @adapter: board private structure
4575  **/
4576
4577 static void e1000_smartspeed(struct e1000_adapter *adapter)
4578 {
4579         struct e1000_hw *hw = &adapter->hw;
4580         u16 phy_status;
4581         u16 phy_ctrl;
4582
4583         if ((hw->phy_type != e1000_phy_igp) || !hw->autoneg ||
4584            !(hw->autoneg_advertised & ADVERTISE_1000_FULL))
4585                 return;
4586
4587         if (adapter->smartspeed == 0) {
4588                 /* Only treat a Master/Slave config fault as real if it is
4589                  * asserted on two back-to-back reads */
4590                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4591                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4592                 e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_status);
4593                 if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
4594                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4595                 if (phy_ctrl & CR_1000T_MS_ENABLE) {
4596                         phy_ctrl &= ~CR_1000T_MS_ENABLE;
4597                         e1000_write_phy_reg(hw, PHY_1000T_CTRL,
4598                                             phy_ctrl);
4599                         adapter->smartspeed++;
4600                         if (!e1000_phy_setup_autoneg(hw) &&
4601                            !e1000_read_phy_reg(hw, PHY_CTRL,
4602                                                &phy_ctrl)) {
4603                                 phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4604                                              MII_CR_RESTART_AUTO_NEG);
4605                                 e1000_write_phy_reg(hw, PHY_CTRL,
4606                                                     phy_ctrl);
4607                         }
4608                 }
4609                 return;
4610         } else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
4611                 /* If still no link, perhaps using 2/3 pair cable */
4612                 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_ctrl);
4613                 phy_ctrl |= CR_1000T_MS_ENABLE;
4614                 e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_ctrl);
4615                 if (!e1000_phy_setup_autoneg(hw) &&
4616                    !e1000_read_phy_reg(hw, PHY_CTRL, &phy_ctrl)) {
4617                         phy_ctrl |= (MII_CR_AUTO_NEG_EN |
4618                                      MII_CR_RESTART_AUTO_NEG);
4619                         e1000_write_phy_reg(hw, PHY_CTRL, phy_ctrl);
4620                 }
4621         }
4622         /* Restart process after E1000_SMARTSPEED_MAX iterations */
4623         if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
4624                 adapter->smartspeed = 0;
4625 }
4626
4627 /**
4628  * e1000_ioctl - entry point for ioctl calls
4629  * @netdev: network interface device structure
4630  * @ifr: interface request structure
4631  * @cmd: ioctl command
4632  **/
4633
4634 static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
4635 {
4636         switch (cmd) {
4637         case SIOCGMIIPHY:
4638         case SIOCGMIIREG:
4639         case SIOCSMIIREG:
4640                 return e1000_mii_ioctl(netdev, ifr, cmd);
4641         default:
4642                 return -EOPNOTSUPP;
4643         }
4644 }
4645
4646 /**
4647  * e1000_mii_ioctl - handle MII related ioctls
4648  * @netdev: network interface device structure
4649  * @ifr: interface request structure
4650  * @cmd: ioctl command (SIOCGMIIPHY, SIOCGMIIREG or SIOCSMIIREG)
4651  **/
4652
4653 static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
4654                            int cmd)
4655 {
4656         struct e1000_adapter *adapter = netdev_priv(netdev);
4657         struct e1000_hw *hw = &adapter->hw;
4658         struct mii_ioctl_data *data = if_mii(ifr);
4659         int retval;
4660         u16 mii_reg;
4661         unsigned long flags;
4662
4663         if (hw->media_type != e1000_media_type_copper)
4664                 return -EOPNOTSUPP;
4665
4666         switch (cmd) {
4667         case SIOCGMIIPHY:
4668                 data->phy_id = hw->phy_addr;
4669                 break;
4670         case SIOCGMIIREG:
4671                 spin_lock_irqsave(&adapter->stats_lock, flags);
4672                 if (e1000_read_phy_reg(hw, data->reg_num & 0x1F,
4673                                    &data->val_out)) {
4674                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4675                         return -EIO;
4676                 }
4677                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4678                 break;
4679         case SIOCSMIIREG:
4680                 if (data->reg_num & ~(0x1F))
4681                         return -EFAULT;
4682                 mii_reg = data->val_in;
4683                 spin_lock_irqsave(&adapter->stats_lock, flags);
4684                 if (e1000_write_phy_reg(hw, data->reg_num,
4685                                         mii_reg)) {
4686                         spin_unlock_irqrestore(&adapter->stats_lock, flags);
4687                         return -EIO;
4688                 }
4689                 spin_unlock_irqrestore(&adapter->stats_lock, flags);
4690                 if (hw->media_type == e1000_media_type_copper) {
4691                         switch (data->reg_num) {
4692                         case PHY_CTRL:
4693                                 if (mii_reg & MII_CR_POWER_DOWN)
4694                                         break;
4695                                 if (mii_reg & MII_CR_AUTO_NEG_EN) {
4696                                         hw->autoneg = 1;
4697                                         hw->autoneg_advertised = 0x2F;
4698                                 } else {
4699                                         u32 speed;
4700                                         if (mii_reg & 0x40)
4701                                                 speed = SPEED_1000;
4702                                         else if (mii_reg & 0x2000)
4703                                                 speed = SPEED_100;
4704                                         else
4705                                                 speed = SPEED_10;
4706                                         retval = e1000_set_spd_dplx(
4707                                                 adapter, speed,
4708                                                 ((mii_reg & 0x100)
4709                                                  ? DUPLEX_FULL :
4710                                                  DUPLEX_HALF));
4711                                         if (retval)
4712                                                 return retval;
4713                                 }
4714                                 if (netif_running(adapter->netdev))
4715                                         e1000_reinit_locked(adapter);
4716                                 else
4717                                         e1000_reset(adapter);
4718                                 break;
4719                         case M88E1000_PHY_SPEC_CTRL:
4720                         case M88E1000_EXT_PHY_SPEC_CTRL:
4721                                 if (e1000_phy_reset(hw))
4722                                         return -EIO;
4723                                 break;
4724                         }
4725                 } else {
4726                         switch (data->reg_num) {
4727                         case PHY_CTRL:
4728                                 if (mii_reg & MII_CR_POWER_DOWN)
4729                                         break;
4730                                 if (netif_running(adapter->netdev))
4731                                         e1000_reinit_locked(adapter);
4732                                 else
4733                                         e1000_reset(adapter);
4734                                 break;
4735                         }
4736                 }
4737                 break;
4738         default:
4739                 return -EOPNOTSUPP;
4740         }
4741         return E1000_SUCCESS;
4742 }
4743
4744 void e1000_pci_set_mwi(struct e1000_hw *hw)
4745 {
4746         struct e1000_adapter *adapter = hw->back;
4747         int ret_val = pci_set_mwi(adapter->pdev);
4748
4749         if (ret_val)
4750                 e_err(probe, "Error in setting MWI\n");
4751 }
4752
4753 void e1000_pci_clear_mwi(struct e1000_hw *hw)
4754 {
4755         struct e1000_adapter *adapter = hw->back;
4756
4757         pci_clear_mwi(adapter->pdev);
4758 }
4759
4760 int e1000_pcix_get_mmrbc(struct e1000_hw *hw)
4761 {
4762         struct e1000_adapter *adapter = hw->back;
4763         return pcix_get_mmrbc(adapter->pdev);
4764 }
4765
4766 void e1000_pcix_set_mmrbc(struct e1000_hw *hw, int mmrbc)
4767 {
4768         struct e1000_adapter *adapter = hw->back;
4769         pcix_set_mmrbc(adapter->pdev, mmrbc);
4770 }
4771
4772 void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value)
4773 {
4774         outl(value, port);
4775 }
4776
4777 static bool e1000_vlan_used(struct e1000_adapter *adapter)
4778 {
4779         u16 vid;
4780
4781         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4782                 return true;
4783         return false;
4784 }
4785
4786 static void __e1000_vlan_mode(struct e1000_adapter *adapter,
4787                               netdev_features_t features)
4788 {
4789         struct e1000_hw *hw = &adapter->hw;
4790         u32 ctrl;
4791
4792         ctrl = er32(CTRL);
4793         if (features & NETIF_F_HW_VLAN_RX) {
4794                 /* enable VLAN tag insert/strip */
4795                 ctrl |= E1000_CTRL_VME;
4796         } else {
4797                 /* disable VLAN tag insert/strip */
4798                 ctrl &= ~E1000_CTRL_VME;
4799         }
4800         ew32(CTRL, ctrl);
4801 }
4802 static void e1000_vlan_filter_on_off(struct e1000_adapter *adapter,
4803                                      bool filter_on)
4804 {
4805         struct e1000_hw *hw = &adapter->hw;
4806         u32 rctl;
4807
4808         if (!test_bit(__E1000_DOWN, &adapter->flags))
4809                 e1000_irq_disable(adapter);
4810
4811         __e1000_vlan_mode(adapter, adapter->netdev->features);
4812         if (filter_on) {
4813                 /* enable VLAN receive filtering */
4814                 rctl = er32(RCTL);
4815                 rctl &= ~E1000_RCTL_CFIEN;
4816                 if (!(adapter->netdev->flags & IFF_PROMISC))
4817                         rctl |= E1000_RCTL_VFE;
4818                 ew32(RCTL, rctl);
4819                 e1000_update_mng_vlan(adapter);
4820         } else {
4821                 /* disable VLAN receive filtering */
4822                 rctl = er32(RCTL);
4823                 rctl &= ~E1000_RCTL_VFE;
4824                 ew32(RCTL, rctl);
4825         }
4826
4827         if (!test_bit(__E1000_DOWN, &adapter->flags))
4828                 e1000_irq_enable(adapter);
4829 }
4830
4831 static void e1000_vlan_mode(struct net_device *netdev,
4832                             netdev_features_t features)
4833 {
4834         struct e1000_adapter *adapter = netdev_priv(netdev);
4835
4836         if (!test_bit(__E1000_DOWN, &adapter->flags))
4837                 e1000_irq_disable(adapter);
4838
4839         __e1000_vlan_mode(adapter, features);
4840
4841         if (!test_bit(__E1000_DOWN, &adapter->flags))
4842                 e1000_irq_enable(adapter);
4843 }
4844
4845 static int e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
4846 {
4847         struct e1000_adapter *adapter = netdev_priv(netdev);
4848         struct e1000_hw *hw = &adapter->hw;
4849         u32 vfta, index;
4850
4851         if ((hw->mng_cookie.status &
4852              E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT) &&
4853             (vid == adapter->mng_vlan_id))
4854                 return 0;
4855
4856         if (!e1000_vlan_used(adapter))
4857                 e1000_vlan_filter_on_off(adapter, true);
4858
4859         /* add VID to filter table */
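             /* the VFTA is an array of 128 32-bit registers; bit (vid & 0x1F)
              * of register (vid >> 5) selects this VLAN id */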
4860         index = (vid >> 5) & 0x7F;
4861         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4862         vfta |= (1 << (vid & 0x1F));
4863         e1000_write_vfta(hw, index, vfta);
4864
4865         set_bit(vid, adapter->active_vlans);
4866
4867         return 0;
4868 }
4869
4870 static int e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
4871 {
4872         struct e1000_adapter *adapter = netdev_priv(netdev);
4873         struct e1000_hw *hw = &adapter->hw;
4874         u32 vfta, index;
4875
4876         if (!test_bit(__E1000_DOWN, &adapter->flags))
4877                 e1000_irq_disable(adapter);
4878         if (!test_bit(__E1000_DOWN, &adapter->flags))
4879                 e1000_irq_enable(adapter);
4880
4881         /* remove VID from filter table */
4882         index = (vid >> 5) & 0x7F;
4883         vfta = E1000_READ_REG_ARRAY(hw, VFTA, index);
4884         vfta &= ~(1 << (vid & 0x1F));
4885         e1000_write_vfta(hw, index, vfta);
4886
4887         clear_bit(vid, adapter->active_vlans);
4888
4889         if (!e1000_vlan_used(adapter))
4890                 e1000_vlan_filter_on_off(adapter, false);
4891
4892         return 0;
4893 }
4894
4895 static void e1000_restore_vlan(struct e1000_adapter *adapter)
4896 {
4897         u16 vid;
4898
4899         if (!e1000_vlan_used(adapter))
4900                 return;
4901
4902         e1000_vlan_filter_on_off(adapter, true);
4903         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
4904                 e1000_vlan_rx_add_vid(adapter->netdev, vid);
4905 }
4906
4907 int e1000_set_spd_dplx(struct e1000_adapter *adapter, u32 spd, u8 dplx)
4908 {
4909         struct e1000_hw *hw = &adapter->hw;
4910
4911         hw->autoneg = 0;
4912
4913         /* Make sure dplx is at most 1 bit and lsb of speed is not set
4914          * for the switch() below to work */
4915         if ((spd & 1) || (dplx & ~1))
4916                 goto err_inval;
4917
4918         /* Fiber NICs only allow 1000 Mbps full duplex */
4919         if ((hw->media_type == e1000_media_type_fiber) &&
4920             spd != SPEED_1000 &&
4921             dplx != DUPLEX_FULL)
4922                 goto err_inval;
4923
4924         switch (spd + dplx) {
4925         case SPEED_10 + DUPLEX_HALF:
4926                 hw->forced_speed_duplex = e1000_10_half;
4927                 break;
4928         case SPEED_10 + DUPLEX_FULL:
4929                 hw->forced_speed_duplex = e1000_10_full;
4930                 break;
4931         case SPEED_100 + DUPLEX_HALF:
4932                 hw->forced_speed_duplex = e1000_100_half;
4933                 break;
4934         case SPEED_100 + DUPLEX_FULL:
4935                 hw->forced_speed_duplex = e1000_100_full;
4936                 break;
4937         case SPEED_1000 + DUPLEX_FULL:
4938                 hw->autoneg = 1;
4939                 hw->autoneg_advertised = ADVERTISE_1000_FULL;
4940                 break;
4941         case SPEED_1000 + DUPLEX_HALF: /* not supported */
4942         default:
4943                 goto err_inval;
4944         }
4945         return 0;
4946
4947 err_inval:
4948         e_err(probe, "Unsupported Speed/Duplex configuration\n");
4949         return -EINVAL;
4950 }
4951
4952 static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
4953 {
4954         struct net_device *netdev = pci_get_drvdata(pdev);
4955         struct e1000_adapter *adapter = netdev_priv(netdev);
4956         struct e1000_hw *hw = &adapter->hw;
4957         u32 ctrl, ctrl_ext, rctl, status;
4958         u32 wufc = adapter->wol;
4959 #ifdef CONFIG_PM
4960         int retval = 0;
4961 #endif
4962
4963         netif_device_detach(netdev);
4964
4965         if (netif_running(netdev)) {
4966                 WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
4967                 e1000_down(adapter);
4968         }
4969
4970 #ifdef CONFIG_PM
4971         retval = pci_save_state(pdev);
4972         if (retval)
4973                 return retval;
4974 #endif
4975
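             /* if the link is already up, don't arm wake-on-link-change */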
4976         status = er32(STATUS);
4977         if (status & E1000_STATUS_LU)
4978                 wufc &= ~E1000_WUFC_LNKC;
4979
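             /* wake filters requested: keep enough of the receive path running
              * for the hardware to recognize the wake-up packets */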
4980         if (wufc) {
4981                 e1000_setup_rctl(adapter);
4982                 e1000_set_rx_mode(netdev);
4983
4984                 rctl = er32(RCTL);
4985
4986                 /* turn on all-multi mode if wake on multicast is enabled */
4987                 if (wufc & E1000_WUFC_MC)
4988                         rctl |= E1000_RCTL_MPE;
4989
4990                 /* enable receives in the hardware */
4991                 ew32(RCTL, rctl | E1000_RCTL_EN);
4992
4993                 if (hw->mac_type >= e1000_82540) {
4994                         ctrl = er32(CTRL);
4995                         /* advertise wake from D3Cold */
4996                         #define E1000_CTRL_ADVD3WUC 0x00100000
4997                         /* phy power management enable */
4998                         #define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
4999                         ctrl |= E1000_CTRL_ADVD3WUC |
5000                                 E1000_CTRL_EN_PHY_PWR_MGMT;
5001                         ew32(CTRL, ctrl);
5002                 }
5003
5004                 if (hw->media_type == e1000_media_type_fiber ||
5005                     hw->media_type == e1000_media_type_internal_serdes) {
5006                         /* keep the laser running in D3 */
5007                         ctrl_ext = er32(CTRL_EXT);
5008                         ctrl_ext |= E1000_CTRL_EXT_SDP7_DATA;
5009                         ew32(CTRL_EXT, ctrl_ext);
5010                 }
5011
5012                 ew32(WUC, E1000_WUC_PME_EN);
5013                 ew32(WUFC, wufc);
5014         } else {
5015                 ew32(WUC, 0);
5016                 ew32(WUFC, 0);
5017         }
5018
5019         e1000_release_manageability(adapter);
5020
5021         *enable_wake = !!wufc;
5022
5023         /* make sure adapter isn't asleep if manageability is enabled */
5024         if (adapter->en_mng_pt)
5025                 *enable_wake = true;
5026
5027         if (netif_running(netdev))
5028                 e1000_free_irq(adapter);
5029
5030         pci_disable_device(pdev);
5031
5032         return 0;
5033 }
5034
5035 #ifdef CONFIG_PM
5036 static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
5037 {
5038         int retval;
5039         bool wake;
5040
5041         retval = __e1000_shutdown(pdev, &wake);
5042         if (retval)
5043                 return retval;
5044
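             /* if a wake source is armed, let the PCI core choose a sleep state
              * the device can still wake from; otherwise go straight to D3hot
              * with wake disabled */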
5045         if (wake) {
5046                 pci_prepare_to_sleep(pdev);
5047         } else {
5048                 pci_wake_from_d3(pdev, false);
5049                 pci_set_power_state(pdev, PCI_D3hot);
5050         }
5051
5052         return 0;
5053 }
5054
5055 static int e1000_resume(struct pci_dev *pdev)
5056 {
5057         struct net_device *netdev = pci_get_drvdata(pdev);
5058         struct e1000_adapter *adapter = netdev_priv(netdev);
5059         struct e1000_hw *hw = &adapter->hw;
5060         int err;
5061
5062         pci_set_power_state(pdev, PCI_D0);
5063         pci_restore_state(pdev);
5064         pci_save_state(pdev);
5065
5066         if (adapter->need_ioport)
5067                 err = pci_enable_device(pdev);
5068         else
5069                 err = pci_enable_device_mem(pdev);
5070         if (err) {
5071                 pr_err("Cannot enable PCI device from suspend\n");
5072                 return err;
5073         }
5074         pci_set_master(pdev);
5075
5076         pci_enable_wake(pdev, PCI_D3hot, 0);
5077         pci_enable_wake(pdev, PCI_D3cold, 0);
5078
5079         if (netif_running(netdev)) {
5080                 err = e1000_request_irq(adapter);
5081                 if (err)
5082                         return err;
5083         }
5084
5085         e1000_power_up_phy(adapter);
5086         e1000_reset(adapter);
5087         ew32(WUS, ~0);
5088
5089         e1000_init_manageability(adapter);
5090
5091         if (netif_running(netdev))
5092                 e1000_up(adapter);
5093
5094         netif_device_attach(netdev);
5095
5096         return 0;
5097 }
5098 #endif
5099
5100 static void e1000_shutdown(struct pci_dev *pdev)
5101 {
5102         bool wake;
5103
5104         __e1000_shutdown(pdev, &wake);
5105
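             /* only touch wake and power state when the system is really
              * powering off (not on reboot) */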
5106         if (system_state == SYSTEM_POWER_OFF) {
5107                 pci_wake_from_d3(pdev, wake);
5108                 pci_set_power_state(pdev, PCI_D3hot);
5109         }
5110 }
5111
5112 #ifdef CONFIG_NET_POLL_CONTROLLER
5113 /*
5114  * Polling 'interrupt' - used by things like netconsole to send skbs
5115  * without having to re-enable interrupts. It's not called while
5116  * the interrupt routine is executing.
5117  */
5118 static void e1000_netpoll(struct net_device *netdev)
5119 {
5120         struct e1000_adapter *adapter = netdev_priv(netdev);
5121
5122         disable_irq(adapter->pdev->irq);
5123         e1000_intr(adapter->pdev->irq, netdev);
5124         enable_irq(adapter->pdev->irq);
5125 }
5126 #endif
5127
5128 /**
5129  * e1000_io_error_detected - called when PCI error is detected
5130  * @pdev: Pointer to PCI device
5131  * @state: The current pci connection state
5132  *
5133  * This function is called after a PCI bus error affecting
5134  * this device has been detected.
5135  */
5136 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
5137                                                 pci_channel_state_t state)
5138 {
5139         struct net_device *netdev = pci_get_drvdata(pdev);
5140         struct e1000_adapter *adapter = netdev_priv(netdev);
5141
5142         netif_device_detach(netdev);
5143
5144         if (state == pci_channel_io_perm_failure)
5145                 return PCI_ERS_RESULT_DISCONNECT;
5146
5147         if (netif_running(netdev))
5148                 e1000_down(adapter);
5149         pci_disable_device(pdev);
5150
5151         /* Request a slot reset. */
5152         return PCI_ERS_RESULT_NEED_RESET;
5153 }
5154
5155 /**
5156  * e1000_io_slot_reset - called after the pci bus has been reset.
5157  * @pdev: Pointer to PCI device
5158  *
5159  * Restart the card from scratch, as if from a cold-boot. Implementation
5160  * resembles the first-half of the e1000_resume routine.
5161  */
5162 static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
5163 {
5164         struct net_device *netdev = pci_get_drvdata(pdev);
5165         struct e1000_adapter *adapter = netdev_priv(netdev);
5166         struct e1000_hw *hw = &adapter->hw;
5167         int err;
5168
5169         if (adapter->need_ioport)
5170                 err = pci_enable_device(pdev);
5171         else
5172                 err = pci_enable_device_mem(pdev);
5173         if (err) {
5174                 pr_err("Cannot re-enable PCI device after reset.\n");
5175                 return PCI_ERS_RESULT_DISCONNECT;
5176         }
5177         pci_set_master(pdev);
5178
5179         pci_enable_wake(pdev, PCI_D3hot, 0);
5180         pci_enable_wake(pdev, PCI_D3cold, 0);
5181
5182         e1000_reset(adapter);
5183         ew32(WUS, ~0);
5184
5185         return PCI_ERS_RESULT_RECOVERED;
5186 }
5187
5188 /**
5189  * e1000_io_resume - called when traffic can start flowing again.
5190  * @pdev: Pointer to PCI device
5191  *
5192  * This callback is called when the error recovery driver tells us that
5193  * it's OK to resume normal operation. Implementation resembles the
5194  * second-half of the e1000_resume routine.
5195  */
5196 static void e1000_io_resume(struct pci_dev *pdev)
5197 {
5198         struct net_device *netdev = pci_get_drvdata(pdev);
5199         struct e1000_adapter *adapter = netdev_priv(netdev);
5200
5201         e1000_init_manageability(adapter);
5202
5203         if (netif_running(netdev)) {
5204                 if (e1000_up(adapter)) {
5205                         pr_info("can't bring device back up after reset\n");
5206                         return;
5207                 }
5208         }
5209
5210         netif_device_attach(netdev);
5211 }
5212
5213 /* e1000_main.c */