Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
authorDavid S. Miller <davem@davemloft.net>
Wed, 7 Apr 2010 06:53:30 +0000 (23:53 -0700)
committerDavid S. Miller <davem@davemloft.net>
Wed, 7 Apr 2010 06:53:30 +0000 (23:53 -0700)
Conflicts:
drivers/net/bonding/bond_main.c
drivers/net/via-velocity.c
drivers/net/wireless/iwlwifi/iwl-agn.c

44 files changed:
1  2 
MAINTAINERS
arch/microblaze/include/asm/system.h
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/benet/be_cmds.c
drivers/net/benet/be_ethtool.c
drivers/net/benet/be_main.c
drivers/net/bnx2.c
drivers/net/bonding/bond_main.c
drivers/net/cxgb4/cxgb4_main.c
drivers/net/e1000/e1000_main.c
drivers/net/e1000e/e1000.h
drivers/net/e1000e/netdev.c
drivers/net/gianfar.c
drivers/net/igb/igb.h
drivers/net/igb/igb_main.c
drivers/net/igbvf/netdev.c
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ixgbe/ixgbe_type.h
drivers/net/ixgbevf/ixgbevf_main.c
drivers/net/ksz884x.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/pcmcia/smc91c92_cs.c
drivers/net/r8169.c
drivers/net/tulip/uli526x.c
drivers/net/via-velocity.c
drivers/net/wireless/iwlwifi/iwl-4965.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl3945-base.c
drivers/net/wireless/libertas/dev.h
drivers/net/wireless/mwl8k.c
include/linux/skbuff.h
include/linux/socket.h
include/linux/tty.h
net/8021q/vlan.c
net/8021q/vlan_dev.c
net/ipv4/af_inet.c
net/ipv4/devinet.c
net/ipv6/addrconf.c
net/mac80211/tx.c
net/netlink/af_netlink.c
net/socket.c

diff --combined MAINTAINERS
index 704d3d6da1b1f845f508045aafed7c6befbd2ae0,9ff6341e43ba0955c01879ee444cd44a2fbcb78f..2c569f5064987a2015b4066c6433e4e560a5d415
@@@ -1441,6 -1441,15 +1441,15 @@@ F:    arch/powerpc/include/asm/spu*.
  F:    arch/powerpc/oprofile/*cell*
  F:    arch/powerpc/platforms/cell/
  
+ CEPH DISTRIBUTED FILE SYSTEM CLIENT
+ M:    Sage Weil <sage@newdream.net>
+ L:    ceph-devel@lists.sourceforge.net
+ W:    http://ceph.newdream.net/
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
+ S:    Supported
+ F:    Documentation/filesystems/ceph.txt
+ F:    fs/ceph
  CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
  M:    David Vrabel <david.vrabel@csr.com>
  L:    linux-usb@vger.kernel.org
@@@ -1482,10 -1491,9 +1491,10 @@@ M:    Andy Whitcroft <apw@canonical.com
  S:    Supported
  F:    scripts/checkpatch.pl
  
 -CISCO 10G ETHERNET DRIVER
 +CISCO VIC ETHERNET NIC DRIVER
  M:    Scott Feldman <scofeldm@cisco.com>
 -M:    Joe Eykholt <jeykholt@cisco.com>
 +M:    Vasanthy Kolluri <vkolluri@cisco.com>
 +M:    Roopa Prabhu <roprabhu@cisco.com>
  S:    Supported
  F:    drivers/net/enic/
  
@@@ -2990,9 -2998,10 +2999,9 @@@ F:     net/ipv4/netfilter/ipt_MASQUERADE.
  IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
  M:    Francois Romieu <romieu@fr.zoreil.com>
  M:    Sorbica Shieh <sorbica@icplus.com.tw>
 -M:    Jesse Huang <jesse@icplus.com.tw>
  L:    netdev@vger.kernel.org
  S:    Maintained
 -F:    drivers/net/ipg.c
 +F:    drivers/net/ipg.*
  
  IPATH DRIVER
  M:    Ralph Campbell <infinipath@qlogic.com>
@@@ -3074,6 -3083,7 +3083,7 @@@ F:      include/scsi/*iscsi
  ISDN SUBSYSTEM
  M:    Karsten Keil <isdn@linux-pingi.de>
  L:    isdn4linux@listserv.isdn4linux.de (subscribers-only)
+ L:    netdev@vger.kernel.org
  W:    http://www.isdn4linux.de
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git
  S:    Maintained
@@@ -3828,6 -3838,7 +3838,6 @@@ M:      Ramkrishna Vepa <ram.vepa@neterion.c
  M:    Rastapur Santosh <santosh.rastapur@neterion.com>
  M:    Sivakumar Subramani <sivakumar.subramani@neterion.com>
  M:    Sreenivasa Honnur <sreenivasa.honnur@neterion.com>
 -M:    Anil Murthy <anil.murthy@neterion.com>
  L:    netdev@vger.kernel.org
  W:    http://trac.neterion.com/cgi-bin/trac.cgi/wiki/Linux?Anonymous
  W:    http://trac.neterion.com/cgi-bin/trac.cgi/wiki/X3100Linux?Anonymous
@@@ -5213,6 -5224,21 +5223,21 @@@ T:    git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    arch/sparc/
  
+ SPARC SERIAL DRIVERS
+ M:    "David S. Miller" <davem@davemloft.net>
+ L:    sparclinux@vger.kernel.org
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-2.6.git
+ T:    git git://git.kernel.org/pub/scm/linux/kernel/git/davem/sparc-next-2.6.git
+ S:    Maintained
+ F:    drivers/serial/suncore.c
+ F:    drivers/serial/suncore.h
+ F:    drivers/serial/sunhv.c
+ F:    drivers/serial/sunsab.c
+ F:    drivers/serial/sunsab.h
+ F:    drivers/serial/sunsu.c
+ F:    drivers/serial/sunzilog.c
+ F:    drivers/serial/sunzilog.h
  SPECIALIX IO8+ MULTIPORT SERIAL CARD DRIVER
  M:    Roger Wolff <R.E.Wolff@BitWizard.nl>
  S:    Supported
@@@ -5398,7 -5424,6 +5423,6 @@@ S:      Maintaine
  F:    sound/soc/codecs/twl4030*
  
  TIPC NETWORK LAYER
- M:    Per Liden <per.liden@ericsson.com>
  M:    Jon Maloy <jon.maloy@ericsson.com>
  M:    Allan Stephens <allan.stephens@windriver.com>
  L:    tipc-discussion@lists.sourceforge.net
index 88fc92cdd8ceeb745062d41513b9652cb354c2e2,59efb3fef9577d93d83ad2bf6ce7a15ab8128144..48c4f0335e3f51b74413a93783da6f1fb8f0970d
@@@ -12,7 -12,6 +12,7 @@@
  #include <asm/registers.h>
  #include <asm/setup.h>
  #include <asm/irqflags.h>
 +#include <asm/cache.h>
  
  #include <asm-generic/cmpxchg.h>
  #include <asm-generic/cmpxchg-local.h>
@@@ -88,20 -87,13 +88,23 @@@ void free_initmem(void)
  extern char *klimit;
  extern void ret_from_fork(void);
  
+ extern void *alloc_maybe_bootmem(size_t size, gfp_t mask);
+ extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
  #ifdef CONFIG_DEBUG_FS
  extern struct dentry *of_debugfs_root;
  #endif
  
  #define arch_align_stack(x) (x)
  
 +/*
 + * MicroBlaze doesn't handle unaligned accesses in hardware.
 + *
 + * Based on this we force the IP header alignment in network drivers.
 + * We also modify NET_SKB_PAD to be a cacheline in size, thus maintaining
 + * cacheline alignment of buffers.
 + */
 +#define NET_IP_ALIGN  2
 +#define NET_SKB_PAD   L1_CACHE_BYTES
 +
  #endif /* _ASM_MICROBLAZE_SYSTEM_H */
diff --combined drivers/net/Kconfig
index 20e2dec1d534c61af0db50fdbbd5d87e33db5f1f,7b832c727f873b6964b1faa1c121a73796b113e4..65db201fd77e06478249874b54dd032c864d4266
@@@ -1916,7 -1916,6 +1916,7 @@@ config FE
        bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
        depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
                MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
 +      select PHYLIB
        help
          Say Y here if you want to use the built-in 10/100 Fast ethernet
          controller on some Motorola ColdFire and Freescale i.MX processors.
@@@ -2435,8 -2434,8 +2435,8 @@@ config MV643XX_ET
  
  config XILINX_LL_TEMAC
        tristate "Xilinx LL TEMAC (LocalLink Tri-mode Ethernet MAC) driver"
 +      depends on PPC || MICROBLAZE
        select PHYLIB
 -      depends on PPC_DCR_NATIVE
        help
          This driver supports the Xilinx 10/100/1000 LocalLink TEMAC
          core used in Xilinx Spartan and Virtex FPGAs
@@@ -2583,6 -2582,31 +2583,31 @@@ config CHELSIO_T
          To compile this driver as a module, choose M here: the module
          will be called cxgb3.
  
+ config CHELSIO_T4_DEPENDS
+       tristate
+       depends on PCI && INET
+       default y
+ config CHELSIO_T4
+       tristate "Chelsio Communications T4 Ethernet support"
+       depends on CHELSIO_T4_DEPENDS
+       select FW_LOADER
+       select MDIO
+       help
+         This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+         adapters.
+         For general information about Chelsio and our products, visit
+         our website at <http://www.chelsio.com>.
+         For customer support, please visit our customer support page at
+         <http://www.chelsio.com/support.htm>.
+         Please send feedback to <linux-bugs@chelsio.com>.
+         To compile this driver as a module choose M here; the module
+         will be called cxgb4.
  config EHEA
        tristate "eHEA Ethernet support"
        depends on IBMEBUS && INET && SPARSEMEM
          will be called ehea.
  
  config ENIC
 -      tristate "Cisco 10G Ethernet NIC support"
 +      tristate "Cisco VIC Ethernet NIC Support"
        depends on PCI && INET
        select INET_LRO
        help
 -        This enables the support for the Cisco 10G Ethernet card.
 +        This enables the support for the Cisco VIC Ethernet card.
  
  config IXGBE
        tristate "Intel(R) 10GbE PCI Express adapters support"
@@@ -2838,8 -2862,6 +2863,8 @@@ source "drivers/ieee802154/Kconfig
  
  source "drivers/s390/net/Kconfig"
  
 +source "drivers/net/caif/Kconfig"
 +
  config XEN_NETDEV_FRONTEND
        tristate "Xen network device frontend driver"
        depends on XEN
@@@ -3158,12 -3180,17 +3183,12 @@@ config PPPOAT
  
  config PPPOL2TP
        tristate "PPP over L2TP (EXPERIMENTAL)"
 -      depends on EXPERIMENTAL && PPP && INET
 +      depends on EXPERIMENTAL && L2TP && PPP
        help
          Support for PPP-over-L2TP socket family. L2TP is a protocol
          used by ISPs and enterprises to tunnel PPP traffic over UDP
          tunnels. L2TP is replacing PPTP for VPN uses.
  
 -        This kernel component handles only L2TP data packets: a
 -        userland daemon handles L2TP the control protocol (tunnel
 -        and session setup). One such daemon is OpenL2TP
 -        (http://openl2tp.sourceforge.net/).
 -
  config SLIP
        tristate "SLIP (serial line) support"
        ---help---
@@@ -3250,14 -3277,15 +3275,14 @@@ config NET_F
          "SCSI generic support".
  
  config NETCONSOLE
 -      tristate "Network console logging support (EXPERIMENTAL)"
 -      depends on EXPERIMENTAL
 +      tristate "Network console logging support"
        ---help---
        If you want to log kernel messages over the network, enable this.
        See <file:Documentation/networking/netconsole.txt> for details.
  
  config NETCONSOLE_DYNAMIC
 -      bool "Dynamic reconfiguration of logging targets (EXPERIMENTAL)"
 -      depends on NETCONSOLE && SYSFS && EXPERIMENTAL
 +      bool "Dynamic reconfiguration of logging targets"
 +      depends on NETCONSOLE && SYSFS
        select CONFIGFS_FS
        help
          This option enables the ability to dynamically reconfigure target
diff --combined drivers/net/Makefile
index f8444f439a659332de1a3e6725beae2072643711,a583b50d9de8bd4402fccefed7ae6effa6ff4ff8..ebf80b9830636de6bcbdc30e3e4b7fd59bd949aa
@@@ -19,6 -19,7 +19,7 @@@ obj-$(CONFIG_IXGB) += ixgb
  obj-$(CONFIG_IP1000) += ipg.o
  obj-$(CONFIG_CHELSIO_T1) += chelsio/
  obj-$(CONFIG_CHELSIO_T3) += cxgb3/
+ obj-$(CONFIG_CHELSIO_T4) += cxgb4/
  obj-$(CONFIG_EHEA) += ehea/
  obj-$(CONFIG_CAN) += can/
  obj-$(CONFIG_BONDING) += bonding/
@@@ -160,7 -161,7 +161,7 @@@ obj-$(CONFIG_PPP_DEFLATE) += ppp_deflat
  obj-$(CONFIG_PPP_BSDCOMP) += bsd_comp.o
  obj-$(CONFIG_PPP_MPPE) += ppp_mppe.o
  obj-$(CONFIG_PPPOE) += pppox.o pppoe.o
 -obj-$(CONFIG_PPPOL2TP) += pppox.o pppol2tp.o
 +obj-$(CONFIG_PPPOL2TP) += pppox.o
  
  obj-$(CONFIG_SLIP) += slip.o
  obj-$(CONFIG_SLHC) += slhc.o
@@@ -290,6 -291,5 +291,6 @@@ obj-$(CONFIG_VIRTIO_NET) += virtio_net.
  obj-$(CONFIG_SFC) += sfc/
  
  obj-$(CONFIG_WIMAX) += wimax/
 +obj-$(CONFIG_CAIF) += caif/
  
  obj-$(CONFIG_OCTEON_MGMT_ETHERNET) += octeon/
index 61a9afdb83f45eaa128e2c660243b948cbe7c595,d0ef4ac987cde52db2ecb24e2144c43d1339c997..da8793026bb1b09512250a55469c677e33e03c3d
@@@ -843,8 -843,7 +843,8 @@@ int be_cmd_q_destroy(struct be_adapter 
   * Uses mbox
   */
  int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
 -              u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id)
 +              u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
 +              u32 domain)
  {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
  
 +      req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
        req->pmac_invalid = pmac_invalid;
@@@ -1159,13 -1157,13 +1159,13 @@@ int be_cmd_multicast_set(struct be_adap
        req->interface_id = if_id;
        if (netdev) {
                int i;
 -              struct dev_mc_list *mc;
 +              struct netdev_hw_addr *ha;
  
                req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
  
                i = 0;
 -              netdev_for_each_mc_addr(mc, netdev)
 -                      memcpy(req->mac[i].byte, mc->dmi_addr, ETH_ALEN);
 +              netdev_for_each_mc_addr(ha, netdev)
 +                      memcpy(req->mac[i].byte, ha->addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }
@@@ -1466,8 -1464,8 +1466,8 @@@ int be_cmd_get_flash_crc(struct be_adap
  
        req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
        req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
-       req->params.offset = offset;
-       req->params.data_buf_size = 0x4;
+       req->params.offset = cpu_to_le32(offset);
+       req->params.data_buf_size = cpu_to_le32(0x4);
  
        status = be_mcc_notify_wait(adapter);
        if (!status)
index d7390da470cdf7a276a523b5f624abedd23e625b,51e1065e78977df6f27a06eaf58dcdba64da5502..d488d52d710a968490c4f86415419f71113ae750
@@@ -490,13 -490,13 +490,13 @@@ be_test_ddr_dma(struct be_adapter *adap
  {
        int ret, i;
        struct be_dma_mem ddrdma_cmd;
-       u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5};
+       u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
  
        ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
        ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
                                        &ddrdma_cmd.dma);
        if (!ddrdma_cmd.va) {
 -              dev_err(&adapter->pdev->dev, "Memory allocation failure \n");
 +              dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                return -ENOMEM;
        }
  
index cb0a4a6d5dea7cabf739b7ec8d8e83344c06bac1,ec6ace802256087c4bd310c4669c48fc919999ae..49d51965312e8d66328a9f574001b04b97d8e434
@@@ -26,11 -26,8 +26,11 @@@ MODULE_AUTHOR("ServerEngines Corporatio
  MODULE_LICENSE("GPL");
  
  static unsigned int rx_frag_size = 2048;
 +static unsigned int num_vfs;
  module_param(rx_frag_size, uint, S_IRUGO);
 +module_param(num_vfs, uint, S_IRUGO);
  MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
 +MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
  
  static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@@ -141,19 -138,12 +141,19 @@@ static int be_mac_addr_set(struct net_d
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
  
 +      /* MAC addr configuration will be done in hardware for VFs
 +       * by their corresponding PFs. Just copy to netdev addr here
 +       */
 +      if (!be_physfn(adapter))
 +              goto netdev_addr;
 +
        status = be_cmd_pmac_del(adapter, adapter->if_handle, adapter->pmac_id);
        if (status)
                return status;
  
        status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                        adapter->if_handle, &adapter->pmac_id);
 +netdev_addr:
        if (!status)
                memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
  
@@@ -396,48 -386,26 +396,48 @@@ static void wrb_fill_hdr(struct be_eth_
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
  }
  
 +static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
 +              bool unmap_single)
 +{
 +      dma_addr_t dma;
 +
 +      be_dws_le_to_cpu(wrb, sizeof(*wrb));
 +
 +      dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
 +      if (dma != 0) {
 +              if (unmap_single)
 +                      pci_unmap_single(pdev, dma, wrb->frag_len,
 +                              PCI_DMA_TODEVICE);
 +              else
 +                      pci_unmap_page(pdev, dma, wrb->frag_len,
 +                              PCI_DMA_TODEVICE);
 +      }
 +}
  
  static int make_tx_wrbs(struct be_adapter *adapter,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
  {
 -      u64 busaddr;
 -      u32 i, copied = 0;
 +      dma_addr_t busaddr;
 +      int i, copied = 0;
        struct pci_dev *pdev = adapter->pdev;
        struct sk_buff *first_skb = skb;
        struct be_queue_info *txq = &adapter->tx_obj.q;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
 +      bool map_single = false;
 +      u16 map_head;
  
        hdr = queue_head_node(txq);
 -      atomic_add(wrb_cnt, &txq->used);
        queue_head_inc(txq);
 +      map_head = txq->head;
  
        if (skb->len > skb->data_len) {
                int len = skb->len - skb->data_len;
                busaddr = pci_map_single(pdev, skb->data, len,
                                         PCI_DMA_TODEVICE);
 +              if (pci_dma_mapping_error(pdev, busaddr))
 +                      goto dma_err;
 +              map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                busaddr = pci_map_page(pdev, frag->page,
                                       frag->page_offset,
                                       frag->size, PCI_DMA_TODEVICE);
 +              if (pci_dma_mapping_error(pdev, busaddr))
 +                      goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, frag->size);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
        be_dws_cpu_to_le(hdr, sizeof(*hdr));
  
        return copied;
 +dma_err:
 +      txq->head = map_head;
 +      while (copied) {
 +              wrb = queue_head_node(txq);
 +              unmap_tx_frag(pdev, wrb, map_single);
 +              map_single = false;
 +              copied -= wrb->frag_len;
 +              queue_head_inc(txq);
 +      }
 +      return 0;
  }
  
  static netdev_tx_t be_xmit(struct sk_buff *skb,
                 * *BEFORE* ringing the tx doorbell, so that we serialze the
                 * tx compls of the current transmit which'll wake up the queue
                 */
 +              atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_queue(netdev);
@@@ -586,9 -541,6 +586,9 @@@ static void be_vlan_add_vid(struct net_
  {
        struct be_adapter *adapter = netdev_priv(netdev);
  
 +      if (!be_physfn(adapter))
 +              return;
 +
        adapter->vlan_tag[vid] = 1;
        adapter->vlans_added++;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
@@@ -599,9 -551,6 +599,9 @@@ static void be_vlan_rem_vid(struct net_
  {
        struct be_adapter *adapter = netdev_priv(netdev);
  
 +      if (!be_physfn(adapter))
 +              return;
 +
        adapter->vlan_tag[vid] = 0;
        vlan_group_set_device(adapter->vlan_grp, vid, NULL);
        adapter->vlans_added--;
@@@ -639,28 -588,6 +639,28 @@@ done
        return;
  }
  
 +static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
 +{
 +      struct be_adapter *adapter = netdev_priv(netdev);
 +      int status;
 +
 +      if (!adapter->sriov_enabled)
 +              return -EPERM;
 +
 +      if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
 +              return -EINVAL;
 +
 +      status = be_cmd_pmac_del(adapter, adapter->vf_if_handle[vf],
 +                              adapter->vf_pmac_id[vf]);
 +
 +      status = be_cmd_pmac_add(adapter, mac, adapter->vf_if_handle[vf],
 +                              &adapter->vf_pmac_id[vf]);
 +      if (!status)
 +              dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
 +                              mac, vf);
 +      return status;
 +}
 +
  static void be_rx_rate_update(struct be_adapter *adapter)
  {
        struct be_drvr_stats *stats = drvr_stats(adapter);
@@@ -864,6 -791,7 +864,6 @@@ static void be_rx_compl_process(struct 
  
        skb->truesize = skb->len + sizeof(struct sk_buff);
        skb->protocol = eth_type_trans(skb, adapter->netdev);
 -      skb->dev = adapter->netdev;
  
        vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
        vtm = AMAP_GET_BITS(struct amap_eth_rx_compl, vtm, rxcp);
                        return;
                }
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-               vid = be16_to_cpu(vid);
+               vid = swab16(vid);
                vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
        } else {
                netif_receive_skb(skb);
@@@ -956,7 -884,7 +956,7 @@@ static void be_rx_compl_process_gro(str
                napi_gro_frags(&eq_obj->napi);
        } else {
                vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-               vid = be16_to_cpu(vid);
+               vid = swab16(vid);
  
                if (!adapter->vlan_grp || adapter->vlans_added == 0)
                        return;
@@@ -1084,26 -1012,35 +1084,26 @@@ static void be_tx_compl_process(struct 
        struct be_eth_wrb *wrb;
        struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
        struct sk_buff *sent_skb;
 -      u64 busaddr;
 -      u16 cur_index, num_wrbs = 0;
 +      u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
 +      bool unmap_skb_hdr = true;
  
 -      cur_index = txq->tail;
 -      sent_skb = sent_skbs[cur_index];
 +      sent_skb = sent_skbs[txq->tail];
        BUG_ON(!sent_skb);
 -      sent_skbs[cur_index] = NULL;
 -      wrb = queue_tail_node(txq);
 -      be_dws_le_to_cpu(wrb, sizeof(*wrb));
 -      busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
 -      if (busaddr != 0) {
 -              pci_unmap_single(adapter->pdev, busaddr,
 -                               wrb->frag_len, PCI_DMA_TODEVICE);
 -      }
 -      num_wrbs++;
 +      sent_skbs[txq->tail] = NULL;
 +
 +      /* skip header wrb */
        queue_tail_inc(txq);
  
 -      while (cur_index != last_index) {
 +      do {
                cur_index = txq->tail;
                wrb = queue_tail_node(txq);
 -              be_dws_le_to_cpu(wrb, sizeof(*wrb));
 -              busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
 -              if (busaddr != 0) {
 -                      pci_unmap_page(adapter->pdev, busaddr,
 -                                     wrb->frag_len, PCI_DMA_TODEVICE);
 -              }
 +              unmap_tx_frag(adapter->pdev, wrb, (unmap_skb_hdr &&
 +                                      sent_skb->len > sent_skb->data_len));
 +              unmap_skb_hdr = false;
 +
                num_wrbs++;
                queue_tail_inc(txq);
 -      }
 +      } while (cur_index != last_index);
  
        atomic_sub(num_wrbs, &txq->used);
  
@@@ -1318,8 -1255,6 +1318,8 @@@ static int be_tx_queues_create(struct b
        /* Ask BE to create Tx Event queue */
        if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
                goto tx_eq_free;
 +      adapter->base_eq_id = adapter->tx_eq.q.id;
 +
        /* Alloc TX eth compl queue */
        cq = &adapter->tx_obj.cq;
        if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
@@@ -1447,7 -1382,7 +1447,7 @@@ rx_eq_free
  /* There are 8 evt ids per func. Retruns the evt id's bit number */
  static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
  {
 -      return eq_id % 8;
 +      return eq_id - adapter->base_eq_id;
  }
  
  static irqreturn_t be_intx(int irq, void *dev)
@@@ -1625,28 -1560,6 +1625,28 @@@ static void be_msix_enable(struct be_ad
        return;
  }
  
 +static void be_sriov_enable(struct be_adapter *adapter)
 +{
 +#ifdef CONFIG_PCI_IOV
 +      int status;
 +      if (be_physfn(adapter) && num_vfs) {
 +              status = pci_enable_sriov(adapter->pdev, num_vfs);
 +              adapter->sriov_enabled = status ? false : true;
 +      }
 +#endif
 +      return;
 +}
 +
 +static void be_sriov_disable(struct be_adapter *adapter)
 +{
 +#ifdef CONFIG_PCI_IOV
 +      if (adapter->sriov_enabled) {
 +              pci_disable_sriov(adapter->pdev);
 +              adapter->sriov_enabled = false;
 +      }
 +#endif
 +}
 +
  static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id)
  {
        return adapter->msix_entries[
@@@ -1704,9 -1617,6 +1704,9 @@@ static int be_irq_register(struct be_ad
                status = be_msix_register(adapter);
                if (status == 0)
                        goto done;
 +              /* INTx is not supported for VF */
 +              if (!be_physfn(adapter))
 +                      return status;
        }
  
        /* INTx */
@@@ -1780,17 -1690,14 +1780,17 @@@ static int be_open(struct net_device *n
                goto ret_sts;
        be_link_status_update(adapter, link_up);
  
 -      status = be_vid_config(adapter);
 +      if (be_physfn(adapter))
 +              status = be_vid_config(adapter);
        if (status)
                goto ret_sts;
  
 -      status = be_cmd_set_flow_control(adapter,
 -                                      adapter->tx_fc, adapter->rx_fc);
 -      if (status)
 -              goto ret_sts;
 +      if (be_physfn(adapter)) {
 +              status = be_cmd_set_flow_control(adapter,
 +                              adapter->tx_fc, adapter->rx_fc);
 +              if (status)
 +                      goto ret_sts;
 +      }
  
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
  ret_sts:
@@@ -1816,7 -1723,7 +1816,7 @@@ static int be_setup_wol(struct be_adapt
                        PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
                if (status) {
                        dev_err(&adapter->pdev->dev,
 -                              "Could not enable Wake-on-lan \n");
 +                              "Could not enable Wake-on-lan\n");
                        pci_free_consistent(adapter->pdev, cmd.size, cmd.va,
                                        cmd.dma);
                        return status;
  static int be_setup(struct be_adapter *adapter)
  {
        struct net_device *netdev = adapter->netdev;
 -      u32 cap_flags, en_flags;
 +      u32 cap_flags, en_flags, vf = 0;
        int status;
 +      u8 mac[ETH_ALEN];
  
 -      cap_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
 -                      BE_IF_FLAGS_MCAST_PROMISCUOUS |
 -                      BE_IF_FLAGS_PROMISCUOUS |
 -                      BE_IF_FLAGS_PASS_L3L4_ERRORS;
 -      en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
 -                      BE_IF_FLAGS_PASS_L3L4_ERRORS;
 +      cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST;
 +
 +      if (be_physfn(adapter)) {
 +              cap_flags |= BE_IF_FLAGS_MCAST_PROMISCUOUS |
 +                              BE_IF_FLAGS_PROMISCUOUS |
 +                              BE_IF_FLAGS_PASS_L3L4_ERRORS;
 +              en_flags |= BE_IF_FLAGS_PASS_L3L4_ERRORS;
 +      }
  
        status = be_cmd_if_create(adapter, cap_flags, en_flags,
                        netdev->dev_addr, false/* pmac_invalid */,
 -                      &adapter->if_handle, &adapter->pmac_id);
 +                      &adapter->if_handle, &adapter->pmac_id, 0);
        if (status != 0)
                goto do_none;
  
 +      if (be_physfn(adapter)) {
 +              while (vf < num_vfs) {
 +                      cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED
 +                                      | BE_IF_FLAGS_BROADCAST;
 +                      status = be_cmd_if_create(adapter, cap_flags, en_flags,
 +                                      mac, true, &adapter->vf_if_handle[vf],
 +                                      NULL, vf+1);
 +                      if (status) {
 +                              dev_err(&adapter->pdev->dev,
 +                              "Interface Create failed for VF %d\n", vf);
 +                              goto if_destroy;
 +                      }
 +                      vf++;
 +              } while (vf < num_vfs);
 +      } else if (!be_physfn(adapter)) {
 +              status = be_cmd_mac_addr_query(adapter, mac,
 +                      MAC_ADDRESS_TYPE_NETWORK, false, adapter->if_handle);
 +              if (!status) {
 +                      memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 +                      memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
 +              }
 +      }
 +
        status = be_tx_queues_create(adapter);
        if (status != 0)
                goto if_destroy;
@@@ -1901,9 -1782,6 +1901,9 @@@ rx_qs_destroy
  tx_qs_destroy:
        be_tx_queues_destroy(adapter);
  if_destroy:
 +      for (vf = 0; vf < num_vfs; vf++)
 +              if (adapter->vf_if_handle[vf])
 +                      be_cmd_if_destroy(adapter, adapter->vf_if_handle[vf]);
        be_cmd_if_destroy(adapter, adapter->if_handle);
  do_none:
        return status;
@@@ -1977,7 -1855,7 +1977,7 @@@ static bool be_flash_redboot(struct be_
        p += crc_offset;
  
        status = be_cmd_get_flash_crc(adapter, flashed_crc,
-                       (img_start + image_size - 4));
+                       (image_size - 4));
        if (status) {
                dev_err(&adapter->pdev->dev,
                "could not get crc from flash, not flashing redboot\n");
@@@ -2113,7 -1991,7 +2113,7 @@@ int be_load_fw(struct be_adapter *adapt
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
-       int status, i = 0;
+       int status, i = 0, num_imgs = 0;
        const u8 *p;
  
        strcpy(fw_file, func);
        if ((adapter->generation == BE_GEN3) &&
                        (get_ufigen_type(fhdr) == BE_GEN3)) {
                fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
-               for (i = 0; i < fhdr3->num_imgs; i++) {
+               num_imgs = le32_to_cpu(fhdr3->num_imgs);
+               for (i = 0; i < num_imgs; i++) {
                        img_hdr_ptr = (struct image_hdr *) (fw->data +
                                        (sizeof(struct flash_file_hdr_g3) +
-                                       i * sizeof(struct image_hdr)));
-                       if (img_hdr_ptr->imageid == 1) {
-                               status = be_flash_data(adapter, fw,
-                                               &flash_cmd, fhdr3->num_imgs);
-                       }
+                                        i * sizeof(struct image_hdr)));
+                       if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
+                               status = be_flash_data(adapter, fw, &flash_cmd,
+                                                       num_imgs);
                }
        } else if ((adapter->generation == BE_GEN2) &&
                        (get_ufigen_type(fhdr) == BE_GEN2)) {
@@@ -2184,7 -2061,6 +2183,7 @@@ static struct net_device_ops be_netdev_
        .ndo_vlan_rx_register   = be_vlan_register,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
 +      .ndo_set_vf_mac         = be_set_vf_mac
  };
  
  static void be_netdev_init(struct net_device *netdev)
@@@ -2226,48 -2102,37 +2225,48 @@@ static void be_unmap_pci_bars(struct be
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
 -      if (adapter->pcicfg)
 +      if (adapter->pcicfg && be_physfn(adapter))
                iounmap(adapter->pcicfg);
  }
  
  static int be_map_pci_bars(struct be_adapter *adapter)
  {
        u8 __iomem *addr;
 -      int pcicfg_reg;
 -
 -      addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
 -                      pci_resource_len(adapter->pdev, 2));
 -      if (addr == NULL)
 -              return -ENOMEM;
 -      adapter->csr = addr;
 +      int pcicfg_reg, db_reg;
  
 -      addr = ioremap_nocache(pci_resource_start(adapter->pdev, 4),
 -                      128 * 1024);
 -      if (addr == NULL)
 -              goto pci_map_err;
 -      adapter->db = addr;
 +      if (be_physfn(adapter)) {
 +              addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
 +                              pci_resource_len(adapter->pdev, 2));
 +              if (addr == NULL)
 +                      return -ENOMEM;
 +              adapter->csr = addr;
 +      }
  
 -      if (adapter->generation == BE_GEN2)
 +      if (adapter->generation == BE_GEN2) {
                pcicfg_reg = 1;
 -      else
 +              db_reg = 4;
 +      } else {
                pcicfg_reg = 0;
 -
 -      addr = ioremap_nocache(pci_resource_start(adapter->pdev, pcicfg_reg),
 -                      pci_resource_len(adapter->pdev, pcicfg_reg));
 +              if (be_physfn(adapter))
 +                      db_reg = 4;
 +              else
 +                      db_reg = 0;
 +      }
 +      addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
 +                              pci_resource_len(adapter->pdev, db_reg));
        if (addr == NULL)
                goto pci_map_err;
 -      adapter->pcicfg = addr;
 +      adapter->db = addr;
 +
 +      if (be_physfn(adapter)) {
 +              addr = ioremap_nocache(
 +                              pci_resource_start(adapter->pdev, pcicfg_reg),
 +                              pci_resource_len(adapter->pdev, pcicfg_reg));
 +              if (addr == NULL)
 +                      goto pci_map_err;
 +              adapter->pcicfg = addr;
 +      } else
 +              adapter->pcicfg = adapter->db + SRIOV_VF_PCICFG_OFFSET;
  
        return 0;
  pci_map_err:
@@@ -2381,8 -2246,6 +2380,8 @@@ static void __devexit be_remove(struct 
  
        be_ctrl_cleanup(adapter);
  
 +      be_sriov_disable(adapter);
 +
        be_msix_disable(adapter);
  
        pci_set_drvdata(pdev, NULL);
@@@ -2407,20 -2270,16 +2406,20 @@@ static int be_get_config(struct be_adap
                return status;
  
        memset(mac, 0, ETH_ALEN);
 -      status = be_cmd_mac_addr_query(adapter, mac,
 +
 +      if (be_physfn(adapter)) {
 +              status = be_cmd_mac_addr_query(adapter, mac,
                        MAC_ADDRESS_TYPE_NETWORK, true /*permanent */, 0);
 -      if (status)
 -              return status;
  
 -      if (!is_valid_ether_addr(mac))
 -              return -EADDRNOTAVAIL;
 +              if (status)
 +                      return status;
 +
 +              if (!is_valid_ether_addr(mac))
 +                      return -EADDRNOTAVAIL;
  
 -      memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 -      memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
 +              memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
 +              memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
 +      }
  
        if (adapter->cap & 0x400)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
@@@ -2437,7 -2296,6 +2436,7 @@@ static int __devinit be_probe(struct pc
        struct be_adapter *adapter;
        struct net_device *netdev;
  
 +
        status = pci_enable_device(pdev);
        if (status)
                goto do_none;
                }
        }
  
 +      be_sriov_enable(adapter);
 +
        status = be_ctrl_init(adapter);
        if (status)
                goto free_netdev;
  
        /* sync up with fw's ready state */
 -      status = be_cmd_POST(adapter);
 -      if (status)
 -              goto ctrl_clean;
 +      if (be_physfn(adapter)) {
 +              status = be_cmd_POST(adapter);
 +              if (status)
 +                      goto ctrl_clean;
 +
 +              status = be_cmd_reset_function(adapter);
 +              if (status)
 +                      goto ctrl_clean;
 +      }
  
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;
  
 -      status = be_cmd_reset_function(adapter);
 -      if (status)
 -              goto ctrl_clean;
 -
        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;
@@@ -2537,7 -2391,6 +2536,7 @@@ ctrl_clean
        be_ctrl_cleanup(adapter);
  free_netdev:
        be_msix_disable(adapter);
 +      be_sriov_disable(adapter);
        free_netdev(adapter->netdev);
        pci_set_drvdata(pdev, NULL);
  rel_reg:
@@@ -2734,13 -2587,6 +2733,13 @@@ static int __init be_init_module(void
                rx_frag_size = 2048;
        }
  
 +      if (num_vfs > 32) {
 +              printk(KERN_WARNING DRV_NAME
 +                      " : Module param num_vfs must not be greater than 32."
 +                      "Using 32\n");
 +              num_vfs = 32;
 +      }
 +
        return pci_register_driver(&be_driver);
  }
  module_init(be_init_module);
diff --combined drivers/net/bnx2.c
index 0b69ffb7951d02f30ea6f8e45007cda7b06422c4,a257babd1bb4085c184604c4d0b8dcdbbc59f203..802b538502ebeaaf361d9025cb10a5611aa4c4a1
@@@ -246,6 -246,8 +246,8 @@@ static const struct flash_spec flash_57
  
  MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
  
+ static void bnx2_init_napi(struct bnx2 *bp);
  static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
  {
        u32 diff;
@@@ -3544,6 -3546,7 +3546,6 @@@ bnx2_set_rx_mode(struct net_device *dev
        }
        else {
                /* Accept one or more multicast(s). */
 -              struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
  
                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
  
 -              netdev_for_each_mc_addr(mclist, dev) {
 -                      crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
 +              netdev_for_each_mc_addr(ha, dev) {
 +                      crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
@@@ -6196,6 -6199,7 +6198,7 @@@ bnx2_open(struct net_device *dev
        bnx2_disable_int(bp);
  
        bnx2_setup_int_mode(bp, disable_msi);
+       bnx2_init_napi(bp);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
@@@ -7642,9 -7646,11 +7645,11 @@@ poll_bnx2(struct net_device *dev
        int i;
  
        for (i = 0; i < bp->irq_nvecs; i++) {
-               disable_irq(bp->irq_tbl[i].vector);
-               bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
-               enable_irq(bp->irq_tbl[i].vector);
+               struct bnx2_irq *irq = &bp->irq_tbl[i];
+               disable_irq(irq->vector);
+               irq->handler(irq->vector, &bp->bnx2_napi[i]);
+               enable_irq(irq->vector);
        }
  }
  #endif
@@@ -8206,7 -8212,7 +8211,7 @@@ bnx2_init_napi(struct bnx2 *bp
  {
        int i;
  
-       for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+       for (i = 0; i < bp->irq_nvecs; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                int (*poll)(struct napi_struct *, int);
  
@@@ -8275,7 -8281,6 +8280,6 @@@ bnx2_init_one(struct pci_dev *pdev, con
        dev->ethtool_ops = &bnx2_ethtool_ops;
  
        bp = netdev_priv(dev);
-       bnx2_init_napi(bp);
  
        pci_set_drvdata(pdev, dev);
  
index 22682f1c8473b4a69c1e2f5ae45f4d665c411d55,0075514bf32fc1d78e15bab0a717c91f945227d6..85e813c7762b0a07141ec397dbd51a0597373647
@@@ -761,6 -761,32 +761,6 @@@ static int bond_check_dev_link(struct b
  
  /*----------------------------- Multicast list ------------------------------*/
  
 -/*
 - * Returns 0 if dmi1 and dmi2 are the same, non-0 otherwise
 - */
 -static inline int bond_is_dmi_same(const struct dev_mc_list *dmi1,
 -                                 const struct dev_mc_list *dmi2)
 -{
 -      return memcmp(dmi1->dmi_addr, dmi2->dmi_addr, dmi1->dmi_addrlen) == 0 &&
 -                      dmi1->dmi_addrlen == dmi2->dmi_addrlen;
 -}
 -
 -/*
 - * returns dmi entry if found, NULL otherwise
 - */
 -static struct dev_mc_list *bond_mc_list_find_dmi(struct dev_mc_list *dmi,
 -                                               struct dev_mc_list *mc_list)
 -{
 -      struct dev_mc_list *idmi;
 -
 -      for (idmi = mc_list; idmi; idmi = idmi->next) {
 -              if (bond_is_dmi_same(dmi, idmi))
 -                      return idmi;
 -      }
 -
 -      return NULL;
 -}
 -
  /*
   * Push the promiscuity flag down to appropriate slaves
   */
@@@ -813,18 -839,18 +813,18 @@@ static int bond_set_allmulti(struct bon
   * Add a Multicast address to slaves
   * according to mode
   */
 -static void bond_mc_add(struct bonding *bond, void *addr, int alen)
 +static void bond_mc_add(struct bonding *bond, void *addr)
  {
        if (USES_PRIMARY(bond->params.mode)) {
                /* write lock already acquired */
                if (bond->curr_active_slave)
 -                      dev_mc_add(bond->curr_active_slave->dev, addr, alen, 0);
 +                      dev_mc_add(bond->curr_active_slave->dev, addr);
        } else {
                struct slave *slave;
                int i;
  
                bond_for_each_slave(bond, slave, i)
 -                      dev_mc_add(slave->dev, addr, alen, 0);
 +                      dev_mc_add(slave->dev, addr);
        }
  }
  
   * Remove a multicast address from slave
   * according to mode
   */
 -static void bond_mc_delete(struct bonding *bond, void *addr, int alen)
 +static void bond_mc_del(struct bonding *bond, void *addr)
  {
        if (USES_PRIMARY(bond->params.mode)) {
                /* write lock already acquired */
                if (bond->curr_active_slave)
 -                      dev_mc_delete(bond->curr_active_slave->dev, addr,
 -                                    alen, 0);
 +                      dev_mc_del(bond->curr_active_slave->dev, addr);
        } else {
                struct slave *slave;
                int i;
                bond_for_each_slave(bond, slave, i) {
 -                      dev_mc_delete(slave->dev, addr, alen, 0);
 +                      dev_mc_del(slave->dev, addr);
                }
        }
  }
@@@ -868,6 -895,50 +868,6 @@@ static void bond_resend_igmp_join_reque
        rcu_read_unlock();
  }
  
 -/*
 - * Totally destroys the mc_list in bond
 - */
 -static void bond_mc_list_destroy(struct bonding *bond)
 -{
 -      struct dev_mc_list *dmi;
 -
 -      dmi = bond->mc_list;
 -      while (dmi) {
 -              bond->mc_list = dmi->next;
 -              kfree(dmi);
 -              dmi = bond->mc_list;
 -      }
 -
 -      bond->mc_list = NULL;
 -}
 -
 -/*
 - * Copy all the Multicast addresses from src to the bonding device dst
 - */
 -static int bond_mc_list_copy(struct dev_mc_list *mc_list, struct bonding *bond,
 -                           gfp_t gfp_flag)
 -{
 -      struct dev_mc_list *dmi, *new_dmi;
 -
 -      for (dmi = mc_list; dmi; dmi = dmi->next) {
 -              new_dmi = kmalloc(sizeof(struct dev_mc_list), gfp_flag);
 -
 -              if (!new_dmi) {
 -                      /* FIXME: Potential memory leak !!! */
 -                      return -ENOMEM;
 -              }
 -
 -              new_dmi->next = bond->mc_list;
 -              bond->mc_list = new_dmi;
 -              new_dmi->dmi_addrlen = dmi->dmi_addrlen;
 -              memcpy(new_dmi->dmi_addr, dmi->dmi_addr, dmi->dmi_addrlen);
 -              new_dmi->dmi_users = dmi->dmi_users;
 -              new_dmi->dmi_gusers = dmi->dmi_gusers;
 -      }
 -
 -      return 0;
 -}
 -
  /*
   * flush all members of flush->mc_list from device dev->mc_list
   */
@@@ -875,16 -946,16 +875,16 @@@ static void bond_mc_list_flush(struct n
                               struct net_device *slave_dev)
  {
        struct bonding *bond = netdev_priv(bond_dev);
 -      struct dev_mc_list *dmi;
 +      struct netdev_hw_addr *ha;
  
 -      for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
 -              dev_mc_delete(slave_dev, dmi->dmi_addr, dmi->dmi_addrlen, 0);
 +      netdev_for_each_mc_addr(ha, bond_dev)
 +              dev_mc_del(slave_dev, ha->addr);
  
        if (bond->params.mode == BOND_MODE_8023AD) {
                /* del lacpdu mc addr from mc list */
                u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
  
 -              dev_mc_delete(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
 +              dev_mc_del(slave_dev, lacpdu_multicast);
        }
  }
  
  static void bond_mc_swap(struct bonding *bond, struct slave *new_active,
                         struct slave *old_active)
  {
 -      struct dev_mc_list *dmi;
 +      struct netdev_hw_addr *ha;
  
        if (!USES_PRIMARY(bond->params.mode))
                /* nothing to do -  mc list is already up-to-date on
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(old_active->dev, -1);
  
 -              for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
 -                      dev_mc_delete(old_active->dev, dmi->dmi_addr,
 -                                    dmi->dmi_addrlen, 0);
 +              netdev_for_each_mc_addr(ha, bond->dev)
 +                      dev_mc_del(old_active->dev, ha->addr);
        }
  
        if (new_active) {
                if (bond->dev->flags & IFF_ALLMULTI)
                        dev_set_allmulti(new_active->dev, 1);
  
 -              for (dmi = bond->dev->mc_list; dmi; dmi = dmi->next)
 -                      dev_mc_add(new_active->dev, dmi->dmi_addr,
 -                                 dmi->dmi_addrlen, 0);
 +              netdev_for_each_mc_addr(ha, bond->dev)
 +                      dev_mc_add(new_active->dev, ha->addr);
                bond_resend_igmp_join_requests(bond);
        }
  }
@@@ -1162,6 -1235,11 +1162,11 @@@ void bond_change_active_slave(struct bo
                        write_lock_bh(&bond->curr_slave_lock);
                }
        }
+       /* resend IGMP joins since all were sent on curr_active_slave */
+       if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
+               bond_resend_igmp_join_requests(bond);
+       }
  }
  
  /**
@@@ -1333,7 -1411,7 +1338,7 @@@ int bond_enslave(struct net_device *bon
        struct bonding *bond = netdev_priv(bond_dev);
        const struct net_device_ops *slave_ops = slave_dev->netdev_ops;
        struct slave *new_slave = NULL;
 -      struct dev_mc_list *dmi;
 +      struct netdev_hw_addr *ha;
        struct sockaddr addr;
        int link_reporting;
        int old_features = bond_dev->features;
                                 bond_dev->name,
                                 bond_dev->type, slave_dev->type);
  
 -                      netdev_bonding_change(bond_dev, NETDEV_BONDING_OLDTYPE);
 +                      res = netdev_bonding_change(bond_dev,
 +                                                  NETDEV_PRE_TYPE_CHANGE);
 +                      res = notifier_to_errno(res);
 +                      if (res) {
 +                              pr_err("%s: refused to change device type\n",
 +                                     bond_dev->name);
 +                              res = -EBUSY;
 +                              goto err_undo_flags;
 +                      }
 +
 +                      /* Flush unicast and multicast addresses */
 +                      dev_uc_flush(bond_dev);
 +                      dev_mc_flush(bond_dev);
  
                        if (slave_dev->type != ARPHRD_ETHER)
                                bond_setup_by_slave(bond_dev, slave_dev);
                        else
                                ether_setup(bond_dev);
  
 -                      netdev_bonding_change(bond_dev, NETDEV_BONDING_NEWTYPE);
 +                      netdev_bonding_change(bond_dev,
 +                                            NETDEV_POST_TYPE_CHANGE);
                }
        } else if (bond_dev->type != slave_dev->type) {
                pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it.\n",
  
                netif_addr_lock_bh(bond_dev);
                /* upload master's mc_list to new slave */
 -              for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next)
 -                      dev_mc_add(slave_dev, dmi->dmi_addr,
 -                                 dmi->dmi_addrlen, 0);
 +              netdev_for_each_mc_addr(ha, bond_dev)
 +                      dev_mc_add(slave_dev, ha->addr);
                netif_addr_unlock_bh(bond_dev);
        }
  
                /* add lacpdu mc addr to mc list */
                u8 lacpdu_multicast[ETH_ALEN] = MULTICAST_LACPDU_ADDR;
  
 -              dev_mc_add(slave_dev, lacpdu_multicast, ETH_ALEN, 0);
 +              dev_mc_add(slave_dev, lacpdu_multicast);
        }
  
        bond_add_vlans_on_slave(bond, slave_dev);
@@@ -3839,24 -3905,10 +3844,24 @@@ static int bond_do_ioctl(struct net_dev
        return res;
  }
  
 +static bool bond_addr_in_mc_list(unsigned char *addr,
 +                               struct netdev_hw_addr_list *list,
 +                               int addrlen)
 +{
 +      struct netdev_hw_addr *ha;
 +
 +      netdev_hw_addr_list_for_each(ha, list)
 +              if (!memcmp(ha->addr, addr, addrlen))
 +                      return true;
 +
 +      return false;
 +}
 +
  static void bond_set_multicast_list(struct net_device *bond_dev)
  {
        struct bonding *bond = netdev_priv(bond_dev);
 -      struct dev_mc_list *dmi;
 +      struct netdev_hw_addr *ha;
 +      bool found;
  
        /*
         * Do promisc before checking multicast_mode
        bond->flags = bond_dev->flags;
  
        /* looking for addresses to add to slaves' mc list */
 -      for (dmi = bond_dev->mc_list; dmi; dmi = dmi->next) {
 -              if (!bond_mc_list_find_dmi(dmi, bond->mc_list))
 -                      bond_mc_add(bond, dmi->dmi_addr, dmi->dmi_addrlen);
 +      netdev_for_each_mc_addr(ha, bond_dev) {
 +              found = bond_addr_in_mc_list(ha->addr, &bond->mc_list,
 +                                           bond_dev->addr_len);
 +              if (!found)
 +                      bond_mc_add(bond, ha->addr);
        }
  
        /* looking for addresses to delete from slaves' list */
 -      for (dmi = bond->mc_list; dmi; dmi = dmi->next) {
 -              if (!bond_mc_list_find_dmi(dmi, bond_dev->mc_list))
 -                      bond_mc_delete(bond, dmi->dmi_addr, dmi->dmi_addrlen);
 +      netdev_hw_addr_list_for_each(ha, &bond->mc_list) {
 +              found = bond_addr_in_mc_list(ha->addr, &bond_dev->mc,
 +                                           bond_dev->addr_len);
 +              if (!found)
 +                      bond_mc_del(bond, ha->addr);
        }
  
        /* save master's multicast list */
 -      bond_mc_list_destroy(bond);
 -      bond_mc_list_copy(bond_dev->mc_list, bond, GFP_ATOMIC);
 +      __hw_addr_flush(&bond->mc_list);
 +      __hw_addr_add_multiple(&bond->mc_list, &bond_dev->mc,
 +                             bond_dev->addr_len, NETDEV_HW_ADDR_T_MULTICAST);
  
        read_unlock(&bond->lock);
  }
@@@ -4096,22 -4143,41 +4101,41 @@@ static int bond_xmit_roundrobin(struct 
        struct bonding *bond = netdev_priv(bond_dev);
        struct slave *slave, *start_at;
        int i, slave_no, res = 1;
+       struct iphdr *iph = ip_hdr(skb);
  
        read_lock(&bond->lock);
  
        if (!BOND_IS_OK(bond))
                goto out;
        /*
-        * Concurrent TX may collide on rr_tx_counter; we accept that
-        * as being rare enough not to justify using an atomic op here
+        * Start with the curr_active_slave that joined the bond as the
+        * default for sending IGMP traffic.  For failover purposes one
+        * needs to maintain some consistency for the interface that will
+        * send the join/membership reports.  The curr_active_slave found
+        * will send all of this type of traffic.
         */
-       slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+       if ((iph->protocol == IPPROTO_IGMP) &&
+           (skb->protocol == htons(ETH_P_IP))) {
  
-       bond_for_each_slave(bond, slave, i) {
-               slave_no--;
-               if (slave_no < 0)
-                       break;
+               read_lock(&bond->curr_slave_lock);
+               slave = bond->curr_active_slave;
+               read_unlock(&bond->curr_slave_lock);
+               if (!slave)
+                       goto out;
+       } else {
+               /*
+                * Concurrent TX may collide on rr_tx_counter; we accept
+                * that as being rare enough not to justify using an
+                * atomic op here.
+                */
+               slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+               bond_for_each_slave(bond, slave, i) {
+                       slave_no--;
+                       if (slave_no < 0)
+                               break;
+               }
        }
  
        start_at = slave;
@@@ -4384,6 -4450,14 +4408,14 @@@ static const struct net_device_ops bond
        .ndo_vlan_rx_kill_vid   = bond_vlan_rx_kill_vid,
  };
  
+ static void bond_destructor(struct net_device *bond_dev)
+ {
+       struct bonding *bond = netdev_priv(bond_dev);
+       if (bond->wq)
+               destroy_workqueue(bond->wq);
+       free_netdev(bond_dev);
+ }
  static void bond_setup(struct net_device *bond_dev)
  {
        struct bonding *bond = netdev_priv(bond_dev);
        bond_dev->ethtool_ops = &bond_ethtool_ops;
        bond_set_mode_ops(bond, bond->params.mode);
  
-       bond_dev->destructor = free_netdev;
+       bond_dev->destructor = bond_destructor;
  
        /* Initialize the device options */
        bond_dev->tx_queue_len = 0;
@@@ -4476,10 -4550,9 +4508,7 @@@ static void bond_uninit(struct net_devi
  
        bond_remove_proc_entry(bond);
  
-       if (bond->wq)
-               destroy_workqueue(bond->wq);
 -      netif_addr_lock_bh(bond_dev);
 -      bond_mc_list_destroy(bond);
 -      netif_addr_unlock_bh(bond_dev);
 +      __hw_addr_flush(&bond->mc_list);
  }
  
  /*------------------------- Module initialization ---------------------------*/
@@@ -4610,13 -4683,13 +4639,13 @@@ static int bond_check_params(struct bon
        }
  
        if (num_grat_arp < 0 || num_grat_arp > 255) {
 -              pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1 \n",
 +              pr_warning("Warning: num_grat_arp (%d) not in range 0-255 so it was reset to 1\n",
                           num_grat_arp);
                num_grat_arp = 1;
        }
  
        if (num_unsol_na < 0 || num_unsol_na > 255) {
 -              pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1 \n",
 +              pr_warning("Warning: num_unsol_na (%d) not in range 0-255 so it was reset to 1\n",
                           num_unsol_na);
                num_unsol_na = 1;
        }
@@@ -4851,8 -4924,6 +4880,8 @@@ static int bond_init(struct net_device 
        list_add_tail(&bond->bond_list, &bn->dev_list);
  
        bond_prepare_sysfs_group(bond);
 +
 +      __hw_addr_init(&bond->mc_list);
        return 0;
  }
  
@@@ -4890,8 -4961,8 +4919,8 @@@ int bond_create(struct net *net, const 
                                bond_setup);
        if (!bond_dev) {
                pr_err("%s: eek! can't alloc netdev!\n", name);
-               res = -ENOMEM;
-               goto out;
+               rtnl_unlock();
+               return -ENOMEM;
        }
  
        dev_net_set(bond_dev, net);
        if (!name) {
                res = dev_alloc_name(bond_dev, "bond%d");
                if (res < 0)
-                       goto out_netdev;
+                       goto out;
        }
  
        res = register_netdevice(bond_dev);
-       if (res < 0)
-               goto out_netdev;
  
  out:
        rtnl_unlock();
+       if (res < 0)
+               bond_destructor(bond_dev);
        return res;
- out_netdev:
-       free_netdev(bond_dev);
-       goto out;
  }
  
  static int __net_init bond_net_init(struct net *net)
index 0000000000000000000000000000000000000000,a7e30a23d322412c12b9e214f6d8f25fcfe29d64..5f582dba928fb808e8c888a1eaef59aa0facfc67
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,3388 +1,3388 @@@
 -      const struct dev_addr_list *d;
+ /*
+  * This file is part of the Chelsio T4 Ethernet driver for Linux.
+  *
+  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+  *
+  * This software is available to you under a choice of one of two
+  * licenses.  You may choose to be licensed under the terms of the GNU
+  * General Public License (GPL) Version 2, available from the file
+  * COPYING in the main directory of this source tree, or the
+  * OpenIB.org BSD license below:
+  *
+  *     Redistribution and use in source and binary forms, with or
+  *     without modification, are permitted provided that the following
+  *     conditions are met:
+  *
+  *      - Redistributions of source code must retain the above
+  *        copyright notice, this list of conditions and the following
+  *        disclaimer.
+  *
+  *      - Redistributions in binary form must reproduce the above
+  *        copyright notice, this list of conditions and the following
+  *        disclaimer in the documentation and/or other materials
+  *        provided with the distribution.
+  *
+  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+  * SOFTWARE.
+  */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+ #include <linux/bitmap.h>
+ #include <linux/crc32.h>
+ #include <linux/ctype.h>
+ #include <linux/debugfs.h>
+ #include <linux/err.h>
+ #include <linux/etherdevice.h>
+ #include <linux/firmware.h>
+ #include <linux/if_vlan.h>
+ #include <linux/init.h>
+ #include <linux/log2.h>
+ #include <linux/mdio.h>
+ #include <linux/module.h>
+ #include <linux/moduleparam.h>
+ #include <linux/mutex.h>
+ #include <linux/netdevice.h>
+ #include <linux/pci.h>
+ #include <linux/aer.h>
+ #include <linux/rtnetlink.h>
+ #include <linux/sched.h>
+ #include <linux/seq_file.h>
+ #include <linux/sockios.h>
+ #include <linux/vmalloc.h>
+ #include <linux/workqueue.h>
+ #include <net/neighbour.h>
+ #include <net/netevent.h>
+ #include <asm/uaccess.h>
+ #include "cxgb4.h"
+ #include "t4_regs.h"
+ #include "t4_msg.h"
+ #include "t4fw_api.h"
+ #include "l2t.h"
+ #define DRV_VERSION "1.0.0-ko"
+ #define DRV_DESC "Chelsio T4 Network Driver"
+ /*
+  * Max interrupt hold-off timer value in us.  Queues fall back to this value
+  * under extreme memory pressure so it's largish to give the system time to
+  * recover.
+  */
+ #define MAX_SGE_TIMERVAL 200U
+ enum {
+       MEMWIN0_APERTURE = 65536,
+       MEMWIN0_BASE     = 0x30000,
+       MEMWIN1_APERTURE = 32768,
+       MEMWIN1_BASE     = 0x28000,
+       MEMWIN2_APERTURE = 2048,
+       MEMWIN2_BASE     = 0x1b800,
+ };
+ enum {
+       MAX_TXQ_ENTRIES      = 16384,
+       MAX_CTRL_TXQ_ENTRIES = 1024,
+       MAX_RSPQ_ENTRIES     = 16384,
+       MAX_RX_BUFFERS       = 16384,
+       MIN_TXQ_ENTRIES      = 32,
+       MIN_CTRL_TXQ_ENTRIES = 32,
+       MIN_RSPQ_ENTRIES     = 128,
+       MIN_FL_ENTRIES       = 16
+ };
+ #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+                        NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+                        NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+ #define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
+ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
+       CH_DEVICE(0xa000),  /* PE10K */
+       { 0, }
+ };
+ #define FW_FNAME "cxgb4/t4fw.bin"
+ MODULE_DESCRIPTION(DRV_DESC);
+ MODULE_AUTHOR("Chelsio Communications");
+ MODULE_LICENSE("Dual BSD/GPL");
+ MODULE_VERSION(DRV_VERSION);
+ MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
+ MODULE_FIRMWARE(FW_FNAME);
+ static int dflt_msg_enable = DFLT_MSG_ENABLE;
+ module_param(dflt_msg_enable, int, 0644);
+ MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+ /*
+  * The driver uses the best interrupt scheme available on a platform in the
+  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
+  * of these schemes the driver may consider as follows:
+  *
+  * msi = 2: choose from among all three options
+  * msi = 1: only consider MSI and INTx interrupts
+  * msi = 0: force INTx interrupts
+  */
+ static int msi = 2;
+ module_param(msi, int, 0644);
+ MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
+ /*
+  * Queue interrupt hold-off timer values.  Queues default to the first of these
+  * upon creation.
+  */
+ static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
+ module_param_array(intr_holdoff, uint, NULL, 0644);
+ MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
+                "0..4 in microseconds");
+ static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
+ module_param_array(intr_cnt, uint, NULL, 0644);
+ MODULE_PARM_DESC(intr_cnt,
+                "thresholds 1..3 for queue interrupt packet counters");
+ static int vf_acls;
+ #ifdef CONFIG_PCI_IOV
+ module_param(vf_acls, bool, 0644);
+ MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+ static unsigned int num_vf[4];
+ module_param_array(num_vf, uint, NULL, 0644);
+ MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+ #endif
+ static struct dentry *cxgb4_debugfs_root;
+ static LIST_HEAD(adapter_list);
+ static DEFINE_MUTEX(uld_mutex);
+ static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
+ static const char *uld_str[] = { "RDMA", "iSCSI" };
+ static void link_report(struct net_device *dev)
+ {
+       if (!netif_carrier_ok(dev))
+               netdev_info(dev, "link down\n");
+       else {
+               static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
+               const char *s = "10Mbps";
+               const struct port_info *p = netdev_priv(dev);
+               switch (p->link_cfg.speed) {
+               case SPEED_10000:
+                       s = "10Gbps";
+                       break;
+               case SPEED_1000:
+                       s = "1000Mbps";
+                       break;
+               case SPEED_100:
+                       s = "100Mbps";
+                       break;
+               }
+               netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
+                           fc[p->link_cfg.fc]);
+       }
+ }
+ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
+ {
+       struct net_device *dev = adapter->port[port_id];
+       /* Skip changes from disabled ports. */
+       if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
+               if (link_stat)
+                       netif_carrier_on(dev);
+               else
+                       netif_carrier_off(dev);
+               link_report(dev);
+       }
+ }
+ /*
+  * OS callback invoked when a port's transceiver module is inserted or
+  * removed; logs the module type (indexed by FW_PORT_MOD_TYPE_*).
+  */
+ void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+ {
+       static const char *mod_str[] = {
+               NULL, "LR", "SR", "ER", "passive DA", "active DA"
+       };
+       const struct net_device *dev = adap->port[port_id];
+       const struct port_info *pi = netdev_priv(dev);
+       if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+               netdev_info(dev, "port module unplugged\n");
+       else
+               netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+ }
+ /*
+  * Configure the exact and hash address filters to handle a port's multicast
+  * and secondary unicast MAC addresses.
+  *
+  * Addresses are fed to t4_alloc_mac_filt() in batches of up to
+  * ARRAY_SIZE(addr) (7); overflow beyond the exact filters is accumulated
+  * into the uhash/mhash hash-filter masks and programmed at the end via
+  * t4_set_addr_hash().  The first batch passes free == true (presumably to
+  * release previously programmed filters -- confirm against
+  * t4_alloc_mac_filt); subsequent batches append.
+  */
+ static int set_addr_filters(const struct net_device *dev, bool sleep)
+ {
+       u64 mhash = 0;
+       u64 uhash = 0;
+       bool free = true;
+       u16 filt_idx[7];
+       const u8 *addr[7];
+       int ret, naddr = 0;
+       /* NOTE(review): the " -" lines below are combined-diff residue
+        * showing the old dmi_addr-based multicast loop that this merge
+        * replaced with the netdev_hw_addr API. */
 -      netdev_for_each_mc_addr(d, dev) {
 -              addr[naddr++] = d->dmi_addr;
 -              if (naddr >= ARRAY_SIZE(addr) || d->next == NULL) {
+       const struct netdev_hw_addr *ha;
+       int uc_cnt = netdev_uc_count(dev);
++      int mc_cnt = netdev_mc_count(dev);
+       const struct port_info *pi = netdev_priv(dev);
+       /* first do the secondary unicast addresses */
+       netdev_for_each_uc_addr(ha, dev) {
+               addr[naddr++] = ha->addr;
+               /* flush a batch when it is full or this was the last entry */
+               if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+                       ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+                                       naddr, addr, filt_idx, &uhash, sleep);
+                       if (ret < 0)
+                               return ret;
+                       free = false;
+                       naddr = 0;
+               }
+       }
+       /* next set up the multicast addresses */
++      netdev_for_each_mc_addr(ha, dev) {
++              addr[naddr++] = ha->addr;
++              if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+                       ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+                                       naddr, addr, filt_idx, &mhash, sleep);
+                       if (ret < 0)
+                               return ret;
+                       free = false;
+                       naddr = 0;
+               }
+       }
+       /* finally program the combined unicast/multicast hash filter */
+       return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
+                               uhash | mhash, sleep);
+ }
+ /*
+  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
+  * If @mtu is -1 it is left unchanged.
+  */
+ static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+ {
+       int ret;
+       struct port_info *pi = netdev_priv(dev);
+       /* program address filters first, then the mode/MTU in one FW call */
+       ret = set_addr_filters(dev, sleep_ok);
+       if (ret == 0)
+               ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
+                                   (dev->flags & IFF_PROMISC) ? 1 : 0,
+                                   (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+                                   sleep_ok);
+       return ret;
+ }
+ /**
+  *    link_start - enable a port
+  *    @dev: the port to enable
+  *
+  *    Performs the MAC and PHY actions needed to enable a port: sets the
+  *    MTU, programs the port's MAC address, starts link negotiation and
+  *    enables the virtual interface.  Returns 0 on success or a negative
+  *    error from the first firmware call that fails.
+  */
+ static int link_start(struct net_device *dev)
+ {
+       int ret;
+       struct port_info *pi = netdev_priv(dev);
+       /*
+        * We do not set address filters and promiscuity here, the stack does
+        * that step explicitly.
+        */
+       ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
+                           true);
+       if (ret == 0) {
+               /* t4_change_mac returns the new exact-match filter index
+                * (>= 0) on success; remember it for later MAC changes */
+               ret = t4_change_mac(pi->adapter, 0, pi->viid,
+                                   pi->xact_addr_filt, dev->dev_addr, true,
+                                   false);
+               if (ret >= 0) {
+                       pi->xact_addr_filt = ret;
+                       ret = 0;
+               }
+       }
+       if (ret == 0)
+               ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
+       if (ret == 0)
+               ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
+       return ret;
+ }
+ /*
+  * Response queue handler for the FW event queue.  Dispatches on the CPL
+  * opcode: egress-queue updates restart the matching Tx queue, FW messages
+  * are forwarded to t4_handle_fw_rpl(), and L2T write replies to
+  * do_l2t_write_rpl(); anything else is logged as unexpected.
+  */
+ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+                         const struct pkt_gl *gl)
+ {
+       u8 opcode = ((const struct rss_header *)rsp)->opcode;
+       rsp++;                                          /* skip RSS header */
+       if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+               const struct cpl_sge_egr_update *p = (void *)rsp;
+               unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+               struct sge_txq *txq = q->adap->sge.egr_map[qid];
+               txq->restarts++;
+               /* Ethernet Tx queues live below sge.ethrxq in the adapter
+                * layout; anything at or above it is an offload Tx queue */
+               if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
+                       struct sge_eth_txq *eq;
+                       eq = container_of(txq, struct sge_eth_txq, q);
+                       netif_tx_wake_queue(eq->txq);
+               } else {
+                       struct sge_ofld_txq *oq;
+                       oq = container_of(txq, struct sge_ofld_txq, q);
+                       tasklet_schedule(&oq->qresume_tsk);
+               }
+       } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+               const struct cpl_fw6_msg *p = (void *)rsp;
+               /* only type-0 firmware messages are handled here */
+               if (p->type == 0)
+                       t4_handle_fw_rpl(q->adap, p->data);
+       } else if (opcode == CPL_L2T_WRITE_RPL) {
+               const struct cpl_l2t_write_rpl *p = (void *)rsp;
+               do_l2t_write_rpl(q->adap, p);
+       } else
+               dev_err(q->adap->pdev_dev,
+                       "unexpected CPL %#x on FW event queue\n", opcode);
+       return 0;
+ }
+ /**
+  *    uldrx_handler - response queue handler for ULD queues
+  *    @q: the response queue that received the packet
+  *    @rsp: the response queue descriptor holding the offload message
+  *    @gl: the gather list of packet fragments
+  *
+  *    Deliver an ingress offload packet to a ULD.  All processing is done by
+  *    the ULD, we just maintain statistics.  Returns -1 (and counts a nomem
+  *    event) if the ULD's rx_handler rejects the packet.
+  */
+ static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+                        const struct pkt_gl *gl)
+ {
+       struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+       if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+               rxq->stats.nomem++;
+               return -1;
+       }
+       /* classify for statistics: immediate data, async notification,
+        * or a regular packet with a gather list */
+       if (gl == NULL)
+               rxq->stats.imm++;
+       else if (gl == CXGB4_MSG_AN)
+               rxq->stats.an++;
+       else
+               rxq->stats.pkts++;
+       return 0;
+ }
+ /*
+  * Disable whichever message-signalled interrupt mode (MSI-X or MSI) the
+  * adapter is currently using and clear the corresponding flag.
+  */
+ static void disable_msi(struct adapter *adapter)
+ {
+       if (adapter->flags & USING_MSIX) {
+               pci_disable_msix(adapter->pdev);
+               adapter->flags &= ~USING_MSIX;
+       } else if (adapter->flags & USING_MSI) {
+               pci_disable_msi(adapter->pdev);
+               adapter->flags &= ~USING_MSI;
+       }
+ }
+ /*
+  * Interrupt handler for non-data events used with MSI-X.
+  */
+ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
+ {
+       struct adapter *adap = cookie;
+       u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
+       /* latch and acknowledge a PF software interrupt if one is pending */
+       if (v & PFSW) {
+               adap->swintr = 1;
+               t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+       }
+       t4_slow_intr_handler(adap);
+       return IRQ_HANDLED;
+ }
+ /*
+  * Name the MSI-X interrupts.  Vector 0 is the non-data interrupt, vector 1
+  * the FW event queue; Ethernet, offload and RDMA Rx queues follow in that
+  * order starting at vector 2.
+  */
+ static void name_msix_vecs(struct adapter *adap)
+ {
+       /* n is the description size minus one, reserving the last byte for
+        * the explicit NUL stores below (snprintf with size n can use at
+        * most the first n bytes) */
+       int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
+       /* non-data interrupts */
+       snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
+       adap->msix_info[0].desc[n] = 0;
+       /* FW events */
+       snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
+       adap->msix_info[1].desc[n] = 0;
+       /* Ethernet queues */
+       for_each_port(adap, j) {
+               struct net_device *d = adap->port[j];
+               const struct port_info *pi = netdev_priv(d);
+               for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+                       snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
+                                d->name, i);
+                       adap->msix_info[msi_idx].desc[n] = 0;
+               }
+       }
+       /* offload queues */
+       for_each_ofldrxq(&adap->sge, i) {
+               snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
+                        adap->name, i);
+               adap->msix_info[msi_idx++].desc[n] = 0;
+       }
+       for_each_rdmarxq(&adap->sge, i) {
+               snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
+                        adap->name, i);
+               adap->msix_info[msi_idx++].desc[n] = 0;
+       }
+ }
+ /*
+  * Request the per-queue MSI-X interrupts in the same order they were named
+  * by name_msix_vecs(): FW event queue (vector 1), then Ethernet, offload
+  * and RDMA Rx queues starting at vector 2.  On failure, frees every IRQ
+  * requested so far in reverse order and returns the error.
+  */
+ static int request_msix_queue_irqs(struct adapter *adap)
+ {
+       struct sge *s = &adap->sge;
+       int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
+       err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
+                         adap->msix_info[1].desc, &s->fw_evtq);
+       if (err)
+               return err;
+       for_each_ethrxq(s, ethqidx) {
+               err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+                                 adap->msix_info[msi].desc,
+                                 &s->ethrxq[ethqidx].rspq);
+               if (err)
+                       goto unwind;
+               msi++;
+       }
+       for_each_ofldrxq(s, ofldqidx) {
+               err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+                                 adap->msix_info[msi].desc,
+                                 &s->ofldrxq[ofldqidx].rspq);
+               if (err)
+                       goto unwind;
+               msi++;
+       }
+       for_each_rdmarxq(s, rdmaqidx) {
+               err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+                                 adap->msix_info[msi].desc,
+                                 &s->rdmarxq[rdmaqidx].rspq);
+               if (err)
+                       goto unwind;
+               msi++;
+       }
+       return 0;
+ unwind:
+       /* the failing index's IRQ was never requested, so pre-decrement
+        * skips it; msi is decremented in lock-step with each queue index */
+       while (--rdmaqidx >= 0)
+               free_irq(adap->msix_info[--msi].vec,
+                        &s->rdmarxq[rdmaqidx].rspq);
+       while (--ofldqidx >= 0)
+               free_irq(adap->msix_info[--msi].vec,
+                        &s->ofldrxq[ofldqidx].rspq);
+       while (--ethqidx >= 0)
+               free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
+       free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+       return err;
+ }
+ /*
+  * Release every queue IRQ requested by request_msix_queue_irqs(), walking
+  * the same queue order so vector and queue pairings match.
+  */
+ static void free_msix_queue_irqs(struct adapter *adap)
+ {
+       int i, msi = 2;
+       struct sge *s = &adap->sge;
+       free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+       for_each_ethrxq(s, i)
+               free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
+       for_each_ofldrxq(s, i)
+               free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
+       for_each_rdmarxq(s, i)
+               free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
+ }
+ /**
+  *    setup_rss - configure RSS
+  *    @adap: the adapter
+  *
+  *    Sets up RSS to distribute packets to multiple receive queues.  We
+  *    configure the RSS CPU lookup table to distribute to the number of HW
+  *    receive queues, and the response queue lookup table to narrow that
+  *    down to the response queues actually configured for each port.
+  *    We always configure the RSS mapping for all ports since the mapping
+  *    table has plenty of entries.
+  */
+ static int setup_rss(struct adapter *adap)
+ {
+       int i, j, err;
+       u16 rss[MAX_ETH_QSETS];
+       for_each_port(adap, i) {
+               const struct port_info *pi = adap2pinfo(adap, i);
+               const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+               /* collect the absolute ids of this port's response queues */
+               for (j = 0; j < pi->nqsets; j++)
+                       rss[j] = q[j].rspq.abs_id;
+               err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
+                                         rss, pi->nqsets);
+               if (err)
+                       return err;
+       }
+       return 0;
+ }
+ /*
+  * Wait until all NAPI handlers are descheduled.  Walks the ingress queue
+  * map and disables NAPI on every queue that has a handler.
+  */
+ static void quiesce_rx(struct adapter *adap)
+ {
+       int i;
+       for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+               struct sge_rspq *q = adap->sge.ingr_map[i];
+               if (q && q->handler)
+                       napi_disable(&q->napi);
+       }
+ }
+ /*
+  * Enable NAPI scheduling and interrupt generation for all Rx queues.
+  */
+ static void enable_rx(struct adapter *adap)
+ {
+       int i;
+       for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+               struct sge_rspq *q = adap->sge.ingr_map[i];
+               if (!q)
+                       continue;
+               /* queues without a handler are not NAPI-driven */
+               if (q->handler)
+                       napi_enable(&q->napi);
+               /* 0-increment GTS to start the timer and enable interrupts */
+               t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
+                            SEINTARM(q->intr_params) |
+                            INGRESSQID(q->cntxt_id));
+       }
+ }
+ /**
+  *    setup_sge_queues - configure SGE Tx/Rx/response queues
+  *    @adap: the adapter
+  *
+  *    Determines how many sets of SGE queues to use and initializes them.
+  *    We support multiple queue sets per port if we have MSI-X, otherwise
+  *    just one queue set per port.  Allocates, in order: the FW event
+  *    queue (plus a forwarded-interrupt queue when not using MSI-X), the
+  *    per-port Ethernet Rx/Tx queues, the offload and RDMA queues, and one
+  *    control Tx queue per port.  Any failure tears down everything via
+  *    t4_free_sge_resources().
+  */
+ static int setup_sge_queues(struct adapter *adap)
+ {
+       int err, msi_idx, i, j;
+       struct sge *s = &adap->sge;
+       bitmap_zero(s->starving_fl, MAX_EGRQ);
+       bitmap_zero(s->txq_maperr, MAX_EGRQ);
+       if (adap->flags & USING_MSIX)
+               msi_idx = 1;         /* vector 0 is for non-queue interrupts */
+       else {
+               /* without MSI-X all queues forward to a single interrupt
+                * queue; encode its abs_id as a negative msi_idx */
+               err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+                                      NULL, NULL);
+               if (err)
+                       return err;
+               msi_idx = -((int)s->intrq.abs_id + 1);
+       }
+       err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+                              msi_idx, NULL, fwevtq_handler);
+       if (err) {
+ freeout:      t4_free_sge_resources(adap);
+               return err;
+       }
+       for_each_port(adap, i) {
+               struct net_device *dev = adap->port[i];
+               struct port_info *pi = netdev_priv(dev);
+               struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
+               struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
+               for (j = 0; j < pi->nqsets; j++, q++) {
+                       /* with MSI-X each queue gets the next vector */
+                       if (msi_idx > 0)
+                               msi_idx++;
+                       err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
+                                              msi_idx, &q->fl,
+                                              t4_ethrx_handler);
+                       if (err)
+                               goto freeout;
+                       q->rspq.idx = j;
+                       memset(&q->stats, 0, sizeof(q->stats));
+               }
+               for (j = 0; j < pi->nqsets; j++, t++) {
+                       err = t4_sge_alloc_eth_txq(adap, t, dev,
+                                       netdev_get_tx_queue(dev, j),
+                                       s->fw_evtq.cntxt_id);
+                       if (err)
+                               goto freeout;
+               }
+       }
+       j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
+       for_each_ofldrxq(s, i) {
+               struct sge_ofld_rxq *q = &s->ofldrxq[i];
+               /* NOTE(review): i / j assumes ofldqsets >= nports whenever
+                * this loop runs (j != 0) -- confirm the sizing invariant */
+               struct net_device *dev = adap->port[i / j];
+               if (msi_idx > 0)
+                       msi_idx++;
+               err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
+                                      &q->fl, uldrx_handler);
+               if (err)
+                       goto freeout;
+               memset(&q->stats, 0, sizeof(q->stats));
+               s->ofld_rxq[i] = q->rspq.abs_id;
+               err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+                                           s->fw_evtq.cntxt_id);
+               if (err)
+                       goto freeout;
+       }
+       for_each_rdmarxq(s, i) {
+               struct sge_ofld_rxq *q = &s->rdmarxq[i];
+               if (msi_idx > 0)
+                       msi_idx++;
+               err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+                                      msi_idx, &q->fl, uldrx_handler);
+               if (err)
+                       goto freeout;
+               memset(&q->stats, 0, sizeof(q->stats));
+               s->rdma_rxq[i] = q->rspq.abs_id;
+       }
+       for_each_port(adap, i) {
+               /*
+                * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+                * have RDMA queues, and that's the right value.
+                */
+               err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
+                                           s->fw_evtq.cntxt_id,
+                                           s->rdmarxq[i].rspq.cntxt_id);
+               if (err)
+                       goto freeout;
+       }
+       t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+                    RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
+                    QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+       return 0;
+ }
+ /*
+  * Returns 0 if new FW was successfully loaded, a positive errno if a load was
+  * started but failed, and a negative errno if flash load couldn't start.
+  */
+ static int upgrade_fw(struct adapter *adap)
+ {
+       int ret;
+       u32 vers;
+       const struct fw_hdr *hdr;
+       const struct firmware *fw;
+       struct device *dev = adap->pdev_dev;
+       ret = request_firmware(&fw, FW_FNAME, dev);
+       if (ret < 0) {
+               dev_err(dev, "unable to load firmware image " FW_FNAME
+                       ", error %d\n", ret);
+               return ret;
+       }
+       hdr = (const struct fw_hdr *)fw->data;
+       vers = ntohl(hdr->fw_ver);
+       if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+               ret = -EINVAL;              /* wrong major version, won't do */
+               goto out;
+       }
+       /*
+        * If the flash FW is unusable or we found something newer, load it.
+        */
+       if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+           vers > adap->params.fw_vers) {
+               ret = -t4_load_fw(adap, fw->data, fw->size);
+               if (!ret)
+                       /* NOTE(review): %pI4 (IPv4-address printk format) is
+                        * used here to render the four big-endian version
+                        * bytes as a dotted quad; consider the
+                        * FW_HDR_FW_VER_*_GET accessors for clarity. */
+                       dev_info(dev, "firmware upgraded to version %pI4 from "
+                                FW_FNAME "\n", &hdr->fw_ver);
+       }
+ out:  release_firmware(fw);
+       return ret;
+ }
+ /*
+  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+  * The allocated memory is cleared.  Returns NULL if both allocators fail;
+  * free with t4_free_mem(), which handles either origin.
+  */
+ void *t4_alloc_mem(size_t size)
+ {
+       void *p = kmalloc(size, GFP_KERNEL);
+       if (!p)
+               p = vmalloc(size);
+       if (p)
+               memset(p, 0, size);
+       return p;
+ }
+ /*
+  * Free memory allocated through t4_alloc_mem(), choosing vfree or kfree
+  * according to how the address was obtained.
+  */
+ void t4_free_mem(void *addr)
+ {
+       if (is_vmalloc_addr(addr))
+               vfree(addr);
+       else
+               kfree(addr);
+ }
+ /* Return non-zero if the adapter supports protocol offload. */
+ static inline int is_offload(const struct adapter *adap)
+ {
+       return adap->params.offload;
+ }
+ /*
+  * Implementation of ethtool operations.
+  */
+ /* ethtool get_msglevel: report the driver's message-enable mask. */
+ static u32 get_msglevel(struct net_device *dev)
+ {
+       return netdev2adap(dev)->msg_enable;
+ }
+ /* ethtool set_msglevel: set the driver's message-enable mask. */
+ static void set_msglevel(struct net_device *dev, u32 val)
+ {
+       netdev2adap(dev)->msg_enable = val;
+ }
+ /*
+  * Ethtool statistics names: the hardware port statistics first, then the
+  * per-queue software statistics.  get_stats() fills the values in this
+  * same order (struct port_stats followed by struct queue_port_stats).
+  */
+ static char stats_strings[][ETH_GSTRING_LEN] = {
+       "TxOctetsOK         ",
+       "TxFramesOK         ",
+       "TxBroadcastFrames  ",
+       "TxMulticastFrames  ",
+       "TxUnicastFrames    ",
+       "TxErrorFrames      ",
+       "TxFrames64         ",
+       "TxFrames65To127    ",
+       "TxFrames128To255   ",
+       "TxFrames256To511   ",
+       "TxFrames512To1023  ",
+       "TxFrames1024To1518 ",
+       "TxFrames1519ToMax  ",
+       "TxFramesDropped    ",
+       "TxPauseFrames      ",
+       "TxPPP0Frames       ",
+       "TxPPP1Frames       ",
+       "TxPPP2Frames       ",
+       "TxPPP3Frames       ",
+       "TxPPP4Frames       ",
+       "TxPPP5Frames       ",
+       "TxPPP6Frames       ",
+       "TxPPP7Frames       ",
+       "RxOctetsOK         ",
+       "RxFramesOK         ",
+       "RxBroadcastFrames  ",
+       "RxMulticastFrames  ",
+       "RxUnicastFrames    ",
+       "RxFramesTooLong    ",
+       "RxJabberErrors     ",
+       "RxFCSErrors        ",
+       "RxLengthErrors     ",
+       "RxSymbolErrors     ",
+       "RxRuntFrames       ",
+       "RxFrames64         ",
+       "RxFrames65To127    ",
+       "RxFrames128To255   ",
+       "RxFrames256To511   ",
+       "RxFrames512To1023  ",
+       "RxFrames1024To1518 ",
+       "RxFrames1519ToMax  ",
+       "RxPauseFrames      ",
+       "RxPPP0Frames       ",
+       "RxPPP1Frames       ",
+       "RxPPP2Frames       ",
+       "RxPPP3Frames       ",
+       "RxPPP4Frames       ",
+       "RxPPP5Frames       ",
+       "RxPPP6Frames       ",
+       "RxPPP7Frames       ",
+       "RxBG0FramesDropped ",
+       "RxBG1FramesDropped ",
+       "RxBG2FramesDropped ",
+       "RxBG3FramesDropped ",
+       "RxBG0FramesTrunc   ",
+       "RxBG1FramesTrunc   ",
+       "RxBG2FramesTrunc   ",
+       "RxBG3FramesTrunc   ",
+       "TSO                ",
+       "TxCsumOffload      ",
+       "RxCsumGood         ",
+       "VLANextractions    ",
+       "VLANinsertions     ",
+ };
+ /* ethtool get_sset_count: only the statistics string set is supported. */
+ static int get_sset_count(struct net_device *dev, int sset)
+ {
+       switch (sset) {
+       case ETH_SS_STATS:
+               return ARRAY_SIZE(stats_strings);
+       default:
+               return -EOPNOTSUPP;
+       }
+ }
+ /* Size of the register dump buffer handed to get_regs(). */
+ #define T4_REGMAP_SIZE (160 * 1024)
+ static int get_regs_len(struct net_device *dev)
+ {
+       return T4_REGMAP_SIZE;
+ }
+ /* ethtool get_eeprom_len: fixed EEPROM size for this adapter family. */
+ static int get_eeprom_len(struct net_device *dev)
+ {
+       return EEPROMSIZE;
+ }
+ /*
+  * ethtool get_drvinfo: fill in driver name, version, PCI bus info and the
+  * firmware/TP microcode versions ("N/A" when no firmware version is known).
+  */
+ static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+ {
+       struct adapter *adapter = netdev2adap(dev);
+       strcpy(info->driver, KBUILD_MODNAME);
+       strcpy(info->version, DRV_VERSION);
+       strcpy(info->bus_info, pci_name(adapter->pdev));
+       if (!adapter->params.fw_vers)
+               strcpy(info->fw_version, "N/A");
+       else
+               snprintf(info->fw_version, sizeof(info->fw_version),
+                       "%u.%u.%u.%u, TP %u.%u.%u.%u",
+                       FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
+                       FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
+                       FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
+                       FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
+                       FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
+                       FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
+                       FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
+                       FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
+ }
+ /* ethtool get_strings: copy out the statistics names. */
+ static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+ {
+       if (stringset == ETH_SS_STATS)
+               memcpy(data, stats_strings, sizeof(stats_strings));
+ }
+ /*
+  * port stats maintained per queue of the port.  They should be in the same
+  * order as in stats_strings above.
+  */
+ struct queue_port_stats {
+       u64 tso;        /* TSO requests */
+       u64 tx_csum;    /* Tx checksum offloads */
+       u64 rx_csum;    /* Rx good-checksum events */
+       u64 vlan_ex;    /* VLAN extractions */
+       u64 vlan_ins;   /* VLAN insertions */
+ };
+ /*
+  * Sum the per-queue software statistics over all of a port's Tx/Rx queue
+  * pairs into *s (zeroed first).
+  */
+ static void collect_sge_port_stats(const struct adapter *adap,
+               const struct port_info *p, struct queue_port_stats *s)
+ {
+       int i;
+       const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
+       const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+       memset(s, 0, sizeof(*s));
+       for (i = 0; i < p->nqsets; i++, rx++, tx++) {
+               s->tso += tx->tso;
+               s->tx_csum += tx->tx_cso;
+               s->rx_csum += rx->stats.rx_cso;
+               s->vlan_ex += rx->stats.vlan_ex;
+               s->vlan_ins += tx->vlan_ins;
+       }
+ }
+ /*
+  * ethtool get_ethtool_stats: hardware port stats first, then the software
+  * queue stats, matching the layout of stats_strings.
+  */
+ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+                     u64 *data)
+ {
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adapter = pi->adapter;
+       t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+       /* advance past the u64-sized port_stats block just written */
+       data += sizeof(struct port_stats) / sizeof(u64);
+       collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+ }
+ /*
+  * Return a version number to identify the type of adapter.  The scheme is:
+  * - bits 0..9: chip version
+  * - bits 10..15: chip revision
+  */
+ static inline unsigned int mk_adap_vers(const struct adapter *ap)
+ {
+       /* chip version is fixed at 4 for this (T4) driver */
+       return 4 | (ap->params.rev << 10);
+ }
+ /*
+  * Read the registers in [start, end] (inclusive, byte offsets) into the
+  * dump buffer at the same byte offset.  Note the void-pointer arithmetic
+  * on buf relies on the GCC byte-sized void * extension.
+  */
+ static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
+                          unsigned int end)
+ {
+       u32 *p = buf + start;
+       for ( ; start <= end; start += sizeof(u32))
+               *p++ = t4_read_reg(ap, start);
+ }
+ /*
+  * ethtool get_regs: dump the adapter registers listed in reg_ranges into
+  * buf (zeroed first, T4_REGMAP_SIZE bytes).  Each pair of table entries is
+  * an inclusive [start, end] byte-offset range of readable registers.
+  */
+ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+                    void *buf)
+ {
+       static const unsigned int reg_ranges[] = {
+               0x1008, 0x1108,
+               0x1180, 0x11b4,
+               0x11fc, 0x123c,
+               0x1300, 0x173c,
+               0x1800, 0x18fc,
+               0x3000, 0x30d8,
+               0x30e0, 0x5924,
+               0x5960, 0x59d4,
+               0x5a00, 0x5af8,
+               0x6000, 0x6098,
+               0x6100, 0x6150,
+               0x6200, 0x6208,
+               0x6240, 0x6248,
+               0x6280, 0x6338,
+               0x6370, 0x638c,
+               0x6400, 0x643c,
+               0x6500, 0x6524,
+               0x6a00, 0x6a38,
+               0x6a60, 0x6a78,
+               0x6b00, 0x6b84,
+               0x6bf0, 0x6c84,
+               0x6cf0, 0x6d84,
+               0x6df0, 0x6e84,
+               0x6ef0, 0x6f84,
+               0x6ff0, 0x7084,
+               0x70f0, 0x7184,
+               0x71f0, 0x7284,
+               0x72f0, 0x7384,
+               0x73f0, 0x7450,
+               0x7500, 0x7530,
+               0x7600, 0x761c,
+               0x7680, 0x76cc,
+               0x7700, 0x7798,
+               0x77c0, 0x77fc,
+               0x7900, 0x79fc,
+               0x7b00, 0x7c38,
+               0x7d00, 0x7efc,
+               0x8dc0, 0x8e1c,
+               0x8e30, 0x8e78,
+               0x8ea0, 0x8f6c,
+               0x8fc0, 0x9074,
+               0x90fc, 0x90fc,
+               0x9400, 0x9458,
+               0x9600, 0x96bc,
+               0x9800, 0x9808,
+               0x9820, 0x983c,
+               0x9850, 0x9864,
+               0x9c00, 0x9c6c,
+               0x9c80, 0x9cec,
+               0x9d00, 0x9d6c,
+               0x9d80, 0x9dec,
+               0x9e00, 0x9e6c,
+               0x9e80, 0x9eec,
+               0x9f00, 0x9f6c,
+               0x9f80, 0x9fec,
+               0xd004, 0xd03c,
+               0xdfc0, 0xdfe0,
+               0xe000, 0xea7c,
+               0xf000, 0x11190,
+               0x19040, 0x19124,
+               0x19150, 0x191b0,
+               0x191d0, 0x191e8,
+               0x19238, 0x1924c,
+               0x193f8, 0x19474,
+               0x19490, 0x194f8,
+               0x19800, 0x19f30,
+               0x1a000, 0x1a06c,
+               0x1a0b0, 0x1a120,
+               0x1a128, 0x1a138,
+               0x1a190, 0x1a1c4,
+               0x1a1fc, 0x1a1fc,
+               0x1e040, 0x1e04c,
+               0x1e240, 0x1e28c,
+               0x1e2c0, 0x1e2c0,
+               0x1e2e0, 0x1e2e0,
+               0x1e300, 0x1e384,
+               0x1e3c0, 0x1e3c8,
+               0x1e440, 0x1e44c,
+               0x1e640, 0x1e68c,
+               0x1e6c0, 0x1e6c0,
+               0x1e6e0, 0x1e6e0,
+               0x1e700, 0x1e784,
+               0x1e7c0, 0x1e7c8,
+               0x1e840, 0x1e84c,
+               0x1ea40, 0x1ea8c,
+               0x1eac0, 0x1eac0,
+               0x1eae0, 0x1eae0,
+               0x1eb00, 0x1eb84,
+               0x1ebc0, 0x1ebc8,
+               0x1ec40, 0x1ec4c,
+               0x1ee40, 0x1ee8c,
+               0x1eec0, 0x1eec0,
+               0x1eee0, 0x1eee0,
+               0x1ef00, 0x1ef84,
+               0x1efc0, 0x1efc8,
+               0x1f040, 0x1f04c,
+               0x1f240, 0x1f28c,
+               0x1f2c0, 0x1f2c0,
+               0x1f2e0, 0x1f2e0,
+               0x1f300, 0x1f384,
+               0x1f3c0, 0x1f3c8,
+               0x1f440, 0x1f44c,
+               0x1f640, 0x1f68c,
+               0x1f6c0, 0x1f6c0,
+               0x1f6e0, 0x1f6e0,
+               0x1f700, 0x1f784,
+               0x1f7c0, 0x1f7c8,
+               0x1f840, 0x1f84c,
+               0x1fa40, 0x1fa8c,
+               0x1fac0, 0x1fac0,
+               0x1fae0, 0x1fae0,
+               0x1fb00, 0x1fb84,
+               0x1fbc0, 0x1fbc8,
+               0x1fc40, 0x1fc4c,
+               0x1fe40, 0x1fe8c,
+               0x1fec0, 0x1fec0,
+               0x1fee0, 0x1fee0,
+               0x1ff00, 0x1ff84,
+               0x1ffc0, 0x1ffc8,
+               0x20000, 0x2002c,
+               0x20100, 0x2013c,
+               0x20190, 0x201c8,
+               0x20200, 0x20318,
+               0x20400, 0x20528,
+               0x20540, 0x20614,
+               0x21000, 0x21040,
+               0x2104c, 0x21060,
+               0x210c0, 0x210ec,
+               0x21200, 0x21268,
+               0x21270, 0x21284,
+               0x212fc, 0x21388,
+               0x21400, 0x21404,
+               0x21500, 0x21518,
+               0x2152c, 0x2153c,
+               0x21550, 0x21554,
+               0x21600, 0x21600,
+               0x21608, 0x21628,
+               0x21630, 0x2163c,
+               0x21700, 0x2171c,
+               0x21780, 0x2178c,
+               0x21800, 0x21c38,
+               0x21c80, 0x21d7c,
+               0x21e00, 0x21e04,
+               0x22000, 0x2202c,
+               0x22100, 0x2213c,
+               0x22190, 0x221c8,
+               0x22200, 0x22318,
+               0x22400, 0x22528,
+               0x22540, 0x22614,
+               0x23000, 0x23040,
+               0x2304c, 0x23060,
+               0x230c0, 0x230ec,
+               0x23200, 0x23268,
+               0x23270, 0x23284,
+               0x232fc, 0x23388,
+               0x23400, 0x23404,
+               0x23500, 0x23518,
+               0x2352c, 0x2353c,
+               0x23550, 0x23554,
+               0x23600, 0x23600,
+               0x23608, 0x23628,
+               0x23630, 0x2363c,
+               0x23700, 0x2371c,
+               0x23780, 0x2378c,
+               0x23800, 0x23c38,
+               0x23c80, 0x23d7c,
+               0x23e00, 0x23e04,
+               0x24000, 0x2402c,
+               0x24100, 0x2413c,
+               0x24190, 0x241c8,
+               0x24200, 0x24318,
+               0x24400, 0x24528,
+               0x24540, 0x24614,
+               0x25000, 0x25040,
+               0x2504c, 0x25060,
+               0x250c0, 0x250ec,
+               0x25200, 0x25268,
+               0x25270, 0x25284,
+               0x252fc, 0x25388,
+               0x25400, 0x25404,
+               0x25500, 0x25518,
+               0x2552c, 0x2553c,
+               0x25550, 0x25554,
+               0x25600, 0x25600,
+               0x25608, 0x25628,
+               0x25630, 0x2563c,
+               0x25700, 0x2571c,
+               0x25780, 0x2578c,
+               0x25800, 0x25c38,
+               0x25c80, 0x25d7c,
+               0x25e00, 0x25e04,
+               0x26000, 0x2602c,
+               0x26100, 0x2613c,
+               0x26190, 0x261c8,
+               0x26200, 0x26318,
+               0x26400, 0x26528,
+               0x26540, 0x26614,
+               0x27000, 0x27040,
+               0x2704c, 0x27060,
+               0x270c0, 0x270ec,
+               0x27200, 0x27268,
+               0x27270, 0x27284,
+               0x272fc, 0x27388,
+               0x27400, 0x27404,
+               0x27500, 0x27518,
+               0x2752c, 0x2753c,
+               0x27550, 0x27554,
+               0x27600, 0x27600,
+               0x27608, 0x27628,
+               0x27630, 0x2763c,
+               0x27700, 0x2771c,
+               0x27780, 0x2778c,
+               0x27800, 0x27c38,
+               0x27c80, 0x27d7c,
+               0x27e00, 0x27e04
+       };
+       int i;
+       struct adapter *ap = netdev2adap(dev);
+       regs->version = mk_adap_vers(ap);
+       memset(buf, 0, T4_REGMAP_SIZE);
+       for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+               reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
+ }
+ /*
+  * ethtool .nway_reset handler: restart link autonegotiation on a port.
+  * Returns -EAGAIN if the interface is down and -EINVAL if autoneg is not
+  * enabled in the current link configuration.
+  */
+ static int restart_autoneg(struct net_device *dev)
+ {
+       struct port_info *p = netdev_priv(dev);
+       if (!netif_running(dev))
+               return -EAGAIN;
+       if (p->link_cfg.autoneg != AUTONEG_ENABLE)
+               return -EINVAL;
+       /* 0 here is presumably the mailbox/PF id -- confirm vs t4_restart_aneg */
+       t4_restart_aneg(p->adapter, 0, p->tx_chan);
+       return 0;
+ }
+ /*
+  * ethtool .phys_id handler: blink the port LED so the port can be located.
+  * @data is the blink duration in seconds; 0 defaults to 2 seconds.  The
+  * value is passed to the firmware as data * 5, presumably 200ms ticks --
+  * confirm against t4_identify_port.
+  */
+ static int identify_port(struct net_device *dev, u32 data)
+ {
+       if (data == 0)
+               data = 2;     /* default to 2 seconds */
+       return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
+                               data * 5);
+ }
+ /*
+  * Translate a firmware port type plus FW_PORT_CAP_* capability bits into
+  * ethtool SUPPORTED_* flags.  The caller also reuses the result for the
+  * advertising mask, relying on ADVERTISED_* sharing SUPPORTED_* bit values.
+  */
+ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
+ {
+       unsigned int v = 0;
+       if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
+               v |= SUPPORTED_TP;
+               if (caps & FW_PORT_CAP_SPEED_100M)
+                       v |= SUPPORTED_100baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseT_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseT_Full;
+       } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+               v |= SUPPORTED_Backplane;
+               if (caps & FW_PORT_CAP_SPEED_1G)
+                       v |= SUPPORTED_1000baseKX_Full;
+               if (caps & FW_PORT_CAP_SPEED_10G)
+                       v |= SUPPORTED_10000baseKX4_Full;
+       } else if (type == FW_PORT_TYPE_KR)
+               v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+       else if (type == FW_PORT_TYPE_FIBER)
+               v |= SUPPORTED_FIBRE;
+       if (caps & FW_PORT_CAP_ANEG)
+               v |= SUPPORTED_Autoneg;
+       return v;
+ }
+ /*
+  * Translate ethtool ADVERTISED_* full-duplex speed flags into
+  * FW_PORT_CAP_SPEED_* bits.  Flags other than the three speeds handled
+  * here are ignored.
+  */
+ static unsigned int to_fw_linkcaps(unsigned int caps)
+ {
+       unsigned int v = 0;
+       if (caps & ADVERTISED_100baseT_Full)
+               v |= FW_PORT_CAP_SPEED_100M;
+       if (caps & ADVERTISED_1000baseT_Full)
+               v |= FW_PORT_CAP_SPEED_1G;
+       if (caps & ADVERTISED_10000baseT_Full)
+               v |= FW_PORT_CAP_SPEED_10G;
+       return v;
+ }
+ /*
+  * ethtool .get_settings handler: report port/transceiver type, PHY address,
+  * supported/advertised link modes, and the current speed.  Speed is 0 when
+  * the carrier is down; the device is always reported as full duplex.
+  */
+ static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+       const struct port_info *p = netdev_priv(dev);
+       if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+           p->port_type == FW_PORT_TYPE_BT_XAUI)
+               cmd->port = PORT_TP;
+       else if (p->port_type == FW_PORT_TYPE_FIBER)
+               cmd->port = PORT_FIBRE;
+       else if (p->port_type == FW_PORT_TYPE_TWINAX)
+               cmd->port = PORT_DA;
+       else
+               cmd->port = PORT_OTHER;
+       if (p->mdio_addr >= 0) {
+               cmd->phy_address = p->mdio_addr;
+               cmd->transceiver = XCVR_EXTERNAL;
+               /* SGMII PHYs use clause-22 MDIO, the rest clause 45 */
+               cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+                       MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+       } else {
+               cmd->phy_address = 0;  /* not really, but no better option */
+               cmd->transceiver = XCVR_INTERNAL;
+               cmd->mdio_support = 0;
+       }
+       cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
+       cmd->advertising = from_fw_linkcaps(p->port_type,
+                                           p->link_cfg.advertising);
+       cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
+       cmd->duplex = DUPLEX_FULL;
+       cmd->autoneg = p->link_cfg.autoneg;
+       cmd->maxtxpkt = 0;
+       cmd->maxrxpkt = 0;
+       return 0;
+ }
+ /*
+  * Map an ethtool SPEED_* value to the matching FW_PORT_CAP_SPEED_* bit,
+  * or 0 if the speed has no firmware capability equivalent.
+  */
+ static unsigned int speed_to_caps(int speed)
+ {
+       if (speed == SPEED_100)
+               return FW_PORT_CAP_SPEED_100M;
+       if (speed == SPEED_1000)
+               return FW_PORT_CAP_SPEED_1G;
+       if (speed == SPEED_10000)
+               return FW_PORT_CAP_SPEED_10G;
+       return 0;
+ }
+ /*
+  * ethtool .set_settings handler: apply speed/autoneg configuration to a
+  * port's link_config and, if the interface is up, push it to the hardware
+  * via t4_link_start.  Only full duplex is accepted.
+  */
+ static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+ {
+       unsigned int cap;
+       struct port_info *p = netdev_priv(dev);
+       struct link_config *lc = &p->link_cfg;
+       if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
+               return -EINVAL;
+       if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+               /*
+                * PHY offers a single speed.  See if that's what's
+                * being requested.
+                */
+               if (cmd->autoneg == AUTONEG_DISABLE &&
+                   (lc->supported & speed_to_caps(cmd->speed)))
+                               return 0;
+               return -EINVAL;
+       }
+       if (cmd->autoneg == AUTONEG_DISABLE) {
+               cap = speed_to_caps(cmd->speed);
+               /*
+                * Forcing 1G or 10G is rejected here -- presumably those
+                * speeds can only be reached via autonegotiation on this
+                * hardware; confirm against the firmware interface docs.
+                */
+               if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
+                   cmd->speed == SPEED_10000)
+                       return -EINVAL;
+               lc->requested_speed = cap;
+               lc->advertising = 0;
+       } else {
+               cap = to_fw_linkcaps(cmd->advertising);
+               if (!(lc->supported & cap))
+                       return -EINVAL;
+               lc->requested_speed = 0;
+               lc->advertising = cap | FW_PORT_CAP_ANEG;
+       }
+       lc->autoneg = cmd->autoneg;
+       if (netif_running(dev))
+               return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+       return 0;
+ }
+ /*
+  * ethtool .get_pauseparam handler: report pause-frame autonegotiation and
+  * the currently active Rx/Tx pause state from the port's link_config.
+  */
+ static void get_pauseparam(struct net_device *dev,
+                          struct ethtool_pauseparam *epause)
+ {
+       struct port_info *p = netdev_priv(dev);
+       epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+       epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
+       epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+ }
+ /*
+  * ethtool .set_pauseparam handler: record the requested flow-control
+  * settings and apply them immediately if the interface is running.
+  * Pause autoneg is only allowed when the link supports autonegotiation.
+  */
+ static int set_pauseparam(struct net_device *dev,
+                         struct ethtool_pauseparam *epause)
+ {
+       struct port_info *p = netdev_priv(dev);
+       struct link_config *lc = &p->link_cfg;
+       if (epause->autoneg == AUTONEG_DISABLE)
+               lc->requested_fc = 0;
+       else if (lc->supported & FW_PORT_CAP_ANEG)
+               lc->requested_fc = PAUSE_AUTONEG;
+       else
+               return -EINVAL;
+       if (epause->rx_pause)
+               lc->requested_fc |= PAUSE_RX;
+       if (epause->tx_pause)
+               lc->requested_fc |= PAUSE_TX;
+       if (netif_running(dev))
+               return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+       return 0;
+ }
+ /*
+  * ethtool .get_rx_csum handler: nonzero iff Rx checksum offload (RX_CSO)
+  * is enabled in the port's rx_offload flags.
+  */
+ static u32 get_rx_csum(struct net_device *dev)
+ {
+       struct port_info *p = netdev_priv(dev);
+       return p->rx_offload & RX_CSO;
+ }
+ /*
+  * ethtool .set_rx_csum handler: toggle the RX_CSO bit in the port's
+  * rx_offload flags.  Only updates driver state; always succeeds.
+  */
+ static int set_rx_csum(struct net_device *dev, u32 data)
+ {
+       struct port_info *p = netdev_priv(dev);
+       if (data)
+               p->rx_offload |= RX_CSO;
+       else
+               p->rx_offload &= ~RX_CSO;
+       return 0;
+ }
+ /*
+  * ethtool .get_ringparam handler: report SGE queue sizes for the port's
+  * first queue set.  The "mini" ring is the response queue; jumbo rings
+  * are unused.  rx_pending is fl.size - 8, mirroring the +8 adjustment in
+  * set_sge_param (presumably reserved free-list slots -- confirm).
+  */
+ static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+ {
+       const struct port_info *pi = netdev_priv(dev);
+       const struct sge *s = &pi->adapter->sge;
+       e->rx_max_pending = MAX_RX_BUFFERS;
+       e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+       e->rx_jumbo_max_pending = 0;
+       e->tx_max_pending = MAX_TXQ_ENTRIES;
+       e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
+       e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+       e->rx_jumbo_pending = 0;
+       e->tx_pending = s->ethtxq[pi->first_qset].q.size;
+ }
+ /*
+  * ethtool .set_ringparam handler: validate and store new SGE queue sizes
+  * for every queue set of the port.  Rejected with -EBUSY once the adapter
+  * has completed full initialization (queues already created); jumbo rings
+  * are not supported.
+  */
+ static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+ {
+       int i;
+       const struct port_info *pi = netdev_priv(dev);
+       struct adapter *adapter = pi->adapter;
+       struct sge *s = &adapter->sge;
+       if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
+           e->tx_pending > MAX_TXQ_ENTRIES ||
+           e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+           e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+           e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+               return -EINVAL;
+       if (adapter->flags & FULL_INIT_DONE)
+               return -EBUSY;
+       for (i = 0; i < pi->nqsets; ++i) {
+               s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
+               /* +8 matches the -8 reported by get_sge_param */
+               s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
+               s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
+       }
+       return 0;
+ }
+ /*
+  * Return the index of the entry in s->timer_val whose value is closest
+  * (by absolute difference) to @time.  Ties favor the earlier entry.
+  */
+ static int closest_timer(const struct sge *s, int time)
+ {
+       int i, delta, match = 0, min_delta = INT_MAX;
+       for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+               delta = time - s->timer_val[i];
+               if (delta < 0)
+                       delta = -delta;
+               if (delta < min_delta) {
+                       min_delta = delta;
+                       match = i;
+               }
+       }
+       return match;
+ }
+ /*
+  * Return the index of the entry in s->counter_val whose value is closest
+  * (by absolute difference) to @thres.  Ties favor the earlier entry.
+  */
+ static int closest_thres(const struct sge *s, int thres)
+ {
+       int i, delta, match = 0, min_delta = INT_MAX;
+       for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+               delta = thres - s->counter_val[i];
+               if (delta < 0)
+                       delta = -delta;
+               if (delta < min_delta) {
+                       min_delta = delta;
+                       match = i;
+               }
+       }
+       return match;
+ }
+ /*
+  * Return a queue's interrupt hold-off time in us.  0 means no timer.
+  * The timer index lives in intr_params above the low bit (the low bit is
+  * the packet-count enable, per set_rxq_intr_params); out-of-range indices
+  * map to 0.
+  */
+ static unsigned int qtimer_val(const struct adapter *adap,
+                              const struct sge_rspq *q)
+ {
+       unsigned int idx = q->intr_params >> 1;
+       return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
+ }
+ /**
+  *    set_rxq_intr_params - set a queue's interrupt holdoff parameters
+  *    @adap: the adapter
+  *    @q: the Rx queue
+  *    @us: the hold-off time in us, or 0 to disable timer
+  *    @cnt: the hold-off packet count, or 0 to disable counter
+  *
+  *    Sets an Rx queue's interrupt hold-off time and packet count.  At least
+  *    one of the two needs to be enabled for the queue to generate interrupts.
+  */
+ static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
+                              unsigned int us, unsigned int cnt)
+ {
+       /* if both are disabled, fall back to a packet count of 1 */
+       if ((us | cnt) == 0)
+               cnt = 1;
+       if (cnt) {
+               int err;
+               u32 v, new_idx;
+               new_idx = closest_thres(&adap->sge, cnt);
+               if (q->desc && q->pktcnt_idx != new_idx) {
+                       /* the queue has already been created, update it */
+                       v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+                           FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+                           FW_PARAMS_PARAM_YZ(q->cntxt_id);
+                       err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
+                       if (err)
+                               return err;
+               }
+               q->pktcnt_idx = new_idx;
+       }
+       /* us == 0: index 6 is presumably the "no timer" slot -- confirm */
+       us = us == 0 ? 6 : closest_timer(&adap->sge, us);
+       q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+       return 0;
+ }
+ /*
+  * ethtool .set_coalesce handler.  NOTE(review): only the response queue of
+  * the port's *first* queue set is updated -- verify that is intentional.
+  */
+ static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+ {
+       const struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = pi->adapter;
+       return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
+                       c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+ }
+ /*
+  * ethtool .get_coalesce handler: report the hold-off timer (in us) and the
+  * packet-count threshold of the port's first response queue; the count is
+  * 0 when the counter is disabled.
+  */
+ static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+ {
+       const struct port_info *pi = netdev_priv(dev);
+       const struct adapter *adap = pi->adapter;
+       const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
+       c->rx_coalesce_usecs = qtimer_val(adap, rq);
+       c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+               adap->sge.counter_val[rq->pktcnt_idx] : 0;
+       return 0;
+ }
+ /*
+  * Translate a physical EEPROM address to virtual.  The first 1K is accessed
+  * through virtual addresses starting at 31K, the rest is accessed through
+  * virtual addresses starting at 0.  This mapping is correct only for PF0.
+  * Returns -EINVAL for addresses at or beyond EEPROMSIZE.
+  */
+ static int eeprom_ptov(unsigned int phys_addr)
+ {
+       if (phys_addr < 1024)
+               return phys_addr + (31 << 10);
+       if (phys_addr < EEPROMSIZE)
+               return phys_addr - 1024;
+       return -EINVAL;
+ }
+ /*
+  * The next two routines implement eeprom read/write from physical addresses.
+  * The physical->virtual translation is correct only for PF0.
+  */
+ /* Read one 32-bit word of VPD/EEPROM at @phys_addr into *@v; 0 or -errno. */
+ static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+ {
+       int vaddr = eeprom_ptov(phys_addr);
+       if (vaddr >= 0)
+               vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
+       return vaddr < 0 ? vaddr : 0;
+ }
+ /* Write one 32-bit word @v to VPD/EEPROM at @phys_addr; 0 or -errno. */
+ static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+ {
+       int vaddr = eeprom_ptov(phys_addr);
+       if (vaddr >= 0)
+               vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
+       return vaddr < 0 ? vaddr : 0;
+ }
+ #define EEPROM_MAGIC 0x38E2F10C
+ /*
+  * ethtool .get_eeprom handler: read the requested range in 4-byte words
+  * (starting from the word-aligned offset) into a full-size scratch buffer,
+  * then copy out exactly the bytes the caller asked for.  Out-of-range
+  * offsets fail via eeprom_rd_phys/eeprom_ptov.
+  */
+ static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+                     u8 *data)
+ {
+       int i, err = 0;
+       struct adapter *adapter = netdev2adap(dev);
+       u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       e->magic = EEPROM_MAGIC;
+       for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+               err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+       if (!err)
+               memcpy(data, buf + e->offset, e->len);
+       kfree(buf);
+       return err;
+ }
+ /*
+  * ethtool .set_eeprom handler: write a byte range to the EEPROM.  Writes
+  * happen in 4-byte words, so an unaligned first/last word is handled by
+  * read-modify-write into a temporary buffer.  Write protection is lifted
+  * around the writes and restored afterwards via t4_seeprom_wp.
+  */
+ static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+                     u8 *data)
+ {
+       u8 *buf;
+       int err = 0;
+       u32 aligned_offset, aligned_len, *p;
+       struct adapter *adapter = netdev2adap(dev);
+       if (eeprom->magic != EEPROM_MAGIC)
+               return -EINVAL;
+       aligned_offset = eeprom->offset & ~3;
+       aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+       if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+               /*
+                * RMW possibly needed for first or last words.
+                */
+               buf = kmalloc(aligned_len, GFP_KERNEL);
+               if (!buf)
+                       return -ENOMEM;
+               /* pre-read the boundary words so unwritten bytes survive */
+               err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+               if (!err && aligned_len > 4)
+                       err = eeprom_rd_phys(adapter,
+                                            aligned_offset + aligned_len - 4,
+                                            (u32 *)&buf[aligned_len - 4]);
+               if (err)
+                       goto out;
+               memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+       } else
+               buf = data;
+       err = t4_seeprom_wp(adapter, false);
+       if (err)
+               goto out;
+       for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+               err = eeprom_wr_phys(adapter, aligned_offset, *p);
+               aligned_offset += 4;
+       }
+       if (!err)
+               err = t4_seeprom_wp(adapter, true);
+ out:
+       if (buf != data)
+               kfree(buf);
+       return err;
+ }
+ /*
+  * ethtool .flash_device handler: fetch a firmware image by name via
+  * request_firmware and flash it with t4_load_fw.  The filename is
+  * NUL-terminated defensively before use.
+  */
+ static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+ {
+       int ret;
+       const struct firmware *fw;
+       struct adapter *adap = netdev2adap(netdev);
+       ef->data[sizeof(ef->data) - 1] = '\0';
+       ret = request_firmware(&fw, ef->data, adap->pdev_dev);
+       if (ret < 0)
+               return ret;
+       ret = t4_load_fw(adap, fw->data, fw->size);
+       release_firmware(fw);
+       if (!ret)
+               dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
+       return ret;
+ }
+ #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
+ #define BCAST_CRC 0xa0ccc1a6
+ /*
+  * ethtool .get_wol handler: broadcast and magic-packet wake are supported;
+  * the active options come from the adapter's cached wol field.  No
+  * SecureOn password support.
+  */
+ static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ {
+       wol->supported = WAKE_BCAST | WAKE_MAGIC;
+       wol->wolopts = netdev2adap(dev)->wol;
+       memset(&wol->sopass, 0, sizeof(wol->sopass));
+ }
+ /*
+  * ethtool .set_wol handler: program magic-packet wake with the device MAC
+  * (or disable it), and program/clear a pattern-match rule that recognizes
+  * broadcast frames via the BCAST_CRC constant.  Options outside
+  * WOL_SUPPORTED are rejected.
+  */
+ static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+ {
+       int err = 0;
+       struct port_info *pi = netdev_priv(dev);
+       if (wol->wolopts & ~WOL_SUPPORTED)
+               return -EINVAL;
+       t4_wol_magic_enable(pi->adapter, pi->tx_chan,
+                           (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
+       if (wol->wolopts & WAKE_BCAST) {
+               /* two-step pattern setup -- mask/map args per t4_wol_pat_enable */
+               err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
+                                       ~0ULL, 0, false);
+               if (!err)
+                       err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
+                                               ~6ULL, ~0ULL, BCAST_CRC, true);
+       } else
+               t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
+       return err;
+ }
+ /*
+  * ethtool .set_tso handler: enable or disable TCP segmentation offload
+  * for both IPv4 (NETIF_F_TSO) and IPv6 (NETIF_F_TSO6) together.
+  */
+ static int set_tso(struct net_device *dev, u32 value)
+ {
+       if (value)
+               dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+       else
+               dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+       return 0;
+ }
+ /*
+  * ethtool operations table for cxgb4 net devices.
+  * NOTE(review): this table is never modified at runtime and could be
+  * declared const (static const struct ethtool_ops).
+  */
+ static struct ethtool_ops cxgb_ethtool_ops = {
+       .get_settings      = get_settings,
+       .set_settings      = set_settings,
+       .get_drvinfo       = get_drvinfo,
+       .get_msglevel      = get_msglevel,
+       .set_msglevel      = set_msglevel,
+       .get_ringparam     = get_sge_param,
+       .set_ringparam     = set_sge_param,
+       .get_coalesce      = get_coalesce,
+       .set_coalesce      = set_coalesce,
+       .get_eeprom_len    = get_eeprom_len,
+       .get_eeprom        = get_eeprom,
+       .set_eeprom        = set_eeprom,
+       .get_pauseparam    = get_pauseparam,
+       .set_pauseparam    = set_pauseparam,
+       .get_rx_csum       = get_rx_csum,
+       .set_rx_csum       = set_rx_csum,
+       .set_tx_csum       = ethtool_op_set_tx_ipv6_csum,
+       .set_sg            = ethtool_op_set_sg,
+       .get_link          = ethtool_op_get_link,
+       .get_strings       = get_strings,
+       .phys_id           = identify_port,
+       .nway_reset        = restart_autoneg,
+       .get_sset_count    = get_sset_count,
+       .get_ethtool_stats = get_stats,
+       .get_regs_len      = get_regs_len,
+       .get_regs          = get_regs,
+       .get_wol           = get_wol,
+       .set_wol           = set_wol,
+       .set_tso           = set_tso,
+       .flash_device      = set_flash,
+ };
+ /*
+  * debugfs support
+  */
+ /*
+  * debugfs open: stash the inode's private pointer (adapter pointer with
+  * the memory index encoded in its low bits -- see add_debugfs_mem) so
+  * mem_read can recover both.
+  */
+ static int mem_open(struct inode *inode, struct file *file)
+ {
+       file->private_data = inode->i_private;
+       return 0;
+ }
+ /*
+  * debugfs read handler for adapter memory (EDC0/EDC1/MC).  The memory
+  * target index is recovered from the low 2 bits of private_data; the file
+  * size (set at creation) bounds the readable range.  Data is fetched in
+  * 64-byte chunks and copied to user space.
+  * NOTE(review): pos is passed unaligned to t4_mc_read/t4_edc_read while
+  * ofst compensates in the copy -- presumably those helpers align the
+  * address internally; confirm against their implementations.
+  */
+ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
+                       loff_t *ppos)
+ {
+       loff_t pos = *ppos;
+       loff_t avail = file->f_path.dentry->d_inode->i_size;
+       unsigned int mem = (uintptr_t)file->private_data & 3;
+       struct adapter *adap = file->private_data - mem;
+       if (pos < 0)
+               return -EINVAL;
+       if (pos >= avail)
+               return 0;
+       if (count > avail - pos)
+               count = avail - pos;
+       while (count) {
+               size_t len;
+               int ret, ofst;
+               __be32 data[16];
+               if (mem == MEM_MC)
+                       ret = t4_mc_read(adap, pos, data, NULL);
+               else
+                       ret = t4_edc_read(adap, mem, pos, data, NULL);
+               if (ret)
+                       return ret;
+               ofst = pos % sizeof(data);
+               len = min(count, sizeof(data) - ofst);
+               if (copy_to_user(buf, (u8 *)data + ofst, len))
+                       return -EFAULT;
+               buf += len;
+               pos += len;
+               count -= len;
+       }
+       count = pos - *ppos;
+       *ppos = pos;
+       return count;
+ }
+ /* File operations for the adapter-memory debugfs entries (read-only). */
+ static const struct file_operations mem_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = mem_open,
+       .read    = mem_read,
+ };
+ /*
+  * Create a debugfs file exposing one adapter memory target.  The memory
+  * index @idx is encoded in the low bits of the private pointer (decoded in
+  * mem_read) and the inode size is set to the memory size so reads are
+  * bounded.  @size_mb is the target's size in megabytes.
+  */
+ static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
+                                     unsigned int idx, unsigned int size_mb)
+ {
+       struct dentry *de;
+       de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
+                                (void *)adap + idx, &mem_debugfs_fops);
+       if (de && de->d_inode)
+               de->d_inode->i_size = size_mb << 20;
+ }
+ /*
+  * Populate the adapter's debugfs directory: a memory file for each enabled
+  * memory target (EDC0/EDC1 fixed at 5MB -- presumably the hardware size;
+  * MC sized from the MA_EXT_MEMORY_BAR register) and an "l2t" file when the
+  * L2 table exists.  Returns -1 if the debugfs root is unusable.
+  */
+ static int __devinit setup_debugfs(struct adapter *adap)
+ {
+       int i;
+       if (IS_ERR_OR_NULL(adap->debugfs_root))
+               return -1;
+       i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
+       if (i & EDRAM0_ENABLE)
+               add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
+       if (i & EDRAM1_ENABLE)
+               add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
+       if (i & EXT_MEM_ENABLE)
+               add_debugfs_mem(adap, "mc", MEM_MC,
+                       EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+       if (adap->l2t)
+               debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
+                                   &t4_l2t_fops);
+       return 0;
+ }
+ /*
+  * upper-layer driver support
+  */
+ /*
+  * Allocate an active-open TID and set it to the supplied value.
+  */
+ /*
+  * Allocate an active-open TID from the free list and associate @data with
+  * it.  Returns the atid, or -1 if none are free.  Exported for upper-layer
+  * (offload) drivers; serialized by atid_lock.
+  */
+ int cxgb4_alloc_atid(struct tid_info *t, void *data)
+ {
+       int atid = -1;
+       spin_lock_bh(&t->atid_lock);
+       if (t->afree) {
+               union aopen_entry *p = t->afree;
+               /* atid is the entry's index in atid_tab */
+               atid = p - t->atid_tab;
+               t->afree = p->next;
+               p->data = data;
+               t->atids_in_use++;
+       }
+       spin_unlock_bh(&t->atid_lock);
+       return atid;
+ }
+ EXPORT_SYMBOL(cxgb4_alloc_atid);
+ /*
+  * Release an active-open TID.
+  */
+ /*
+  * Release an active-open TID back onto the free list.  Exported for
+  * upper-layer drivers; serialized by atid_lock.
+  */
+ void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
+ {
+       union aopen_entry *p = &t->atid_tab[atid];
+       spin_lock_bh(&t->atid_lock);
+       p->next = t->afree;
+       t->afree = p;
+       t->atids_in_use--;
+       spin_unlock_bh(&t->atid_lock);
+ }
+ EXPORT_SYMBOL(cxgb4_free_atid);
+ /*
+  * Allocate a server TID and set it to the supplied value.
+  */
+ /*
+  * Allocate a server TID and associate @data with it.  IPv4 servers take a
+  * single stid from the bitmap; other families (IPv6) take an order-2
+  * aligned region of 4 consecutive stids.  Returns the stid offset by
+  * stid_base, or -1 on exhaustion.  Serialized by stid_lock.
+  */
+ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
+ {
+       int stid;
+       spin_lock_bh(&t->stid_lock);
+       if (family == PF_INET) {
+               stid = find_first_zero_bit(t->stid_bmap, t->nstids);
+               if (stid < t->nstids)
+                       __set_bit(stid, t->stid_bmap);
+               else
+                       stid = -1;
+       } else {
+               stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+               /* normalize the negative errno to the -1 failure value */
+               if (stid < 0)
+                       stid = -1;
+       }
+       if (stid >= 0) {
+               t->stid_tab[stid].data = data;
+               stid += t->stid_base;
+               t->stids_in_use++;
+       }
+       spin_unlock_bh(&t->stid_lock);
+       return stid;
+ }
+ EXPORT_SYMBOL(cxgb4_alloc_stid);
+ /*
+  * Release a server TID.
+  */
+ /*
+  * Release a server TID (given with stid_base included, as returned by
+  * cxgb4_alloc_stid).  IPv4 clears one bitmap bit; other families release
+  * the order-2 region.  Serialized by stid_lock.
+  */
+ void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
+ {
+       stid -= t->stid_base;
+       spin_lock_bh(&t->stid_lock);
+       if (family == PF_INET)
+               __clear_bit(stid, t->stid_bmap);
+       else
+               bitmap_release_region(t->stid_bmap, stid, 2);
+       t->stid_tab[stid].data = NULL;
+       t->stids_in_use--;
+       spin_unlock_bh(&t->stid_lock);
+ }
+ EXPORT_SYMBOL(cxgb4_free_stid);
+ /*
+  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
+  */
+ /*
+  * Populate a CPL_TID_RELEASE work request in @skb and steer it to the
+  * setup-priority Tx queue for @chan.  Caller must properly size the skb.
+  */
+ static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
+                          unsigned int tid)
+ {
+       struct cpl_tid_release *req;
+       set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+       req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, tid);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+ }
+ /*
+  * Queue a TID release request and if necessary schedule a work queue to
+  * process it.
+  */
+ /*
+  * Queue a TID release request and if necessary schedule a work item to
+  * process it.  The pending requests form an intrusive list threaded
+  * through the tid_tab slots themselves, with the Tx channel encoded in
+  * the low 2 bits of each link pointer.  Serialized by tid_release_lock.
+  */
+ void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+                            unsigned int tid)
+ {
+       void **p = &t->tid_tab[tid];
+       struct adapter *adap = container_of(t, struct adapter, tids);
+       spin_lock_bh(&adap->tid_release_lock);
+       *p = adap->tid_release_head;
+       /* Low 2 bits encode the Tx channel number */
+       adap->tid_release_head = (void **)((uintptr_t)p | chan);
+       if (!adap->tid_release_task_busy) {
+               adap->tid_release_task_busy = true;
+               schedule_work(&adap->tid_release_task);
+       }
+       spin_unlock_bh(&adap->tid_release_lock);
+ }
+ EXPORT_SYMBOL(cxgb4_queue_tid_release);
+ /*
+  * Process the list of pending TID release requests.
+  */
+ /*
+  * Work-queue handler: drain the pending TID release list built by
+  * cxgb4_queue_tid_release.  The lock is dropped while allocating and
+  * sending each CPL message; allocation failures are retried after a
+  * 1-tick uninterruptible sleep rather than dropping the release.
+  */
+ static void process_tid_release_list(struct work_struct *work)
+ {
+       struct sk_buff *skb;
+       struct adapter *adap;
+       adap = container_of(work, struct adapter, tid_release_task);
+       spin_lock_bh(&adap->tid_release_lock);
+       while (adap->tid_release_head) {
+               void **p = adap->tid_release_head;
+               /* undo the channel encoding in the low 2 bits */
+               unsigned int chan = (uintptr_t)p & 3;
+               p = (void *)p - chan;
+               adap->tid_release_head = *p;
+               *p = NULL;
+               spin_unlock_bh(&adap->tid_release_lock);
+               while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
+                                        GFP_KERNEL)))
+                       schedule_timeout_uninterruptible(1);
+               mk_tid_release(skb, chan, p - adap->tids.tid_tab);
+               t4_ofld_send(adap, skb);
+               spin_lock_bh(&adap->tid_release_lock);
+       }
+       adap->tid_release_task_busy = false;
+       spin_unlock_bh(&adap->tid_release_lock);
+ }
+ /*
+  * Release a TID and inform HW.  If we are unable to allocate the release
+  * message we defer to a work queue.
+  */
+ /*
+  * Release a TID and inform HW.  If the atomic skb allocation fails the
+  * release is deferred to the work queue via cxgb4_queue_tid_release.
+  * The in-use counter is only decremented if the slot actually held data.
+  */
+ void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
+ {
+       void *old;
+       struct sk_buff *skb;
+       struct adapter *adap = container_of(t, struct adapter, tids);
+       old = t->tid_tab[tid];
+       skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+       if (likely(skb)) {
+               t->tid_tab[tid] = NULL;
+               mk_tid_release(skb, chan, tid);
+               t4_ofld_send(adap, skb);
+       } else
+               cxgb4_queue_tid_release(t, chan, tid);
+       if (old)
+               atomic_dec(&t->tids_in_use);
+ }
+ EXPORT_SYMBOL(cxgb4_remove_tid);
+ /*
+  * Allocate and initialize the TID tables.  Returns 0 on success.
+  */
+ /*
+  * Allocate and initialize the TID tables.  Returns 0 on success.
+  * tid_tab, atid_tab, stid_tab and the stid bitmap are carved out of one
+  * contiguous allocation; the atid entries are chained into a free list
+  * (each entry points at the next, with afree at the head).
+  */
+ static int tid_init(struct tid_info *t)
+ {
+       size_t size;
+       unsigned int natids = t->natids;
+       size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+              t->nstids * sizeof(*t->stid_tab) +
+              BITS_TO_LONGS(t->nstids) * sizeof(long);
+       t->tid_tab = t4_alloc_mem(size);
+       if (!t->tid_tab)
+               return -ENOMEM;
+       t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+       t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
+       t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+       spin_lock_init(&t->stid_lock);
+       spin_lock_init(&t->atid_lock);
+       t->stids_in_use = 0;
+       t->afree = NULL;
+       t->atids_in_use = 0;
+       atomic_set(&t->tids_in_use, 0);
+       /* Setup the free list for atid_tab and clear the stid bitmap. */
+       if (natids) {
+               while (--natids)
+                       t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+               t->afree = t->atid_tab;
+       }
+       bitmap_zero(t->stid_bmap, t->nstids);
+       return 0;
+ }
+ /**
+  *    cxgb4_create_server - create an IP server
+  *    @dev: the device
+  *    @stid: the server TID
+  *    @sip: local IP address to bind server to
+  *    @sport: the server's TCP port
+  *    @queue: queue to direct messages from this server to
+  *
+  *    Create an IP server for the given port and address.
+  *    Returns <0 on error and one of the %NET_XMIT_* values on success.
+  */
+ int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+                       __be32 sip, __be16 sport, unsigned int queue)
+ {
+       unsigned int chan;
+       struct sk_buff *skb;
+       struct adapter *adap;
+       struct cpl_pass_open_req *req;
+       skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+       adap = netdev2adap(dev);
+       req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, 0);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
+       req->local_port = sport;
+       /* zero peer port/address means accept from any peer */
+       req->peer_port = htons(0);
+       req->local_ip = sip;
+       req->peer_ip = htonl(0);
+       /* use the Tx channel of the netdev behind the target ingress queue */
+       chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+       req->opt0 = cpu_to_be64(TX_CHAN(chan));
+       req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+                               SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+       return t4_mgmt_tx(adap, skb);
+ }
+ EXPORT_SYMBOL(cxgb4_create_server);
+ /**
+  *    cxgb4_create_server6 - create an IPv6 server
+  *    @dev: the device
+  *    @stid: the server TID
+  *    @sip: local IPv6 address to bind server to
+  *    @sport: the server's TCP port
+  *    @queue: queue to direct messages from this server to
+  *
+  *    Create an IPv6 server for the given port and address.
+  *    Returns <0 on error and one of the %NET_XMIT_* values on success.
+  */
+ int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+                        const struct in6_addr *sip, __be16 sport,
+                        unsigned int queue)
+ {
+       unsigned int chan;
+       struct sk_buff *skb;
+       struct adapter *adap;
+       struct cpl_pass_open_req6 *req;
+       skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+       adap = netdev2adap(dev);
+       req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+       INIT_TP_WR(req, 0);
+       OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
+       req->local_port = sport;
+       /* zero peer port/address means accept from any peer */
+       req->peer_port = htons(0);
+       /* the 128-bit IPv6 address is split into two big-endian 64-bit halves */
+       req->local_ip_hi = *(__be64 *)(sip->s6_addr);
+       req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
+       req->peer_ip_hi = cpu_to_be64(0);
+       req->peer_ip_lo = cpu_to_be64(0);
+       chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+       req->opt0 = cpu_to_be64(TX_CHAN(chan));
+       req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+                               SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+       return t4_mgmt_tx(adap, skb);
+ }
+ EXPORT_SYMBOL(cxgb4_create_server6);
+ /**
+  *    cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
+  *    @mtus: the HW MTU table
+  *    @mtu: the target MTU
+  *    @idx: index of selected entry in the MTU table
+  *
+  *    Returns the index and the value in the HW MTU table that is closest to
+  *    but does not exceed @mtu, unless @mtu is smaller than any value in the
+  *    table, in which case that smallest available value is selected.
+  */
+ unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+                           unsigned int *idx)
+ {
+       unsigned int i = 0;
+       /* Walk the ascending table until the next entry would exceed @mtu. */
+       while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
+               ++i;
+       /* @idx is optional; callers that only want the value pass NULL. */
+       if (idx)
+               *idx = i;
+       return mtus[i];
+ }
+ EXPORT_SYMBOL(cxgb4_best_mtu);
+ /**
+  *    cxgb4_port_chan - get the HW channel of a port
+  *    @dev: the net device for the port
+  *
+  *    Return the HW Tx channel of the given port.
+  */
+ unsigned int cxgb4_port_chan(const struct net_device *dev)
+ {
+       /* Simple accessor into the port's private data. */
+       return netdev2pinfo(dev)->tx_chan;
+ }
+ EXPORT_SYMBOL(cxgb4_port_chan);
+ /**
+  *    cxgb4_port_viid - get the VI id of a port
+  *    @dev: the net device for the port
+  *
+  *    Return the VI id of the given port.
+  */
+ unsigned int cxgb4_port_viid(const struct net_device *dev)
+ {
+       /* Simple accessor into the port's private data. */
+       return netdev2pinfo(dev)->viid;
+ }
+ EXPORT_SYMBOL(cxgb4_port_viid);
+ /**
+  *    cxgb4_port_idx - get the index of a port
+  *    @dev: the net device for the port
+  *
+  *    Return the index of the given port.
+  */
+ unsigned int cxgb4_port_idx(const struct net_device *dev)
+ {
+       /* Simple accessor into the port's private data. */
+       return netdev2pinfo(dev)->port_id;
+ }
+ EXPORT_SYMBOL(cxgb4_port_idx);
+ /**
+  *    cxgb4_netdev_by_hwid - return the net device of a HW port
+  *    @pdev: identifies the adapter
+  *    @id: the HW port id
+  *
+  *    Return the net device associated with the interface with the given HW
+  *    id.
+  */
+ struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
+ {
+       const struct adapter *adap = pci_get_drvdata(pdev);
+       if (!adap || id >= NCHAN)
+               return NULL;
+       /* chan_map translates the HW channel id to a port index. */
+       id = adap->chan_map[id];
+       /* An out-of-range index marks a channel with no associated port. */
+       return id < MAX_NPORTS ? adap->port[id] : NULL;
+ }
+ EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
+ /*
+  * cxgb4_get_tcp_stats - read the adapter's TP TCP statistics.
+  * @pdev: identifies the adapter
+  * @v4: filled with the IPv4 TCP stats
+  * @v6: filled with the IPv6 TCP stats
+  *
+  * The read is serialized with other stats readers via stats_lock.
+  */
+ void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+                        struct tp_tcp_stats *v6)
+ {
+       struct adapter *adap = pci_get_drvdata(pdev);
+       spin_lock(&adap->stats_lock);
+       t4_tp_get_tcp_stats(adap, v4, v6);
+       spin_unlock(&adap->stats_lock);
+ }
+ EXPORT_SYMBOL(cxgb4_get_tcp_stats);
+ /*
+  * cxgb4_iscsi_init - program the iSCSI tag mask and page-size order regs.
+  * @dev: any port net device of the adapter
+  * @tag_mask: written to ULP_RX_ISCSI_TAGMASK
+  * @pgsz_order: four page-size orders packed into ULP_RX_ISCSI_PSZ
+  */
+ void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+                     const unsigned int *pgsz_order)
+ {
+       struct adapter *adap = netdev2adap(dev);
+       t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
+       t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
+                    HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
+                    HPZ3(pgsz_order[3]));
+ }
+ EXPORT_SYMBOL(cxgb4_iscsi_init);
+ /* Forward declaration; used below to recognize our own devices. */
+ static struct pci_driver cxgb4_driver;
+ /*
+  * Propagate a neighbour update to the L2T if the underlying device
+  * (after unwrapping any VLAN device) belongs to this driver.
+  */
+ static void check_neigh_update(struct neighbour *neigh)
+ {
+       const struct device *parent;
+       const struct net_device *netdev = neigh->dev;
+       if (netdev->priv_flags & IFF_802_1Q_VLAN)
+               netdev = vlan_dev_real_dev(netdev);
+       parent = netdev->dev.parent;
+       /* Compare driver identity to avoid touching foreign devices. */
+       if (parent && parent->driver == &cxgb4_driver.driver)
+               t4_l2t_update(dev_get_drvdata(parent), neigh);
+ }
+ /*
+  * Netevent notifier callback: only neighbour updates are acted upon;
+  * PMTU and redirect events are ignored.  Always returns 0.
+  */
+ static int netevent_cb(struct notifier_block *nb, unsigned long event,
+                      void *data)
+ {
+       switch (event) {
+       case NETEVENT_NEIGH_UPDATE:
+               check_neigh_update(data);
+               break;
+       case NETEVENT_PMTU_UPDATE:
+       case NETEVENT_REDIRECT:
+       default:
+               break;
+       }
+       return 0;
+ }
+ /* Tracks whether cxgb4_netevent_nb is currently registered. */
+ static bool netevent_registered;
+ static struct notifier_block cxgb4_netevent_nb = {
+       .notifier_call = netevent_cb
+ };
+ /*
+  * uld_attach - attach one upper-layer driver (ULD) to an adapter.
+  *
+  * Fills in a cxgb4_lld_info describing the adapter's resources and hands
+  * it to the ULD's ->add() method.  On success the returned handle is
+  * stored and the netevent notifier is registered (once, globally).
+  * Caller holds uld_mutex.
+  */
+ static void uld_attach(struct adapter *adap, unsigned int uld)
+ {
+       void *handle;
+       struct cxgb4_lld_info lli;
+       /*
+        * NOTE(review): lli is stack-allocated and not zeroed; rxq_ids/nrxq
+        * are only set for the RDMA and ISCSI ULD types below -- confirm no
+        * other ULD type reads them uninitialized.
+        */
+       lli.pdev = adap->pdev;
+       lli.l2t = adap->l2t;
+       lli.tids = &adap->tids;
+       lli.ports = adap->port;
+       lli.vr = &adap->vres;
+       lli.mtus = adap->params.mtus;
+       if (uld == CXGB4_ULD_RDMA) {
+               lli.rxq_ids = adap->sge.rdma_rxq;
+               lli.nrxq = adap->sge.rdmaqs;
+       } else if (uld == CXGB4_ULD_ISCSI) {
+               lli.rxq_ids = adap->sge.ofld_rxq;
+               lli.nrxq = adap->sge.ofldqsets;
+       }
+       lli.ntxq = adap->sge.ofldqsets;
+       lli.nchan = adap->params.nports;
+       lli.nports = adap->params.nports;
+       lli.wr_cred = adap->params.ofldq_wr_cred;
+       lli.adapter_type = adap->params.rev;
+       lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+       /* Doorbell/CQ densities derived from the queues-per-page registers. */
+       lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
+                       t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
+       lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
+                       t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
+       lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
+       lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+       lli.fw_vers = adap->params.fw_vers;
+       handle = ulds[uld].add(&lli);
+       if (IS_ERR(handle)) {
+               dev_warn(adap->pdev_dev,
+                        "could not attach to the %s driver, error %ld\n",
+                        uld_str[uld], PTR_ERR(handle));
+               return;
+       }
+       adap->uld_handle[uld] = handle;
+       /* Register the global netevent notifier on first successful attach. */
+       if (!netevent_registered) {
+               register_netevent_notifier(&cxgb4_netevent_nb);
+               netevent_registered = true;
+       }
+ }
+ /*
+  * Add the adapter to the global adapter list and attach every currently
+  * registered ULD to it.  Serialized by uld_mutex.
+  */
+ static void attach_ulds(struct adapter *adap)
+ {
+       unsigned int i;
+       mutex_lock(&uld_mutex);
+       list_add_tail(&adap->list_node, &adapter_list);
+       for (i = 0; i < CXGB4_ULD_MAX; i++)
+               if (ulds[i].add)
+                       uld_attach(adap, i);
+       mutex_unlock(&uld_mutex);
+ }
+ /*
+  * Remove the adapter from the global list, notify each attached ULD of
+  * the detach, and drop the netevent notifier once no adapters remain.
+  * Serialized by uld_mutex.
+  */
+ static void detach_ulds(struct adapter *adap)
+ {
+       unsigned int i;
+       mutex_lock(&uld_mutex);
+       list_del(&adap->list_node);
+       for (i = 0; i < CXGB4_ULD_MAX; i++)
+               if (adap->uld_handle[i]) {
+                       ulds[i].state_change(adap->uld_handle[i],
+                                            CXGB4_STATE_DETACH);
+                       adap->uld_handle[i] = NULL;
+               }
+       /* Unregister the notifier when the last adapter goes away. */
+       if (netevent_registered && list_empty(&adapter_list)) {
+               unregister_netevent_notifier(&cxgb4_netevent_nb);
+               netevent_registered = false;
+       }
+       mutex_unlock(&uld_mutex);
+ }
+ /*
+  * Broadcast a state change (e.g. CXGB4_STATE_UP) to all ULDs attached to
+  * this adapter.  Serialized by uld_mutex.
+  */
+ static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
+ {
+       unsigned int i;
+       mutex_lock(&uld_mutex);
+       for (i = 0; i < CXGB4_ULD_MAX; i++)
+               if (adap->uld_handle[i])
+                       ulds[i].state_change(adap->uld_handle[i], new_state);
+       mutex_unlock(&uld_mutex);
+ }
+ /**
+  *    cxgb4_register_uld - register an upper-layer driver
+  *    @type: the ULD type
+  *    @p: the ULD methods
+  *
+  *    Registers an upper-layer driver with this driver and notifies the ULD
+  *    about any presently available devices that support its type.  Returns
+  *    %-EBUSY if a ULD of the same type is already registered.
+  */
+ int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
+ {
+       int ret = 0;
+       struct adapter *adap;
+       if (type >= CXGB4_ULD_MAX)
+               return -EINVAL;
+       mutex_lock(&uld_mutex);
+       /* A non-NULL ->add marks the slot as taken. */
+       if (ulds[type].add) {
+               ret = -EBUSY;
+               goto out;
+       }
+       ulds[type] = *p;
+       /* Attach the new ULD to every adapter already present. */
+       list_for_each_entry(adap, &adapter_list, list_node)
+               uld_attach(adap, type);
+ out:  mutex_unlock(&uld_mutex);
+       return ret;
+ }
+ EXPORT_SYMBOL(cxgb4_register_uld);
+ /**
+  *    cxgb4_unregister_uld - unregister an upper-layer driver
+  *    @type: the ULD type
+  *
+  *    Unregisters an existing upper-layer driver.
+  */
+ int cxgb4_unregister_uld(enum cxgb4_uld type)
+ {
+       struct adapter *adap;
+       if (type >= CXGB4_ULD_MAX)
+               return -EINVAL;
+       mutex_lock(&uld_mutex);
+       /* Drop every adapter's handle, then free the registration slot. */
+       list_for_each_entry(adap, &adapter_list, list_node)
+               adap->uld_handle[type] = NULL;
+       ulds[type].add = NULL;
+       mutex_unlock(&uld_mutex);
+       return 0;
+ }
+ EXPORT_SYMBOL(cxgb4_unregister_uld);
+ /**
+  *    cxgb_up - enable the adapter
+  *    @adap: adapter being enabled
+  *
+  *    Called when the first port is enabled, this function performs the
+  *    actions necessary to make an adapter operational, such as completing
+  *    the initialization of HW modules, and enabling interrupts.
+  *
+  *    Must be called with the rtnl lock held.
+  */
+ static int cxgb_up(struct adapter *adap)
+ {
+       int err = 0;
+       /* One-time queue/RSS setup, performed on the very first open. */
+       if (!(adap->flags & FULL_INIT_DONE)) {
+               err = setup_sge_queues(adap);
+               if (err)
+                       goto out;
+               err = setup_rss(adap);
+               if (err) {
+                       /* Undo the queue allocation on RSS failure. */
+                       t4_free_sge_resources(adap);
+                       goto out;
+               }
+               if (adap->flags & USING_MSIX)
+                       name_msix_vecs(adap);
+               adap->flags |= FULL_INIT_DONE;
+       }
+       if (adap->flags & USING_MSIX) {
+               /* Vector 0 handles non-data interrupts; the rest are queues. */
+               err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
+                                 adap->msix_info[0].desc, adap);
+               if (err)
+                       goto irq_err;
+               err = request_msix_queue_irqs(adap);
+               if (err) {
+                       free_irq(adap->msix_info[0].vec, adap);
+                       goto irq_err;
+               }
+       } else {
+               /* Single shared vector for MSI/INTx operation. */
+               err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
+                                 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+                                 adap->name, adap);
+               if (err)
+                       goto irq_err;
+       }
+       enable_rx(adap);
+       t4_sge_start(adap);
+       t4_intr_enable(adap);
+       notify_ulds(adap, CXGB4_STATE_UP);
+  out:
+       return err;
+  irq_err:
+       dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+       goto out;
+ }
+ /*
+  * Quiesce the adapter: disable interrupts, flush the deferred TID-release
+  * work, release the IRQ(s) acquired by cxgb_up(), and drain Rx.
+  */
+ static void cxgb_down(struct adapter *adapter)
+ {
+       t4_intr_disable(adapter);
+       cancel_work_sync(&adapter->tid_release_task);
+       adapter->tid_release_task_busy = false;
+       if (adapter->flags & USING_MSIX) {
+               free_msix_queue_irqs(adapter);
+               free_irq(adapter->msix_info[0].vec, adapter);
+       } else
+               free_irq(adapter->pdev->irq, adapter);
+       quiesce_rx(adapter);
+ }
+ /*
+  * net_device operations
+  */
+ /* ndo_open: bring the adapter up on first open, then start this port. */
+ static int cxgb_open(struct net_device *dev)
+ {
+       int err;
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adapter = pi->adapter;
+       /* First port opened triggers full adapter bring-up. */
+       if (!adapter->open_device_map && (err = cxgb_up(adapter)) < 0)
+               return err;
+       dev->real_num_tx_queues = pi->nqsets;
+       set_bit(pi->tx_chan, &adapter->open_device_map);
+       link_start(dev);
+       netif_tx_start_all_queues(dev);
+       return 0;
+ }
+ /* ndo_stop: stop this port; tear the adapter down when the last closes. */
+ static int cxgb_close(struct net_device *dev)
+ {
+       int ret;
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adapter = pi->adapter;
+       netif_tx_stop_all_queues(dev);
+       netif_carrier_off(dev);
+       /*
+        * NOTE(review): the return value of t4_enable_vi() is stored in
+        * 'ret' but never checked or returned -- confirm intentional.
+        */
+       ret = t4_enable_vi(adapter, 0, pi->viid, false, false);
+       clear_bit(pi->tx_chan, &adapter->open_device_map);
+       if (!adapter->open_device_map)
+               cxgb_down(adapter);
+       return 0;
+ }
+ /*
+  * ndo_get_stats: read the HW port statistics under stats_lock and map
+  * them onto the generic net_device_stats fields.
+  */
+ static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+ {
+       struct port_stats stats;
+       struct port_info *p = netdev_priv(dev);
+       struct adapter *adapter = p->adapter;
+       struct net_device_stats *ns = &dev->stats;
+       spin_lock(&adapter->stats_lock);
+       t4_get_port_stats(adapter, p->tx_chan, &stats);
+       spin_unlock(&adapter->stats_lock);
+       ns->tx_bytes   = stats.tx_octets;
+       ns->tx_packets = stats.tx_frames;
+       ns->rx_bytes   = stats.rx_octets;
+       ns->rx_packets = stats.rx_frames;
+       ns->multicast  = stats.rx_mcast_frames;
+       /* detailed rx_errors */
+       ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
+                              stats.rx_runt;
+       ns->rx_over_errors   = 0;
+       ns->rx_crc_errors    = stats.rx_fcs_err;
+       ns->rx_frame_errors  = stats.rx_symbol_err;
+       /* Sum per-channel FIFO overflows and truncations. */
+       ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
+                              stats.rx_ovflow2 + stats.rx_ovflow3 +
+                              stats.rx_trunc0 + stats.rx_trunc1 +
+                              stats.rx_trunc2 + stats.rx_trunc3;
+       ns->rx_missed_errors = 0;
+       /* detailed tx_errors */
+       ns->tx_aborted_errors   = 0;
+       ns->tx_carrier_errors   = 0;
+       ns->tx_fifo_errors      = 0;
+       ns->tx_heartbeat_errors = 0;
+       ns->tx_window_errors    = 0;
+       ns->tx_errors = stats.tx_error_frames;
+       ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
+               ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
+       return ns;
+ }
+ /*
+  * ndo_do_ioctl: MII ioctls only.  SIOCGMIIPHY reports the PHY address;
+  * SIOCGMIIREG/SIOCSMIIREG access MDIO registers, accepting both clause-45
+  * composite phy_ids and plain clause-22 addresses (< 32).
+  */
+ static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+ {
+       int ret = 0, prtad, devad;
+       struct port_info *pi = netdev_priv(dev);
+       struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+       switch (cmd) {
+       case SIOCGMIIPHY:
+               if (pi->mdio_addr < 0)
+                       return -EOPNOTSUPP;
+               data->phy_id = pi->mdio_addr;
+               break;
+       case SIOCGMIIREG:
+       case SIOCSMIIREG:
+               /* Decode clause-45 phy_id, else treat as a clause-22 addr. */
+               if (mdio_phy_id_is_c45(data->phy_id)) {
+                       prtad = mdio_phy_id_prtad(data->phy_id);
+                       devad = mdio_phy_id_devad(data->phy_id);
+               } else if (data->phy_id < 32) {
+                       prtad = data->phy_id;
+                       devad = 0;
+                       data->reg_num &= 0x1f;
+               } else
+                       return -EINVAL;
+               if (cmd == SIOCGMIIREG)
+                       ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
+                                        data->reg_num, &data->val_out);
+               else
+                       ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
+                                        data->reg_num, data->val_in);
+               break;
+       default:
+               return -EOPNOTSUPP;
+       }
+       return ret;
+ }
+ /* ndo_set_rx_mode: push the current rx filter mode to HW. */
+ static void cxgb_set_rxmode(struct net_device *dev)
+ {
+       /* unfortunately we can't return errors to the stack */
+       set_rxmode(dev, -1, false);
+ }
+ /*
+  * ndo_change_mtu: validate the requested MTU and program it into the VI's
+  * rx mode; dev->mtu is only updated after HW accepts the new value.
+  */
+ static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+ {
+       int ret;
+       struct port_info *pi = netdev_priv(dev);
+       if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
+               return -EINVAL;
+       ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
+                           true);
+       if (!ret)
+               dev->mtu = new_mtu;
+       return ret;
+ }
+ /*
+  * ndo_set_mac_address: program the new unicast MAC into the VI's exact-
+  * match filter; dev->dev_addr is only updated after HW accepts it.
+  */
+ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+ {
+       int ret;
+       struct sockaddr *addr = p;
+       struct port_info *pi = netdev_priv(dev);
+       if (!is_valid_ether_addr(addr->sa_data))
+               return -EINVAL;
+       ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
+                           addr->sa_data, true, true);
+       if (ret < 0)
+               return ret;
+       memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+       /* t4_change_mac() returns the new filter index on success. */
+       pi->xact_addr_filt = ret;
+       return 0;
+ }
+ /*
+  * ndo_vlan_rx_register: remember the VLAN group and toggle HW VLAN
+  * acceleration for this port's channel accordingly.
+  */
+ static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+ {
+       struct port_info *pi = netdev_priv(dev);
+       pi->vlan_grp = grp;
+       t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
+ }
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+ /*
+  * ndo_poll_controller: service this port's Rx queues with interrupts
+  * disabled by invoking the interrupt handlers directly.
+  */
+ static void cxgb_netpoll(struct net_device *dev)
+ {
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = pi->adapter;
+       if (adap->flags & USING_MSIX) {
+               int i;
+               struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
+               /* Poll each of this port's Ethernet Rx queues. */
+               for (i = pi->nqsets; i; i--, rx++)
+                       t4_sge_intr_msix(0, &rx->rspq);
+       } else
+               t4_intr_handler(adap)(0, adap);
+ }
+ #endif
+ /* net_device_ops for cxgb4 ports. */
+ static const struct net_device_ops cxgb4_netdev_ops = {
+       .ndo_open             = cxgb_open,
+       .ndo_stop             = cxgb_close,
+       .ndo_start_xmit       = t4_eth_xmit,
+       .ndo_get_stats        = cxgb_get_stats,
+       .ndo_set_rx_mode      = cxgb_set_rxmode,
+       .ndo_set_mac_address  = cxgb_set_mac_addr,
+       .ndo_validate_addr    = eth_validate_addr,
+       .ndo_do_ioctl         = cxgb_ioctl,
+       .ndo_change_mtu       = cxgb_change_mtu,
+       .ndo_vlan_rx_register = vlan_rx_register,
+ #ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller  = cxgb_netpoll,
+ #endif
+ };
+ /*
+  * Fatal-error handler: globally disable the SGE and all interrupts, then
+  * log that the adapter has been stopped.
+  */
+ void t4_fatal_err(struct adapter *adap)
+ {
+       t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+       t4_intr_disable(adap);
+       dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
+ }
+ /*
+  * Program the three PCIe memory-access windows relative to BAR0, each
+  * with its aperture size encoded as log2(aperture) - 10.
+  */
+ static void setup_memwin(struct adapter *adap)
+ {
+       u32 bar0;
+       bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
+                    (bar0 + MEMWIN0_BASE) | BIR(0) |
+                    WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
+                    (bar0 + MEMWIN1_BASE) | BIR(0) |
+                    WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
+       t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
+                    (bar0 + MEMWIN2_BASE) | BIR(0) |
+                    WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+ }
+ /*
+  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
+  */
+ #define MAX_ATIDS 8192U
+ /*
+  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+  */
+ static int adap_init0(struct adapter *adap)
+ {
+       int ret;
+       u32 v, port_vec;
+       enum dev_state state;
+       u32 params[7], val[7];
+       struct fw_caps_config_cmd c;
+       /* Upgrade FW if the flashed version is invalid or too old. */
+       ret = t4_check_fw_version(adap);
+       if (ret == -EINVAL || ret > 0) {
+               if (upgrade_fw(adap) >= 0)             /* recache FW version */
+                       ret = t4_check_fw_version(adap);
+       }
+       if (ret < 0)
+               return ret;
+       /* contact FW, request master */
+       ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
+       if (ret < 0) {
+               dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
+                       ret);
+               return ret;
+       }
+       /* reset device */
+       ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
+       if (ret < 0)
+               goto bye;
+       /* get device capabilities */
+       memset(&c, 0, sizeof(c));
+       c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                             FW_CMD_REQUEST | FW_CMD_READ);
+       c.retval_len16 = htonl(FW_LEN16(c));
+       ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+       if (ret < 0)
+               goto bye;
+       /* select capabilities we'll be using */
+       if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+               if (!vf_acls)
+                       c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+               else
+                       c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+       } else if (vf_acls) {
+               dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+               /*
+                * NOTE(review): 'ret' is 0 at this point, so this error path
+                * appears to return success -- confirm intended.
+                */
+               goto bye;
+       }
+       /* Write the (possibly trimmed) capabilities back to FW. */
+       c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+                             FW_CMD_REQUEST | FW_CMD_WRITE);
+       ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
+       if (ret < 0)
+               goto bye;
+       ret = t4_config_glbl_rss(adap, 0,
+                                FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+                                FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+                                FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+       if (ret < 0)
+               goto bye;
+       /* Configure this PF's resource limits. */
+       ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
+                         FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+       if (ret < 0)
+               goto bye;
+       /* Populate SGE interrupt holdoff timers and packet-count thresholds. */
+       for (v = 0; v < SGE_NTIMERS - 1; v++)
+               adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
+       adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
+       adap->sge.counter_val[0] = 1;
+       for (v = 1; v < SGE_NCOUNTERS; v++)
+               adap->sge.counter_val[v] = min(intr_cnt[v - 1],
+                                              THRESHOLD_3_MASK);
+       t4_sge_init(adap);
+       /* get basic stuff going */
+       ret = t4_early_init(adap, 0);
+       if (ret < 0)
+               goto bye;
+ #define FW_PARAM_DEV(param) \
+       (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+ #define FW_PARAM_PFVF(param) \
+       (FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+        FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
+       /* Query basic device parameters: port vector and filter TID range. */
+       params[0] = FW_PARAM_DEV(PORTVEC);
+       params[1] = FW_PARAM_PFVF(L2T_START);
+       params[2] = FW_PARAM_PFVF(L2T_END);
+       params[3] = FW_PARAM_PFVF(FILTER_START);
+       params[4] = FW_PARAM_PFVF(FILTER_END);
+       ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
+       if (ret < 0)
+               goto bye;
+       port_vec = val[0];
+       adap->tids.ftid_base = val[3];
+       adap->tids.nftids = val[4] - val[3] + 1;
+       if (c.ofldcaps) {
+               /* query offload-related parameters */
+               params[0] = FW_PARAM_DEV(NTID);
+               params[1] = FW_PARAM_PFVF(SERVER_START);
+               params[2] = FW_PARAM_PFVF(SERVER_END);
+               params[3] = FW_PARAM_PFVF(TDDP_START);
+               params[4] = FW_PARAM_PFVF(TDDP_END);
+               params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
+               ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+               if (ret < 0)
+                       goto bye;
+               adap->tids.ntids = val[0];
+               adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+               adap->tids.stid_base = val[1];
+               adap->tids.nstids = val[2] - val[1] + 1;
+               adap->vres.ddp.start = val[3];
+               adap->vres.ddp.size = val[4] - val[3] + 1;
+               adap->params.ofldq_wr_cred = val[5];
+               adap->params.offload = 1;
+       }
+       if (c.rdmacaps) {
+               /* query RDMA resource ranges (STAG, RQ, PBL) */
+               params[0] = FW_PARAM_PFVF(STAG_START);
+               params[1] = FW_PARAM_PFVF(STAG_END);
+               params[2] = FW_PARAM_PFVF(RQ_START);
+               params[3] = FW_PARAM_PFVF(RQ_END);
+               params[4] = FW_PARAM_PFVF(PBL_START);
+               params[5] = FW_PARAM_PFVF(PBL_END);
+               ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+               if (ret < 0)
+                       goto bye;
+               adap->vres.stag.start = val[0];
+               adap->vres.stag.size = val[1] - val[0] + 1;
+               adap->vres.rq.start = val[2];
+               adap->vres.rq.size = val[3] - val[2] + 1;
+               adap->vres.pbl.start = val[4];
+               adap->vres.pbl.size = val[5] - val[4] + 1;
+       }
+       if (c.iscsicaps) {
+               /* query the iSCSI resource range */
+               params[0] = FW_PARAM_PFVF(ISCSI_START);
+               params[1] = FW_PARAM_PFVF(ISCSI_END);
+               ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+               if (ret < 0)
+                       goto bye;
+               adap->vres.iscsi.start = val[0];
+               adap->vres.iscsi.size = val[1] - val[0] + 1;
+       }
+ #undef FW_PARAM_PFVF
+ #undef FW_PARAM_DEV
+       adap->params.nports = hweight32(port_vec);
+       adap->params.portvec = port_vec;
+       adap->flags |= FW_OK;
+       /* These are finalized by FW initialization, load their values now */
+       v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+       adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+       t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+       t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+                    adap->params.b_wnd);
+       /* tweak some settings */
+       t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
+       t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
+       t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
+       v = t4_read_reg(adap, TP_PIO_DATA);
+       t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+       setup_memwin(adap);
+       return 0;
+       /*
+        * If a command timed out or failed with EIO FW does not operate within
+        * its spec or something catastrophic happened to HW/FW, stop issuing
+        * commands.
+        */
+ bye:  if (ret != -ETIMEDOUT && ret != -EIO)
+               t4_fw_bye(adap, 0);
+       return ret;
+ }
+ /* Return true if the link supports 10G operation. */
+ static inline bool is_10g_port(const struct link_config *lc)
+ {
+       return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+ }
+ /*
+  * Initialize a response queue's default interrupt coalescing parameters:
+  * holdoff timer index, optional packet-count threshold, entry size, and
+  * queue depth.  An out-of-range pkt_cnt_idx disables the count threshold.
+  */
+ static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+                            unsigned int size, unsigned int iqe_size)
+ {
+       q->intr_params = QINTR_TIMER_IDX(timer_idx) |
+                        (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
+       q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+       q->iqe_len = iqe_size;
+       q->size = size;
+ }
+ /*
+  * Perform default configuration of DMA queues depending on the number and type
+  * of ports we found and the number of available CPUs.  Most settings can be
+  * modified by the admin prior to actual use.
+  */
+ static void __devinit cfg_queues(struct adapter *adap)
+ {
+       struct sge *s = &adap->sge;
+       int i, q10g = 0, n10g = 0, qidx = 0;
+       /* Count ports capable of 10G. */
+       for_each_port(adap, i)
+               n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+       /*
+        * We default to 1 queue per non-10G port and up to # of cores queues
+        * per 10G port.
+        */
+       if (n10g)
+               q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+       if (q10g > num_online_cpus())
+               q10g = num_online_cpus();
+       /* Assign each port its contiguous queue-set range. */
+       for_each_port(adap, i) {
+               struct port_info *pi = adap2pinfo(adap, i);
+               pi->first_qset = qidx;
+               pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+               qidx += pi->nqsets;
+       }
+       s->ethqsets = qidx;
+       s->max_ethqsets = qidx;   /* MSI-X may lower it later */
+       if (is_offload(adap)) {
+               /*
+                * For offload we use 1 queue/channel if all ports are up to 1G,
+                * otherwise we divide all available queues amongst the channels
+                * capped by the number of available cores.
+                */
+               if (n10g) {
+                       i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+                                 num_online_cpus());
+                       s->ofldqsets = roundup(i, adap->params.nports);
+               } else
+                       s->ofldqsets = adap->params.nports;
+               /* For RDMA one Rx queue per channel suffices */
+               s->rdmaqs = adap->params.nports;
+       }
+       /* Default sizes for all queue flavors; admin-tunable later. */
+       for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+               struct sge_eth_rxq *r = &s->ethrxq[i];
+               init_rspq(&r->rspq, 0, 0, 1024, 64);
+               r->fl.size = 72;
+       }
+       for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+               s->ethtxq[i].q.size = 1024;
+       for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
+               s->ctrlq[i].q.size = 512;
+       for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
+               s->ofldtxq[i].q.size = 1024;
+       for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
+               struct sge_ofld_rxq *r = &s->ofldrxq[i];
+               init_rspq(&r->rspq, 0, 0, 1024, 64);
+               r->rspq.uld = CXGB4_ULD_ISCSI;
+               r->fl.size = 72;
+       }
+       for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
+               struct sge_ofld_rxq *r = &s->rdmarxq[i];
+               init_rspq(&r->rspq, 0, 0, 511, 64);
+               r->rspq.uld = CXGB4_ULD_RDMA;
+               r->fl.size = 72;
+       }
+       init_rspq(&s->fw_evtq, 6, 0, 512, 64);
+       init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+ }
+ /*
+  * Reduce the number of Ethernet queues across all ports to at most n.
+  * n provides at least one queue per port.
+  */
+ static void __devinit reduce_ethqs(struct adapter *adap, int n)
+ {
+       int i;
+       struct port_info *pi;
+       /* Strip one queue per round-robin pass until the budget is met. */
+       while (n < adap->sge.ethqsets)
+               for_each_port(adap, i) {
+                       pi = adap2pinfo(adap, i);
+                       if (pi->nqsets > 1) {
+                               pi->nqsets--;
+                               adap->sge.ethqsets--;
+                               if (adap->sge.ethqsets <= n)
+                                       break;
+                       }
+               }
+       /* Recompute each port's first queue-set index after the trim. */
+       n = 0;
+       for_each_port(adap, i) {
+               pi = adap2pinfo(adap, i);
+               pi->first_qset = n;
+               n += pi->nqsets;
+       }
+ }
+ /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
+ #define EXTRA_VECS 2
+ /*
+  * Try to enable MSI-X, negotiating the vector count downward if fewer
+  * vectors are available, and distribute the granted vectors among the
+  * NIC and offload queue groups.  Returns 0 on success, non-zero otherwise.
+  */
+ static int __devinit enable_msix(struct adapter *adap)
+ {
+       int ofld_need = 0;
+       int i, err, want, need;
+       struct sge *s = &adap->sge;
+       unsigned int nchan = adap->params.nports;
+       struct msix_entry entries[MAX_INGQ + 1];
+       for (i = 0; i < ARRAY_SIZE(entries); ++i)
+               entries[i].entry = i;
+       want = s->max_ethqsets + EXTRA_VECS;
+       if (is_offload(adap)) {
+               want += s->rdmaqs + s->ofldqsets;
+               /* need nchan for each possible ULD */
+               ofld_need = 2 * nchan;
+       }
+       need = adap->params.nports + EXTRA_VECS + ofld_need;
+       /*
+        * pci_enable_msix() returns 0 on success or, when it fails, the
+        * number of vectors actually available; retry with that count while
+        * it still covers our minimum.
+        */
+       while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
+               want = err;
+       if (!err) {
+               /*
+                * Distribute available vectors to the various queue groups.
+                * Every group gets its minimum requirement and NIC gets top
+                * priority for leftovers.
+                */
+               i = want - EXTRA_VECS - ofld_need;
+               if (i < s->max_ethqsets) {
+                       s->max_ethqsets = i;
+                       if (i < s->ethqsets)
+                               reduce_ethqs(adap, i);
+               }
+               if (is_offload(adap)) {
+                       i = want - EXTRA_VECS - s->max_ethqsets;
+                       i -= ofld_need - nchan;
+                       s->ofldqsets = (i / nchan) * nchan;  /* round down */
+               }
+               for (i = 0; i < want; ++i)
+                       adap->msix_info[i].vec = entries[i].vector;
+       } else if (err > 0)
+               dev_info(adap->pdev_dev,
+                        "only %d MSI-X vectors left, not using MSI-X\n", err);
+       return err;
+ }
+ #undef EXTRA_VECS
+ /*
+  * Log a one-line description of each registered port (supported speeds,
+  * media type, offload capability, PCIe width, interrupt mode), plus the
+  * adapter's serial number once on the port that carries the adapter name.
+  */
+ static void __devinit print_port_info(struct adapter *adap)
+ {
+       static const char *base[] = {
+               "R", "KX4", "T", "KX", "T", "KR", "CX4"
+       };
+       int i;
+       char buf[80];
+       for_each_port(adap, i) {
+               struct net_device *dev = adap->port[i];
+               const struct port_info *pi = netdev_priv(dev);
+               char *bufp = buf;
+               if (!test_bit(i, &adap->registered_device_map))
+                       continue;
+               /* Build a "100/1000/10G" style speed list. */
+               if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+                       bufp += sprintf(bufp, "100/");
+               if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+                       bufp += sprintf(bufp, "1000/");
+               if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+                       bufp += sprintf(bufp, "10G/");
+               /* Drop the trailing '/' before appending the media suffix. */
+               if (bufp != buf)
+                       --bufp;
+               sprintf(bufp, "BASE-%s", base[pi->port_type]);
+               netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
+                           adap->params.vpd.id, adap->params.rev,
+                           buf, is_offload(adap) ? "R" : "",
+                           adap->params.pci.width,
+                           (adap->flags & USING_MSIX) ? " MSI-X" :
+                           (adap->flags & USING_MSI) ? " MSI" : "");
+               /* Print S/N only for the port the adapter is named after. */
+               if (adap->name == dev->name)
+                       netdev_info(dev, "S/N: %s, E/C: %s\n",
+                                   adap->params.vpd.sn, adap->params.vpd.ec);
+       }
+ }
+ #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
+                  NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+ static int __devinit init_one(struct pci_dev *pdev,
+                             const struct pci_device_id *ent)
+ {
+       int func, i, err;
+       struct port_info *pi;
+       unsigned int highdma = 0;
+       struct adapter *adapter = NULL;
+       printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+       err = pci_request_regions(pdev, KBUILD_MODNAME);
+       if (err) {
+               /* Just info, some other driver may have claimed the device. */
+               dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+               return err;
+       }
+       /* We control everything through PF 0 */
+       func = PCI_FUNC(pdev->devfn);
+       if (func > 0)
+               goto sriov;
+       err = pci_enable_device(pdev);
+       if (err) {
+               dev_err(&pdev->dev, "cannot enable PCI device\n");
+               goto out_release_regions;
+       }
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               highdma = NETIF_F_HIGHDMA;
+               err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+               if (err) {
+                       dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+                               "coherent allocations\n");
+                       goto out_disable_device;
+               }
+       } else {
+               err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (err) {
+                       dev_err(&pdev->dev, "no usable DMA configuration\n");
+                       goto out_disable_device;
+               }
+       }
+       pci_enable_pcie_error_reporting(pdev);
+       pci_set_master(pdev);
+       pci_save_state(pdev);
+       adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+       if (!adapter) {
+               err = -ENOMEM;
+               goto out_disable_device;
+       }
+       adapter->regs = pci_ioremap_bar(pdev, 0);
+       if (!adapter->regs) {
+               dev_err(&pdev->dev, "cannot map device registers\n");
+               err = -ENOMEM;
+               goto out_free_adapter;
+       }
+       adapter->pdev = pdev;
+       adapter->pdev_dev = &pdev->dev;
+       adapter->name = pci_name(pdev);
+       adapter->msg_enable = dflt_msg_enable;
+       memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+       spin_lock_init(&adapter->stats_lock);
+       spin_lock_init(&adapter->tid_release_lock);
+       INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+       err = t4_prep_adapter(adapter);
+       if (err)
+               goto out_unmap_bar;
+       err = adap_init0(adapter);
+       if (err)
+               goto out_unmap_bar;
+       for_each_port(adapter, i) {
+               struct net_device *netdev;
+               netdev = alloc_etherdev_mq(sizeof(struct port_info),
+                                          MAX_ETH_QSETS);
+               if (!netdev) {
+                       err = -ENOMEM;
+                       goto out_free_dev;
+               }
+               SET_NETDEV_DEV(netdev, &pdev->dev);
+               adapter->port[i] = netdev;
+               pi = netdev_priv(netdev);
+               pi->adapter = adapter;
+               pi->xact_addr_filt = -1;
+               pi->rx_offload = RX_CSO;
+               pi->port_id = i;
+               netif_carrier_off(netdev);
+               netif_tx_stop_all_queues(netdev);
+               netdev->irq = pdev->irq;
+               netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+               netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+               netdev->features |= NETIF_F_GRO | highdma;
+               netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+               netdev->vlan_features = netdev->features & VLAN_FEAT;
+               netdev->netdev_ops = &cxgb4_netdev_ops;
+               SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+       }
+       pci_set_drvdata(pdev, adapter);
+       if (adapter->flags & FW_OK) {
+               err = t4_port_init(adapter, 0, 0, 0);
+               if (err)
+                       goto out_free_dev;
+       }
+       /*
+        * Configure queues and allocate tables now, they can be needed as
+        * soon as the first register_netdev completes.
+        */
+       cfg_queues(adapter);
+       adapter->l2t = t4_init_l2t();
+       if (!adapter->l2t) {
+               /* We tolerate a lack of L2T, giving up some functionality */
+               dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
+               adapter->params.offload = 0;
+       }
+       if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+               dev_warn(&pdev->dev, "could not allocate TID table, "
+                        "continuing\n");
+               adapter->params.offload = 0;
+       }
+       /*
+        * The card is now ready to go.  If any errors occur during device
+        * registration we do not fail the whole card but rather proceed only
+        * with the ports we manage to register successfully.  However we must
+        * register at least one net device.
+        */
+       for_each_port(adapter, i) {
+               err = register_netdev(adapter->port[i]);
+               if (err)
+                       dev_warn(&pdev->dev,
+                                "cannot register net device %s, skipping\n",
+                                adapter->port[i]->name);
+               else {
+                       /*
+                        * Change the name we use for messages to the name of
+                        * the first successfully registered interface.
+                        */
+                       if (!adapter->registered_device_map)
+                               adapter->name = adapter->port[i]->name;
+                       __set_bit(i, &adapter->registered_device_map);
+                       adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
+               }
+       }
+       if (!adapter->registered_device_map) {
+               dev_err(&pdev->dev, "could not register any net devices\n");
+               goto out_free_dev;
+       }
+       if (cxgb4_debugfs_root) {
+               adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
+                                                          cxgb4_debugfs_root);
+               setup_debugfs(adapter);
+       }
+       /* See what interrupts we'll be using */
+       if (msi > 1 && enable_msix(adapter) == 0)
+               adapter->flags |= USING_MSIX;
+       else if (msi > 0 && pci_enable_msi(pdev) == 0)
+               adapter->flags |= USING_MSI;
+       if (is_offload(adapter))
+               attach_ulds(adapter);
+       print_port_info(adapter);
+ sriov:
+ #ifdef CONFIG_PCI_IOV
+       if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+               if (pci_enable_sriov(pdev, num_vf[func]) == 0)
+                       dev_info(&pdev->dev,
+                                "instantiated %u virtual functions\n",
+                                num_vf[func]);
+ #endif
+       return 0;
+  out_free_dev:
+       t4_free_mem(adapter->tids.tid_tab);
+       t4_free_mem(adapter->l2t);
+       for_each_port(adapter, i)
+               if (adapter->port[i])
+                       free_netdev(adapter->port[i]);
+       if (adapter->flags & FW_OK)
+               t4_fw_bye(adapter, 0);
+  out_unmap_bar:
+       iounmap(adapter->regs);
+  out_free_adapter:
+       kfree(adapter);
+  out_disable_device:
+       pci_disable_pcie_error_reporting(pdev);
+       pci_disable_device(pdev);
+  out_release_regions:
+       pci_release_regions(pdev);
+       pci_set_drvdata(pdev, NULL);
+       return err;
+ }
+ static void __devexit remove_one(struct pci_dev *pdev)
+ {
+       struct adapter *adapter = pci_get_drvdata(pdev);
+       pci_disable_sriov(pdev);
+       if (adapter) {
+               int i;
+               if (is_offload(adapter))
+                       detach_ulds(adapter);
+               for_each_port(adapter, i)
+                       if (test_bit(i, &adapter->registered_device_map))
+                               unregister_netdev(adapter->port[i]);
+               if (adapter->debugfs_root)
+                       debugfs_remove_recursive(adapter->debugfs_root);
+               t4_sge_stop(adapter);
+               t4_free_sge_resources(adapter);
+               t4_free_mem(adapter->l2t);
+               t4_free_mem(adapter->tids.tid_tab);
+               disable_msi(adapter);
+               for_each_port(adapter, i)
+                       if (adapter->port[i])
+                               free_netdev(adapter->port[i]);
+               if (adapter->flags & FW_OK)
+                       t4_fw_bye(adapter, 0);
+               iounmap(adapter->regs);
+               kfree(adapter);
+               pci_disable_pcie_error_reporting(pdev);
+               pci_disable_device(pdev);
+               pci_release_regions(pdev);
+               pci_set_drvdata(pdev, NULL);
+       } else if (PCI_FUNC(pdev->devfn) > 0)
+               pci_release_regions(pdev);
+ }
+ static struct pci_driver cxgb4_driver = {
+       .name     = KBUILD_MODNAME,
+       .id_table = cxgb4_pci_tbl,
+       .probe    = init_one,
+       .remove   = __devexit_p(remove_one),
+ };
+ static int __init cxgb4_init_module(void)
+ {
+       int ret;
+       /* Debugfs support is optional, just warn if this fails */
+       cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+       if (!cxgb4_debugfs_root)
+               pr_warning("could not create debugfs entry, continuing\n");
+       ret = pci_register_driver(&cxgb4_driver);
+       if (ret < 0)
+               debugfs_remove(cxgb4_debugfs_root);
+       return ret;
+ }
+ static void __exit cxgb4_cleanup_module(void)
+ {
+       pci_unregister_driver(&cxgb4_driver);
+       debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
+ }
+ module_init(cxgb4_init_module);
+ module_exit(cxgb4_cleanup_module);
index 41330349b07ab924e3e2ae31b7db6c450ce38dbd,b15ece26ed8469136df4d40eced1b98839e4acce..47da5fc1e9f41ffd2ce57ba364d5446d8f958fd3
@@@ -383,8 -383,6 +383,6 @@@ static void e1000_configure(struct e100
                adapter->alloc_rx_buf(adapter, ring,
                                      E1000_DESC_UNUSED(ring));
        }
-       adapter->tx_queue_len = netdev->tx_queue_len;
  }
  
  int e1000_up(struct e1000_adapter *adapter)
@@@ -503,7 -501,6 +501,6 @@@ void e1000_down(struct e1000_adapter *a
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
  
-       netdev->tx_queue_len = adapter->tx_queue_len;
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
        netif_carrier_off(netdev);
@@@ -2101,6 -2098,7 +2098,6 @@@ static void e1000_set_rx_mode(struct ne
        struct e1000_hw *hw = &adapter->hw;
        struct netdev_hw_addr *ha;
        bool use_uc = false;
 -      struct dev_addr_list *mc_ptr;
        u32 rctl;
        u32 hash_value;
        int i, rar_entries = E1000_RAR_ENTRIES;
  
        WARN_ON(i == rar_entries);
  
 -      netdev_for_each_mc_addr(mc_ptr, netdev) {
 +      netdev_for_each_mc_addr(ha, netdev) {
                if (i == rar_entries) {
                        /* load any remaining addresses into the hash table */
                        u32 hash_reg, hash_bit, mta;
 -                      hash_value = e1000_hash_mc_addr(hw, mc_ptr->da_addr);
 +                      hash_value = e1000_hash_mc_addr(hw, ha->addr);
                        hash_reg = (hash_value >> 5) & 0x7F;
                        hash_bit = hash_value & 0x1F;
                        mta = (1 << hash_bit);
                        mcarray[hash_reg] |= mta;
                } else {
 -                      e1000_rar_set(hw, mc_ptr->da_addr, i++);
 +                      e1000_rar_set(hw, ha->addr, i++);
                }
        }
  
@@@ -2315,19 -2313,15 +2312,15 @@@ static void e1000_watchdog(unsigned lon
                                E1000_CTRL_RFCE) ? "RX" : ((ctrl &
                                E1000_CTRL_TFCE) ? "TX" : "None" )));
  
-                       /* tweak tx_queue_len according to speed/duplex
-                        * and adjust the timeout factor */
-                       netdev->tx_queue_len = adapter->tx_queue_len;
+                       /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
                                txb2b = false;
-                               netdev->tx_queue_len = 10;
                                adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
                                txb2b = false;
-                               netdev->tx_queue_len = 100;
                                /* maybe add some timeout factor ? */
                                break;
                        }
index 11e02e1f187ca5bed2836b3ba48183029c0481ec,118bdf4835938301b37af18b10a45c206abe2b24..12648a1cdb78956cff27ad6749c2bfa50af74c79
  
  struct e1000_info;
  
 -#define e_printk(level, adapter, format, arg...) \
 -      printk(level "%s: %s: " format, pci_name(adapter->pdev), \
 -             adapter->netdev->name, ## arg)
 -
 -#ifdef DEBUG
  #define e_dbg(format, arg...) \
 -      e_printk(KERN_DEBUG , hw->adapter, format, ## arg)
 -#else
 -#define e_dbg(format, arg...) do { (void)(hw); } while (0)
 -#endif
 -
 +      netdev_dbg(hw->adapter->netdev, format, ## arg)
  #define e_err(format, arg...) \
 -      e_printk(KERN_ERR, adapter, format, ## arg)
 +      netdev_err(adapter->netdev, format, ## arg)
  #define e_info(format, arg...) \
 -      e_printk(KERN_INFO, adapter, format, ## arg)
 +      netdev_info(adapter->netdev, format, ## arg)
  #define e_warn(format, arg...) \
 -      e_printk(KERN_WARNING, adapter, format, ## arg)
 +      netdev_warn(adapter->netdev, format, ## arg)
  #define e_notice(format, arg...) \
 -      e_printk(KERN_NOTICE, adapter, format, ## arg)
 +      netdev_notice(adapter->netdev, format, ## arg)
  
  
  /* Interrupt modes, as used by the IntMode parameter */
  #define HV_M_STATUS_SPEED_1000            0x0200
  #define HV_M_STATUS_LINK_UP               0x0040
  
 +/* Time to wait before putting the device into D3 if there's no link (in ms). */
 +#define LINK_TIMEOUT          100
 +
  enum e1000_boards {
        board_82571,
        board_82572,
@@@ -273,7 -279,6 +273,6 @@@ struct e1000_adapter 
  
        struct napi_struct napi;
  
-       unsigned long tx_queue_len;
        unsigned int restart_queue;
        u32 txd_cmd;
  
        struct work_struct update_phy_task;
        struct work_struct led_blink_task;
        struct work_struct print_hang_task;
 +
 +      bool idle_check;
  };
  
  struct e1000_info {
index 02f7d20f3c80bb505c27984ebd084d77adf1dd76,e1cceb6065769bf040027049a6fd90f5291d14cf..167b1aedfb42588511350c9d8be0010858caeb56
@@@ -26,8 -26,6 +26,8 @@@
  
  *******************************************************************************/
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/module.h>
  #include <linux/types.h>
  #include <linux/init.h>
@@@ -46,7 -44,6 +46,7 @@@
  #include <linux/cpu.h>
  #include <linux/smp.h>
  #include <linux/pm_qos_params.h>
 +#include <linux/pm_runtime.h>
  #include <linux/aer.h>
  
  #include "e1000.h"
@@@ -2292,8 -2289,6 +2292,6 @@@ static void e1000_configure_tx(struct e
        ew32(TCTL, tctl);
  
        e1000e_config_collision_dist(hw);
-       adapter->tx_queue_len = adapter->netdev->tx_queue_len;
  }
  
  /**
@@@ -2567,7 -2562,7 +2565,7 @@@ static void e1000_set_multi(struct net_
  {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 -      struct dev_mc_list *mc_ptr;
 +      struct netdev_hw_addr *ha;
        u8  *mta_list;
        u32 rctl;
        int i;
  
                /* prepare a packed array of only addresses. */
                i = 0;
 -              netdev_for_each_mc_addr(mc_ptr, netdev)
 -                      memcpy(mta_list + (i++ * ETH_ALEN),
 -                             mc_ptr->dmi_addr, ETH_ALEN);
 +              netdev_for_each_mc_addr(ha, netdev)
 +                      memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
  
                e1000_update_mc_addr_list(hw, mta_list, i);
                kfree(mta_list);
@@@ -2879,7 -2875,6 +2877,6 @@@ void e1000e_down(struct e1000_adapter *
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
  
-       netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);
        adapter->link_speed = 0;
        adapter->link_duplex = 0;
@@@ -3085,15 -3080,12 +3082,15 @@@ static int e1000_open(struct net_devic
  {
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 +      struct pci_dev *pdev = adapter->pdev;
        int err;
  
        /* disallow open during test */
        if (test_bit(__E1000_TESTING, &adapter->state))
                return -EBUSY;
  
 +      pm_runtime_get_sync(&pdev->dev);
 +
        netif_carrier_off(netdev);
  
        /* allocate transmit descriptors */
  
        netif_start_queue(netdev);
  
 +      adapter->idle_check = true;
 +      pm_runtime_put(&pdev->dev);
 +
        /* fire a link status change interrupt to start the watchdog */
        ew32(ICS, E1000_ICS_LSC);
  
@@@ -3170,7 -3159,6 +3167,7 @@@ err_setup_rx
        e1000e_free_tx_resources(adapter);
  err_setup_tx:
        e1000e_reset(adapter);
 +      pm_runtime_put_sync(&pdev->dev);
  
        return err;
  }
  static int e1000_close(struct net_device *netdev)
  {
        struct e1000_adapter *adapter = netdev_priv(netdev);
 +      struct pci_dev *pdev = adapter->pdev;
  
        WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
 -      e1000e_down(adapter);
 +
 +      pm_runtime_get_sync(&pdev->dev);
 +
 +      if (!test_bit(__E1000_DOWN, &adapter->state)) {
 +              e1000e_down(adapter);
 +              e1000_free_irq(adapter);
 +      }
        e1000_power_down_phy(adapter);
 -      e1000_free_irq(adapter);
  
        e1000e_free_tx_resources(adapter);
        e1000e_free_rx_resources(adapter);
        if (adapter->flags & FLAG_HAS_AMT)
                e1000_release_hw_control(adapter);
  
 +      pm_runtime_put_sync(&pdev->dev);
 +
        return 0;
  }
  /**
@@@ -3567,9 -3547,6 +3564,9 @@@ static void e1000_watchdog_task(struct 
  
        link = e1000e_has_link(adapter);
        if ((netif_carrier_ok(netdev)) && link) {
 +              /* Cancel scheduled suspend requests. */
 +              pm_runtime_resume(netdev->dev.parent);
 +
                e1000e_enable_receives(adapter);
                goto link_up;
        }
        if (link) {
                if (!netif_carrier_ok(netdev)) {
                        bool txb2b = 1;
 +
 +                      /* Cancel scheduled suspend requests. */
 +                      pm_runtime_resume(netdev->dev.parent);
 +
                        /* update snapshot of PHY registers on LSC */
                        e1000_phy_read_status(adapter);
                        mac->ops.get_link_up_info(&adapter->hw,
                                               "link gets many collisions.\n");
                        }
  
-                       /*
-                        * tweak tx_queue_len according to speed/duplex
-                        * and adjust the timeout factor
-                        */
-                       netdev->tx_queue_len = adapter->tx_queue_len;
+                       /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
                                txb2b = 0;
-                               netdev->tx_queue_len = 10;
                                adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
                                txb2b = 0;
-                               netdev->tx_queue_len = 100;
                                adapter->tx_timeout_factor = 10;
                                break;
                        }
  
                        if (adapter->flags & FLAG_RX_NEEDS_RESTART)
                                schedule_work(&adapter->reset_task);
 +                      else
 +                              pm_schedule_suspend(netdev->dev.parent,
 +                                                      LINK_TIMEOUT);
                }
        }
  
        return retval;
  }
  
 -static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 +static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
 +                          bool runtime)
  {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 ctrl, ctrl_ext, rctl, status;
 -      u32 wufc = adapter->wol;
 +      /* Runtime suspend should only enable wakeup for link changes */
 +      u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
        int retval = 0;
  
        netif_device_detach(netdev);
@@@ -4665,21 -4627,43 +4656,21 @@@ static void e1000e_disable_l1aspm(struc
        }
  }
  
 -#ifdef CONFIG_PM
 -static int e1000_suspend(struct pci_dev *pdev, pm_message_t state)
 +#ifdef CONFIG_PM_OPS
 +static bool e1000e_pm_ready(struct e1000_adapter *adapter)
  {
 -      int retval;
 -      bool wake;
 -
 -      retval = __e1000_shutdown(pdev, &wake);
 -      if (!retval)
 -              e1000_complete_shutdown(pdev, true, wake);
 -
 -      return retval;
 +      return !!adapter->tx_ring->buffer_info;
  }
  
 -static int e1000_resume(struct pci_dev *pdev)
 +static int __e1000_resume(struct pci_dev *pdev)
  {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
        u32 err;
  
 -      pci_set_power_state(pdev, PCI_D0);
 -      pci_restore_state(pdev);
 -      pci_save_state(pdev);
        e1000e_disable_l1aspm(pdev);
  
 -      err = pci_enable_device_mem(pdev);
 -      if (err) {
 -              dev_err(&pdev->dev,
 -                      "Cannot enable PCI device from suspend\n");
 -              return err;
 -      }
 -
 -      pci_set_master(pdev);
 -
 -      pci_enable_wake(pdev, PCI_D3hot, 0);
 -      pci_enable_wake(pdev, PCI_D3cold, 0);
 -
        e1000e_set_interrupt_capability(adapter);
        if (netif_running(netdev)) {
                err = e1000_request_irq(adapter);
  
        return 0;
  }
 -#endif
 +
 +#ifdef CONFIG_PM_SLEEP
 +static int e1000_suspend(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      int retval;
 +      bool wake;
 +
 +      retval = __e1000_shutdown(pdev, &wake, false);
 +      if (!retval)
 +              e1000_complete_shutdown(pdev, true, wake);
 +
 +      return retval;
 +}
 +
 +static int e1000_resume(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      if (e1000e_pm_ready(adapter))
 +              adapter->idle_check = true;
 +
 +      return __e1000_resume(pdev);
 +}
 +#endif /* CONFIG_PM_SLEEP */
 +
 +#ifdef CONFIG_PM_RUNTIME
 +static int e1000_runtime_suspend(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      if (e1000e_pm_ready(adapter)) {
 +              bool wake;
 +
 +              __e1000_shutdown(pdev, &wake, true);
 +      }
 +
 +      return 0;
 +}
 +
 +static int e1000_idle(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      if (!e1000e_pm_ready(adapter))
 +              return 0;
 +
 +      if (adapter->idle_check) {
 +              adapter->idle_check = false;
 +              if (!e1000e_has_link(adapter))
 +                      pm_schedule_suspend(dev, MSEC_PER_SEC);
 +      }
 +
 +      return -EBUSY;
 +}
 +
 +static int e1000_runtime_resume(struct device *dev)
 +{
 +      struct pci_dev *pdev = to_pci_dev(dev);
 +      struct net_device *netdev = pci_get_drvdata(pdev);
 +      struct e1000_adapter *adapter = netdev_priv(netdev);
 +
 +      if (!e1000e_pm_ready(adapter))
 +              return 0;
 +
 +      adapter->idle_check = !dev->power.runtime_auto;
 +      return __e1000_resume(pdev);
 +}
 +#endif /* CONFIG_PM_RUNTIME */
 +#endif /* CONFIG_PM_OPS */
  
  static void e1000_shutdown(struct pci_dev *pdev)
  {
        bool wake = false;
  
 -      __e1000_shutdown(pdev, &wake);
 +      __e1000_shutdown(pdev, &wake, false);
  
        if (system_state == SYSTEM_POWER_OFF)
                e1000_complete_shutdown(pdev, false, wake);
@@@ -4891,8 -4800,8 +4882,8 @@@ static pci_ers_result_t e1000_io_slot_r
                result = PCI_ERS_RESULT_DISCONNECT;
        } else {
                pci_set_master(pdev);
 +              pdev->state_saved = true;
                pci_restore_state(pdev);
 -              pci_save_state(pdev);
  
                pci_enable_wake(pdev, PCI_D3hot, 0);
                pci_enable_wake(pdev, PCI_D3cold, 0);
@@@ -5299,12 -5208,6 +5290,12 @@@ static int __devinit e1000_probe(struc
  
        e1000_print_device_info(adapter);
  
 +      if (pci_dev_run_wake(pdev)) {
 +              pm_runtime_set_active(&pdev->dev);
 +              pm_runtime_enable(&pdev->dev);
 +      }
 +      pm_schedule_suspend(&pdev->dev, MSEC_PER_SEC);
 +
        return 0;
  
  err_register:
@@@ -5347,16 -5250,12 +5338,16 @@@ static void __devexit e1000_remove(stru
  {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
 +      bool down = test_bit(__E1000_DOWN, &adapter->state);
 +
 +      pm_runtime_get_sync(&pdev->dev);
  
        /*
         * flush_scheduled work may reschedule our watchdog task, so
         * explicitly disable watchdog tasks from being rescheduled
         */
 -      set_bit(__E1000_DOWN, &adapter->state);
 +      if (!down)
 +              set_bit(__E1000_DOWN, &adapter->state);
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
  
        if (!(netdev->flags & IFF_UP))
                e1000_power_down_phy(adapter);
  
 +      /* Don't lie to e1000_close() down the road. */
 +      if (!down)
 +              clear_bit(__E1000_DOWN, &adapter->state);
        unregister_netdev(netdev);
  
 +      if (pci_dev_run_wake(pdev)) {
 +              pm_runtime_disable(&pdev->dev);
 +              pm_runtime_set_suspended(&pdev->dev);
 +      }
 +      pm_runtime_put_noidle(&pdev->dev);
 +
        /*
         * Release control of h/w to f/w.  If f/w is AMT enabled, this
         * would have already happened in close and is redundant.
@@@ -5480,22 -5370,16 +5471,22 @@@ static DEFINE_PCI_DEVICE_TABLE(e1000_pc
  };
  MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
  
 +#ifdef CONFIG_PM_OPS
 +static const struct dev_pm_ops e1000_pm_ops = {
 +      SET_SYSTEM_SLEEP_PM_OPS(e1000_suspend, e1000_resume)
 +      SET_RUNTIME_PM_OPS(e1000_runtime_suspend,
 +                              e1000_runtime_resume, e1000_idle)
 +};
 +#endif
 +
  /* PCI Device API Driver */
  static struct pci_driver e1000_driver = {
        .name     = e1000e_driver_name,
        .id_table = e1000_pci_tbl,
        .probe    = e1000_probe,
        .remove   = __devexit_p(e1000_remove),
 -#ifdef CONFIG_PM
 -      /* Power Management Hooks */
 -      .suspend  = e1000_suspend,
 -      .resume   = e1000_resume,
 +#ifdef CONFIG_PM_OPS
 +      .driver.pm = &e1000_pm_ops,
  #endif
        .shutdown = e1000_shutdown,
        .err_handler = &e1000_err_handler
  static int __init e1000_init_module(void)
  {
        int ret;
 -      printk(KERN_INFO "%s: Intel(R) PRO/1000 Network Driver - %s\n",
 -             e1000e_driver_name, e1000e_driver_version);
 -      printk(KERN_INFO "%s: Copyright (c) 1999 - 2009 Intel Corporation.\n",
 -             e1000e_driver_name);
 +      pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
 +              e1000e_driver_version);
 +      pr_info("Copyright (c) 1999 - 2009 Intel Corporation.\n");
        ret = pci_register_driver(&e1000_driver);
  
        return ret;
diff --combined drivers/net/gianfar.c
index fdd26c2b1a2f5a870437b5b5b48e3538bee00695,080d1cea5b265704819d54b1c60e96393d1b7749..5175233f11f2e65babc0bd926ceba9aadcb4f4be
@@@ -676,7 -676,7 +676,7 @@@ static int gfar_of_init(struct of_devic
                priv->rx_queue[i] = NULL;
  
        for (i = 0; i < priv->num_tx_queues; i++) {
-               priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kmalloc(
+               priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kzalloc(
                                sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
                if (!priv->tx_queue[i]) {
                        err = -ENOMEM;
        }
  
        for (i = 0; i < priv->num_rx_queues; i++) {
-               priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
+               priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
                                        sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
                if (!priv->rx_queue[i]) {
                        err = -ENOMEM;
@@@ -1120,10 -1120,10 +1120,10 @@@ static int gfar_probe(struct of_device 
        /* provided which set of benchmarks. */
        printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
        for (i = 0; i < priv->num_rx_queues; i++)
-               printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
+               printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->rx_queue[i]->rx_ring_size);
        for(i = 0; i < priv->num_tx_queues; i++)
-                printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
+                printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
                        dev->name, i, priv->tx_queue[i]->tx_ring_size);
  
        return 0;
@@@ -1638,13 -1638,13 +1638,13 @@@ static void free_skb_resources(struct g
        /* Go through all the buffer descriptors and free their data buffers */
        for (i = 0; i < priv->num_tx_queues; i++) {
                tx_queue = priv->tx_queue[i];
-               if(!tx_queue->tx_skbuff)
+               if(tx_queue->tx_skbuff)
                        free_skb_tx_queue(tx_queue);
        }
  
        for (i = 0; i < priv->num_rx_queues; i++) {
                rx_queue = priv->rx_queue[i];
-               if(!rx_queue->rx_skbuff)
+               if(rx_queue->rx_skbuff)
                        free_skb_rx_queue(rx_queue);
        }
  
@@@ -2393,6 -2393,7 +2393,7 @@@ struct sk_buff * gfar_new_skb(struct ne
         * as many bytes as needed to align the data properly
         */
        skb_reserve(skb, alignamount);
+       GFAR_CB(skb)->alignamount = alignamount;
  
        return skb;
  }
@@@ -2533,13 -2534,13 +2534,13 @@@ int gfar_clean_rx_ring(struct gfar_priv
                                newskb = skb;
                        else if (skb) {
                                /*
-                                * We need to reset ->data to what it
+                                * We need to un-reserve() the skb to what it
                                 * was before gfar_new_skb() re-aligned
                                 * it to an RXBUF_ALIGNMENT boundary
                                 * before we put the skb back on the
                                 * recycle list.
                                 */
-                               skb->data = skb->head + NET_SKB_PAD;
+                               skb_reserve(skb, -GFAR_CB(skb)->alignamount);
                                __skb_queue_head(&priv->rx_recycle, skb);
                        }
                } else {
@@@ -2797,7 -2798,7 +2798,7 @@@ static void adjust_link(struct net_devi
   * whenever dev->flags is changed */
  static void gfar_set_multi(struct net_device *dev)
  {
 -      struct dev_mc_list *mc_ptr;
 +      struct netdev_hw_addr *ha;
        struct gfar_private *priv = netdev_priv(dev);
        struct gfar __iomem *regs = priv->gfargrp[0].regs;
        u32 tempval;
                        return;
  
                /* Parse the list, and set the appropriate bits */
 -              netdev_for_each_mc_addr(mc_ptr, dev) {
 +              netdev_for_each_mc_addr(ha, dev) {
                        if (idx < em_num) {
 -                              gfar_set_mac_for_addr(dev, idx,
 -                                              mc_ptr->dmi_addr);
 +                              gfar_set_mac_for_addr(dev, idx, ha->addr);
                                idx++;
                        } else
 -                              gfar_set_hash_for_addr(dev, mc_ptr->dmi_addr);
 +                              gfar_set_hash_for_addr(dev, ha->addr);
                }
        }
  
diff --combined drivers/net/igb/igb.h
index 4f69b6d951b3dce330ea44df31a7259e116908ff,3b772b822a5dcc61548c8bc98297c8bd9c47efb6..7d288ccca1cacf18b893bff33058853198d7a24a
@@@ -107,7 -107,6 +107,7 @@@ struct vf_data_storage 
  #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
  
  /* Supported Rx Buffer Sizes */
 +#define IGB_RXBUFFER_64    64     /* Used for packet split */
  #define IGB_RXBUFFER_128   128    /* Used for packet split */
  #define IGB_RXBUFFER_1024  1024
  #define IGB_RXBUFFER_2048  2048
@@@ -268,7 -267,6 +268,6 @@@ struct igb_adapter 
  
        /* TX */
        struct igb_ring *tx_ring[16];
-       unsigned long tx_queue_len;
        u32 tx_timeout_count;
  
        /* RX */
  
  #define IGB_82576_TSYNC_SHIFT 19
  #define IGB_82580_TSYNC_SHIFT 24
 +#define IGB_TS_HDR_LEN        16
  enum e1000_state_t {
        __IGB_TESTING,
        __IGB_RESETTING,
index 78cc742e233f5081cce6887a208ea1927e6fbe0f,01c65c7447e1c8dd784534c872a36bfd0a7fe6bc..2745e17fd021cb0377290f1103368f55fa90c682
@@@ -61,10 -61,6 +61,10 @@@ static const struct e1000_info *igb_inf
  };
  
  static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
 +      { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
        { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
@@@ -226,16 -222,42 +226,16 @@@ static cycle_t igb_read_clock(const str
        return stamp;
  }
  
 -#ifdef DEBUG
  /**
 - * igb_get_hw_dev_name - return device name string
 + * igb_get_hw_dev - return device
   * used by hardware layer to print debugging information
   **/
 -char *igb_get_hw_dev_name(struct e1000_hw *hw)
 +struct net_device *igb_get_hw_dev(struct e1000_hw *hw)
  {
        struct igb_adapter *adapter = hw->back;
 -      return adapter->netdev->name;
 +      return adapter->netdev;
  }
  
 -/**
 - * igb_get_time_str - format current NIC and system time as string
 - */
 -static char *igb_get_time_str(struct igb_adapter *adapter,
 -                            char buffer[160])
 -{
 -      cycle_t hw = adapter->cycles.read(&adapter->cycles);
 -      struct timespec nic = ns_to_timespec(timecounter_read(&adapter->clock));
 -      struct timespec sys;
 -      struct timespec delta;
 -      getnstimeofday(&sys);
 -
 -      delta = timespec_sub(nic, sys);
 -
 -      sprintf(buffer,
 -              "HW %llu, NIC %ld.%09lus, SYS %ld.%09lus, NIC-SYS %lds + %09luns",
 -              hw,
 -              (long)nic.tv_sec, nic.tv_nsec,
 -              (long)sys.tv_sec, sys.tv_nsec,
 -              (long)delta.tv_sec, delta.tv_nsec);
 -
 -      return buffer;
 -}
 -#endif
 -
  /**
   * igb_init_module - Driver Registration Routine
   *
@@@ -305,7 -327,6 +305,7 @@@ static void igb_cache_ring_register(str
                }
        case e1000_82575:
        case e1000_82580:
 +      case e1000_i350:
        default:
                for (; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->reg_idx = rbase_offset + i;
@@@ -449,7 -470,6 +449,7 @@@ static void igb_assign_vector(struct ig
                q_vector->eims_value = 1 << msix_vector;
                break;
        case e1000_82580:
 +      case e1000_i350:
                /* 82580 uses the same table-based approach as 82576 but has fewer
                   entries as a result we carry over for queues greater than 4. */
                if (rx_queue > IGB_N0_QUEUE) {
@@@ -530,7 -550,6 +530,7 @@@ static void igb_configure_msix(struct i
  
        case e1000_82576:
        case e1000_82580:
 +      case e1000_i350:
                /* Turn on MSI-X capability first, or our settings
                 * won't stick.  And it will take days to debug. */
                wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
@@@ -1086,9 -1105,6 +1086,6 @@@ static void igb_configure(struct igb_ad
                struct igb_ring *ring = adapter->rx_ring[i];
                igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
        }
-       adapter->tx_queue_len = netdev->tx_queue_len;
  }
  
  /**
@@@ -1194,7 -1210,6 +1191,6 @@@ void igb_down(struct igb_adapter *adapt
        del_timer_sync(&adapter->watchdog_timer);
        del_timer_sync(&adapter->phy_info_timer);
  
-       netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);
  
        /* record the stats before reset*/
@@@ -1237,7 -1252,6 +1233,7 @@@ void igb_reset(struct igb_adapter *adap
         * To take effect CTRL.RST is required.
         */
        switch (mac->type) {
 +      case e1000_i350:
        case e1000_82580:
                pba = rd32(E1000_RXPBS);
                pba = igb_rxpbs_adjust_82580(pba);
@@@ -1810,7 -1824,6 +1806,7 @@@ static void igb_init_hw_timer(struct ig
        struct e1000_hw *hw = &adapter->hw;
  
        switch (hw->mac.type) {
 +      case e1000_i350:
        case e1000_82580:
                memset(&adapter->cycles, 0, sizeof(adapter->cycles));
                adapter->cycles.read = igb_read_clock;
@@@ -2324,7 -2337,6 +2320,7 @@@ static void igb_setup_mrqc(struct igb_a
        if (adapter->vfs_allocated_count) {
                /* 82575 and 82576 supports 2 RSS queues for VMDq */
                switch (hw->mac.type) {
 +              case e1000_i350:
                case e1000_82580:
                        num_rx_queues = 1;
                        shift = 0;
@@@ -2576,8 -2588,6 +2572,8 @@@ void igb_configure_rx_ring(struct igb_a
                         E1000_SRRCTL_BSIZEPKT_SHIFT;
                srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }
 +      if (hw->mac.type == e1000_82580)
 +              srrctl |= E1000_SRRCTL_TIMESTAMP;
        /* Only set Drop Enable if we are supporting multiple queues */
        if (adapter->vfs_allocated_count || adapter->num_rx_queues > 1)
                srrctl |= E1000_SRRCTL_DROP_EN;
@@@ -2864,7 -2874,7 +2860,7 @@@ static int igb_write_mc_addr_list(struc
  {
        struct igb_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 -      struct dev_mc_list *mc_ptr;
 +      struct netdev_hw_addr *ha;
        u8  *mta_list;
        int i;
  
  
        /* The shared function expects a packed array of only addresses. */
        i = 0;
 -      netdev_for_each_mc_addr(mc_ptr, netdev)
 -              memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
 +      netdev_for_each_mc_addr(ha, netdev)
 +              memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
  
        igb_update_mc_addr_list(hw, mta_list, i);
        kfree(mta_list);
@@@ -3092,17 -3102,13 +3088,13 @@@ static void igb_watchdog_task(struct wo
                               ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
                               ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
  
-                       /* tweak tx_queue_len according to speed/duplex and
-                        * adjust the timeout factor */
-                       netdev->tx_queue_len = adapter->tx_queue_len;
+                       /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
-                               netdev->tx_queue_len = 10;
                                adapter->tx_timeout_factor = 14;
                                break;
                        case SPEED_100:
-                               netdev->tx_queue_len = 100;
                                /* maybe add some timeout factor ? */
                                break;
                        }
@@@ -3912,9 -3918,6 +3904,9 @@@ static int igb_change_mtu(struct net_de
         * i.e. RXBUFFER_2048 --> size-4096 slab
         */
  
 +      if (adapter->hw.mac.type == e1000_82580)
 +              max_frame += IGB_TS_HDR_LEN;
 +
        if (max_frame <= IGB_RXBUFFER_1024)
                rx_buffer_len = IGB_RXBUFFER_1024;
        else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
        else
                rx_buffer_len = IGB_RXBUFFER_128;
  
 +      if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN + IGB_TS_HDR_LEN) ||
 +           (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN))
 +              rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE + IGB_TS_HDR_LEN;
 +
 +      if ((adapter->hw.mac.type == e1000_82580) &&
 +          (rx_buffer_len == IGB_RXBUFFER_128))
 +              rx_buffer_len += IGB_RXBUFFER_64;
 +
        if (netif_running(netdev))
                igb_down(adapter);
  
@@@ -3960,7 -3955,7 +3952,7 @@@ void igb_update_stats(struct igb_adapte
        struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
        struct e1000_hw *hw = &adapter->hw;
        struct pci_dev *pdev = adapter->pdev;
-       u32 rnbc, reg;
+       u32 reg, mpc;
        u16 phy_tmp;
        int i;
        u64 bytes, packets;
        adapter->stats.symerrs += rd32(E1000_SYMERRS);
        adapter->stats.sec += rd32(E1000_SEC);
  
-       adapter->stats.mpc += rd32(E1000_MPC);
+       mpc = rd32(E1000_MPC);
+       adapter->stats.mpc += mpc;
+       net_stats->rx_fifo_errors += mpc;
        adapter->stats.scc += rd32(E1000_SCC);
        adapter->stats.ecol += rd32(E1000_ECOL);
        adapter->stats.mcc += rd32(E1000_MCC);
        adapter->stats.gptc += rd32(E1000_GPTC);
        adapter->stats.gotc += rd32(E1000_GOTCL);
        rd32(E1000_GOTCH); /* clear GOTCL */
-       rnbc = rd32(E1000_RNBC);
-       adapter->stats.rnbc += rnbc;
-       net_stats->rx_fifo_errors += rnbc;
+       adapter->stats.rnbc += rd32(E1000_RNBC);
        adapter->stats.ruc += rd32(E1000_RUC);
        adapter->stats.rfc += rd32(E1000_RFC);
        adapter->stats.rjc += rd32(E1000_RJC);
@@@ -5107,7 -5102,7 +5099,7 @@@ static void igb_receive_skb(struct igb_
  {
        struct igb_adapter *adapter = q_vector->adapter;
  
-       if (vlan_tag)
+       if (vlan_tag && adapter->vlgrp)
                vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
                                 vlan_tag, skb);
        else
@@@ -5146,7 -5141,7 +5138,7 @@@ static inline void igb_rx_checksum_adv(
        dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
  }
  
 -static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
 +static void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
                                     struct sk_buff *skb)
  {
        struct igb_adapter *adapter = q_vector->adapter;
         * If nothing went wrong, then it should have a skb_shared_tx that we
         * can turn into a skb_shared_hwtstamps.
         */
 -      if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
 -              return;
 -      if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
 -              return;
 +      if (staterr & E1000_RXDADV_STAT_TSIP) {
 +              u32 *stamp = (u32 *)skb->data;
 +              regval = le32_to_cpu(*(stamp + 2));
 +              regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
 +              skb_pull(skb, IGB_TS_HDR_LEN);
 +      } else {
 +              if(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
 +                      return;
  
 -      regval = rd32(E1000_RXSTMPL);
 -      regval |= (u64)rd32(E1000_RXSTMPH) << 32;
 +              regval = rd32(E1000_RXSTMPL);
 +              regval |= (u64)rd32(E1000_RXSTMPH) << 32;
 +      }
  
        igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
  }
@@@ -5283,8 -5273,7 +5275,8 @@@ send_up
                        goto next_desc;
                }
  
 -              igb_rx_hwtstamp(q_vector, staterr, skb);
 +              if (staterr & (E1000_RXDADV_STAT_TSIP | E1000_RXDADV_STAT_TS))
 +                      igb_rx_hwtstamp(q_vector, staterr, skb);
                total_bytes += skb->len;
                total_packets++;
  
@@@ -5564,16 -5553,6 +5556,16 @@@ static int igb_hwtstamp_ioctl(struct ne
                return 0;
        }
  
 +      /*
 +       * Per-packet timestamping only works if all packets are
 +       * timestamped, so enable timestamping in all packets as
 +       * long as one rx filter was configured.
 +       */
 +      if ((hw->mac.type == e1000_82580) && tsync_rx_ctl) {
 +              tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
 +              tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
 +      }
 +
        /* enable/disable TX */
        regval = rd32(E1000_TSYNCTXCTL);
        regval &= ~E1000_TSYNCTXCTL_ENABLED;
@@@ -6150,25 -6129,19 +6142,25 @@@ static void igb_vmm_control(struct igb_
        struct e1000_hw *hw = &adapter->hw;
        u32 reg;
  
 -      /* replication is not supported for 82575 */
 -      if (hw->mac.type == e1000_82575)
 +      switch (hw->mac.type) {
 +      case e1000_82575:
 +      default:
 +              /* replication is not supported for 82575 */
                return;
 -
 -      /* enable replication vlan tag stripping */
 -      reg = rd32(E1000_RPLOLR);
 -      reg |= E1000_RPLOLR_STRVLAN;
 -      wr32(E1000_RPLOLR, reg);
 -
 -      /* notify HW that the MAC is adding vlan tags */
 -      reg = rd32(E1000_DTXCTL);
 -      reg |= E1000_DTXCTL_VLAN_ADDED;
 -      wr32(E1000_DTXCTL, reg);
 +      case e1000_82576:
 +              /* notify HW that the MAC is adding vlan tags */
 +              reg = rd32(E1000_DTXCTL);
 +              reg |= E1000_DTXCTL_VLAN_ADDED;
 +              wr32(E1000_DTXCTL, reg);
 +      case e1000_82580:
 +              /* enable replication vlan tag stripping */
 +              reg = rd32(E1000_RPLOLR);
 +              reg |= E1000_RPLOLR_STRVLAN;
 +              wr32(E1000_RPLOLR, reg);
 +      case e1000_i350:
 +              /* none of the above registers are supported by i350 */
 +              break;
 +      }
  
        if (adapter->vfs_allocated_count) {
                igb_vmdq_set_loopback_pf(hw, true);
index ea8abf5c1ef25a857a126168ab53d78f1968d35d,b41037ed8083107a7d736a9a54ebacbd989f0d85..868855078ebc3ac37c6a02a9b9ba3d3ccd0badc0
@@@ -1304,8 -1304,6 +1304,6 @@@ static void igbvf_configure_tx(struct i
  
        /* enable Report Status bit */
        adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-       adapter->tx_queue_len = adapter->netdev->tx_queue_len;
  }
  
  /**
@@@ -1399,7 -1397,7 +1397,7 @@@ static void igbvf_set_multi(struct net_
  {
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 -      struct dev_mc_list *mc_ptr;
 +      struct netdev_hw_addr *ha;
        u8  *mta_list = NULL;
        int i;
  
  
        /* prepare a packed array of only addresses. */
        i = 0;
 -      netdev_for_each_mc_addr(mc_ptr, netdev)
 -              memcpy(mta_list + (i++ * ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
 +      netdev_for_each_mc_addr(ha, netdev)
 +              memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
  
        hw->mac.ops.update_mc_addr_list(hw, mta_list, i, 0, 0);
        kfree(mta_list);
@@@ -1524,7 -1522,6 +1522,6 @@@ void igbvf_down(struct igbvf_adapter *a
  
        del_timer_sync(&adapter->watchdog_timer);
  
-       netdev->tx_queue_len = adapter->tx_queue_len;
        netif_carrier_off(netdev);
  
        /* record the stats before reset*/
@@@ -1857,21 -1854,15 +1854,15 @@@ static void igbvf_watchdog_task(struct 
                                                  &adapter->link_duplex);
                        igbvf_print_link_info(adapter);
  
-                       /*
-                        * tweak tx_queue_len according to speed/duplex
-                        * and adjust the timeout factor
-                        */
-                       netdev->tx_queue_len = adapter->tx_queue_len;
+                       /* adjust timeout factor according to speed/duplex */
                        adapter->tx_timeout_factor = 1;
                        switch (adapter->link_speed) {
                        case SPEED_10:
                                txb2b = 0;
-                               netdev->tx_queue_len = 10;
                                adapter->tx_timeout_factor = 16;
                                break;
                        case SPEED_100:
                                txb2b = 0;
-                               netdev->tx_queue_len = 100;
                                /* maybe add some timeout factor ? */
                                break;
                        }
index 6effa2ca157d516183410444ecf831c18e9a239f,0c553f6cb53485e11b875a00aa8d8c3b1d0f1226..7216db218442b1fb2bdc0eaddb1c8f8927edf8d2
@@@ -2537,6 -2537,21 +2537,6 @@@ static void ixgbe_restore_vlan(struct i
        }
  }
  
 -static u8 *ixgbe_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr, u32 *vmdq)
 -{
 -      struct dev_mc_list *mc_ptr;
 -      u8 *addr = *mc_addr_ptr;
 -      *vmdq = 0;
 -
 -      mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
 -      if (mc_ptr->next)
 -              *mc_addr_ptr = mc_ptr->next->dmi_addr;
 -      else
 -              *mc_addr_ptr = NULL;
 -
 -      return addr;
 -}
 -
  /**
   * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set
   * @netdev: network interface device structure
@@@ -2551,6 -2566,8 +2551,6 @@@ void ixgbe_set_rx_mode(struct net_devic
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 fctrl, vlnctrl;
 -      u8 *addr_list = NULL;
 -      int addr_count = 0;
  
        /* Check for Promiscuous and All Multicast modes */
  
        hw->mac.ops.update_uc_addr_list(hw, netdev);
  
        /* reprogram multicast list */
 -      addr_count = netdev_mc_count(netdev);
 -      if (addr_count)
 -              addr_list = netdev->mc_list->dmi_addr;
 -      hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
 -                                      ixgbe_addr_list_itr);
 +      hw->mac.ops.update_mc_addr_list(hw, netdev);
 +
        if (adapter->num_vfs)
                ixgbe_restore_vf_multicasts(adapter);
  }
@@@ -3036,6 -3056,14 +3036,14 @@@ void ixgbe_reinit_locked(struct ixgbe_a
        while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
                msleep(1);
        ixgbe_down(adapter);
+       /*
+        * If SR-IOV enabled then wait a bit before bringing the adapter
+        * back up to give the VFs time to respond to the reset.  The
+        * two second wait is based upon the watchdog timer cycle in
+        * the VF driver.
+        */
+       if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+               msleep(2000);
        ixgbe_up(adapter);
        clear_bit(__IXGBE_RESETTING, &adapter->state);
  }
@@@ -3216,13 -3244,15 +3224,15 @@@ void ixgbe_down(struct ixgbe_adapter *a
  
        /* disable receive for all VFs and wait one second */
        if (adapter->num_vfs) {
-               for (i = 0 ; i < adapter->num_vfs; i++)
-                       adapter->vfinfo[i].clear_to_send = 0;
                /* ping all the active vfs to let them know we are going down */
                ixgbe_ping_all_vfs(adapter);
                /* Disable all VFTE/VFRE TX/RX */
                ixgbe_disable_tx_rx(adapter);
+               /* Mark all the VFs as inactive */
+               for (i = 0 ; i < adapter->num_vfs; i++)
+                       adapter->vfinfo[i].clear_to_send = 0;
        }
  
        /* disable receives */
@@@ -3440,12 -3470,12 +3450,12 @@@ static inline bool ixgbe_set_fcoe_queue
                adapter->num_tx_queues = 1;
  #ifdef CONFIG_IXGBE_DCB
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
 -                      DPRINTK(PROBE, INFO, "FCoE enabled with DCB \n");
 +                      DPRINTK(PROBE, INFO, "FCoE enabled with DCB\n");
                        ixgbe_set_dcb_queues(adapter);
                }
  #endif
                if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
 -                      DPRINTK(PROBE, INFO, "FCoE enabled with RSS \n");
 +                      DPRINTK(PROBE, INFO, "FCoE enabled with RSS\n");
                        if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
                            (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
                                ixgbe_set_fdir_queues(adapter);
@@@ -5061,7 -5091,7 +5071,7 @@@ static void ixgbe_fdir_reinit_task(stru
                                &(adapter->tx_ring[i]->reinit_state));
        } else {
                DPRINTK(PROBE, ERR, "failed to finish FDIR re-initialization, "
 -                      "ignored adding FDIR ATR filters \n");
 +                      "ignored adding FDIR ATR filters\n");
        }
        /* Done FDIR Re-initialization, enable transmits */
        netif_tx_start_all_queues(adapter->netdev);
@@@ -5618,7 -5648,8 +5628,8 @@@ static u16 ixgbe_select_queue(struct ne
  
  #ifdef IXGBE_FCOE
        if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-           (skb->protocol == htons(ETH_P_FCOE))) {
+           ((skb->protocol == htons(ETH_P_FCOE)) ||
+            (skb->protocol == htons(ETH_P_FIP)))) {
                txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
                txq += adapter->ring_feature[RING_F_FCOE].mask;
                return txq;
@@@ -5665,18 -5696,25 +5676,25 @@@ static netdev_tx_t ixgbe_xmit_frame(str
  
        tx_ring = adapter->tx_ring[skb->queue_mapping];
  
-       if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-           (skb->protocol == htons(ETH_P_FCOE))) {
-               tx_flags |= IXGBE_TX_FLAGS_FCOE;
  #ifdef IXGBE_FCOE
+       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
  #ifdef CONFIG_IXGBE_DCB
-               tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
-                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
-               tx_flags |= ((adapter->fcoe.up << 13)
-                             << IXGBE_TX_FLAGS_VLAN_SHIFT);
- #endif
+               /* for FCoE with DCB, we force the priority to what
+                * was specified by the switch */
+               if ((skb->protocol == htons(ETH_P_FCOE)) ||
+                   (skb->protocol == htons(ETH_P_FIP))) {
+                       tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+                                     << IXGBE_TX_FLAGS_VLAN_SHIFT);
+                       tx_flags |= ((adapter->fcoe.up << 13)
+                                    << IXGBE_TX_FLAGS_VLAN_SHIFT);
+               }
  #endif
+               /* flag for FCoE offloads */
+               if (skb->protocol == htons(ETH_P_FCOE))
+                       tx_flags |= IXGBE_TX_FLAGS_FCOE;
        }
+ #endif
        /* four things can cause us to need a context descriptor */
        if (skb_is_gso(skb) ||
            (skb->ip_summed == CHECKSUM_PARTIAL) ||
@@@ -6031,7 -6069,6 +6049,6 @@@ static int __devinit ixgbe_probe(struc
        indices += min_t(unsigned int, num_possible_cpus(),
                         IXGBE_MAX_FCOE_INDICES);
  #endif
-       indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
        netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
        if (!netdev) {
                err = -ENOMEM;
index c574d0a68f2a20b676b46571d3f50062ae1bf2e3,4ec6dc1a5b75c06a800d3bc7d61f8c2e6cab7853..aed4ed66564891a99727e9b0c2c6f68f2db07e6a
  #define IXGBE_ETQF_FILTER_BCN            1
  #define IXGBE_ETQF_FILTER_FCOE           2
  #define IXGBE_ETQF_FILTER_1588           3
+ #define IXGBE_ETQF_FILTER_FIP            4
  /* VLAN Control Bit Masks */
  #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
  #define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
@@@ -2416,7 -2417,8 +2417,7 @@@ struct ixgbe_mac_operations 
        s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32);
        s32 (*init_rx_addrs)(struct ixgbe_hw *);
        s32 (*update_uc_addr_list)(struct ixgbe_hw *, struct net_device *);
 -      s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32,
 -                                 ixgbe_mc_addr_itr);
 +      s32 (*update_mc_addr_list)(struct ixgbe_hw *, struct net_device *);
        s32 (*enable_mc)(struct ixgbe_hw *);
        s32 (*disable_mc)(struct ixgbe_hw *);
        s32 (*clear_vfta)(struct ixgbe_hw *);
index 6ced5efc0e0728fab31ce3c94ba0d30d0ea5d010,1bbbef3ee3f49c3eb72828de449e773f076c1407..65cb133a6a1f985b852b79cedba762318024e44f
@@@ -1495,6 -1495,22 +1495,6 @@@ static void ixgbevf_restore_vlan(struc
        }
  }
  
 -static u8 *ixgbevf_addr_list_itr(struct ixgbe_hw *hw, u8 **mc_addr_ptr,
 -                               u32 *vmdq)
 -{
 -      struct dev_mc_list *mc_ptr;
 -      u8 *addr = *mc_addr_ptr;
 -      *vmdq = 0;
 -
 -      mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]);
 -      if (mc_ptr->next)
 -              *mc_addr_ptr = mc_ptr->next->dmi_addr;
 -      else
 -              *mc_addr_ptr = NULL;
 -
 -      return addr;
 -}
 -
  /**
   * ixgbevf_set_rx_mode - Multicast set
   * @netdev: network interface device structure
@@@ -1507,10 -1523,16 +1507,10 @@@ static void ixgbevf_set_rx_mode(struct 
  {
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 -      u8 *addr_list = NULL;
 -      int addr_count = 0;
  
        /* reprogram multicast list */
 -      addr_count = netdev_mc_count(netdev);
 -      if (addr_count)
 -              addr_list = netdev->mc_list->dmi_addr;
        if (hw->mac.ops.update_mc_addr_list)
 -              hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count,
 -                                              ixgbevf_addr_list_itr);
 +              hw->mac.ops.update_mc_addr_list(hw, netdev);
  }
  
  static void ixgbevf_napi_enable_all(struct ixgbevf_adapter *adapter)
@@@ -2395,9 -2417,9 +2395,9 @@@ static void ixgbevf_watchdog_task(struc
  
        if (link_up) {
                if (!netif_carrier_ok(netdev)) {
 -                      hw_dbg(&adapter->hw, "NIC Link is Up %s, ",
 -                             ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
 -                              "10 Gbps\n" : "1 Gbps\n"));
 +                      hw_dbg(&adapter->hw, "NIC Link is Up, %u Gbps\n",
 +                             (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
 +                             10 : 1);
                        netif_carrier_on(netdev);
                        netif_tx_wake_all_queues(netdev);
                } else {
@@@ -2921,9 -2943,10 +2921,10 @@@ static int ixgbevf_tx_map(struct ixgbev
        struct ixgbevf_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
-       unsigned int offset = 0, size, count = 0, i;
+       unsigned int offset = 0, size, count = 0;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
+       int i;
  
        i = tx_ring->next_to_use;
  
@@@ -3458,7 -3481,7 +3459,7 @@@ static int __devinit ixgbevf_probe(stru
  
        hw_dbg(hw, "MAC: %d\n", hw->mac.type);
  
 -      hw_dbg(hw, "LRO is disabled \n");
 +      hw_dbg(hw, "LRO is disabled\n");
  
        hw_dbg(hw, "Intel(R) 82599 Virtual Function\n");
        cards_found++;
diff --combined drivers/net/ksz884x.c
index 348769521615b1a77f6ad3338cb488ad9352e756,6c5327af1bf920e28f7291c7ce9bb47aac8eafde..097796423b5273f012bcf73fe8060719482bbe12
   * GNU General Public License for more details.
   */
  
 +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 +
  #include <linux/init.h>
  #include <linux/kernel.h>
  #include <linux/module.h>
 -#include <linux/version.h>
  #include <linux/ioport.h>
  #include <linux/pci.h>
  #include <linux/proc_fs.h>
@@@ -1484,6 -1483,11 +1484,6 @@@ struct dev_priv 
        int promiscuous;
  };
  
 -#define ks_info(_ks, _msg...) dev_info(&(_ks)->pdev->dev, _msg)
 -#define ks_warn(_ks, _msg...) dev_warn(&(_ks)->pdev->dev, _msg)
 -#define ks_dbg(_ks, _msg...) dev_dbg(&(_ks)->pdev->dev, _msg)
 -#define ks_err(_ks, _msg...) dev_err(&(_ks)->pdev->dev, _msg)
 -
  #define DRV_NAME              "KSZ884X PCI"
  #define DEVICE_NAME           "KSZ884x PCI"
  #define DRV_VERSION           "1.0.0"
@@@ -3830,7 -3834,7 +3830,7 @@@ static void ksz_check_desc_num(struct k
                alloc >>= 1;
        }
        if (alloc != 1 || shift < MIN_DESC_SHIFT) {
 -              printk(KERN_ALERT "Hardware descriptor numbers not right!\n");
 +              pr_alert("Hardware descriptor numbers not right!\n");
                while (alloc) {
                        shift++;
                        alloc >>= 1;
@@@ -4541,7 -4545,8 +4541,7 @@@ static int ksz_alloc_mem(struct dev_inf
                (((sizeof(struct ksz_hw_desc) + DESC_ALIGNMENT - 1) /
                DESC_ALIGNMENT) * DESC_ALIGNMENT);
        if (hw->rx_desc_info.size != sizeof(struct ksz_hw_desc))
 -              printk(KERN_ALERT
 -                      "Hardware descriptor size not right!\n");
 +              pr_alert("Hardware descriptor size not right!\n");
        ksz_check_desc_num(&hw->rx_desc_info);
        ksz_check_desc_num(&hw->tx_desc_info);
  
@@@ -5043,6 -5048,8 +5043,6 @@@ static inline int rx_proc(struct net_de
                        dma_buf->skb->data, packet_len);
        } while (0);
  
 -      skb->dev = dev;
 -
        skb->protocol = eth_type_trans(skb, dev);
  
        if (hw->rx_cfg & (DMA_RX_CSUM_UDP | DMA_RX_CSUM_TCP))
@@@ -5312,10 -5319,10 +5312,10 @@@ static irqreturn_t netdev_intr(int irq
                        u32 data;
  
                        hw->intr_mask &= ~KS884X_INT_TX_STOPPED;
 -                      printk(KERN_INFO "Tx stopped\n");
 +                      pr_info("Tx stopped\n");
                        data = readl(hw->io + KS_DMA_TX_CTRL);
                        if (!(data & DMA_TX_ENABLE))
 -                              printk(KERN_INFO "Tx disabled\n");
 +                              pr_info("Tx disabled\n");
                        break;
                }
        } while (0);
@@@ -5488,18 -5495,6 +5488,18 @@@ static int prepare_hardware(struct net_
        return 0;
  }
  
 +static void set_media_state(struct net_device *dev, int media_state)
 +{
 +      struct dev_priv *priv = netdev_priv(dev);
 +
 +      if (media_state == priv->media_state)
 +              netif_carrier_on(dev);
 +      else
 +              netif_carrier_off(dev);
 +      netif_info(priv, link, dev, "link %s\n",
 +                 media_state == priv->media_state ? "on" : "off");
 +}
 +
  /**
   * netdev_open - open network device
   * @dev:      Network device.
@@@ -5589,7 -5584,15 +5589,7 @@@ static int netdev_open(struct net_devic
  
        priv->media_state = port->linked->state;
  
 -      if (media_connected == priv->media_state)
 -              netif_carrier_on(dev);
 -      else
 -              netif_carrier_off(dev);
 -      if (netif_msg_link(priv))
 -              printk(KERN_INFO "%s link %s\n", dev->name,
 -                      (media_connected == priv->media_state ?
 -                      "on" : "off"));
 -
 +      set_media_state(dev, media_connected);
        netif_start_queue(dev);
  
        return 0;
@@@ -5763,7 -5766,7 +5763,7 @@@ static void netdev_set_rx_mode(struct n
        struct dev_priv *priv = netdev_priv(dev);
        struct dev_info *hw_priv = priv->adapter;
        struct ksz_hw *hw = &hw_priv->hw;
 -      struct dev_mc_list *mc_ptr;
 +      struct netdev_hw_addr *ha;
        int multicast = (dev->flags & IFF_ALLMULTI);
  
        dev_set_promiscuous(dev, priv, hw, (dev->flags & IFF_PROMISC));
                int i = 0;
  
                /* List too big to support so turn on all multicast mode. */
 -              if (dev->mc_count > MAX_MULTICAST_LIST) {
 +              if (netdev_mc_count(dev) > MAX_MULTICAST_LIST) {
                        if (MAX_MULTICAST_LIST != hw->multi_list_size) {
                                hw->multi_list_size = MAX_MULTICAST_LIST;
                                ++hw->all_multi;
                        return;
                }
  
 -              netdev_for_each_mc_addr(mc_ptr, dev) {
 -                      if (!(*mc_ptr->dmi_addr & 1))
 +              netdev_for_each_mc_addr(ha, dev) {
 +                      if (!(*ha->addr & 1))
                                continue;
                        if (i >= MAX_MULTICAST_LIST)
                                break;
 -                      memcpy(hw->multi_list[i++], mc_ptr->dmi_addr,
 -                              MAC_ADDR_LEN);
 +                      memcpy(hw->multi_list[i++], ha->addr, MAC_ADDR_LEN);
                }
                hw->multi_list_size = (u8) i;
                hw_set_grp_addr(hw);
@@@ -6318,7 -6322,7 +6318,7 @@@ static int netdev_set_eeprom(struct net
        int len;
  
        if (eeprom->magic != EEPROM_MAGIC)
-               return 1;
+               return -EINVAL;
  
        len = (eeprom->offset + eeprom->len + 1) / 2;
        for (i = eeprom->offset / 2; i < len; i++)
@@@ -6678,8 -6682,16 +6678,8 @@@ static void update_link(struct net_devi
  {
        if (priv->media_state != port->linked->state) {
                priv->media_state = port->linked->state;
 -              if (netif_running(dev)) {
 -                      if (media_connected == priv->media_state)
 -                              netif_carrier_on(dev);
 -                      else
 -                              netif_carrier_off(dev);
 -                      if (netif_msg_link(priv))
 -                              printk(KERN_INFO "%s link %s\n", dev->name,
 -                                      (media_connected == priv->media_state ?
 -                                      "on" : "off"));
 -              }
 +              if (netif_running(dev))
 +                      set_media_state(dev, media_connected);
        }
  }
  
@@@ -6973,7 -6985,7 +6973,7 @@@ static int __init pcidev_init(struct pc
        int pi;
        int port_count;
        int result;
 -      char banner[80];
 +      char banner[sizeof(version)];
        struct ksz_switch *sw = NULL;
  
        result = pci_enable_device(pdev);
  
        result = -ENOMEM;
  
 -      info = kmalloc(sizeof(struct platform_info), GFP_KERNEL);
 +      info = kzalloc(sizeof(struct platform_info), GFP_KERNEL);
        if (!info)
                goto pcidev_init_dev_err;
 -      memset(info, 0, sizeof(struct platform_info));
  
        hw_priv = &info->dev_info;
        hw_priv->pdev = pdev;
        cnt = hw_init(hw);
        if (!cnt) {
                if (msg_enable & NETIF_MSG_PROBE)
 -                      printk(KERN_ALERT "chip not detected\n");
 +                      pr_alert("chip not detected\n");
                result = -ENODEV;
                goto pcidev_init_alloc_err;
        }
  
 -      sprintf(banner, "%s\n", version);
 -      banner[13] = cnt + '0';
 -      ks_info(hw_priv, "%s", banner);
 -      ks_dbg(hw_priv, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
 +      snprintf(banner, sizeof(banner), "%s", version);
 +      banner[13] = cnt + '0';         /* Replace x in "Micrel KSZ884x" */
 +      dev_info(&hw_priv->pdev->dev, "%s\n", banner);
 +      dev_dbg(&hw_priv->pdev->dev, "Mem = %p; IRQ = %d\n", hw->io, pdev->irq);
  
        /* Assume device is KSZ8841. */
        hw->dev_count = 1;
index 33ae5e13b60877f539437c59471d550cb70f941b,0f703838e21ad73b5ad775e35b9984146d60cf5f..174ac8ef82fa94749fddccb2e0996af3cb0d591b
@@@ -53,8 -53,8 +53,8 @@@
  
  #define _NETXEN_NIC_LINUX_MAJOR 4
  #define _NETXEN_NIC_LINUX_MINOR 0
- #define _NETXEN_NIC_LINUX_SUBVERSION 72
- #define NETXEN_NIC_LINUX_VERSIONID  "4.0.72"
+ #define _NETXEN_NIC_LINUX_SUBVERSION 73
+ #define NETXEN_NIC_LINUX_VERSIONID  "4.0.73"
  
  #define NETXEN_VERSION_CODE(a, b, c)  (((a) << 24) + ((b) << 16) + (c))
  #define _major(v)     (((v) >> 24) & 0xff)
@@@ -420,6 -420,7 +420,6 @@@ struct status_desc 
  } __attribute__ ((aligned(16)));
  
  /* UNIFIED ROMIMAGE *************************/
 -#define NX_UNI_FW_MIN_SIZE            0xc8000
  #define NX_UNI_DIR_SECT_PRODUCT_TBL   0x0
  #define NX_UNI_DIR_SECT_BOOTLD                0x6
  #define NX_UNI_DIR_SECT_FW            0x7
index 439f3e85969387fb4663f53012d4ab2690236384,7eb925a9f36e3ed6d558841659d02b812b790dd6..ecb6eed1d8e2b4014bf59618c5473ce07d9db1f5
@@@ -613,123 -613,22 +613,123 @@@ static struct uni_table_desc *nx_get_ta
        return NULL;
  }
  
 +#define       QLCNIC_FILEHEADER_SIZE  (14 * 4)
 +
  static int
 -nx_set_product_offs(struct netxen_adapter *adapter)
 -{
 -      struct uni_table_desc *ptab_descr;
 +netxen_nic_validate_header(struct netxen_adapter *adapter)
 + {
        const u8 *unirom = adapter->fw->data;
 -      uint32_t i;
 +      struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
 +      u32 fw_file_size = adapter->fw->size;
 +      u32 tab_size;
        __le32 entries;
 +      __le32 entry_size;
 +
 +      if (fw_file_size < QLCNIC_FILEHEADER_SIZE)
 +              return -EINVAL;
 +
 +      entries = cpu_to_le32(directory->num_entries);
 +      entry_size = cpu_to_le32(directory->entry_size);
 +      tab_size = cpu_to_le32(directory->findex) + (entries * entry_size);
 +
 +      if (fw_file_size < tab_size)
 +              return -EINVAL;
 +
 +      return 0;
 +}
 +
 +static int
 +netxen_nic_validate_bootld(struct netxen_adapter *adapter)
 +{
 +      struct uni_table_desc *tab_desc;
 +      struct uni_data_desc *descr;
 +      const u8 *unirom = adapter->fw->data;
 +      __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
 +                              NX_UNI_BOOTLD_IDX_OFF));
 +      u32 offs;
 +      u32 tab_size;
 +      u32 data_size;
 +
 +      tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_BOOTLD);
 +
 +      if (!tab_desc)
 +              return -EINVAL;
 +
 +      tab_size = cpu_to_le32(tab_desc->findex) +
 +                      (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
  
 +      if (adapter->fw->size < tab_size)
 +              return -EINVAL;
 +
 +      offs = cpu_to_le32(tab_desc->findex) +
 +              (cpu_to_le32(tab_desc->entry_size) * (idx));
 +      descr = (struct uni_data_desc *)&unirom[offs];
 +
 +      data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
 +
 +      if (adapter->fw->size < data_size)
 +              return -EINVAL;
 +
 +      return 0;
 +}
 +
 +static int
 +netxen_nic_validate_fw(struct netxen_adapter *adapter)
 +{
 +      struct uni_table_desc *tab_desc;
 +      struct uni_data_desc *descr;
 +      const u8 *unirom = adapter->fw->data;
 +      __le32 idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
 +                              NX_UNI_FIRMWARE_IDX_OFF));
 +      u32 offs;
 +      u32 tab_size;
 +      u32 data_size;
 +
 +      tab_desc = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_FW);
 +
 +      if (!tab_desc)
 +              return -EINVAL;
 +
 +      tab_size = cpu_to_le32(tab_desc->findex) +
 +                      (cpu_to_le32(tab_desc->entry_size) * (idx + 1));
 +
 +      if (adapter->fw->size < tab_size)
 +              return -EINVAL;
 +
 +      offs = cpu_to_le32(tab_desc->findex) +
 +              (cpu_to_le32(tab_desc->entry_size) * (idx));
 +      descr = (struct uni_data_desc *)&unirom[offs];
 +      data_size = cpu_to_le32(descr->findex) + cpu_to_le32(descr->size);
 +
 +      if (adapter->fw->size < data_size)
 +              return -EINVAL;
 +
 +      return 0;
 +}
 +
 +
 +static int
 +netxen_nic_validate_product_offs(struct netxen_adapter *adapter)
 +{
 +      struct uni_table_desc *ptab_descr;
 +      const u8 *unirom = adapter->fw->data;
        int mn_present = (NX_IS_REVISION_P2(adapter->ahw.revision_id)) ?
                        1 : netxen_p3_has_mn(adapter);
 +      __le32 entries;
 +      __le32 entry_size;
 +      u32 tab_size;
 +      u32 i;
  
        ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
        if (ptab_descr == NULL)
 -              return -1;
 +              return -EINVAL;
  
        entries = cpu_to_le32(ptab_descr->num_entries);
 +      entry_size = cpu_to_le32(ptab_descr->entry_size);
 +      tab_size = cpu_to_le32(ptab_descr->findex) + (entries * entry_size);
 +
 +      if (adapter->fw->size < tab_size)
 +              return -EINVAL;
  
  nomn:
        for (i = 0; i < entries; i++) {
                goto nomn;
        }
  
 -      return -1;
 +      return -EINVAL;
  }
  
 +static int
 +netxen_nic_validate_unified_romimage(struct netxen_adapter *adapter)
 +{
 +      if (netxen_nic_validate_header(adapter)) {
 +              dev_err(&adapter->pdev->dev,
 +                              "unified image: header validation failed\n");
 +              return -EINVAL;
 +      }
 +
 +      if (netxen_nic_validate_product_offs(adapter)) {
 +              dev_err(&adapter->pdev->dev,
 +                              "unified image: product validation failed\n");
 +              return -EINVAL;
 +      }
 +
 +      if (netxen_nic_validate_bootld(adapter)) {
 +              dev_err(&adapter->pdev->dev,
 +                              "unified image: bootld validation failed\n");
 +              return -EINVAL;
 +      }
 +
 +      if (netxen_nic_validate_fw(adapter)) {
 +              dev_err(&adapter->pdev->dev,
 +                              "unified image: firmware validation failed\n");
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
  
  static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
                        u32 section, u32 idx_offset)
@@@ -891,7 -761,7 +891,7 @@@ nx_get_bios_version(struct netxen_adapt
        if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
                bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
                                                + NX_UNI_BIOS_VERSION_OFF));
-               return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+               return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
                                                        (bios_ver >> 24);
        } else
                return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
@@@ -1019,16 -889,6 +1019,16 @@@ netxen_load_firmware(struct netxen_adap
  
                        flashaddr += 8;
                }
 +
 +              size = (__force u32)nx_get_fw_size(adapter) % 8;
 +              if (size) {
 +                      data = cpu_to_le64(ptr64[i]);
 +
 +                      if (adapter->pci_mem_write(adapter,
 +                                              flashaddr, data))
 +                              return -EIO;
 +              }
 +
        } else {
                u64 data;
                u32 hi, lo;
@@@ -1073,23 -933,27 +1073,23 @@@ static in
  netxen_validate_firmware(struct netxen_adapter *adapter)
  {
        __le32 val;
 -      u32 ver, min_ver, bios, min_size;
 +      u32 ver, min_ver, bios;
        struct pci_dev *pdev = adapter->pdev;
        const struct firmware *fw = adapter->fw;
        u8 fw_type = adapter->fw_type;
  
        if (fw_type == NX_UNIFIED_ROMIMAGE) {
 -              if (nx_set_product_offs(adapter))
 +              if (netxen_nic_validate_unified_romimage(adapter))
                        return -EINVAL;
 -
 -              min_size = NX_UNI_FW_MIN_SIZE;
        } else {
                val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
                if ((__force u32)val != NETXEN_BDINFO_MAGIC)
                        return -EINVAL;
  
 -              min_size = NX_FW_MIN_SIZE;
 +              if (fw->size < NX_FW_MIN_SIZE)
 +                      return -EINVAL;
        }
  
 -      if (fw->size < min_size)
 -              return -EINVAL;
 -
        val = nx_get_fw_version(adapter);
  
        if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
index f1daa9a8be07f9a5b6a12b82203f59fb654ebde3,01808b28d1b6c52fcaced72fe2ab9f6566659c9a..9e82061c02355c9fe30eda02f47fd183ee7de59d
@@@ -604,16 -604,14 +604,14 @@@ netxen_cleanup_pci_map(struct netxen_ad
  static int
  netxen_setup_pci_map(struct netxen_adapter *adapter)
  {
-       void __iomem *mem_ptr0 = NULL;
-       void __iomem *mem_ptr1 = NULL;
-       void __iomem *mem_ptr2 = NULL;
        void __iomem *db_ptr = NULL;
  
        resource_size_t mem_base, db_base;
-       unsigned long mem_len, db_len = 0, pci_len0 = 0;
+       unsigned long mem_len, db_len = 0;
  
        struct pci_dev *pdev = adapter->pdev;
        int pci_func = adapter->ahw.pci_func;
+       struct netxen_hardware_context *ahw = &adapter->ahw;
  
        int err = 0;
  
  
        /* 128 Meg of memory */
        if (mem_len == NETXEN_PCI_128MB_SIZE) {
-               mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
-               mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+               ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+               ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
                                SECOND_PAGE_GROUP_SIZE);
-               mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+               ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
                                THIRD_PAGE_GROUP_SIZE);
-               pci_len0 = FIRST_PAGE_GROUP_SIZE;
+               if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+                                               ahw->pci_base2 == NULL) {
+                       dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+                       err = -EIO;
+                       goto err_out;
+               }
+               ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
        } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
-               mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
-               mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+               ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+               ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
                        SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+               if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+                       dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+                       err = -EIO;
+                       goto err_out;
+               }
        } else if (mem_len == NETXEN_PCI_2MB_SIZE) {
  
-               mem_ptr0 = pci_ioremap_bar(pdev, 0);
-               if (mem_ptr0 == NULL) {
+               ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+               if (ahw->pci_base0 == NULL) {
                        dev_err(&pdev->dev, "failed to map PCI bar 0\n");
                        return -EIO;
                }
-               pci_len0 = mem_len;
+               ahw->pci_len0 = mem_len;
        } else {
                return -EIO;
        }
  
        dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
  
-       adapter->ahw.pci_base0 = mem_ptr0;
-       adapter->ahw.pci_len0 = pci_len0;
-       adapter->ahw.pci_base1 = mem_ptr1;
-       adapter->ahw.pci_base2 = mem_ptr2;
        if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
                adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
                        NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
@@@ -772,22 -781,15 +781,22 @@@ netxen_check_options(struct netxen_adap
        if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
                adapter->msix_supported = !!use_msi_x;
                adapter->rss_supported = !!use_msi_x;
 -      } else if (adapter->fw_version >= NETXEN_VERSION_CODE(3, 4, 336)) {
 -              switch (adapter->ahw.board_type) {
 -              case NETXEN_BRDTYPE_P2_SB31_10G:
 -              case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
 -                      adapter->msix_supported = !!use_msi_x;
 -                      adapter->rss_supported = !!use_msi_x;
 -                      break;
 -              default:
 -                      break;
 +      } else {
 +              u32 flashed_ver = 0;
 +              netxen_rom_fast_read(adapter,
 +                              NX_FW_VERSION_OFFSET, (int *)&flashed_ver);
 +              flashed_ver = NETXEN_DECODE_VERSION(flashed_ver);
 +
 +              if (flashed_ver >= NETXEN_VERSION_CODE(3, 4, 336)) {
 +                      switch (adapter->ahw.board_type) {
 +                      case NETXEN_BRDTYPE_P2_SB31_10G:
 +                      case NETXEN_BRDTYPE_P2_SB31_10G_CX4:
 +                              adapter->msix_supported = !!use_msi_x;
 +                              adapter->rss_supported = !!use_msi_x;
 +                              break;
 +                      default:
 +                              break;
 +                      }
                }
        }
  
@@@ -1253,8 -1255,8 +1262,8 @@@ netxen_nic_probe(struct pci_dev *pdev, 
        int pci_func_id = PCI_FUNC(pdev->devfn);
        uint8_t revision_id;
  
-       if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
-               pr_warning("%s: chip revisions between 0x%x-0x%x"
+       if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
+               pr_warning("%s: chip revisions between 0x%x-0x%x "
                                "will not be enabled.\n",
                                module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
                return -ENODEV;
@@@ -2301,7 -2303,6 +2310,7 @@@ netxen_fwinit_work(struct work_struct *
                }
                break;
  
 +      case NX_DEV_NEED_RESET:
        case NX_DEV_INITALIZING:
                if (++adapter->fw_wait_cnt < FW_POLL_THRESH) {
                        netxen_schedule_work(adapter,
@@@ -2345,9 -2346,6 +2354,9 @@@ netxen_detach_work(struct work_struct *
  
        ref_cnt = nx_decr_dev_ref_cnt(adapter);
  
 +      if (ref_cnt == -EIO)
 +              goto err_ret;
 +
        delay = (ref_cnt == 0) ? 0 : (2 * FW_POLL_DELAY);
  
        adapter->fw_wait_cnt = 0;
index f45c626003a40fef3057352fb434968f50f6768e,ff7eb9116b6a94eb4cc86b93382dec84c40ba04e..ad2267646187dc48df2054ab666bdd056bce1884
@@@ -493,13 -493,14 +493,14 @@@ static int pcmcia_get_versmac(struct pc
  {
        struct net_device *dev = priv;
        cisparse_t parse;
+       u8 *buf;
  
        if (pcmcia_parse_tuple(tuple, &parse))
                return -EINVAL;
  
-       if ((parse.version_1.ns > 3) &&
-           (cvt_ascii_address(dev,
-                              (parse.version_1.str + parse.version_1.ofs[3]))))
+       buf = parse.version_1.str + parse.version_1.ofs[3];
+       if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0))
                return 0;
  
        return -EINVAL;
@@@ -528,7 -529,7 +529,7 @@@ static int mhz_setup(struct pcmcia_devi
      len = pcmcia_get_tuple(link, 0x81, &buf);
      if (buf && len >= 13) {
            buf[12] = '\0';
-           if (cvt_ascii_address(dev, buf))
+           if (cvt_ascii_address(dev, buf) == 0)
                    rc = 0;
      }
      kfree(buf);
@@@ -910,7 -911,7 +911,7 @@@ static int smc91c92_config(struct pcmci
  
      if (i != 0) {
        printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
-       goto config_undo;
+       goto config_failed;
      }
  
      smc->duplex = 0;
@@@ -998,6 -999,7 +999,7 @@@ config_undo
      unregister_netdev(dev);
  config_failed:
      smc91c92_release(link);
+     free_netdev(dev);
      return -ENODEV;
  } /* smc91c92_config */
  
@@@ -1616,12 -1618,12 +1618,12 @@@ static void set_rx_mode(struct net_devi
        rx_cfg_setting = RxStripCRC | RxEnable | RxAllMulti;
      else {
        if (!netdev_mc_empty(dev)) {
 -          struct dev_mc_list *mc_addr;
 +          struct netdev_hw_addr *ha;
  
 -          netdev_for_each_mc_addr(mc_addr, dev) {
 -              u_int position = ether_crc(6, mc_addr->dmi_addr);
 +          netdev_for_each_mc_addr(ha, dev) {
 +              u_int position = ether_crc(6, ha->addr);
  #ifndef final_version         /* Verify multicast address. */
 -              if ((mc_addr->dmi_addr[0] & 1) == 0)
 +              if ((ha->addr[0] & 1) == 0)
                    continue;
  #endif
                multicast_table[position >> 29] |= 1 << ((position >> 26) & 7);
diff --combined drivers/net/r8169.c
index 64cd250f642de02878c25f0368b7627a2dc585d1,dbb1f5a1824c7e7ddd251d9c75cbe3d4bab8469a..340da3915b9679c5994e6b5eb3ec03dca4a5a1d0
@@@ -23,7 -23,6 +23,7 @@@
  #include <linux/tcp.h>
  #include <linux/init.h>
  #include <linux/dma-mapping.h>
 +#include <linux/pm_runtime.h>
  
  #include <asm/system.h>
  #include <asm/io.h>
@@@ -187,8 -186,13 +187,13 @@@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_
  
  MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
  
- static int rx_copybreak = 200;
- static int use_dac = -1;
+ /*
+  * we set our copybreak very high so that we don't have
+  * to allocate 16k frames all the time (see note in
+  * rtl8169_open()
+  */
+ static int rx_copybreak = 16383;
+ static int use_dac;
  static struct {
        u32 msg_enable;
  } debug = { -1 };
@@@ -505,7 -509,6 +510,7 @@@ struct rtl8169_private 
  
        struct mii_if_info mii;
        struct rtl8169_counters counters;
 +      u32 saved_wolopts;
  };
  
  MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@@ -513,8 -516,7 +518,7 @@@ MODULE_DESCRIPTION("RealTek RTL-8169 Gi
  module_param(rx_copybreak, int, 0);
  MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
  module_param(use_dac, int, 0);
- MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only."
- " Unsafe on 32 bit PCI slot.");
+ MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
  module_param_named(debug, debug.msg_enable, int, 0);
  MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
  MODULE_LICENSE("GPL");
@@@ -746,61 -748,53 +750,61 @@@ static void rtl8169_check_link_status(s
  
        spin_lock_irqsave(&tp->lock, flags);
        if (tp->link_ok(ioaddr)) {
 +              /* This is to cancel a scheduled suspend if there's one. */
 +              pm_request_resume(&tp->pci_dev->dev);
                netif_carrier_on(dev);
                netif_info(tp, ifup, dev, "link up\n");
        } else {
                netif_carrier_off(dev);
                netif_info(tp, ifdown, dev, "link down\n");
 +              pm_schedule_suspend(&tp->pci_dev->dev, 100);
        }
        spin_unlock_irqrestore(&tp->lock, flags);
  }
  
 -static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
 +
 +static u32 __rtl8169_get_wol(struct rtl8169_private *tp)
  {
 -      struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
        u8 options;
 -
 -      wol->wolopts = 0;
 -
 -#define WAKE_ANY (WAKE_PHY | WAKE_MAGIC | WAKE_UCAST | WAKE_BCAST | WAKE_MCAST)
 -      wol->supported = WAKE_ANY;
 -
 -      spin_lock_irq(&tp->lock);
 +      u32 wolopts = 0;
  
        options = RTL_R8(Config1);
        if (!(options & PMEnable))
 -              goto out_unlock;
 +              return 0;
  
        options = RTL_R8(Config3);
        if (options & LinkUp)
 -              wol->wolopts |= WAKE_PHY;
 +              wolopts |= WAKE_PHY;
        if (options & MagicPacket)
 -              wol->wolopts |= WAKE_MAGIC;
 +              wolopts |= WAKE_MAGIC;
  
        options = RTL_R8(Config5);
        if (options & UWF)
 -              wol->wolopts |= WAKE_UCAST;
 +              wolopts |= WAKE_UCAST;
        if (options & BWF)
 -              wol->wolopts |= WAKE_BCAST;
 +              wolopts |= WAKE_BCAST;
        if (options & MWF)
 -              wol->wolopts |= WAKE_MCAST;
 +              wolopts |= WAKE_MCAST;
  
 -out_unlock:
 -      spin_unlock_irq(&tp->lock);
 +      return wolopts;
  }
  
 -static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +static void rtl8169_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
  {
        struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      spin_lock_irq(&tp->lock);
 +
 +      wol->supported = WAKE_ANY;
 +      wol->wolopts = __rtl8169_get_wol(tp);
 +
 +      spin_unlock_irq(&tp->lock);
 +}
 +
 +static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
 +{
        void __iomem *ioaddr = tp->mmio_addr;
        unsigned int i;
        static const struct {
                { WAKE_ANY,   Config5, LanWake }
        };
  
 -      spin_lock_irq(&tp->lock);
 -
        RTL_W8(Cfg9346, Cfg9346_Unlock);
  
        for (i = 0; i < ARRAY_SIZE(cfg); i++) {
                u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
 -              if (wol->wolopts & cfg[i].opt)
 +              if (wolopts & cfg[i].opt)
                        options |= cfg[i].mask;
                RTL_W8(cfg[i].reg, options);
        }
  
        RTL_W8(Cfg9346, Cfg9346_Lock);
 +}
 +
 +static int rtl8169_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
 +{
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      spin_lock_irq(&tp->lock);
  
        if (wol->wolopts)
                tp->features |= RTL_FEATURE_WOL;
        else
                tp->features &= ~RTL_FEATURE_WOL;
 +      __rtl8169_set_wol(tp, wol->wolopts);
        device_set_wakeup_enable(&tp->pci_dev->dev, wol->wolopts);
  
        spin_unlock_irq(&tp->lock);
@@@ -1054,14 -1042,14 +1058,14 @@@ static void rtl8169_vlan_rx_register(st
  }
  
  static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
 -                             struct sk_buff *skb)
 +                             struct sk_buff *skb, int polling)
  {
        u32 opts2 = le32_to_cpu(desc->opts2);
        struct vlan_group *vlgrp = tp->vlgrp;
        int ret;
  
        if (vlgrp && (opts2 & RxVlanTag)) {
 -              vlan_hwaccel_receive_skb(skb, vlgrp, swab16(opts2 & 0xffff));
 +              __vlan_hwaccel_rx(skb, vlgrp, swab16(opts2 & 0xffff), polling);
                ret = 0;
        } else
                ret = -1;
@@@ -1078,7 -1066,7 +1082,7 @@@ static inline u32 rtl8169_tx_vlan_tag(s
  }
  
  static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
 -                             struct sk_buff *skb)
 +                             struct sk_buff *skb, int polling)
  {
        return -1;
  }
@@@ -2837,8 -2825,8 +2841,8 @@@ static void rtl_rar_set(struct rtl8169_
        spin_lock_irq(&tp->lock);
  
        RTL_W8(Cfg9346, Cfg9346_Unlock);
-       RTL_W32(MAC0, low);
        RTL_W32(MAC4, high);
+       RTL_W32(MAC0, low);
        RTL_W8(Cfg9346, Cfg9346_Lock);
  
        spin_unlock_irq(&tp->lock);
@@@ -2990,7 -2978,6 +2994,6 @@@ rtl8169_init_one(struct pci_dev *pdev, 
        void __iomem *ioaddr;
        unsigned int i;
        int rc;
-       int this_use_dac = use_dac;
  
        if (netif_msg_drv(&debug)) {
                printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
  
        tp->cp_cmd = PCIMulRW | RxChkSum;
  
-       tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-       if (!tp->pcie_cap)
-               netif_info(tp, probe, dev, "no PCI Express capability\n");
-       if (this_use_dac < 0)
-               this_use_dac = tp->pcie_cap != 0;
        if ((sizeof(dma_addr_t) > 4) &&
-           this_use_dac &&
-           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               netif_info(tp, probe, dev, "using 64-bit DMA\n");
+           !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
                tp->cp_cmd |= PCIDAC;
                dev->features |= NETIF_F_HIGHDMA;
        } else {
                goto err_out_free_res_4;
        }
  
+       tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+       if (!tp->pcie_cap)
+               netif_info(tp, probe, dev, "no PCI Express capability\n");
        RTL_W16(IntrMask, 0x0000);
  
        /* Soft reset the chip. */
  
        device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
  
 +      if (pci_dev_run_wake(pdev)) {
 +              pm_runtime_set_active(&pdev->dev);
 +              pm_runtime_enable(&pdev->dev);
 +      }
 +      pm_runtime_idle(&pdev->dev);
 +
  out:
        return rc;
  
@@@ -3233,18 -3209,10 +3231,18 @@@ static void __devexit rtl8169_remove_on
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rtl8169_private *tp = netdev_priv(dev);
  
 +      pm_runtime_get_sync(&pdev->dev);
 +
        flush_scheduled_work();
  
        unregister_netdev(dev);
  
 +      if (pci_dev_run_wake(pdev)) {
 +              pm_runtime_disable(&pdev->dev);
 +              pm_runtime_set_suspended(&pdev->dev);
 +      }
 +      pm_runtime_put_noidle(&pdev->dev);
 +
        /* restore original MAC address */
        rtl_rar_set(tp, dev->perm_addr);
  
  }
  
  static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
-                                 struct net_device *dev)
+                                 unsigned int mtu)
  {
-       unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+       unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+       if (max_frame != 16383)
+               printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
+                       "NIC may lead to frame reception errors!\n");
  
        tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
  }
@@@ -3267,9 -3239,18 +3269,19 @@@ static int rtl8169_open(struct net_devi
        struct pci_dev *pdev = tp->pci_dev;
        int retval = -ENOMEM;
  
 +      pm_runtime_get_sync(&pdev->dev);
  
-       rtl8169_set_rxbufsize(tp, dev);
+       /*
+        * Note that we use a magic value here; it's weird, I know.
+        * It's done because some subset of rtl8169 hardware suffers from
+        * a problem in which frames received that are longer than
+        * the size set in RxMaxSize register return garbage sizes
+        * when received.  To avoid this we need to turn off filtering,
+        * which is done by setting a value of 16383 in the RxMaxSize register
+        * and allocating 16k frames to handle the largest possible rx value;
+        * that's what the magic math below does.
+        */
+       rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
  
        /*
         * Rx and Tx descriptors need 256-byte alignment.
        tp->TxDescArray = pci_alloc_consistent(pdev, R8169_TX_RING_BYTES,
                                               &tp->TxPhyAddr);
        if (!tp->TxDescArray)
 -              goto out;
 +              goto err_pm_runtime_put;
  
        tp->RxDescArray = pci_alloc_consistent(pdev, R8169_RX_RING_BYTES,
                                               &tp->RxPhyAddr);
  
        rtl8169_request_timer(dev);
  
 +      tp->saved_wolopts = 0;
 +      pm_runtime_put_noidle(&pdev->dev);
 +
        rtl8169_check_link_status(dev, tp, tp->mmio_addr);
  out:
        return retval;
@@@ -3317,13 -3295,9 +3329,13 @@@ err_release_ring_2
  err_free_rx_1:
        pci_free_consistent(pdev, R8169_RX_RING_BYTES, tp->RxDescArray,
                            tp->RxPhyAddr);
 +      tp->RxDescArray = NULL;
  err_free_tx_0:
        pci_free_consistent(pdev, R8169_TX_RING_BYTES, tp->TxDescArray,
                            tp->TxPhyAddr);
 +      tp->TxDescArray = NULL;
 +err_pm_runtime_put:
 +      pm_runtime_put_noidle(&pdev->dev);
        goto out;
  }
  
@@@ -3929,7 -3903,7 +3941,7 @@@ static int rtl8169_change_mtu(struct ne
  
        rtl8169_down(dev);
  
-       rtl8169_set_rxbufsize(tp, dev);
+       rtl8169_set_rxbufsize(tp, dev->mtu);
  
        ret = rtl8169_init_ring(dev);
        if (ret < 0)
        return done;
  }
  
 +/*
 + * Warning : rtl8169_rx_interrupt() might be called :
 + * 1) from NAPI (softirq) context
 + *    (polling = 1 : we should call netif_receive_skb())
 + * 2) from process context (rtl8169_reset_task())
 + *    (polling = 0 : we must call netif_rx() instead)
 + */
  static int rtl8169_rx_interrupt(struct net_device *dev,
                                struct rtl8169_private *tp,
                                void __iomem *ioaddr, u32 budget)
  {
        unsigned int cur_rx, rx_left;
        unsigned int delta, count;
 +      int polling = (budget != ~(u32)0) ? 1 : 0;
  
        cur_rx = tp->cur_rx;
        rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx;
                        skb_put(skb, pkt_size);
                        skb->protocol = eth_type_trans(skb, dev);
  
 -                      if (rtl8169_rx_vlan_skb(tp, desc, skb) < 0)
 -                              netif_receive_skb(skb);
 +                      if (rtl8169_rx_vlan_skb(tp, desc, skb, polling) < 0) {
 +                              if (likely(polling))
 +                                      netif_receive_skb(skb);
 +                              else
 +                                      netif_rx(skb);
 +                      }
  
                        dev->stats.rx_bytes += pkt_size;
                        dev->stats.rx_packets++;
@@@ -4742,8 -4704,6 +4754,8 @@@ static int rtl8169_close(struct net_dev
        struct rtl8169_private *tp = netdev_priv(dev);
        struct pci_dev *pdev = tp->pci_dev;
  
 +      pm_runtime_get_sync(&pdev->dev);
 +
        /* update counters before going down */
        rtl8169_update_counters(dev);
  
        tp->TxDescArray = NULL;
        tp->RxDescArray = NULL;
  
 +      pm_runtime_put_sync(&pdev->dev);
 +
        return 0;
  }
  
@@@ -4785,12 -4743,12 +4797,12 @@@ static void rtl_set_rx_mode(struct net_
                rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0xffffffff;
        } else {
 -              struct dev_mc_list *mclist;
 +              struct netdev_hw_addr *ha;
  
                rx_mode = AcceptBroadcast | AcceptMyPhys;
                mc_filter[1] = mc_filter[0] = 0;
 -              netdev_for_each_mc_addr(mclist, dev) {
 -                      int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
 +              netdev_for_each_mc_addr(ha, dev) {
 +                      int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                        rx_mode |= AcceptMulticast;
                }
                mc_filter[1] = swab32(data);
        }
  
-       RTL_W32(MAR0 + 0, mc_filter[0]);
        RTL_W32(MAR0 + 4, mc_filter[1]);
+       RTL_W32(MAR0 + 0, mc_filter[0]);
  
        RTL_W32(RxConfig, tmp);
  
@@@ -4858,74 -4816,21 +4870,74 @@@ static int rtl8169_suspend(struct devic
        return 0;
  }
  
 +static void __rtl8169_resume(struct net_device *dev)
 +{
 +      netif_device_attach(dev);
 +      rtl8169_schedule_work(dev, rtl8169_reset_task);
 +}
 +
  static int rtl8169_resume(struct device *device)
  {
        struct pci_dev *pdev = to_pci_dev(device);
        struct net_device *dev = pci_get_drvdata(pdev);
  
 -      if (!netif_running(dev))
 -              goto out;
 +      if (netif_running(dev))
 +              __rtl8169_resume(dev);
  
 -      netif_device_attach(dev);
 +      return 0;
 +}
 +
 +static int rtl8169_runtime_suspend(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      if (!tp->TxDescArray)
 +              return 0;
 +
 +      spin_lock_irq(&tp->lock);
 +      tp->saved_wolopts = __rtl8169_get_wol(tp);
 +      __rtl8169_set_wol(tp, WAKE_ANY);
 +      spin_unlock_irq(&tp->lock);
 +
 +      rtl8169_net_suspend(dev);
  
 -      rtl8169_schedule_work(dev, rtl8169_reset_task);
 -out:
        return 0;
  }
  
 +static int rtl8169_runtime_resume(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      if (!tp->TxDescArray)
 +              return 0;
 +
 +      spin_lock_irq(&tp->lock);
 +      __rtl8169_set_wol(tp, tp->saved_wolopts);
 +      tp->saved_wolopts = 0;
 +      spin_unlock_irq(&tp->lock);
 +
 +      __rtl8169_resume(dev);
 +
 +      return 0;
 +}
 +
 +static int rtl8169_runtime_idle(struct device *device)
 +{
 +      struct pci_dev *pdev = to_pci_dev(device);
 +      struct net_device *dev = pci_get_drvdata(pdev);
 +      struct rtl8169_private *tp = netdev_priv(dev);
 +
 +      if (!tp->TxDescArray)
 +              return 0;
 +
 +      rtl8169_check_link_status(dev, tp, tp->mmio_addr);
 +      return -EBUSY;
 +}
 +
  static const struct dev_pm_ops rtl8169_pm_ops = {
        .suspend = rtl8169_suspend,
        .resume = rtl8169_resume,
        .thaw = rtl8169_resume,
        .poweroff = rtl8169_suspend,
        .restore = rtl8169_resume,
 +      .runtime_suspend = rtl8169_runtime_suspend,
 +      .runtime_resume = rtl8169_runtime_resume,
 +      .runtime_idle = rtl8169_runtime_idle,
  };
  
  #define RTL8169_PM_OPS        (&rtl8169_pm_ops)
index b79d908fe34ec6db5b41184dcd9370105a4094fb,a4f09d490531db40bf5f703b52d868492a1b6ffc..7063f56640c3158685f47d04a8b47aafe9a0fadd
@@@ -851,13 -851,15 +851,15 @@@ static void uli526x_rx_packet(struct ne
  
                        if ( !(rdes0 & 0x8000) ||
                                ((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+                               struct sk_buff *new_skb = NULL;
                                skb = rxptr->rx_skb_ptr;
  
                                /* Good packet, send to upper layer */
                                /* Short packet uses a new SKB */
-                               if ( (rxlen < RX_COPY_SIZE) &&
-                                       ( (skb = dev_alloc_skb(rxlen + 2) )
-                                       != NULL) ) {
+                               if ((rxlen < RX_COPY_SIZE) &&
+                                   (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+                                       skb = new_skb;
                                        /* size less than COPY_SIZE, allocate a rxlen SKB */
                                        skb_reserve(skb, 2); /* 16byte align */
                                        memcpy(skb_put(skb, rxlen),
@@@ -1392,7 -1394,7 +1394,7 @@@ static void update_cr6(u32 cr6_data, un
  static void send_filter_frame(struct net_device *dev, int mc_cnt)
  {
        struct uli526x_board_info *db = netdev_priv(dev);
 -      struct dev_mc_list *mcptr;
 +      struct netdev_hw_addr *ha;
        struct tx_desc *txptr;
        u16 * addrptr;
        u32 * suptr;
        *suptr++ = 0xffff << FLT_SHIFT;
  
        /* fit the multicast address */
 -      netdev_for_each_mc_addr(mcptr, dev) {
 -              addrptr = (u16 *) mcptr->dmi_addr;
 +      netdev_for_each_mc_addr(ha, dev) {
 +              addrptr = (u16 *) ha->addr;
                *suptr++ = addrptr[0] << FLT_SHIFT;
                *suptr++ = addrptr[1] << FLT_SHIFT;
                *suptr++ = addrptr[2] << FLT_SHIFT;
index 078903f10f02ad10f9ee9cd93eff3e47255030df,bc278d4ee89debb359c4d70ef21e06bfd7479ae3..616f8c92b7451a3a374fcbb8b680b7883f63550d
@@@ -719,30 -719,30 +719,30 @@@ static u32 mii_check_media_mode(struct 
        u32 status = 0;
        u16 ANAR;
  
 -      if (!MII_REG_BITS_IS_ON(BMSR_LNK, MII_REG_BMSR, regs))
 +      if (!MII_REG_BITS_IS_ON(BMSR_LSTATUS, MII_BMSR, regs))
                status |= VELOCITY_LINK_FAIL;
  
 -      if (MII_REG_BITS_IS_ON(G1000CR_1000FD, MII_REG_G1000CR, regs))
 +      if (MII_REG_BITS_IS_ON(ADVERTISE_1000FULL, MII_CTRL1000, regs))
                status |= VELOCITY_SPEED_1000 | VELOCITY_DUPLEX_FULL;
 -      else if (MII_REG_BITS_IS_ON(G1000CR_1000, MII_REG_G1000CR, regs))
 +      else if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF, MII_CTRL1000, regs))
                status |= (VELOCITY_SPEED_1000);
        else {
 -              velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
 -              if (ANAR & ANAR_TXFD)
 +              velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 +              if (ANAR & ADVERTISE_100FULL)
                        status |= (VELOCITY_SPEED_100 | VELOCITY_DUPLEX_FULL);
 -              else if (ANAR & ANAR_TX)
 +              else if (ANAR & ADVERTISE_100HALF)
                        status |= VELOCITY_SPEED_100;
 -              else if (ANAR & ANAR_10FD)
 +              else if (ANAR & ADVERTISE_10FULL)
                        status |= (VELOCITY_SPEED_10 | VELOCITY_DUPLEX_FULL);
                else
                        status |= (VELOCITY_SPEED_10);
        }
  
 -      if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
 -              velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
 -              if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
 -                  == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
 -                      if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
 +      if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
 +              velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 +              if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
 +                  == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
 +                      if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }
@@@ -801,23 -801,23 +801,23 @@@ static void set_mii_flow_control(struc
        /*Enable or Disable PAUSE in ANAR */
        switch (vptr->options.flow_cntl) {
        case FLOW_CNTL_TX:
 -              MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
 -              MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
 +              MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 +              MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
  
        case FLOW_CNTL_RX:
 -              MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
 -              MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
 +              MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 +              MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
  
        case FLOW_CNTL_TX_RX:
 -              MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
 -              MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
 +              MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
-               MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
++              MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
  
        case FLOW_CNTL_DISABLE:
 -              MII_REG_BITS_OFF(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
 -              MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
 +              MII_REG_BITS_OFF(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
 +              MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                break;
   */
  static void mii_set_auto_on(struct velocity_info *vptr)
  {
 -      if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs))
 -              MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
 +      if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs))
 +              MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
        else
 -              MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs);
 +              MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs);
  }
  
  static u32 check_connection_type(struct mac_regs __iomem *regs)
        else
                status |= VELOCITY_SPEED_100;
  
 -      if (MII_REG_BITS_IS_ON(BMCR_AUTO, MII_REG_BMCR, regs)) {
 -              velocity_mii_read(regs, MII_REG_ANAR, &ANAR);
 -              if ((ANAR & (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10))
 -                  == (ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10)) {
 -                      if (MII_REG_BITS_IS_ON(G1000CR_1000 | G1000CR_1000FD, MII_REG_G1000CR, regs))
 +      if (MII_REG_BITS_IS_ON(BMCR_ANENABLE, MII_BMCR, regs)) {
 +              velocity_mii_read(regs, MII_ADVERTISE, &ANAR);
 +              if ((ANAR & (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF))
 +                  == (ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF)) {
 +                      if (MII_REG_BITS_IS_ON(ADVERTISE_1000HALF | ADVERTISE_1000FULL, MII_CTRL1000, regs))
                                status |= VELOCITY_AUTONEG_ENABLE;
                }
        }
@@@ -905,7 -905,7 +905,7 @@@ static int velocity_set_media_mode(stru
         */
  
        if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
 -              MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
 +              MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
  
        /*
         *      If connection type is AUTO
                /* clear force MAC mode bit */
                BYTE_REG_BITS_OFF(CHIPGCR_FCMODE, &regs->CHIPGCR);
                /* set duplex mode of MAC according to duplex mode of MII */
 -              MII_REG_BITS_ON(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10, MII_REG_ANAR, vptr->mac_regs);
 -              MII_REG_BITS_ON(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
 -              MII_REG_BITS_ON(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs);
 +              MII_REG_BITS_ON(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF, MII_ADVERTISE, vptr->mac_regs);
 +              MII_REG_BITS_ON(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
 +              MII_REG_BITS_ON(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs);
  
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
                                BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
                }
  
 -              MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
 +              MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
  
                if (!(mii_status & VELOCITY_DUPLEX_FULL) && (mii_status & VELOCITY_SPEED_10))
                        BYTE_REG_BITS_OFF(TESTCFG_HBDIS, &regs->TESTCFG);
                else
                        BYTE_REG_BITS_ON(TESTCFG_HBDIS, &regs->TESTCFG);
  
 -              /* MII_REG_BITS_OFF(BMCR_SPEED1G, MII_REG_BMCR, vptr->mac_regs); */
 -              velocity_mii_read(vptr->mac_regs, MII_REG_ANAR, &ANAR);
 -              ANAR &= (~(ANAR_TXFD | ANAR_TX | ANAR_10FD | ANAR_10));
 +              /* MII_REG_BITS_OFF(BMCR_SPEED1000, MII_BMCR, vptr->mac_regs); */
 +              velocity_mii_read(vptr->mac_regs, MII_ADVERTISE, &ANAR);
 +              ANAR &= (~(ADVERTISE_100FULL | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_10HALF));
                if (mii_status & VELOCITY_SPEED_100) {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
 -                              ANAR |= ANAR_TXFD;
 +                              ANAR |= ADVERTISE_100FULL;
                        else
 -                              ANAR |= ANAR_TX;
 +                              ANAR |= ADVERTISE_100HALF;
                } else {
                        if (mii_status & VELOCITY_DUPLEX_FULL)
 -                              ANAR |= ANAR_10FD;
 +                              ANAR |= ADVERTISE_10FULL;
                        else
 -                              ANAR |= ANAR_10;
 +                              ANAR |= ADVERTISE_10HALF;
                }
 -              velocity_mii_write(vptr->mac_regs, MII_REG_ANAR, ANAR);
 +              velocity_mii_write(vptr->mac_regs, MII_ADVERTISE, ANAR);
                /* enable AUTO-NEGO mode */
                mii_set_auto_on(vptr);
 -              /* MII_REG_BITS_ON(BMCR_AUTO, MII_REG_BMCR, vptr->mac_regs); */
 +              /* MII_REG_BITS_ON(BMCR_ANENABLE, MII_BMCR, vptr->mac_regs); */
        }
        /* vptr->mii_status=mii_check_media_mode(vptr->mac_regs); */
        /* vptr->mii_status=check_connection_type(vptr->mac_regs); */
@@@ -1126,7 -1126,7 +1126,7 @@@ static void velocity_set_multi(struct n
        struct mac_regs __iomem *regs = vptr->mac_regs;
        u8 rx_mode;
        int i;
 -      struct dev_mc_list *mclist;
 +      struct netdev_hw_addr *ha;
  
        if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
                writel(0xffffffff, &regs->MARCAM[0]);
                mac_get_cam_mask(regs, vptr->mCAMmask);
  
                i = 0;
 -              netdev_for_each_mc_addr(mclist, dev) {
 -                      mac_set_cam(regs, i + offset, mclist->dmi_addr);
 +              netdev_for_each_mc_addr(ha, dev) {
 +                      mac_set_cam(regs, i + offset, ha->addr);
                        vptr->mCAMmask[(offset + i) / 8] |= 1 << ((offset + i) & 7);
                        i++;
                }
@@@ -1178,36 -1178,36 +1178,36 @@@ static void mii_init(struct velocity_in
                /*
                 *      Reset to hardware default
                 */
 -              MII_REG_BITS_OFF((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
 +              MII_REG_BITS_OFF((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on ECHODIS bit in NWay-forced full mode and turn it
                 *      off in NWay-forced half mode for the NWay-forced vs.
                 *      legacy-forced issue.
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
 -                      MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
 +                      MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
 -                      MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
 +                      MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                /*
                 *      Turn on Link/Activity LED enable bit for CIS8201
                 */
 -              MII_REG_BITS_ON(PLED_LALBE, MII_REG_PLED, vptr->mac_regs);
 +              MII_REG_BITS_ON(PLED_LALBE, MII_TPISTATUS, vptr->mac_regs);
                break;
        case PHYID_VT3216_32BIT:
        case PHYID_VT3216_64BIT:
                /*
                 *      Reset to hardware default
                 */
 -              MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
 +              MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                /*
                 *      Turn on ECHODIS bit in NWay-forced full mode and turn it
                 *      off in NWay-forced half mode for the NWay-forced vs.
                 *      legacy-forced issue
                 */
                if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
 -                      MII_REG_BITS_ON(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
 +                      MII_REG_BITS_ON(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                else
 -                      MII_REG_BITS_OFF(TCSR_ECHODIS, MII_REG_TCSR, vptr->mac_regs);
 +                      MII_REG_BITS_OFF(TCSR_ECHODIS, MII_SREVISION, vptr->mac_regs);
                break;
  
        case PHYID_MARVELL_1000:
                /*
                 *      Reset to hardware default
                 */
 -              MII_REG_BITS_ON((ANAR_ASMDIR | ANAR_PAUSE), MII_REG_ANAR, vptr->mac_regs);
 +              MII_REG_BITS_ON((ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP), MII_ADVERTISE, vptr->mac_regs);
                break;
        default:
                ;
        }
 -      velocity_mii_read(vptr->mac_regs, MII_REG_BMCR, &BMCR);
 -      if (BMCR & BMCR_ISO) {
 -              BMCR &= ~BMCR_ISO;
 -              velocity_mii_write(vptr->mac_regs, MII_REG_BMCR, BMCR);
 +      velocity_mii_read(vptr->mac_regs, MII_BMCR, &BMCR);
 +      if (BMCR & BMCR_ISOLATE) {
 +              BMCR &= ~BMCR_ISOLATE;
 +              velocity_mii_write(vptr->mac_regs, MII_BMCR, BMCR);
        }
  }
  
@@@ -2953,13 -2953,13 +2953,13 @@@ static int velocity_set_wol(struct velo
  
        if (vptr->mii_status & VELOCITY_AUTONEG_ENABLE) {
                if (PHYID_GET_PHY_ID(vptr->phy_id) == PHYID_CICADA_CS8201)
 -                      MII_REG_BITS_ON(AUXCR_MDPPS, MII_REG_AUXCR, vptr->mac_regs);
 +                      MII_REG_BITS_ON(AUXCR_MDPPS, MII_NCONFIG, vptr->mac_regs);
  
 -              MII_REG_BITS_OFF(G1000CR_1000FD | G1000CR_1000, MII_REG_G1000CR, vptr->mac_regs);
 +              MII_REG_BITS_OFF(ADVERTISE_1000FULL | ADVERTISE_1000HALF, MII_CTRL1000, vptr->mac_regs);
        }
  
        if (vptr->mii_status & VELOCITY_SPEED_1000)
 -              MII_REG_BITS_ON(BMCR_REAUTO, MII_REG_BMCR, vptr->mac_regs);
 +              MII_REG_BITS_ON(BMCR_ANRESTART, MII_BMCR, vptr->mac_regs);
  
        BYTE_REG_BITS_ON(CHIPGCR_FCMODE, &regs->CHIPGCR);
  
index 3949133d9ee20c49db520b0ef1b897f2c6aabcd2,83c52a682622a518d5b42bbddd17edfdd895db69..3297fc7b80bfd797f9964ad2f892c291b2468c56
@@@ -502,14 -502,14 +502,14 @@@ static void iwl4965_tx_queue_set_status
                       scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
  }
  
 -static const u16 default_queue_to_tx_fifo[] = {
 -      IWL_TX_FIFO_AC3,
 -      IWL_TX_FIFO_AC2,
 -      IWL_TX_FIFO_AC1,
 -      IWL_TX_FIFO_AC0,
 +static const s8 default_queue_to_tx_fifo[] = {
 +      IWL_TX_FIFO_VO,
 +      IWL_TX_FIFO_VI,
 +      IWL_TX_FIFO_BE,
 +      IWL_TX_FIFO_BK,
        IWL49_CMD_FIFO_NUM,
 -      IWL_TX_FIFO_HCCA_1,
 -      IWL_TX_FIFO_HCCA_2
 +      IWL_TX_FIFO_UNUSED,
 +      IWL_TX_FIFO_UNUSED,
  };
  
  static int iwl4965_alive_notify(struct iwl_priv *priv)
        /* reset to 0 to enable all the queue first */
        priv->txq_ctx_active_msk = 0;
        /* Map each Tx/cmd queue to its corresponding fifo */
 +      BUILD_BUG_ON(ARRAY_SIZE(default_queue_to_tx_fifo) != 7);
        for (i = 0; i < ARRAY_SIZE(default_queue_to_tx_fifo); i++) {
                int ac = default_queue_to_tx_fifo[i];
 +
                iwl_txq_ctx_activate(priv, i);
 +
 +              if (ac == IWL_TX_FIFO_UNUSED)
 +                      continue;
 +
                iwl4965_tx_queue_set_status(priv, &priv->txq[i], ac, 0);
        }
  
@@@ -2047,16 -2041,14 +2047,14 @@@ static void iwl4965_rx_reply_tx(struct 
                                   tx_resp->failure_frame);
  
                freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-               if (qc && likely(sta_id != IWL_INVALID_STATION))
-                       priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+               iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
  
                if (priv->mac80211_registered &&
                    (iwl_queue_space(&txq->q) > txq->q.low_mark))
                        iwl_wake_queue(priv, txq_id);
        }
  
-       if (qc && likely(sta_id != IWL_INVALID_STATION))
-               iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+       iwl_txq_check_empty(priv, sta_id, tid, txq_id);
  
        if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
                IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
@@@ -2187,7 -2179,6 +2185,7 @@@ static struct iwl_lib_ops iwl4965_lib 
        .load_ucode = iwl4965_load_bsm,
        .dump_nic_event_log = iwl_dump_nic_event_log,
        .dump_nic_error_log = iwl_dump_nic_error_log,
 +      .dump_fh = iwl_dump_fh,
        .set_channel_switch = iwl4965_hw_channel_switch,
        .apm_ops = {
                .init = iwl_apm_init,
                .set_ct_kill = iwl4965_set_ct_threshold,
        },
        .add_bcast_station = iwl_add_bcast_station,
 +      .check_plcp_health = iwl_good_plcp_health,
  };
  
  static const struct iwl_ops iwl4965_ops = {
  };
  
  struct iwl_cfg iwl4965_agn_cfg = {
 -      .name = "4965AGN",
 +      .name = "Intel(R) Wireless WiFi Link 4965AGN",
        .fw_name_pre = IWL4965_FW_PRE,
        .ucode_api_max = IWL4965_UCODE_API_MAX,
        .ucode_api_min = IWL4965_UCODE_API_MIN,
        .led_compensation = 61,
        .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
        .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
 +      .monitor_recover_period = IWL_MONITORING_PERIOD,
  };
  
  /* Module firmware */
index 0a376f720d7817c6e6faa87ebb168930eb1b61d5,e4c2e1e448ad27e2f5d2b9c65a96dc867576e31b..f43a45d0f1dd07d37792fd34b8cfe783578f24b3
@@@ -54,7 -54,6 +54,7 @@@
  #include "iwl-helpers.h"
  #include "iwl-sta.h"
  #include "iwl-calib.h"
 +#include "iwl-agn.h"
  
  
  /******************************************************************************
@@@ -144,6 -143,9 +144,6 @@@ int iwl_commit_rxon(struct iwl_priv *pr
                return 0;
        }
  
 -      /* station table will be cleared */
 -      priv->assoc_station_added = 0;
 -
        /* If we are currently associated and the new config requires
         * an RXON_ASSOC and the new config wants the associated mask enabled,
         * we must clear the associated from the active configuration
                        IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
                        return ret;
                }
 +              iwl_clear_ucode_stations(priv, false);
 +              iwl_restore_stations(priv);
        }
  
        IWL_DEBUG_INFO(priv, "Sending RXON\n"
        iwl_set_rxon_hwcrypto(priv, !priv->cfg->mod_params->sw_crypto);
  
        /* Apply the new configuration
 -       * RXON unassoc clears the station table in uCode, send it before
 -       * we add the bcast station. If assoc bit is set, we will send RXON
 -       * after having added the bcast and bssid station.
 +       * RXON unassoc clears the station table in uCode so restoration of
 +       * stations is needed after it (the RXON command) completes
         */
        if (!new_assoc) {
                ret = iwl_send_cmd_pdu(priv, REPLY_RXON,
                        IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
                        return ret;
                }
 +              IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON. \n");
                memcpy(active_rxon, &priv->staging_rxon, sizeof(*active_rxon));
 +              iwl_clear_ucode_stations(priv, false);
 +              iwl_restore_stations(priv);
        }
  
 -      iwl_clear_stations_table(priv);
 -
        priv->start_calib = 0;
 -
 -      /* Add the broadcast address so we can send broadcast frames */
 -      priv->cfg->ops->lib->add_bcast_station(priv);
 -
 -
 -      /* If we have set the ASSOC_MSK and we are in BSS mode then
 -       * add the IWL_AP_ID to the station rate table */
        if (new_assoc) {
 -              if (priv->iw_mode == NL80211_IFTYPE_STATION) {
 -                      ret = iwl_rxon_add_station(priv,
 -                                         priv->active_rxon.bssid_addr, 1);
 -                      if (ret == IWL_INVALID_STATION) {
 -                              IWL_ERR(priv,
 -                                      "Error adding AP address for TX.\n");
 -                              return -EIO;
 -                      }
 -                      priv->assoc_station_added = 1;
 -                      if (priv->default_wep_key &&
 -                          iwl_send_static_wepkey_cmd(priv, 0))
 -                              IWL_ERR(priv,
 -                                      "Could not send WEP static key.\n");
 -              }
 -
                /*
                 * allow CTS-to-self if possible for new association.
                 * this is relevant only for 5000 series and up,
@@@ -1236,9 -1258,17 +1236,17 @@@ static void iwl_irq_tasklet(struct iwl_
        /* Ack/clear/reset pending uCode interrupts.
         * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
         */
-       iwl_write32(priv, CSR_INT, priv->_agn.inta);
+       /* There is a hardware bug in the interrupt mask function that some
+        * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+        * they are disabled in the CSR_INT_MASK register. Furthermore the
+        * ICT interrupt handling mechanism has another bug that might cause
+        * these unmasked interrupts fail to be detected. We workaround the
+        * hardware bugs here by ACKing all the possible interrupts so that
+        * interrupt coalescing can still be achieved.
+        */
 -      iwl_write32(priv, CSR_INT, priv->inta | ~priv->inta_mask);
++      iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
  
 -      inta = priv->inta;
 +      inta = priv->_agn.inta;
  
  #ifdef CONFIG_IWLWIFI_DEBUG
        if (iwl_get_debug_level(priv) & IWL_DL_ISR) {
  
        spin_unlock_irqrestore(&priv->lock, flags);
  
 -      /* saved interrupt in inta variable now we can reset priv->inta */
 -      priv->inta = 0;
 +      /* saved interrupt in inta variable now we can reset priv->_agn.inta */
 +      priv->_agn.inta = 0;
  
        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
@@@ -2064,6 -2094,7 +2072,6 @@@ static void iwl_alive_start(struct iwl_
                goto restart;
        }
  
 -      iwl_clear_stations_table(priv);
        ret = priv->cfg->ops->lib->alive_notify(priv);
        if (ret) {
                IWL_WARN(priv,
        /* After the ALIVE response, we can send host commands to the uCode */
        set_bit(STATUS_ALIVE, &priv->status);
  
 +      if (priv->cfg->ops->lib->recover_from_tx_stall) {
 +              /* Enable timer to monitor the driver queues */
 +              mod_timer(&priv->monitor_recover,
 +                      jiffies +
 +                      msecs_to_jiffies(priv->cfg->monitor_recover_period));
 +      }
 +
        if (iwl_is_rfkill(priv))
                return;
  
        ieee80211_wake_queues(priv->hw);
  
 -      priv->active_rate = priv->rates_mask;
 -      priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
 +      priv->active_rate = IWL_RATES_MASK;
  
        /* Configure Tx antenna selection based on H/W config */
        if (priv->cfg->ops->hcmd->set_tx_ant)
        wake_up_interruptible(&priv->wait_command_queue);
  
        iwl_power_update_mode(priv, true);
 +      IWL_DEBUG_INFO(priv, "Updated power mode\n");
  
 -      /* reassociate for ADHOC mode */
 -      if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
 -              struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
 -                                                              priv->vif);
 -              if (beacon)
 -                      iwl_mac_beacon_update(priv->hw, beacon);
 -      }
 -
 -
 -      if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
 -              iwl_set_mode(priv, priv->iw_mode);
  
        return;
  
@@@ -2147,7 -2182,7 +2155,7 @@@ static void __iwl_down(struct iwl_priv 
        if (!exit_pending)
                set_bit(STATUS_EXIT_PENDING, &priv->status);
  
 -      iwl_clear_stations_table(priv);
 +      iwl_clear_ucode_stations(priv, true);
  
        /* Unblock any waiting calls */
        wake_up_interruptible_all(&priv->wait_command_queue);
@@@ -2344,6 -2379,8 +2352,6 @@@ static int __iwl_up(struct iwl_priv *pr
  
        for (i = 0; i < MAX_HW_RESTARTS; i++) {
  
 -              iwl_clear_stations_table(priv);
 -
                /* load bootstrap state machine,
                 * load bootstrap program into processor's memory,
                 * prepare to load the "initialize" uCode */
@@@ -2484,6 -2521,10 +2492,6 @@@ void iwl_post_associate(struct iwl_pri
                return;
        }
  
 -      IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
 -                      priv->assoc_id, priv->active_rxon.bssid_addr);
 -
 -
        if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                return;
  
  
        iwlcore_commit_rxon(priv);
  
 +      IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
 +                      priv->assoc_id, priv->active_rxon.bssid_addr);
 +
        switch (priv->iw_mode) {
        case NL80211_IFTYPE_STATION:
                break;
                /* assume default assoc id */
                priv->assoc_id = 1;
  
 -              iwl_rxon_add_station(priv, priv->bssid, 0);
 +              iwl_add_local_station(priv, priv->bssid, true);
                iwl_send_beacon_cmd(priv);
  
                break;
                break;
        }
  
 -      if (priv->iw_mode == NL80211_IFTYPE_ADHOC)
 -              priv->assoc_station_added = 1;
 -
        spin_lock_irqsave(&priv->lock, flags);
        iwl_activate_qos(priv, 0);
        spin_unlock_irqrestore(&priv->lock, flags);
@@@ -2611,7 -2652,7 +2619,7 @@@ static int iwl_mac_setup_register(struc
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC);
  
-       hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
                            WIPHY_FLAG_DISABLE_BEACON_HINTS;
  
        /*
@@@ -2848,6 -2889,7 +2856,6 @@@ static int iwl_mac_set_key(struct ieee8
  
        mutex_lock(&priv->mutex);
        iwl_scan_cancel_timeout(priv, 100);
 -      mutex_unlock(&priv->mutex);
  
        /* If we are getting WEP group key and we didn't receive any key mapping
         * so far, we are in legacy wep mode (group key only), otherwise we are
                ret = -EINVAL;
        }
  
 +      mutex_unlock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "leave\n");
  
        return ret;
@@@ -2916,21 -2957,10 +2924,21 @@@ static int iwl_mac_ampdu_action(struct 
                        return ret;
        case IEEE80211_AMPDU_TX_START:
                IWL_DEBUG_HT(priv, "start Tx\n");
 -              return iwl_tx_agg_start(priv, sta->addr, tid, ssn);
 +              ret = iwl_tx_agg_start(priv, sta->addr, tid, ssn);
 +              if (ret == 0) {
 +                      priv->_agn.agg_tids_count++;
 +                      IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
 +                                   priv->_agn.agg_tids_count);
 +              }
 +              return ret;
        case IEEE80211_AMPDU_TX_STOP:
                IWL_DEBUG_HT(priv, "stop Tx\n");
                ret = iwl_tx_agg_stop(priv, sta->addr, tid);
 +              if ((ret == 0) && (priv->_agn.agg_tids_count > 0)) {
 +                      priv->_agn.agg_tids_count--;
 +                      IWL_DEBUG_HT(priv, "priv->_agn.agg_tids_count = %u\n",
 +                                   priv->_agn.agg_tids_count);
 +              }
                if (test_bit(STATUS_EXIT_PENDING, &priv->status))
                        return 0;
                else
@@@ -2967,7 -2997,18 +2975,7 @@@ static void iwl_mac_sta_notify(struct i
        struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
        int sta_id;
  
 -      /*
 -       * TODO: We really should use this callback to
 -       *       actually maintain the station table in
 -       *       the device.
 -       */
 -
        switch (cmd) {
 -      case STA_NOTIFY_ADD:
 -              atomic_set(&sta_priv->pending_frames, 0);
 -              if (vif->type == NL80211_IFTYPE_AP)
 -                      sta_priv->client = true;
 -              break;
        case STA_NOTIFY_SLEEP:
                WARN_ON(!sta_priv->client);
                sta_priv->asleep = true;
        }
  }
  
 +/**
 + * iwl_restore_wepkeys - Restore WEP keys to device
 + */
 +static void iwl_restore_wepkeys(struct iwl_priv *priv)
 +{
 +      mutex_lock(&priv->mutex);
 +      if (priv->iw_mode == NL80211_IFTYPE_STATION &&
 +          priv->default_wep_key &&
 +          iwl_send_static_wepkey_cmd(priv, 0))
 +              IWL_ERR(priv, "Could not send WEP static key\n");
 +      mutex_unlock(&priv->mutex);
 +}
 +
 +static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
 +                            struct ieee80211_vif *vif,
 +                            struct ieee80211_sta *sta)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +      struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
 +      bool is_ap = priv->iw_mode == NL80211_IFTYPE_STATION;
 +      int ret;
 +      u8 sta_id;
 +
 +      IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
 +                      sta->addr);
 +
 +      atomic_set(&sta_priv->pending_frames, 0);
 +      if (vif->type == NL80211_IFTYPE_AP)
 +              sta_priv->client = true;
 +
 +      ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
 +                                   &sta_id);
 +      if (ret) {
 +              IWL_ERR(priv, "Unable to add station %pM (%d)\n",
 +                      sta->addr, ret);
 +              /* Should we return success if return code is EEXIST ? */
 +              return ret;
 +      }
 +
 +      iwl_restore_wepkeys(priv);
 +
 +      /* Initialize rate scaling */
 +      IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM \n",
 +                     sta->addr);
 +      iwl_rs_rate_init(priv, sta, sta_id);
 +
 +      return ret;
 +}
 +
  /*****************************************************************************
   *
   * sysfs attributes
@@@ -3137,6 -3129,87 +3145,6 @@@ static ssize_t store_tx_power(struct de
  
  static DEVICE_ATTR(tx_power, S_IWUSR | S_IRUGO, show_tx_power, store_tx_power);
  
 -static ssize_t show_flags(struct device *d,
 -                        struct device_attribute *attr, char *buf)
 -{
 -      struct iwl_priv *priv = dev_get_drvdata(d);
 -
 -      return sprintf(buf, "0x%04X\n", priv->active_rxon.flags);
 -}
 -
 -static ssize_t store_flags(struct device *d,
 -                         struct device_attribute *attr,
 -                         const char *buf, size_t count)
 -{
 -      struct iwl_priv *priv = dev_get_drvdata(d);
 -      unsigned long val;
 -      u32 flags;
 -      int ret = strict_strtoul(buf, 0, &val);
 -      if (ret)
 -              return ret;
 -      flags = (u32)val;
 -
 -      mutex_lock(&priv->mutex);
 -      if (le32_to_cpu(priv->staging_rxon.flags) != flags) {
 -              /* Cancel any currently running scans... */
 -              if (iwl_scan_cancel_timeout(priv, 100))
 -                      IWL_WARN(priv, "Could not cancel scan.\n");
 -              else {
 -                      IWL_DEBUG_INFO(priv, "Commit rxon.flags = 0x%04X\n", flags);
 -                      priv->staging_rxon.flags = cpu_to_le32(flags);
 -                      iwlcore_commit_rxon(priv);
 -              }
 -      }
 -      mutex_unlock(&priv->mutex);
 -
 -      return count;
 -}
 -
 -static DEVICE_ATTR(flags, S_IWUSR | S_IRUGO, show_flags, store_flags);
 -
 -static ssize_t show_filter_flags(struct device *d,
 -                               struct device_attribute *attr, char *buf)
 -{
 -      struct iwl_priv *priv = dev_get_drvdata(d);
 -
 -      return sprintf(buf, "0x%04X\n",
 -              le32_to_cpu(priv->active_rxon.filter_flags));
 -}
 -
 -static ssize_t store_filter_flags(struct device *d,
 -                                struct device_attribute *attr,
 -                                const char *buf, size_t count)
 -{
 -      struct iwl_priv *priv = dev_get_drvdata(d);
 -      unsigned long val;
 -      u32 filter_flags;
 -      int ret = strict_strtoul(buf, 0, &val);
 -      if (ret)
 -              return ret;
 -      filter_flags = (u32)val;
 -
 -      mutex_lock(&priv->mutex);
 -      if (le32_to_cpu(priv->staging_rxon.filter_flags) != filter_flags) {
 -              /* Cancel any currently running scans... */
 -              if (iwl_scan_cancel_timeout(priv, 100))
 -                      IWL_WARN(priv, "Could not cancel scan.\n");
 -              else {
 -                      IWL_DEBUG_INFO(priv, "Committing rxon.filter_flags = "
 -                                     "0x%04X\n", filter_flags);
 -                      priv->staging_rxon.filter_flags =
 -                              cpu_to_le32(filter_flags);
 -                      iwlcore_commit_rxon(priv);
 -              }
 -      }
 -      mutex_unlock(&priv->mutex);
 -
 -      return count;
 -}
 -
 -static DEVICE_ATTR(filter_flags, S_IWUSR | S_IRUGO, show_filter_flags,
 -                 store_filter_flags);
 -
 -
  static ssize_t show_statistics(struct device *d,
                               struct device_attribute *attr, char *buf)
  {
@@@ -3242,13 -3315,6 +3250,13 @@@ static void iwl_setup_deferred_work(str
        priv->ucode_trace.data = (unsigned long)priv;
        priv->ucode_trace.function = iwl_bg_ucode_trace;
  
 +      if (priv->cfg->ops->lib->recover_from_tx_stall) {
 +              init_timer(&priv->monitor_recover);
 +              priv->monitor_recover.data = (unsigned long)priv;
 +              priv->monitor_recover.function =
 +                      priv->cfg->ops->lib->recover_from_tx_stall;
 +      }
 +
        if (!priv->cfg->use_isr_legacy)
                tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                        iwl_irq_tasklet, (unsigned long)priv);
@@@ -3268,8 -3334,6 +3276,8 @@@ static void iwl_cancel_deferred_work(st
        cancel_work_sync(&priv->beacon_update);
        del_timer_sync(&priv->statistics_periodic);
        del_timer_sync(&priv->ucode_trace);
 +      if (priv->cfg->ops->lib->recover_from_tx_stall)
 +              del_timer_sync(&priv->monitor_recover);
  }
  
  static void iwl_init_hw_rates(struct iwl_priv *priv,
@@@ -3307,6 -3371,9 +3315,6 @@@ static int iwl_init_drv(struct iwl_pri
        mutex_init(&priv->mutex);
        mutex_init(&priv->sync_cmd_mutex);
  
 -      /* Clear the driver's (not device's) station table */
 -      iwl_clear_stations_table(priv);
 -
        priv->ieee_channels = NULL;
        priv->ieee_rates = NULL;
        priv->band = IEEE80211_BAND_2GHZ;
        priv->iw_mode = NL80211_IFTYPE_STATION;
        priv->current_ht_config.smps = IEEE80211_SMPS_STATIC;
        priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
 +      priv->_agn.agg_tids_count = 0;
  
        /* initialize force reset */
        priv->force_reset[IWL_RF_RESET].reset_duration =
        priv->qos_data.qos_active = 0;
        priv->qos_data.qos_cap.val = 0;
  
 -      priv->rates_mask = IWL_RATES_MASK;
        /* Set the tx_power_user_lmt to the lowest power level
         * this value will get overwritten by channel max power avg
         * from eeprom */
@@@ -3368,6 -3435,8 +3376,6 @@@ static void iwl_uninit_drv(struct iwl_p
  }
  
  static struct attribute *iwl_sysfs_entries[] = {
 -      &dev_attr_flags.attr,
 -      &dev_attr_filter_flags.attr,
        &dev_attr_statistics.attr,
        &dev_attr_temperature.attr,
        &dev_attr_tx_power.attr,
@@@ -3400,8 -3469,6 +3408,8 @@@ static struct ieee80211_ops iwl_hw_ops 
        .ampdu_action = iwl_mac_ampdu_action,
        .hw_scan = iwl_mac_hw_scan,
        .sta_notify = iwl_mac_sta_notify,
 +      .sta_add = iwlagn_mac_sta_add,
 +      .sta_remove = iwl_mac_sta_remove,
  };
  
  static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
  
        iwl_hw_detect(priv);
 -      IWL_INFO(priv, "Detected Intel Wireless WiFi Link %s REV=0x%X\n",
 +      IWL_INFO(priv, "Detected %s, REV=0x%X\n",
                priv->cfg->name, priv->hw_rev);
  
        /* We disable the RETRY_TIMEOUT register (0x41) to keep
@@@ -3686,6 -3753,7 +3694,6 @@@ static void __devexit iwl_pci_remove(st
                iwl_rx_queue_free(priv, &priv->rxq);
        iwl_hw_txq_ctx_free(priv);
  
 -      iwl_clear_stations_table(priv);
        iwl_eeprom_free(priv);
  
  
index 4995134d7e4a1f56372bdd81747e808d3b7202dd,e276f2a4e8350c444b016c032b00b123463bbdb7..64f150b19771048b6a101c2b3fc7479a28b38379
@@@ -351,11 -351,11 +351,11 @@@ static int iwl3945_send_beacon_cmd(stru
  
  static void iwl3945_unset_hw_params(struct iwl_priv *priv)
  {
 -      if (priv->shared_virt)
 +      if (priv->_3945.shared_virt)
                dma_free_coherent(&priv->pci_dev->dev,
                                  sizeof(struct iwl3945_shared),
 -                                priv->shared_virt,
 -                                priv->shared_phys);
 +                                priv->_3945.shared_virt,
 +                                priv->_3945.shared_phys);
  }
  
  static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
@@@ -504,6 -504,15 +504,6 @@@ static int iwl3945_tx_skb(struct iwl_pr
                IWL_DEBUG_TX(priv, "Sending REASSOC frame\n");
  #endif
  
 -      /* drop all non-injected data frame if we are not associated */
 -      if (ieee80211_is_data(fc) &&
 -          !(info->flags & IEEE80211_TX_CTL_INJECTED) &&
 -          (!iwl_is_associated(priv) ||
 -           ((priv->iw_mode == NL80211_IFTYPE_STATION) && !priv->assoc_id))) {
 -              IWL_DEBUG_DROP(priv, "Dropping - !iwl_is_associated\n");
 -              goto drop_unlock;
 -      }
 -
        spin_unlock_irqrestore(&priv->lock, flags);
  
        hdr_len = ieee80211_hdrlen(fc);
@@@ -744,7 -753,7 +744,7 @@@ static int iwl3945_get_measurement(stru
        if (iwl_is_associated(priv))
                add_time =
                    iwl3945_usecs_to_beacons(
 -                      le64_to_cpu(params->start_time) - priv->last_tsf,
 +                      le64_to_cpu(params->start_time) - priv->_3945.last_tsf,
                        le16_to_cpu(priv->rxon_timing.beacon_interval));
  
        memset(&spectrum, 0, sizeof(spectrum));
  
        if (iwl_is_associated(priv))
                spectrum.start_time =
 -                  iwl3945_add_beacon_time(priv->last_beacon_time,
 +                  iwl3945_add_beacon_time(priv->_3945.last_beacon_time,
                                add_time,
                                le16_to_cpu(priv->rxon_timing.beacon_interval));
        else
@@@ -1946,7 -1955,7 +1946,7 @@@ static void iwl3945_init_hw_rates(struc
  {
        int i;
  
-       for (i = 0; i < IWL_RATE_COUNT; i++) {
+       for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
                rates[i].bitrate = iwl3945_rates[i].ieee * 5;
                rates[i].hw_value = i; /* Rate scaling will work on indexes */
                rates[i].hw_value_short = i;
@@@ -2480,6 -2489,8 +2480,6 @@@ static void iwl3945_alive_start(struct 
                goto restart;
        }
  
 -      iwl_clear_stations_table(priv);
 -
        rfkill = iwl_read_prph(priv, APMG_RFKILL_REG);
        IWL_DEBUG_INFO(priv, "RFKILL status: 0x%x\n", rfkill);
  
        /* After the ALIVE response, we can send commands to 3945 uCode */
        set_bit(STATUS_ALIVE, &priv->status);
  
 +      if (priv->cfg->ops->lib->recover_from_tx_stall) {
 +              /* Enable timer to monitor the driver queues */
 +              mod_timer(&priv->monitor_recover,
 +                      jiffies +
 +                      msecs_to_jiffies(priv->cfg->monitor_recover_period));
 +      }
 +
        if (iwl_is_rfkill(priv))
                return;
  
        ieee80211_wake_queues(priv->hw);
  
 -      priv->active_rate = priv->rates_mask;
 -      priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
 +      priv->active_rate = IWL_RATES_MASK;
  
        iwl_power_update_mode(priv, true);
  
        set_bit(STATUS_READY, &priv->status);
        wake_up_interruptible(&priv->wait_command_queue);
  
 -      /* reassociate for ADHOC mode */
 -      if (priv->vif && (priv->iw_mode == NL80211_IFTYPE_ADHOC)) {
 -              struct sk_buff *beacon = ieee80211_beacon_get(priv->hw,
 -                                                              priv->vif);
 -              if (beacon)
 -                      iwl_mac_beacon_update(priv->hw, beacon);
 -      }
 -
 -      if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
 -              iwl_set_mode(priv, priv->iw_mode);
 -
        return;
  
   restart:
@@@ -2563,8 -2579,7 +2563,8 @@@ static void __iwl3945_down(struct iwl_p
        if (!exit_pending)
                set_bit(STATUS_EXIT_PENDING, &priv->status);
  
 -      iwl_clear_stations_table(priv);
 +      /* Station information will now be cleared in device */
 +      iwl_clear_ucode_stations(priv, true);
  
        /* Unblock any waiting calls */
        wake_up_interruptible_all(&priv->wait_command_queue);
@@@ -2698,10 -2713,12 +2698,10 @@@ static int __iwl3945_up(struct iwl_pri
  
        for (i = 0; i < MAX_HW_RESTARTS; i++) {
  
 -              iwl_clear_stations_table(priv);
 -
                /* load bootstrap state machine,
                 * load bootstrap program into processor's memory,
                 * prepare to load the "initialize" uCode */
 -              priv->cfg->ops->lib->load_ucode(priv);
 +              rc = priv->cfg->ops->lib->load_ucode(priv);
  
                if (rc) {
                        IWL_ERR(priv,
@@@ -2769,7 -2786,7 +2769,7 @@@ static void iwl3945_bg_alive_start(stru
  static void iwl3945_rfkill_poll(struct work_struct *data)
  {
        struct iwl_priv *priv =
 -          container_of(data, struct iwl_priv, rfkill_poll.work);
 +          container_of(data, struct iwl_priv, _3945.rfkill_poll.work);
        bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
        bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
                        & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
  
        /* Keep this running, even if radio now enabled.  This will be
         * cancelled in mac_start() if system decides to start again */
 -      queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
 +      queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
                           round_jiffies_relative(2 * HZ));
  
  }
@@@ -2803,6 -2820,7 +2803,6 @@@ static void iwl3945_bg_request_scan(str
                .len = sizeof(struct iwl3945_scan_cmd),
                .flags = CMD_SIZE_HUGE,
        };
 -      int rc = 0;
        struct iwl3945_scan_cmd *scan;
        struct ieee80211_conf *conf = NULL;
        u8 n_probes = 0;
        if (test_bit(STATUS_SCAN_HW, &priv->status)) {
                IWL_DEBUG_INFO(priv, "Multiple concurrent scan requests  "
                                "Ignoring second request.\n");
 -              rc = -EIO;
                goto done;
        }
  
                priv->scan = kmalloc(sizeof(struct iwl3945_scan_cmd) +
                                     IWL_MAX_SCAN_SIZE, GFP_KERNEL);
                if (!priv->scan) {
 -                      rc = -ENOMEM;
 +                      IWL_DEBUG_SCAN(priv, "Fail to allocate scan memory\n");
                        goto done;
                }
        }
                               scan_suspend_time, interval);
        }
  
 -      if (priv->scan_request->n_ssids) {
 +      if (priv->is_internal_short_scan) {
 +              IWL_DEBUG_SCAN(priv, "Start internal passive scan.\n");
 +      } else if (priv->scan_request->n_ssids) {
                int i, p = 0;
                IWL_DEBUG_SCAN(priv, "Kicking off active scan\n");
                for (i = 0; i < priv->scan_request->n_ssids; i++) {
                goto done;
        }
  
 -      scan->tx_cmd.len = cpu_to_le16(
 +      if (!priv->is_internal_short_scan) {
 +              scan->tx_cmd.len = cpu_to_le16(
                        iwl_fill_probe_req(priv,
                                (struct ieee80211_mgmt *)scan->data,
                                priv->scan_request->ie,
                                priv->scan_request->ie_len,
                                IWL_MAX_SCAN_SIZE - sizeof(*scan)));
 -
 +      } else {
 +              scan->tx_cmd.len = cpu_to_le16(
 +                      iwl_fill_probe_req(priv,
 +                              (struct ieee80211_mgmt *)scan->data,
 +                              NULL, 0,
 +                              IWL_MAX_SCAN_SIZE - sizeof(*scan)));
 +      }
        /* select Rx antennas */
        scan->flags |= iwl3945_get_antenna_flags(priv);
  
        scan->len = cpu_to_le16(cmd.len);
  
        set_bit(STATUS_SCAN_HW, &priv->status);
 -      rc = iwl_send_cmd_sync(priv, &cmd);
 -      if (rc)
 +      if (iwl_send_cmd_sync(priv, &cmd))
                goto done;
  
        queue_delayed_work(priv->workqueue, &priv->scan_check,
@@@ -3123,13 -3134,12 +3123,13 @@@ void iwl3945_post_associate(struct iwl_
        case NL80211_IFTYPE_ADHOC:
  
                priv->assoc_id = 1;
 -              iwl_add_station(priv, priv->bssid, 0, CMD_SYNC, NULL);
 +              iwl_add_local_station(priv, priv->bssid, false);
                iwl3945_sync_sta(priv, IWL_STA_ID,
 -                               (priv->band == IEEE80211_BAND_5GHZ) ?
 -                               IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
 +                              (priv->band == IEEE80211_BAND_5GHZ) ?
 +                              IWL_RATE_6M_PLCP : IWL_RATE_1M_PLCP,
                                 CMD_ASYNC);
                iwl3945_rate_scale_init(priv->hw, IWL_STA_ID);
 +
                iwl3945_send_beacon_cmd(priv);
  
                break;
@@@ -3202,7 -3212,7 +3202,7 @@@ static int iwl3945_mac_start(struct iee
  
        /* ucode is running and will send rfkill notifications,
         * no need to poll the killswitch state anymore */
 -      cancel_delayed_work(&priv->rfkill_poll);
 +      cancel_delayed_work(&priv->_3945.rfkill_poll);
  
        iwl_led_start(priv);
  
@@@ -3243,7 -3253,7 +3243,7 @@@ static void iwl3945_mac_stop(struct iee
        flush_workqueue(priv->workqueue);
  
        /* start polling the killswitch state again */
 -      queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
 +      queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
                           round_jiffies_relative(2 * HZ));
  
        IWL_DEBUG_MAC80211(priv, "leave\n");
@@@ -3314,7 -3324,7 +3314,7 @@@ void iwl3945_config_ap(struct iwl_priv 
                /* restore RXON assoc */
                priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
                iwlcore_commit_rxon(priv);
 -              iwl_add_station(priv, iwl_bcast_addr, 0, CMD_SYNC, NULL);
 +              iwl_add_local_station(priv, iwl_bcast_addr, false);
        }
        iwl3945_send_beacon_cmd(priv);
  
@@@ -3355,6 -3365,7 +3355,6 @@@ static int iwl3945_mac_set_key(struct i
  
        mutex_lock(&priv->mutex);
        iwl_scan_cancel_timeout(priv, 100);
 -      mutex_unlock(&priv->mutex);
  
        switch (cmd) {
        case SET_KEY:
                ret = -EINVAL;
        }
  
 +      mutex_unlock(&priv->mutex);
        IWL_DEBUG_MAC80211(priv, "leave\n");
  
        return ret;
  }
  
 +static int iwl3945_mac_sta_add(struct ieee80211_hw *hw,
 +                             struct ieee80211_vif *vif,
 +                             struct ieee80211_sta *sta)
 +{
 +      struct iwl_priv *priv = hw->priv;
 +      int ret;
 +      bool is_ap = priv->iw_mode == NL80211_IFTYPE_STATION;
 +      u8 sta_id;
 +
 +      IWL_DEBUG_INFO(priv, "received request to add station %pM\n",
 +                      sta->addr);
 +
 +      ret = iwl_add_station_common(priv, sta->addr, is_ap, &sta->ht_cap,
 +                                   &sta_id);
 +      if (ret) {
 +              IWL_ERR(priv, "Unable to add station %pM (%d)\n",
 +                      sta->addr, ret);
 +              /* Should we return success if return code is EEXIST ? */
 +              return ret;
 +      }
 +
 +      /* Initialize rate scaling */
 +      IWL_DEBUG_INFO(priv, "Initializing rate scaling for station %pM \n",
 +                     sta->addr);
 +      iwl3945_rs_rate_init(priv, sta, sta_id);
 +
 +      return 0;
 +
 +
 +
 +      return ret;
 +}
  /*****************************************************************************
   *
   * sysfs attributes
@@@ -3612,7 -3590,7 +3612,7 @@@ static ssize_t store_measurement(struc
        struct iwl_priv *priv = dev_get_drvdata(d);
        struct ieee80211_measurement_params params = {
                .channel = le16_to_cpu(priv->active_rxon.channel),
 -              .start_time = cpu_to_le64(priv->last_tsf),
 +              .start_time = cpu_to_le64(priv->_3945.last_tsf),
                .duration = cpu_to_le16(1),
        };
        u8 type = IWL_MEASURE_BASIC;
@@@ -3682,7 -3660,7 +3682,7 @@@ static ssize_t show_statistics(struct d
        struct iwl_priv *priv = dev_get_drvdata(d);
        u32 size = sizeof(struct iwl3945_notif_statistics);
        u32 len = 0, ofs = 0;
 -      u8 *data = (u8 *)&priv->statistics_39;
 +      u8 *data = (u8 *)&priv->_3945.statistics;
        int rc = 0;
  
        if (!iwl_is_alive(priv))
@@@ -3795,7 -3773,7 +3795,7 @@@ static void iwl3945_setup_deferred_work
        INIT_WORK(&priv->beacon_update, iwl3945_bg_beacon_update);
        INIT_DELAYED_WORK(&priv->init_alive_start, iwl3945_bg_init_alive_start);
        INIT_DELAYED_WORK(&priv->alive_start, iwl3945_bg_alive_start);
 -      INIT_DELAYED_WORK(&priv->rfkill_poll, iwl3945_rfkill_poll);
 +      INIT_DELAYED_WORK(&priv->_3945.rfkill_poll, iwl3945_rfkill_poll);
        INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
        INIT_WORK(&priv->request_scan, iwl3945_bg_request_scan);
        INIT_WORK(&priv->abort_scan, iwl_bg_abort_scan);
  
        iwl3945_hw_setup_deferred_work(priv);
  
 +      if (priv->cfg->ops->lib->recover_from_tx_stall) {
 +              init_timer(&priv->monitor_recover);
 +              priv->monitor_recover.data = (unsigned long)priv;
 +              priv->monitor_recover.function =
 +                      priv->cfg->ops->lib->recover_from_tx_stall;
 +      }
 +
        tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
                     iwl3945_irq_tasklet, (unsigned long)priv);
  }
@@@ -3822,8 -3793,6 +3822,8 @@@ static void iwl3945_cancel_deferred_wor
        cancel_delayed_work(&priv->scan_check);
        cancel_delayed_work(&priv->alive_start);
        cancel_work_sync(&priv->beacon_update);
 +      if (priv->cfg->ops->lib->recover_from_tx_stall)
 +              del_timer_sync(&priv->monitor_recover);
  }
  
  static struct attribute *iwl3945_sysfs_entries[] = {
@@@ -3861,9 -3830,7 +3861,9 @@@ static struct ieee80211_ops iwl3945_hw_
        .conf_tx = iwl_mac_conf_tx,
        .reset_tsf = iwl_mac_reset_tsf,
        .bss_info_changed = iwl_bss_info_changed,
 -      .hw_scan = iwl_mac_hw_scan
 +      .hw_scan = iwl_mac_hw_scan,
 +      .sta_add = iwl3945_mac_sta_add,
 +      .sta_remove = iwl_mac_sta_remove,
  };
  
  static int iwl3945_init_drv(struct iwl_priv *priv)
        mutex_init(&priv->mutex);
        mutex_init(&priv->sync_cmd_mutex);
  
 -      /* Clear the driver's (not device's) station table */
 -      iwl_clear_stations_table(priv);
 -
        priv->ieee_channels = NULL;
        priv->ieee_rates = NULL;
        priv->band = IEEE80211_BAND_2GHZ;
        priv->qos_data.qos_active = 0;
        priv->qos_data.qos_cap.val = 0;
  
 -      priv->rates_mask = IWL_RATES_MASK;
        priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
  
        if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
@@@ -3950,7 -3921,7 +3950,7 @@@ static int iwl3945_setup_mac(struct iwl
                BIT(NL80211_IFTYPE_STATION) |
                BIT(NL80211_IFTYPE_ADHOC);
  
-       hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+       hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
                            WIPHY_FLAG_DISABLE_BEACON_HINTS;
  
        hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
@@@ -4158,7 -4129,7 +4158,7 @@@ static int iwl3945_pci_probe(struct pci
                IWL_ERR(priv, "failed to create debugfs files. Ignoring error: %d\n", err);
  
        /* Start monitoring the killswitch */
 -      queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
 +      queue_delayed_work(priv->workqueue, &priv->_3945.rfkill_poll,
                           2 * HZ);
  
        return 0;
@@@ -4232,7 -4203,7 +4232,7 @@@ static void __devexit iwl3945_pci_remov
  
        sysfs_remove_group(&pdev->dev.kobj, &iwl3945_attribute_group);
  
 -      cancel_delayed_work_sync(&priv->rfkill_poll);
 +      cancel_delayed_work_sync(&priv->_3945.rfkill_poll);
  
        iwl3945_dealloc_ucode_pci(priv);
  
        iwl3945_hw_txq_ctx_free(priv);
  
        iwl3945_unset_hw_params(priv);
 -      iwl_clear_stations_table(priv);
  
        /*netif_stop_queue(dev); */
        flush_workqueue(priv->workqueue);
index 058d1720242ea3d99e3aedd06777e0350c5ca754,6875e1498bd57e36f54e826c763bb041a9bd3283..a54880e4ad2b99e00eeb0c3114bd796adafbd53b
@@@ -36,6 -36,7 +36,7 @@@ struct lbs_private 
  
        /* CFG80211 */
        struct wireless_dev *wdev;
+       bool wiphy_registered;
  
        /* Mesh */
        struct net_device *mesh_dev; /* Virtual device */
        u8 wpa_ie_len;
        u16 wep_tx_keyidx;
        struct enc_key wep_keys[4];
 +      u8 authtype_auto;
  
        /* Wake On LAN */
        uint32_t wol_criteria;
index 6599fd15e6751bdfddfa051af3eb186b573078a1,4e58ebe15580b0c0aa0abdd93319a56310a8ab09..1b5d0aebbb0e96edbfdb36be49daba41913b5468
@@@ -1938,15 -1938,11 +1938,15 @@@ struct mwl8k_cmd_mac_multicast_adr 
  
  static struct mwl8k_cmd_pkt *
  __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
 -                            int mc_count, struct dev_addr_list *mclist)
 +                            struct netdev_hw_addr_list *mc_list)
  {
        struct mwl8k_priv *priv = hw->priv;
        struct mwl8k_cmd_mac_multicast_adr *cmd;
        int size;
 +      int mc_count = 0;
 +
 +      if (mc_list)
 +              mc_count = netdev_hw_addr_list_count(mc_list);
  
        if (allmulti || mc_count > priv->num_mcaddrs) {
                allmulti = 1;
        if (allmulti) {
                cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
        } else if (mc_count) {
 -              int i;
 +              struct netdev_hw_addr *ha;
 +              int i = 0;
  
                cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
                cmd->numaddr = cpu_to_le16(mc_count);
 -              for (i = 0; i < mc_count && mclist; i++) {
 -                      if (mclist->da_addrlen != ETH_ALEN) {
 -                              kfree(cmd);
 -                              return NULL;
 -                      }
 -                      memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
 -                      mclist = mclist->next;
 +              netdev_hw_addr_list_for_each(ha, mc_list) {
 +                      memcpy(cmd->addr[i], ha->addr, ETH_ALEN);
                }
        }
  
@@@ -3552,7 -3552,7 +3552,7 @@@ mwl8k_bss_info_changed(struct ieee80211
  }
  
  static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
 -                                 int mc_count, struct dev_addr_list *mclist)
 +                                 struct netdev_hw_addr_list *mc_list)
  {
        struct mwl8k_cmd_pkt *cmd;
  
         * we'll end up throwing this packet away and creating a new
         * one in mwl8k_configure_filter().
         */
 -      cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist);
 +      cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_list);
  
        return (unsigned long)cmd;
  }
@@@ -3686,7 -3686,7 +3686,7 @@@ static void mwl8k_configure_filter(stru
         */
        if (*total_flags & FIF_ALLMULTI) {
                kfree(cmd);
 -              cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL);
 +              cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, NULL);
        }
  
        if (cmd != NULL) {
@@@ -3851,6 -3851,7 +3851,7 @@@ MODULE_FIRMWARE("mwl8k/helper_8366.fw")
  MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
  
  static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
+       { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, },
        { PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
        { PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
        { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
diff --combined include/linux/skbuff.h
index def10b064f292b3e9636e1cefdd12b3b8d74e9a7,124f90cd5a38bd39ab88af0563aa0d0ff9aa06d2..cf42f194616e39600dcbdfc2f4eba78eb8636efb
@@@ -190,9 -190,6 +190,6 @@@ struct skb_shared_info 
        atomic_t        dataref;
        unsigned short  nr_frags;
        unsigned short  gso_size;
- #ifdef CONFIG_HAS_DMA
-       dma_addr_t      dma_head;
- #endif
        /* Warning: this field is not always filled in (UFO)! */
        unsigned short  gso_segs;
        unsigned short  gso_type;
        struct sk_buff  *frag_list;
        struct skb_shared_hwtstamps hwtstamps;
        skb_frag_t      frags[MAX_SKB_FRAGS];
- #ifdef CONFIG_HAS_DMA
-       dma_addr_t      dma_maps[MAX_SKB_FRAGS];
- #endif
        /* Intermediate layers must ensure that destructor_arg
         * remains valid until skb destructor */
        void *          destructor_arg;
@@@ -300,7 -294,6 +294,7 @@@ typedef unsigned char *sk_buff_data_t
   *    @nfct_reasm: netfilter conntrack re-assembly pointer
   *    @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
   *    @skb_iif: ifindex of device we arrived on
 + *    @rxhash: the packet hash computed on receive
   *    @queue_mapping: Queue mapping for multiqueue devices
   *    @tc_index: Traffic control index
   *    @tc_verd: traffic control verdict
@@@ -376,8 -369,6 +370,8 @@@ struct sk_buff 
  #endif
  #endif
  
 +      __u32                   rxhash;
 +
        kmemcheck_bitfield_begin(flags2);
        __u16                   queue_mapping:16;
  #ifdef CONFIG_IPV6_NDISC_NODETYPE
diff --combined include/linux/socket.h
index 960659bd8f783b9cc71fc1fb836716511e73267f,354cc5617f8b87304b97eebeec2c4b3e8a52428a..032a19eb61b12077a3f8c8f5942d4ac3d4c52806
@@@ -189,8 -189,7 +189,8 @@@ struct ucred 
  #define AF_ISDN               34      /* mISDN sockets                */
  #define AF_PHONET     35      /* Phonet sockets               */
  #define AF_IEEE802154 36      /* IEEE802154 sockets           */
 -#define AF_MAX                37      /* For now.. */
 +#define AF_CAIF               37      /* CAIF sockets                 */
 +#define AF_MAX                38      /* For now.. */
  
  /* Protocol families, same as address families. */
  #define PF_UNSPEC     AF_UNSPEC
  #define PF_ISDN               AF_ISDN
  #define PF_PHONET     AF_PHONET
  #define PF_IEEE802154 AF_IEEE802154
 +#define PF_CAIF               AF_CAIF
  #define PF_MAX                AF_MAX
  
  /* Maximum queue length specifiable by listen.  */
  #define MSG_ERRQUEUE  0x2000  /* Fetch message from error queue */
  #define MSG_NOSIGNAL  0x4000  /* Do not generate SIGPIPE */
  #define MSG_MORE      0x8000  /* Sender will send more */
+ #define MSG_WAITFORONE        0x10000 /* recvmmsg(): block until 1+ packets avail */
  
  #define MSG_EOF         MSG_FIN
  
  #define SOL_PNPIPE    275
  #define SOL_RDS               276
  #define SOL_IUCV      277
 +#define SOL_CAIF      278
  
  /* IPX options */
  #define IPX_TYPE      1
diff --combined include/linux/tty.h
index 71c7e9c96b2342317e2955fd7cc833272d902fa3,4409967db0c45e8e0a5568b5596e4b42bffe659c..bb44fa9ae135a030a9cc7173db61b3f48c4c392f
@@@ -23,7 -23,7 +23,7 @@@
   */
  #define NR_UNIX98_PTY_DEFAULT 4096      /* Default maximum for Unix98 ptys */
  #define NR_UNIX98_PTY_MAX     (1 << MINORBITS) /* Absolute limit */
 -#define NR_LDISCS             20
 +#define NR_LDISCS             21
  
  /* line disciplines */
  #define N_TTY         0
@@@ -46,8 -46,8 +46,8 @@@
  #define N_GIGASET_M101        16      /* Siemens Gigaset M101 serial DECT adapter */
  #define N_SLCAN               17      /* Serial / USB serial CAN Adaptors */
  #define N_PPS         18      /* Pulse per Second */
 -
  #define N_V253                19      /* Codec control over voice modem */
 +#define N_CAIF                20      /* CAIF protocol for talking to modems */
  
  /*
   * This character is the same as _POSIX_VDISABLE: it cannot be used as
@@@ -70,12 -70,13 +70,13 @@@ struct tty_buffer 
  
  /*
   * We default to dicing tty buffer allocations to this many characters
-  * in order to avoid multiple page allocations. We assume tty_buffer itself
-  * is under 256 bytes. See tty_buffer_find for the allocation logic this
-  * must match
+  * in order to avoid multiple page allocations. We know the size of
+  * tty_buffer itself but it must also be taken into account that the
+  * the buffer is 256 byte aligned. See tty_buffer_find for the allocation
+  * logic this must match
   */
  
- #define TTY_BUFFER_PAGE               ((PAGE_SIZE  - 256) / 2)
+ #define TTY_BUFFER_PAGE       (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
  
  
  struct tty_bufhead {
@@@ -223,6 -224,7 +224,7 @@@ struct tty_port 
        wait_queue_head_t       close_wait;     /* Close waiters */
        wait_queue_head_t       delta_msr_wait; /* Modem status change */
        unsigned long           flags;          /* TTY flags ASY_*/
+       unsigned char           console:1;      /* port is a console */
        struct mutex            mutex;          /* Locking */
        struct mutex            buf_mutex;      /* Buffer alloc lock */
        unsigned char           *xmit_buf;      /* Optional buffer */
diff --combined net/8021q/vlan.c
index bd33f02013ec96af62546158a57739e36d0fe0bc,db783d7af5a3cd41ab2695c0c7f326e5a266e3f2..7b13206185ba4e2055a2c7f663610cf943563800
@@@ -356,13 -356,13 +356,13 @@@ static void vlan_sync_address(struct ne
         * the new address */
        if (compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
            !compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
 -              dev_unicast_delete(dev, vlandev->dev_addr);
 +              dev_uc_del(dev, vlandev->dev_addr);
  
        /* vlan address was equal to the old address and is different from
         * the new address */
        if (!compare_ether_addr(vlandev->dev_addr, vlan->real_dev_addr) &&
            compare_ether_addr(vlandev->dev_addr, dev->dev_addr))
 -              dev_unicast_add(dev, vlandev->dev_addr);
 +              dev_uc_add(dev, vlandev->dev_addr);
  
        memcpy(vlan->real_dev_addr, dev->dev_addr, ETH_ALEN);
  }
@@@ -378,6 -378,8 +378,8 @@@ static void vlan_transfer_features(stru
  #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
        vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
  #endif
+       vlandev->real_num_tx_queues = dev->real_num_tx_queues;
+       BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
  
        if (old_features != vlandev->features)
                netdev_features_change(vlandev);
@@@ -530,10 -532,6 +532,10 @@@ static int vlan_device_event(struct not
                }
                unregister_netdevice_many(&list);
                break;
 +
 +      case NETDEV_PRE_TYPE_CHANGE:
 +              /* Forbid underlaying device to change its type. */
 +              return NOTIFY_BAD;
        }
  
  out:
diff --combined net/8021q/vlan_dev.c
index 7f4d247237e4d442cba898d99c69f1f04657943c,2fd057c81bbf151b080ebf31681386a0ae0363dd..f7d2fe431ee013a13f88d811402e04b91a4a1078
@@@ -361,6 -361,14 +361,14 @@@ static netdev_tx_t vlan_dev_hwaccel_har
        return ret;
  }
  
+ static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
+ {
+       struct net_device *rdev = vlan_dev_info(dev)->real_dev;
+       const struct net_device_ops *ops = rdev->netdev_ops;
+       return ops->ndo_select_queue(rdev, skb);
+ }
  static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
  {
        /* TODO: gotta make sure the underlying layer can handle it,
@@@ -461,7 -469,7 +469,7 @@@ static int vlan_dev_open(struct net_dev
                return -ENETDOWN;
  
        if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) {
 -              err = dev_unicast_add(real_dev, dev->dev_addr);
 +              err = dev_uc_add(real_dev, dev->dev_addr);
                if (err < 0)
                        goto out;
        }
@@@ -490,7 -498,7 +498,7 @@@ clear_allmulti
                dev_set_allmulti(real_dev, -1);
  del_unicast:
        if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 -              dev_unicast_delete(real_dev, dev->dev_addr);
 +              dev_uc_del(real_dev, dev->dev_addr);
  out:
        netif_carrier_off(dev);
        return err;
@@@ -505,14 -513,14 +513,14 @@@ static int vlan_dev_stop(struct net_dev
                vlan_gvrp_request_leave(dev);
  
        dev_mc_unsync(real_dev, dev);
 -      dev_unicast_unsync(real_dev, dev);
 +      dev_uc_unsync(real_dev, dev);
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(real_dev, -1);
        if (dev->flags & IFF_PROMISC)
                dev_set_promiscuity(real_dev, -1);
  
        if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 -              dev_unicast_delete(real_dev, dev->dev_addr);
 +              dev_uc_del(real_dev, dev->dev_addr);
  
        netif_carrier_off(dev);
        return 0;
@@@ -531,13 -539,13 +539,13 @@@ static int vlan_dev_set_mac_address(str
                goto out;
  
        if (compare_ether_addr(addr->sa_data, real_dev->dev_addr)) {
 -              err = dev_unicast_add(real_dev, addr->sa_data);
 +              err = dev_uc_add(real_dev, addr->sa_data);
                if (err < 0)
                        return err;
        }
  
        if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr))
 -              dev_unicast_delete(real_dev, dev->dev_addr);
 +              dev_uc_del(real_dev, dev->dev_addr);
  
  out:
        memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
@@@ -654,7 -662,7 +662,7 @@@ static void vlan_dev_change_rx_flags(st
  static void vlan_dev_set_rx_mode(struct net_device *vlan_dev)
  {
        dev_mc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
 -      dev_unicast_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
 +      dev_uc_sync(vlan_dev_info(vlan_dev)->real_dev, vlan_dev);
  }
  
  /*
@@@ -688,7 -696,8 +696,8 @@@ static const struct header_ops vlan_hea
        .parse   = eth_header_parse,
  };
  
- static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops;
+ static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
+                   vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
  
  static int vlan_dev_init(struct net_device *dev)
  {
        if (real_dev->features & NETIF_F_HW_VLAN_TX) {
                dev->header_ops      = real_dev->header_ops;
                dev->hard_header_len = real_dev->hard_header_len;
-               dev->netdev_ops         = &vlan_netdev_accel_ops;
+               if (real_dev->netdev_ops->ndo_select_queue)
+                       dev->netdev_ops = &vlan_netdev_accel_ops_sq;
+               else
+                       dev->netdev_ops = &vlan_netdev_accel_ops;
        } else {
                dev->header_ops      = &vlan_header_ops;
                dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
-               dev->netdev_ops         = &vlan_netdev_ops;
+               if (real_dev->netdev_ops->ndo_select_queue)
+                       dev->netdev_ops = &vlan_netdev_ops_sq;
+               else
+                       dev->netdev_ops = &vlan_netdev_ops;
        }
  
        if (is_vlan_dev(real_dev))
@@@ -847,6 -862,56 +862,56 @@@ static const struct net_device_ops vlan
        .ndo_uninit             = vlan_dev_uninit,
        .ndo_open               = vlan_dev_open,
        .ndo_stop               = vlan_dev_stop,
+       .ndo_start_xmit =  vlan_dev_hwaccel_hard_start_xmit,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = vlan_dev_set_mac_address,
+       .ndo_set_rx_mode        = vlan_dev_set_rx_mode,
+       .ndo_set_multicast_list = vlan_dev_set_rx_mode,
+       .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
+       .ndo_do_ioctl           = vlan_dev_ioctl,
+       .ndo_neigh_setup        = vlan_dev_neigh_setup,
+       .ndo_get_stats          = vlan_dev_get_stats,
+ #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+       .ndo_fcoe_ddp_setup     = vlan_dev_fcoe_ddp_setup,
+       .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
+       .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
+       .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
+       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
+ #endif
+ };
+ static const struct net_device_ops vlan_netdev_ops_sq = {
+       .ndo_select_queue       = vlan_dev_select_queue,
+       .ndo_change_mtu         = vlan_dev_change_mtu,
+       .ndo_init               = vlan_dev_init,
+       .ndo_uninit             = vlan_dev_uninit,
+       .ndo_open               = vlan_dev_open,
+       .ndo_stop               = vlan_dev_stop,
+       .ndo_start_xmit =  vlan_dev_hard_start_xmit,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = vlan_dev_set_mac_address,
+       .ndo_set_rx_mode        = vlan_dev_set_rx_mode,
+       .ndo_set_multicast_list = vlan_dev_set_rx_mode,
+       .ndo_change_rx_flags    = vlan_dev_change_rx_flags,
+       .ndo_do_ioctl           = vlan_dev_ioctl,
+       .ndo_neigh_setup        = vlan_dev_neigh_setup,
+       .ndo_get_stats          = vlan_dev_get_stats,
+ #if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+       .ndo_fcoe_ddp_setup     = vlan_dev_fcoe_ddp_setup,
+       .ndo_fcoe_ddp_done      = vlan_dev_fcoe_ddp_done,
+       .ndo_fcoe_enable        = vlan_dev_fcoe_enable,
+       .ndo_fcoe_disable       = vlan_dev_fcoe_disable,
+       .ndo_fcoe_get_wwn       = vlan_dev_fcoe_get_wwn,
+ #endif
+ };
+ static const struct net_device_ops vlan_netdev_accel_ops_sq = {
+       .ndo_select_queue       = vlan_dev_select_queue,
+       .ndo_change_mtu         = vlan_dev_change_mtu,
+       .ndo_init               = vlan_dev_init,
+       .ndo_uninit             = vlan_dev_uninit,
+       .ndo_open               = vlan_dev_open,
+       .ndo_stop               = vlan_dev_stop,
        .ndo_start_xmit =  vlan_dev_hwaccel_hard_start_xmit,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = vlan_dev_set_mac_address,
diff --combined net/ipv4/af_inet.c
index 55e11906a73a5eb95ef2fdbfb26ff90a9dc69abd,a366861bf4cd5d421cb80e369f8a12e0efc7d97a..b5924f178812621c09a4171e78213d7bb2f7f22a
@@@ -530,6 -530,8 +530,8 @@@ int inet_dgram_connect(struct socket *s
  {
        struct sock *sk = sock->sk;
  
+       if (addr_len < sizeof(uaddr->sa_family))
+               return -EINVAL;
        if (uaddr->sa_family == AF_UNSPEC)
                return sk->sk_prot->disconnect(sk, flags);
  
@@@ -573,6 -575,9 +575,9 @@@ int inet_stream_connect(struct socket *
        int err;
        long timeo;
  
+       if (addr_len < sizeof(uaddr->sa_family))
+               return -EINVAL;
        lock_sock(sk);
  
        if (uaddr->sa_family == AF_UNSPEC) {
@@@ -1401,10 -1406,10 +1406,10 @@@ EXPORT_SYMBOL_GPL(snmp_fold_field)
  int snmp_mib_init(void __percpu *ptr[2], size_t mibsize)
  {
        BUG_ON(ptr == NULL);
 -      ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
 +      ptr[0] = __alloc_percpu(mibsize, __alignof__(unsigned long));
        if (!ptr[0])
                goto err0;
 -      ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long long));
 +      ptr[1] = __alloc_percpu(mibsize, __alignof__(unsigned long));
        if (!ptr[1])
                goto err1;
        return 0;
diff --combined net/ipv4/devinet.c
index c75320ef95c2f628d2e84e9beababa03e3d344fa,3feb2b39030838ca08fb02e0b0063711b34e76b2..d009c6a5d9ad693cc6a521f8f69c1b384e1a35f6
@@@ -1095,10 -1095,10 +1095,10 @@@ static int inetdev_event(struct notifie
        case NETDEV_DOWN:
                ip_mc_down(in_dev);
                break;
 -      case NETDEV_BONDING_OLDTYPE:
 +      case NETDEV_PRE_TYPE_CHANGE:
                ip_mc_unmap(in_dev);
                break;
 -      case NETDEV_BONDING_NEWTYPE:
 +      case NETDEV_POST_TYPE_CHANGE:
                ip_mc_remap(in_dev);
                break;
        case NETDEV_CHANGEMTU:
@@@ -1194,7 -1194,7 +1194,7 @@@ static int inet_dump_ifaddr(struct sk_b
                hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
-                       if (idx > s_idx)
+                       if (h > s_h || idx > s_idx)
                                s_ip_idx = 0;
                        in_dev = __in_dev_get_rcu(dev);
                        if (!in_dev)
diff --combined net/ipv6/addrconf.c
index 21b4c9e1a682bc777603c713e00bb5752a176716,7e567ae5eaab052d482a296db7ff4457114eef78..1c58b99a54a4e4b91128fd2b143d487a5fb17cae
@@@ -81,7 -81,7 +81,7 @@@
  #include <linux/random.h>
  #endif
  
 -#include <asm/uaccess.h>
 +#include <linux/uaccess.h>
  #include <asm/unaligned.h>
  
  #include <linux/proc_fs.h>
  #endif
  
  #define       INFINITY_LIFE_TIME      0xFFFFFFFF
 -#define TIME_DELTA(a,b) ((unsigned long)((long)(a) - (long)(b)))
 +#define TIME_DELTA(a, b) ((unsigned long)((long)(a) - (long)(b)))
 +
 +#define ADDRCONF_TIMER_FUZZ_MINUS     (HZ > 50 ? HZ/50 : 1)
 +#define ADDRCONF_TIMER_FUZZ           (HZ / 4)
 +#define ADDRCONF_TIMER_FUZZ_MAX               (HZ)
  
  #ifdef CONFIG_SYSCTL
  static void addrconf_sysctl_register(struct inet6_dev *idev);
@@@ -130,8 -126,8 +130,8 @@@ static int ipv6_count_addresses(struct 
  /*
   *    Configured unicast address hash table
   */
 -static struct inet6_ifaddr            *inet6_addr_lst[IN6_ADDR_HSIZE];
 -static DEFINE_RWLOCK(addrconf_hash_lock);
 +static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
 +static DEFINE_SPINLOCK(addrconf_hash_lock);
  
  static void addrconf_verify(unsigned long);
  
@@@ -141,8 -137,8 +141,8 @@@ static DEFINE_SPINLOCK(addrconf_verify_
  static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
  static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
  
 -static void addrconf_bonding_change(struct net_device *dev,
 -                                  unsigned long event);
 +static void addrconf_type_change(struct net_device *dev,
 +                               unsigned long event);
  static int addrconf_ifdown(struct net_device *dev, int how);
  
  static void addrconf_dad_start(struct inet6_ifaddr *ifp, u32 flags);
@@@ -155,8 -151,8 +155,8 @@@ static void ipv6_ifa_notify(int event, 
  
  static void inet6_prefix_notify(int event, struct inet6_dev *idev,
                                struct prefix_info *pinfo);
 -static int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 -                            struct net_device *dev);
 +static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 +                             struct net_device *dev);
  
  static ATOMIC_NOTIFIER_HEAD(inet6addr_chain);
  
@@@ -253,7 -249,8 +253,7 @@@ static void addrconf_del_timer(struct i
                __in6_ifa_put(ifp);
  }
  
 -enum addrconf_timer_t
 -{
 +enum addrconf_timer_t {
        AC_NONE,
        AC_DAD,
        AC_RS,
@@@ -273,8 -270,7 +273,8 @@@ static void addrconf_mod_timer(struct i
        case AC_RS:
                ifp->timer.function = addrconf_rs_timer;
                break;
 -      default:;
 +      default:
 +              break;
        }
        ifp->timer.expires = jiffies + when;
        add_timer(&ifp->timer);
@@@ -321,7 -317,7 +321,7 @@@ void in6_dev_finish_destroy(struct inet
  {
        struct net_device *dev = idev->dev;
  
 -      WARN_ON(idev->addr_list != NULL);
 +      WARN_ON(!list_empty(&idev->addr_list));
        WARN_ON(idev->mc_list != NULL);
  
  #ifdef NET_REFCNT_DEBUG
  #endif
        dev_put(dev);
        if (!idev->dead) {
 -              printk("Freeing alive inet6 device %p\n", idev);
 +              pr_warning("Freeing alive inet6 device %p\n", idev);
                return;
        }
        snmp6_free_dev(idev);
@@@ -354,8 -350,6 +354,8 @@@ static struct inet6_dev * ipv6_add_dev(
  
        rwlock_init(&ndev->lock);
        ndev->dev = dev;
 +      INIT_LIST_HEAD(&ndev->addr_list);
 +
        memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
        ndev->cnf.mtu6 = dev->mtu;
        ndev->cnf.sysctl = NULL;
  #endif
  
  #ifdef CONFIG_IPV6_PRIVACY
 +      INIT_LIST_HEAD(&ndev->tempaddr_list);
        setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
        if ((dev->flags&IFF_LOOPBACK) ||
            dev->type == ARPHRD_TUNNEL ||
@@@ -445,10 -438,8 +445,10 @@@ static struct inet6_dev * ipv6_find_ide
  
        ASSERT_RTNL();
  
 -      if ((idev = __in6_dev_get(dev)) == NULL) {
 -              if ((idev = ipv6_add_dev(dev)) == NULL)
 +      idev = __in6_dev_get(dev);
 +      if (!idev) {
 +              idev = ipv6_add_dev(dev);
 +              if (!idev)
                        return NULL;
        }
  
@@@ -474,8 -465,7 +474,8 @@@ static void dev_forward_change(struct i
                else
                        ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
        }
 -      for (ifa=idev->addr_list; ifa; ifa=ifa->if_next) {
 +
 +      list_for_each_entry(ifa, &idev->addr_list, if_list) {
                if (ifa->flags&IFA_F_TENTATIVE)
                        continue;
                if (idev->cnf.forwarding)
@@@ -532,16 -522,12 +532,16 @@@ static int addrconf_fixup_forwarding(st
  }
  #endif
  
 -/* Nobody refers to this ifaddr, destroy it */
 +static void inet6_ifa_finish_destroy_rcu(struct rcu_head *head)
 +{
 +      struct inet6_ifaddr *ifp = container_of(head, struct inet6_ifaddr, rcu);
 +      kfree(ifp);
 +}
  
 +/* Nobody refers to this ifaddr, destroy it */
  void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
  {
 -      WARN_ON(ifp->if_next != NULL);
 -      WARN_ON(ifp->lst_next != NULL);
 +      WARN_ON(!hlist_unhashed(&ifp->addr_lst));
  
  #ifdef NET_REFCNT_DEBUG
        printk(KERN_DEBUG "inet6_ifa_finish_destroy\n");
        in6_dev_put(ifp->idev);
  
        if (del_timer(&ifp->timer))
 -              printk("Timer is still running, when freeing ifa=%p\n", ifp);
 +              pr_notice("Timer is still running, when freeing ifa=%p\n", ifp);
  
        if (!ifp->dead) {
 -              printk("Freeing alive inet6 address %p\n", ifp);
 +              pr_warning("Freeing alive inet6 address %p\n", ifp);
                return;
        }
        dst_release(&ifp->rt->u.dst);
  
 -      kfree(ifp);
 +      call_rcu(&ifp->rcu, inet6_ifa_finish_destroy_rcu);
  }
  
  static void
  ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
  {
 -      struct inet6_ifaddr *ifa, **ifap;
 +      struct list_head *p;
        int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
  
        /*
         * Each device address list is sorted in order of scope -
         * global before linklocal.
         */
 -      for (ifap = &idev->addr_list; (ifa = *ifap) != NULL;
 -           ifap = &ifa->if_next) {
 +      list_for_each(p, &idev->addr_list) {
 +              struct inet6_ifaddr *ifa
 +                      = list_entry(p, struct inet6_ifaddr, if_list);
                if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
                        break;
        }
  
 -      ifp->if_next = *ifap;
 -      *ifap = ifp;
 +      list_add_tail(&ifp->if_list, p);
  }
  
 -/*
 - *    Hash function taken from net_alias.c
 - */
 -static u8 ipv6_addr_hash(const struct in6_addr *addr)
 +static u32 ipv6_addr_hash(const struct in6_addr *addr)
  {
 -      __u32 word;
 -
        /*
         * We perform the hash function over the last 64 bits of the address
         * This will include the IEEE address token on links that support it.
         */
 -
 -      word = (__force u32)(addr->s6_addr32[2] ^ addr->s6_addr32[3]);
 -      word ^= (word >> 16);
 -      word ^= (word >> 8);
 -
 -      return ((word ^ (word >> 4)) & 0x0f);
 +      return jhash_2words(addr->s6_addr32[2],  addr->s6_addr32[3], 0)
 +              & (IN6_ADDR_HSIZE - 1);
  }
  
  /* On success it returns ifp with increased reference count */
@@@ -599,7 -594,7 +599,7 @@@ ipv6_add_addr(struct inet6_dev *idev, c
  {
        struct inet6_ifaddr *ifa = NULL;
        struct rt6_info *rt;
 -      int hash;
 +      unsigned int hash;
        int err = 0;
        int addr_type = ipv6_addr_type(addr);
  
                goto out2;
        }
  
 -      write_lock(&addrconf_hash_lock);
 +      spin_lock(&addrconf_hash_lock);
  
        /* Ignore adding duplicate addresses on an interface */
        if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
  
        spin_lock_init(&ifa->lock);
        init_timer(&ifa->timer);
 +      INIT_HLIST_NODE(&ifa->addr_lst);
        ifa->timer.data = (unsigned long) ifa;
        ifa->scope = scope;
        ifa->prefix_len = pfxlen;
        /* Add to big hash table */
        hash = ipv6_addr_hash(addr);
  
 -      ifa->lst_next = inet6_addr_lst[hash];
 -      inet6_addr_lst[hash] = ifa;
 +      hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
        in6_ifa_hold(ifa);
 -      write_unlock(&addrconf_hash_lock);
 +      spin_unlock(&addrconf_hash_lock);
  
        write_lock(&idev->lock);
        /* Add to inet6_dev unicast addr list. */
  
  #ifdef CONFIG_IPV6_PRIVACY
        if (ifa->flags&IFA_F_TEMPORARY) {
 -              ifa->tmp_next = idev->tempaddr_list;
 -              idev->tempaddr_list = ifa;
 +              list_add(&ifa->tmp_list, &idev->tempaddr_list);
                in6_ifa_hold(ifa);
        }
  #endif
@@@ -703,7 -699,7 +703,7 @@@ out2
  
        return ifa;
  out:
 -      write_unlock(&addrconf_hash_lock);
 +      spin_unlock(&addrconf_hash_lock);
        goto out2;
  }
  
  
  static void ipv6_del_addr(struct inet6_ifaddr *ifp)
  {
 -      struct inet6_ifaddr *ifa, **ifap;
 +      struct inet6_ifaddr *ifa, *ifn;
        struct inet6_dev *idev = ifp->idev;
        int hash;
        int deleted = 0, onlink = 0;
  
        ifp->dead = 1;
  
 -      write_lock_bh(&addrconf_hash_lock);
 -      for (ifap = &inet6_addr_lst[hash]; (ifa=*ifap) != NULL;
 -           ifap = &ifa->lst_next) {
 -              if (ifa == ifp) {
 -                      *ifap = ifa->lst_next;
 -                      __in6_ifa_put(ifp);
 -                      ifa->lst_next = NULL;
 -                      break;
 -              }
 -      }
 -      write_unlock_bh(&addrconf_hash_lock);
 +      spin_lock_bh(&addrconf_hash_lock);
 +      hlist_del_init_rcu(&ifp->addr_lst);
 +      __in6_ifa_put(ifp);
 +      spin_unlock_bh(&addrconf_hash_lock);
  
        write_lock_bh(&idev->lock);
  #ifdef CONFIG_IPV6_PRIVACY
        if (ifp->flags&IFA_F_TEMPORARY) {
 -              for (ifap = &idev->tempaddr_list; (ifa=*ifap) != NULL;
 -                   ifap = &ifa->tmp_next) {
 -                      if (ifa == ifp) {
 -                              *ifap = ifa->tmp_next;
 -                              if (ifp->ifpub) {
 -                                      in6_ifa_put(ifp->ifpub);
 -                                      ifp->ifpub = NULL;
 -                              }
 -                              __in6_ifa_put(ifp);
 -                              ifa->tmp_next = NULL;
 -                              break;
 -                      }
 +              list_del(&ifp->tmp_list);
 +              if (ifp->ifpub) {
 +                      in6_ifa_put(ifp->ifpub);
 +                      ifp->ifpub = NULL;
                }
 +              __in6_ifa_put(ifp);
        }
  #endif
  
 -      for (ifap = &idev->addr_list; (ifa=*ifap) != NULL;) {
 +      list_for_each_entry_safe(ifa, ifn, &idev->addr_list, if_list) {
                if (ifa == ifp) {
 -                      *ifap = ifa->if_next;
 +                      list_del_init(&ifp->if_list);
                        __in6_ifa_put(ifp);
 -                      ifa->if_next = NULL;
 +
                        if (!(ifp->flags & IFA_F_PERMANENT) || onlink > 0)
                                break;
                        deleted = 1;
                                }
                        }
                }
 -              ifap = &ifa->if_next;
        }
        write_unlock_bh(&idev->lock);
  
@@@ -1153,7 -1164,7 +1153,7 @@@ int ipv6_dev_get_saddr(struct net *net
                        continue;
  
                read_lock_bh(&idev->lock);
 -              for (score->ifa = idev->addr_list; score->ifa; score->ifa = score->ifa->if_next) {
 +              list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
                        int i;
  
                        /*
@@@ -1231,6 -1242,7 +1231,6 @@@ try_nextdev
        in6_ifa_put(hiscore->ifa);
        return 0;
  }
 -
  EXPORT_SYMBOL(ipv6_dev_get_saddr);
  
  int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
        int err = -EADDRNOTAVAIL;
  
        rcu_read_lock();
 -      if ((idev = __in6_dev_get(dev)) != NULL) {
 +      idev = __in6_dev_get(dev);
 +      if (idev) {
                struct inet6_ifaddr *ifp;
  
                read_lock_bh(&idev->lock);
 -              for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
 -                      if (ifp->scope == IFA_LINK && !(ifp->flags & banned_flags)) {
 +              list_for_each_entry(ifp, &idev->addr_list, if_list) {
 +                      if (ifp->scope == IFA_LINK &&
 +                          !(ifp->flags & banned_flags)) {
                                ipv6_addr_copy(addr, &ifp->addr);
                                err = 0;
                                break;
@@@ -1265,7 -1275,7 +1265,7 @@@ static int ipv6_count_addresses(struct 
        struct inet6_ifaddr *ifp;
  
        read_lock_bh(&idev->lock);
 -      for (ifp=idev->addr_list; ifp; ifp=ifp->if_next)
 +      list_for_each_entry(ifp, &idev->addr_list, if_list)
                cnt++;
        read_unlock_bh(&idev->lock);
        return cnt;
  int ipv6_chk_addr(struct net *net, struct in6_addr *addr,
                  struct net_device *dev, int strict)
  {
 -      struct inet6_ifaddr * ifp;
 -      u8 hash = ipv6_addr_hash(addr);
 +      struct inet6_ifaddr *ifp = NULL;
 +      struct hlist_node *node;
 +      unsigned int hash = ipv6_addr_hash(addr);
  
 -      read_lock_bh(&addrconf_hash_lock);
 -      for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
 +      rcu_read_lock_bh();
 +      hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr) &&
                                break;
                }
        }
 -      read_unlock_bh(&addrconf_hash_lock);
 +      rcu_read_unlock_bh();
 +
        return ifp != NULL;
  }
  EXPORT_SYMBOL(ipv6_chk_addr);
  
 -static
 -int ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 -                     struct net_device *dev)
 +static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
 +                             struct net_device *dev)
  {
 -      struct inet6_ifaddr * ifp;
 -      u8 hash = ipv6_addr_hash(addr);
 +      unsigned int hash = ipv6_addr_hash(addr);
 +      struct inet6_ifaddr *ifp;
 +      struct hlist_node *node;
  
 -      for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
 +      hlist_for_each_entry(ifp, node, &inet6_addr_lst[hash], addr_lst) {
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr)) {
                        if (dev == NULL || ifp->idev->dev == dev)
 -                              break;
 +                              return true;
                }
        }
 -      return ifp != NULL;
 +      return false;
  }
  
  int ipv6_chk_prefix(struct in6_addr *addr, struct net_device *dev)
        idev = __in6_dev_get(dev);
        if (idev) {
                read_lock_bh(&idev->lock);
 -              for (ifa = idev->addr_list; ifa; ifa = ifa->if_next) {
 +              list_for_each_entry(ifa, &idev->addr_list, if_list) {
                        onlink = ipv6_prefix_equal(addr, &ifa->addr,
                                                   ifa->prefix_len);
                        if (onlink)
@@@ -1341,26 -1349,24 +1341,26 @@@ EXPORT_SYMBOL(ipv6_chk_prefix)
  struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
                                     struct net_device *dev, int strict)
  {
 -      struct inet6_ifaddr * ifp;
 -      u8 hash = ipv6_addr_hash(addr);
 +      struct inet6_ifaddr *ifp, *result = NULL;
 +      unsigned int hash = ipv6_addr_hash(addr);
 +      struct hlist_node *node;
  
 -      read_lock_bh(&addrconf_hash_lock);
 -      for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
 +      rcu_read_lock_bh();
 +      hlist_for_each_entry_rcu(ifp, node, &inet6_addr_lst[hash], addr_lst) {
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr)) {
                        if (dev == NULL || ifp->idev->dev == dev ||
                            !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
 +                              result = ifp;
                                in6_ifa_hold(ifp);
                                break;
                        }
                }
        }
 -      read_unlock_bh(&addrconf_hash_lock);
 +      rcu_read_unlock_bh();
  
 -      return ifp;
 +      return result;
  }
  
  /* Gets referenced address, destroys ifaddr */
@@@ -1563,7 -1569,7 +1563,7 @@@ static int ipv6_inherit_eui64(u8 *eui, 
        struct inet6_ifaddr *ifp;
  
        read_lock_bh(&idev->lock);
 -      for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
 +      list_for_each_entry(ifp, &idev->addr_list, if_list) {
                if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
                        memcpy(eui, ifp->addr.s6_addr+8, 8);
                        err = 0;
@@@ -1731,8 -1737,7 +1731,8 @@@ static struct inet6_dev *addrconf_add_d
  
        ASSERT_RTNL();
  
 -      if ((idev = ipv6_find_idev(dev)) == NULL)
 +      idev = ipv6_find_idev(dev);
 +      if (!idev)
                return NULL;
  
        /* Add default multicast route */
@@@ -1965,7 -1970,7 +1965,7 @@@ ok
  #ifdef CONFIG_IPV6_PRIVACY
                        read_lock_bh(&in6_dev->lock);
                        /* update all temporary addresses in the list */
 -                      for (ift=in6_dev->tempaddr_list; ift; ift=ift->tmp_next) {
 +                      list_for_each_entry(ift, &in6_dev->tempaddr_list, tmp_list) {
                                /*
                                 * When adjusting the lifetimes of an existing
                                 * temporary address, only lower the lifetimes.
@@@ -2168,7 -2173,7 +2168,7 @@@ static int inet6_addr_del(struct net *n
                return -ENXIO;
  
        read_lock_bh(&idev->lock);
 -      for (ifp = idev->addr_list; ifp; ifp=ifp->if_next) {
 +      list_for_each_entry(ifp, &idev->addr_list, if_list) {
                if (ifp->prefix_len == plen &&
                    ipv6_addr_equal(pfx, &ifp->addr)) {
                        in6_ifa_hold(ifp);
                        /* If the last address is deleted administratively,
                           disable IPv6 on this interface.
                         */
 -                      if (idev->addr_list == NULL)
 +                      if (list_empty(&idev->addr_list))
                                addrconf_ifdown(idev->dev, 1);
                        return 0;
                }
@@@ -2440,8 -2445,7 +2440,8 @@@ static void addrconf_ip6_tnl_config(str
  
        ASSERT_RTNL();
  
 -      if ((idev = addrconf_add_dev(dev)) == NULL) {
 +      idev = addrconf_add_dev(dev);
 +      if (!idev) {
                printk(KERN_DEBUG "init ip6-ip6: add_dev failed\n");
                return;
        }
@@@ -2456,7 -2460,7 +2456,7 @@@ static int addrconf_notify(struct notif
        int run_pending = 0;
        int err;
  
 -      switch(event) {
 +      switch (event) {
        case NETDEV_REGISTER:
                if (!idev && dev->mtu >= IPV6_MIN_MTU) {
                        idev = ipv6_add_dev(dev);
                                return notifier_from_errno(-ENOMEM);
                }
                break;
 +
        case NETDEV_UP:
        case NETDEV_CHANGE:
                if (dev->flags & IFF_SLAVE)
                        }
  
                        if (idev) {
 -                              if (idev->if_flags & IF_READY) {
 +                              if (idev->if_flags & IF_READY)
                                        /* device is already configured. */
                                        break;
 -                              }
                                idev->if_flags |= IF_READY;
                        }
  
                        run_pending = 1;
                }
  
 -              switch(dev->type) {
 +              switch (dev->type) {
  #if defined(CONFIG_IPV6_SIT) || defined(CONFIG_IPV6_SIT_MODULE)
                case ARPHRD_SIT:
                        addrconf_sit_config(dev);
                        addrconf_dev_config(dev);
                        break;
                }
 +
                if (idev) {
                        if (run_pending)
                                addrconf_dad_run(idev);
  
 -                      /* If the MTU changed during the interface down, when the
 -                         interface up, the changed MTU must be reflected in the
 -                         idev as well as routers.
 +                      /*
 +                       * If the MTU changed during the interface down,
 +                       * when the interface up, the changed MTU must be
 +                       * reflected in the idev as well as routers.
                         */
 -                      if (idev->cnf.mtu6 != dev->mtu && dev->mtu >= IPV6_MIN_MTU) {
 +                      if (idev->cnf.mtu6 != dev->mtu &&
 +                          dev->mtu >= IPV6_MIN_MTU) {
                                rt6_mtu_change(dev, dev->mtu);
                                idev->cnf.mtu6 = dev->mtu;
                        }
                        idev->tstamp = jiffies;
                        inet6_ifinfo_notify(RTM_NEWLINK, idev);
 -                      /* If the changed mtu during down is lower than IPV6_MIN_MTU
 -                         stop IPv6 on this interface.
 +
 +                      /*
 +                       * If the changed mtu during down is lower than
 +                       * IPV6_MIN_MTU stop IPv6 on this interface.
                         */
                        if (dev->mtu < IPV6_MIN_MTU)
 -                              addrconf_ifdown(dev, event != NETDEV_DOWN);
 +                              addrconf_ifdown(dev, 1);
                }
                break;
  
                                break;
                }
  
 -              /* MTU falled under IPV6_MIN_MTU. Stop IPv6 on this interface. */
 +              /*
 +               * MTU falled under IPV6_MIN_MTU.
 +               * Stop IPv6 on this interface.
 +               */
  
        case NETDEV_DOWN:
        case NETDEV_UNREGISTER:
                                return notifier_from_errno(err);
                }
                break;
 -      case NETDEV_BONDING_OLDTYPE:
 -      case NETDEV_BONDING_NEWTYPE:
 -              addrconf_bonding_change(dev, event);
 +
 +      case NETDEV_PRE_TYPE_CHANGE:
 +      case NETDEV_POST_TYPE_CHANGE:
 +              addrconf_type_change(dev, event);
                break;
        }
  
   */
  static struct notifier_block ipv6_dev_notf = {
        .notifier_call = addrconf_notify,
 -      .priority = 0
  };
  
 -static void addrconf_bonding_change(struct net_device *dev, unsigned long event)
 +static void addrconf_type_change(struct net_device *dev, unsigned long event)
  {
        struct inet6_dev *idev;
        ASSERT_RTNL();
  
        idev = __in6_dev_get(dev);
  
 -      if (event == NETDEV_BONDING_NEWTYPE)
 +      if (event == NETDEV_POST_TYPE_CHANGE)
                ipv6_mc_remap(idev);
 -      else if (event == NETDEV_BONDING_OLDTYPE)
 +      else if (event == NETDEV_PRE_TYPE_CHANGE)
                ipv6_mc_unmap(idev);
  }
  
  static int addrconf_ifdown(struct net_device *dev, int how)
  {
 -      struct inet6_dev *idev;
 -      struct inet6_ifaddr *ifa, *keep_list, **bifa;
        struct net *net = dev_net(dev);
 -      int i;
 +      struct inet6_dev *idev;
 +      struct inet6_ifaddr *ifa;
 +      LIST_HEAD(keep_list);
  
        ASSERT_RTNL();
  
        if (idev == NULL)
                return -ENODEV;
  
 -      /* Step 1: remove reference to ipv6 device from parent device.
 -                 Do not dev_put!
 +      /*
 +       * Step 1: remove reference to ipv6 device from parent device.
 +       *         Do not dev_put!
         */
        if (how) {
                idev->dead = 1;
  
        }
  
 -      /* Step 2: clear hash table */
 -      for (i=0; i<IN6_ADDR_HSIZE; i++) {
 -              bifa = &inet6_addr_lst[i];
 -
 -              write_lock_bh(&addrconf_hash_lock);
 -              while ((ifa = *bifa) != NULL) {
 -                      if (ifa->idev == idev &&
 -                          (how || !(ifa->flags&IFA_F_PERMANENT) ||
 -                           ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
 -                              *bifa = ifa->lst_next;
 -                              ifa->lst_next = NULL;
 -                              __in6_ifa_put(ifa);
 -                              continue;
 -                      }
 -                      bifa = &ifa->lst_next;
 -              }
 -              write_unlock_bh(&addrconf_hash_lock);
 -      }
 -
        write_lock_bh(&idev->lock);
  
 -      /* Step 3: clear flags for stateless addrconf */
 +      /* Step 2: clear flags for stateless addrconf */
        if (!how)
                idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
  
 -      /* Step 4: clear address list */
  #ifdef CONFIG_IPV6_PRIVACY
        if (how && del_timer(&idev->regen_timer))
                in6_dev_put(idev);
  
 -      /* clear tempaddr list */
 -      while ((ifa = idev->tempaddr_list) != NULL) {
 -              idev->tempaddr_list = ifa->tmp_next;
 -              ifa->tmp_next = NULL;
 +      /* Step 3: clear tempaddr list */
 +      while (!list_empty(&idev->tempaddr_list)) {
 +              ifa = list_first_entry(&idev->tempaddr_list,
 +                                     struct inet6_ifaddr, tmp_list);
 +              list_del(&ifa->tmp_list);
                ifa->dead = 1;
                write_unlock_bh(&idev->lock);
                spin_lock_bh(&ifa->lock);
                write_lock_bh(&idev->lock);
        }
  #endif
 -      keep_list = NULL;
 -      bifa = &keep_list;
 -      while ((ifa = idev->addr_list) != NULL) {
 -              idev->addr_list = ifa->if_next;
 -              ifa->if_next = NULL;
  
 +      while (!list_empty(&idev->addr_list)) {
 +              ifa = list_first_entry(&idev->addr_list,
 +                                     struct inet6_ifaddr, if_list);
                addrconf_del_timer(ifa);
  
                /* If just doing link down, and address is permanent
                   and not link-local, then retain it. */
 -              if (how == 0 &&
 +              if (!how &&
                    (ifa->flags&IFA_F_PERMANENT) &&
                    !(ipv6_addr_type(&ifa->addr) & IPV6_ADDR_LINKLOCAL)) {
 -
 -                      /* Move to holding list */
 -                      *bifa = ifa;
 -                      bifa = &ifa->if_next;
 +                      list_move_tail(&ifa->if_list, &keep_list);
  
                        /* If not doing DAD on this address, just keep it. */
                        if ((dev->flags&(IFF_NOARP|IFF_LOOPBACK)) ||
                        ifa->flags |= IFA_F_TENTATIVE;
                        in6_ifa_hold(ifa);
                } else {
 +                      list_del(&ifa->if_list);
                        ifa->dead = 1;
                }
                write_unlock_bh(&idev->lock);
  
 +              /* clear hash table */
 +              spin_lock_bh(&addrconf_hash_lock);
 +              hlist_del_init_rcu(&ifa->addr_lst);
 +              __in6_ifa_put(ifa);
 +              spin_unlock_bh(&addrconf_hash_lock);
 +
                __ipv6_ifa_notify(RTM_DELADDR, ifa);
                atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa);
                in6_ifa_put(ifa);
                write_lock_bh(&idev->lock);
        }
  
 -      idev->addr_list = keep_list;
 +      list_splice(&keep_list, &idev->addr_list);
  
        write_unlock_bh(&idev->lock);
  
        /* Step 5: Discard multicast list */
 -
        if (how)
                ipv6_mc_destroy_dev(idev);
        else
  
        idev->tstamp = jiffies;
  
 -      /* Shot the device (if unregistered) */
 -
 +      /* Last: Shot the device (if unregistered) */
        if (how) {
                addrconf_sysctl_unregister(idev);
                neigh_parms_release(&nd_tbl, idev->nd_parms);
@@@ -2845,7 -2859,7 +2845,7 @@@ static void addrconf_dad_start(struct i
         * Optimistic nodes can start receiving
         * Frames right away
         */
 -      if(ifp->flags & IFA_F_OPTIMISTIC)
 +      if (ifp->flags & IFA_F_OPTIMISTIC)
                ip6_ins_rt(ifp->rt);
  
        addrconf_dad_kick(ifp);
@@@ -2895,7 -2909,7 +2895,7 @@@ out
  
  static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
  {
 -      struct net_device *     dev = ifp->idev->dev;
 +      struct net_device *dev = ifp->idev->dev;
  
        /*
         *      Configure the address for reception. Now it is valid.
        }
  }
  
 -static void addrconf_dad_run(struct inet6_dev *idev) {
 +static void addrconf_dad_run(struct inet6_dev *idev)
 +{
        struct inet6_ifaddr *ifp;
  
        read_lock_bh(&idev->lock);
 -      for (ifp = idev->addr_list; ifp; ifp = ifp->if_next) {
 +      list_for_each_entry(ifp, &idev->addr_list, if_list) {
                spin_lock(&ifp->lock);
                if (!(ifp->flags & IFA_F_TENTATIVE)) {
                        spin_unlock(&ifp->lock);
@@@ -2956,35 -2969,36 +2956,35 @@@ static struct inet6_ifaddr *if6_get_fir
        struct net *net = seq_file_net(seq);
  
        for (state->bucket = 0; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
 -              ifa = inet6_addr_lst[state->bucket];
 -
 -              while (ifa && !net_eq(dev_net(ifa->idev->dev), net))
 -                      ifa = ifa->lst_next;
 -              if (ifa)
 -                      break;
 +              struct hlist_node *n;
 +              hlist_for_each_entry_rcu(ifa, n, &inet6_addr_lst[state->bucket],
 +                                       addr_lst)
 +                      if (net_eq(dev_net(ifa->idev->dev), net))
 +                              return ifa;
        }
 -      return ifa;
 +      return NULL;
  }
  
 -static struct inet6_ifaddr *if6_get_next(struct seq_file *seq, struct inet6_ifaddr *ifa)
 +static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
 +                                       struct inet6_ifaddr *ifa)
  {
        struct if6_iter_state *state = seq->private;
        struct net *net = seq_file_net(seq);
 +      struct hlist_node *n = &ifa->addr_lst;
  
 -      ifa = ifa->lst_next;
 -try_again:
 -      if (ifa) {
 -              if (!net_eq(dev_net(ifa->idev->dev), net)) {
 -                      ifa = ifa->lst_next;
 -                      goto try_again;
 -              }
 -      }
 +      hlist_for_each_entry_continue_rcu(ifa, n, addr_lst)
 +              if (net_eq(dev_net(ifa->idev->dev), net))
 +                      return ifa;
  
 -      if (!ifa && ++state->bucket < IN6_ADDR_HSIZE) {
 -              ifa = inet6_addr_lst[state->bucket];
 -              goto try_again;
 +      while (++state->bucket < IN6_ADDR_HSIZE) {
 +              hlist_for_each_entry(ifa, n,
 +                                   &inet6_addr_lst[state->bucket], addr_lst) {
 +                      if (net_eq(dev_net(ifa->idev->dev), net))
 +                              return ifa;
 +              }
        }
  
 -      return ifa;
 +      return NULL;
  }
  
  static struct inet6_ifaddr *if6_get_idx(struct seq_file *seq, loff_t pos)
        struct inet6_ifaddr *ifa = if6_get_first(seq);
  
        if (ifa)
 -              while(pos && (ifa = if6_get_next(seq, ifa)) != NULL)
 +              while (pos && (ifa = if6_get_next(seq, ifa)) != NULL)
                        --pos;
        return pos ? NULL : ifa;
  }
  
  static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
 -      __acquires(addrconf_hash_lock)
 +      __acquires(rcu)
  {
 -      read_lock_bh(&addrconf_hash_lock);
 +      rcu_read_lock_bh();
        return if6_get_idx(seq, *pos);
  }
  
@@@ -3014,9 -3028,9 +3014,9 @@@ static void *if6_seq_next(struct seq_fi
  }
  
  static void if6_seq_stop(struct seq_file *seq, void *v)
 -      __releases(addrconf_hash_lock)
 +      __releases(rcu)
  {
 -      read_unlock_bh(&addrconf_hash_lock);
 +      rcu_read_unlock_bh();
  }
  
  static int if6_seq_show(struct seq_file *seq, void *v)
@@@ -3086,12 -3100,10 +3086,12 @@@ void if6_proc_exit(void
  int ipv6_chk_home_addr(struct net *net, struct in6_addr *addr)
  {
        int ret = 0;
 -      struct inet6_ifaddr * ifp;
 -      u8 hash = ipv6_addr_hash(addr);
 -      read_lock_bh(&addrconf_hash_lock);
 -      for (ifp = inet6_addr_lst[hash]; ifp; ifp = ifp->lst_next) {
 +      struct inet6_ifaddr *ifp = NULL;
 +      struct hlist_node *n;
 +      unsigned int hash = ipv6_addr_hash(addr);
 +
 +      rcu_read_lock_bh();
 +      hlist_for_each_entry_rcu(ifp, n, &inet6_addr_lst[hash], addr_lst) {
                if (!net_eq(dev_net(ifp->idev->dev), net))
                        continue;
                if (ipv6_addr_equal(&ifp->addr, addr) &&
                        break;
                }
        }
 -      read_unlock_bh(&addrconf_hash_lock);
 +      rcu_read_unlock_bh();
        return ret;
  }
  #endif
  
  static void addrconf_verify(unsigned long foo)
  {
 +      unsigned long now, next, next_sec, next_sched;
        struct inet6_ifaddr *ifp;
 -      unsigned long now, next;
 +      struct hlist_node *node;
        int i;
  
 -      spin_lock_bh(&addrconf_verify_lock);
 +      rcu_read_lock_bh();
 +      spin_lock(&addrconf_verify_lock);
        now = jiffies;
 -      next = now + ADDR_CHECK_FREQUENCY;
 +      next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
  
        del_timer(&addr_chk_timer);
  
 -      for (i=0; i < IN6_ADDR_HSIZE; i++) {
 -
 +      for (i = 0; i < IN6_ADDR_HSIZE; i++) {
  restart:
 -              read_lock(&addrconf_hash_lock);
 -              for (ifp=inet6_addr_lst[i]; ifp; ifp=ifp->lst_next) {
 +              hlist_for_each_entry_rcu(ifp, node,
 +                                       &inet6_addr_lst[i], addr_lst) {
                        unsigned long age;
 -#ifdef CONFIG_IPV6_PRIVACY
 -                      unsigned long regen_advance;
 -#endif
  
                        if (ifp->flags & IFA_F_PERMANENT)
                                continue;
  
                        spin_lock(&ifp->lock);
 -                      age = (now - ifp->tstamp) / HZ;
 -
 -#ifdef CONFIG_IPV6_PRIVACY
 -                      regen_advance = ifp->idev->cnf.regen_max_retry *
 -                                      ifp->idev->cnf.dad_transmits *
 -                                      ifp->idev->nd_parms->retrans_time / HZ;
 -#endif
 +                      /* We try to batch several events at once. */
 +                      age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
  
                        if (ifp->valid_lft != INFINITY_LIFE_TIME &&
                            age >= ifp->valid_lft) {
                                spin_unlock(&ifp->lock);
                                in6_ifa_hold(ifp);
 -                              read_unlock(&addrconf_hash_lock);
                                ipv6_del_addr(ifp);
                                goto restart;
                        } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
  
                                if (deprecate) {
                                        in6_ifa_hold(ifp);
 -                                      read_unlock(&addrconf_hash_lock);
  
                                        ipv6_ifa_notify(0, ifp);
                                        in6_ifa_put(ifp);
  #ifdef CONFIG_IPV6_PRIVACY
                        } else if ((ifp->flags&IFA_F_TEMPORARY) &&
                                   !(ifp->flags&IFA_F_TENTATIVE)) {
 +                              unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
 +                                      ifp->idev->cnf.dad_transmits *
 +                                      ifp->idev->nd_parms->retrans_time / HZ;
 +
                                if (age >= ifp->prefered_lft - regen_advance) {
                                        struct inet6_ifaddr *ifpub = ifp->ifpub;
                                        if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
                                                in6_ifa_hold(ifp);
                                                in6_ifa_hold(ifpub);
                                                spin_unlock(&ifp->lock);
 -                                              read_unlock(&addrconf_hash_lock);
 +
                                                spin_lock(&ifpub->lock);
                                                ifpub->regen_count = 0;
                                                spin_unlock(&ifpub->lock);
                                spin_unlock(&ifp->lock);
                        }
                }
 -              read_unlock(&addrconf_hash_lock);
        }
  
 -      addr_chk_timer.expires = time_before(next, jiffies + HZ) ? jiffies + HZ : next;
 +      next_sec = round_jiffies_up(next);
 +      next_sched = next;
 +
 +      /* If rounded timeout is accurate enough, accept it. */
 +      if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
 +              next_sched = next_sec;
 +
 +      /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
 +      if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
 +              next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
 +
 +      ADBG((KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
 +            now, next, next_sec, next_sched));
 +
 +      addr_chk_timer.expires = next_sched;
        add_timer(&addr_chk_timer);
 -      spin_unlock_bh(&addrconf_verify_lock);
 +      spin_unlock(&addrconf_verify_lock);
 +      rcu_read_unlock_bh();
  }
  
  static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local)
@@@ -3511,7 -3514,8 +3511,7 @@@ static int inet6_fill_ifacaddr(struct s
        return nlmsg_end(skb, nlh);
  }
  
 -enum addr_type_t
 -{
 +enum addr_type_t {
        UNICAST_ADDR,
        MULTICAST_ADDR,
        ANYCAST_ADDR,
@@@ -3522,6 -3526,7 +3522,6 @@@ static int in6_dump_addrs(struct inet6_
                          struct netlink_callback *cb, enum addr_type_t type,
                          int s_ip_idx, int *p_ip_idx)
  {
 -      struct inet6_ifaddr *ifa;
        struct ifmcaddr6 *ifmca;
        struct ifacaddr6 *ifaca;
        int err = 1;
  
        read_lock_bh(&idev->lock);
        switch (type) {
 -      case UNICAST_ADDR:
 +      case UNICAST_ADDR: {
 +              struct inet6_ifaddr *ifa;
 +
                /* unicast address incl. temp addr */
 -              for (ifa = idev->addr_list; ifa;
 -                   ifa = ifa->if_next, ip_idx++) {
 -                      if (ip_idx < s_ip_idx)
 +              list_for_each_entry(ifa, &idev->addr_list, if_list) {
 +                      if (++ip_idx < s_ip_idx)
                                continue;
                        err = inet6_fill_ifaddr(skb, ifa,
                                                NETLINK_CB(cb->skb).pid,
                                break;
                }
                break;
 +      }
        case MULTICAST_ADDR:
                /* multicast address */
                for (ifmca = idev->mc_list; ifmca;
@@@ -3607,11 -3610,10 +3607,11 @@@ static int inet6_dump_addr(struct sk_bu
                hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
                        if (idx < s_idx)
                                goto cont;
-                       if (idx > s_idx)
+                       if (h > s_h || idx > s_idx)
                                s_ip_idx = 0;
                        ip_idx = 0;
 -                      if ((idev = __in6_dev_get(dev)) == NULL)
 +                      idev = __in6_dev_get(dev);
 +                      if (!idev)
                                goto cont;
  
                        if (in6_dump_addrs(idev, skb, cb, type,
@@@ -3678,14 -3680,12 +3678,14 @@@ static int inet6_rtm_getaddr(struct sk_
        if (ifm->ifa_index)
                dev = __dev_get_by_index(net, ifm->ifa_index);
  
 -      if ((ifa = ipv6_get_ifaddr(net, addr, dev, 1)) == NULL) {
 +      ifa = ipv6_get_ifaddr(net, addr, dev, 1);
 +      if (!ifa) {
                err = -EADDRNOTAVAIL;
                goto errout;
        }
  
 -      if ((skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL)) == NULL) {
 +      skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
 +      if (!skb) {
                err = -ENOBUFS;
                goto errout_ifa;
        }
@@@ -3810,7 -3810,7 +3810,7 @@@ static inline void __snmp6_fill_stats(u
  static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
                             int bytes)
  {
 -      switch(attrtype) {
 +      switch (attrtype) {
        case IFLA_INET6_STATS:
                __snmp6_fill_stats(stats, (void __percpu **)idev->stats.ipv6, IPSTATS_MIB_MAX, bytes);
                break;
@@@ -4162,211 -4162,211 +4162,211 @@@ static struct addrconf_sysctl_tabl
        .sysctl_header = NULL,
        .addrconf_vars = {
                {
 -                      .procname       =       "forwarding",
 -                      .data           =       &ipv6_devconf.forwarding,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       addrconf_sysctl_forward,
 +                      .procname       = "forwarding",
 +                      .data           = &ipv6_devconf.forwarding,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = addrconf_sysctl_forward,
                },
                {
 -                      .procname       =       "hop_limit",
 -                      .data           =       &ipv6_devconf.hop_limit,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "hop_limit",
 +                      .data           = &ipv6_devconf.hop_limit,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "mtu",
 -                      .data           =       &ipv6_devconf.mtu6,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "mtu",
 +                      .data           = &ipv6_devconf.mtu6,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "accept_ra",
 -                      .data           =       &ipv6_devconf.accept_ra,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "accept_ra",
 +                      .data           = &ipv6_devconf.accept_ra,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "accept_redirects",
 -                      .data           =       &ipv6_devconf.accept_redirects,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "accept_redirects",
 +                      .data           = &ipv6_devconf.accept_redirects,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "autoconf",
 -                      .data           =       &ipv6_devconf.autoconf,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "autoconf",
 +                      .data           = &ipv6_devconf.autoconf,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "dad_transmits",
 -                      .data           =       &ipv6_devconf.dad_transmits,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "dad_transmits",
 +                      .data           = &ipv6_devconf.dad_transmits,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "router_solicitations",
 -                      .data           =       &ipv6_devconf.rtr_solicits,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "router_solicitations",
 +                      .data           = &ipv6_devconf.rtr_solicits,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "router_solicitation_interval",
 -                      .data           =       &ipv6_devconf.rtr_solicit_interval,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec_jiffies,
 +                      .procname       = "router_solicitation_interval",
 +                      .data           = &ipv6_devconf.rtr_solicit_interval,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec_jiffies,
                },
                {
 -                      .procname       =       "router_solicitation_delay",
 -                      .data           =       &ipv6_devconf.rtr_solicit_delay,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec_jiffies,
 +                      .procname       = "router_solicitation_delay",
 +                      .data           = &ipv6_devconf.rtr_solicit_delay,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec_jiffies,
                },
                {
 -                      .procname       =       "force_mld_version",
 -                      .data           =       &ipv6_devconf.force_mld_version,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "force_mld_version",
 +                      .data           = &ipv6_devconf.force_mld_version,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
  #ifdef CONFIG_IPV6_PRIVACY
                {
 -                      .procname       =       "use_tempaddr",
 -                      .data           =       &ipv6_devconf.use_tempaddr,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "use_tempaddr",
 +                      .data           = &ipv6_devconf.use_tempaddr,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "temp_valid_lft",
 -                      .data           =       &ipv6_devconf.temp_valid_lft,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "temp_valid_lft",
 +                      .data           = &ipv6_devconf.temp_valid_lft,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "temp_prefered_lft",
 -                      .data           =       &ipv6_devconf.temp_prefered_lft,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "temp_prefered_lft",
 +                      .data           = &ipv6_devconf.temp_prefered_lft,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "regen_max_retry",
 -                      .data           =       &ipv6_devconf.regen_max_retry,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "regen_max_retry",
 +                      .data           = &ipv6_devconf.regen_max_retry,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "max_desync_factor",
 -                      .data           =       &ipv6_devconf.max_desync_factor,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "max_desync_factor",
 +                      .data           = &ipv6_devconf.max_desync_factor,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
  #endif
                {
 -                      .procname       =       "max_addresses",
 -                      .data           =       &ipv6_devconf.max_addresses,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "max_addresses",
 +                      .data           = &ipv6_devconf.max_addresses,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "accept_ra_defrtr",
 -                      .data           =       &ipv6_devconf.accept_ra_defrtr,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "accept_ra_defrtr",
 +                      .data           = &ipv6_devconf.accept_ra_defrtr,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "accept_ra_pinfo",
 -                      .data           =       &ipv6_devconf.accept_ra_pinfo,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "accept_ra_pinfo",
 +                      .data           = &ipv6_devconf.accept_ra_pinfo,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
  #ifdef CONFIG_IPV6_ROUTER_PREF
                {
 -                      .procname       =       "accept_ra_rtr_pref",
 -                      .data           =       &ipv6_devconf.accept_ra_rtr_pref,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "accept_ra_rtr_pref",
 +                      .data           = &ipv6_devconf.accept_ra_rtr_pref,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "router_probe_interval",
 -                      .data           =       &ipv6_devconf.rtr_probe_interval,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec_jiffies,
 +                      .procname       = "router_probe_interval",
 +                      .data           = &ipv6_devconf.rtr_probe_interval,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec_jiffies,
                },
  #ifdef CONFIG_IPV6_ROUTE_INFO
                {
 -                      .procname       =       "accept_ra_rt_info_max_plen",
 -                      .data           =       &ipv6_devconf.accept_ra_rt_info_max_plen,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "accept_ra_rt_info_max_plen",
 +                      .data           = &ipv6_devconf.accept_ra_rt_info_max_plen,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
  #endif
  #endif
                {
 -                      .procname       =       "proxy_ndp",
 -                      .data           =       &ipv6_devconf.proxy_ndp,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "proxy_ndp",
 +                      .data           = &ipv6_devconf.proxy_ndp,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
 -                      .procname       =       "accept_source_route",
 -                      .data           =       &ipv6_devconf.accept_source_route,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "accept_source_route",
 +                      .data           = &ipv6_devconf.accept_source_route,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
  #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
                {
 -                      .procname       =       "optimistic_dad",
 -                      .data           =       &ipv6_devconf.optimistic_dad,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "optimistic_dad",
 +                      .data           = &ipv6_devconf.optimistic_dad,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
  
                },
  #endif
  #ifdef CONFIG_IPV6_MROUTE
                {
 -                      .procname       =       "mc_forwarding",
 -                      .data           =       &ipv6_devconf.mc_forwarding,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0444,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "mc_forwarding",
 +                      .data           = &ipv6_devconf.mc_forwarding,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0444,
 +                      .proc_handler   = proc_dointvec,
                },
  #endif
                {
 -                      .procname       =       "disable_ipv6",
 -                      .data           =       &ipv6_devconf.disable_ipv6,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       addrconf_sysctl_disable,
 +                      .procname       = "disable_ipv6",
 +                      .data           = &ipv6_devconf.disable_ipv6,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = addrconf_sysctl_disable,
                },
                {
 -                      .procname       =       "accept_dad",
 -                      .data           =       &ipv6_devconf.accept_dad,
 -                      .maxlen         =       sizeof(int),
 -                      .mode           =       0644,
 -                      .proc_handler   =       proc_dointvec,
 +                      .procname       = "accept_dad",
 +                      .data           = &ipv6_devconf.accept_dad,
 +                      .maxlen         = sizeof(int),
 +                      .mode           = 0644,
 +                      .proc_handler   = proc_dointvec,
                },
                {
                        .procname       = "force_tllao",
@@@ -4402,8 -4402,8 +4402,8 @@@ static int __addrconf_sysctl_register(s
        if (t == NULL)
                goto out;
  
 -      for (i=0; t->addrconf_vars[i].data; i++) {
 -              t->addrconf_vars[i].data += (char*)p - (char*)&ipv6_devconf;
 +      for (i = 0; t->addrconf_vars[i].data; i++) {
 +              t->addrconf_vars[i].data += (char *)p - (char *)&ipv6_devconf;
                t->addrconf_vars[i].extra1 = idev; /* embedded; no ref */
                t->addrconf_vars[i].extra2 = net;
        }
@@@ -4540,12 -4540,14 +4540,12 @@@ int register_inet6addr_notifier(struct 
  {
        return atomic_notifier_chain_register(&inet6addr_chain, nb);
  }
 -
  EXPORT_SYMBOL(register_inet6addr_notifier);
  
  int unregister_inet6addr_notifier(struct notifier_block *nb)
  {
 -      return atomic_notifier_chain_unregister(&inet6addr_chain,nb);
 +      return atomic_notifier_chain_unregister(&inet6addr_chain, nb);
  }
 -
  EXPORT_SYMBOL(unregister_inet6addr_notifier);
  
  /*
  
  int __init addrconf_init(void)
  {
 -      int err;
 +      int i, err;
  
 -      if ((err = ipv6_addr_label_init()) < 0) {
 -              printk(KERN_CRIT "IPv6 Addrconf: cannot initialize default policy table: %d.\n",
 -                      err);
 +      err = ipv6_addr_label_init();
 +      if (err < 0) {
 +              printk(KERN_CRIT "IPv6 Addrconf:"
 +                     " cannot initialize default policy table: %d.\n", err);
                return err;
        }
  
        if (err)
                goto errlo;
  
 +      for (i = 0; i < IN6_ADDR_HSIZE; i++)
 +              INIT_HLIST_HEAD(&inet6_addr_lst[i]);
 +
        register_netdevice_notifier(&ipv6_dev_notf);
  
        addrconf_verify(0);
@@@ -4621,6 -4619,7 +4621,6 @@@ errlo
  
  void addrconf_cleanup(void)
  {
 -      struct inet6_ifaddr *ifa;
        struct net_device *dev;
        int i;
  
        /*
         *      Check hash table.
         */
 -      write_lock_bh(&addrconf_hash_lock);
 -      for (i=0; i < IN6_ADDR_HSIZE; i++) {
 -              for (ifa=inet6_addr_lst[i]; ifa; ) {
 -                      struct inet6_ifaddr *bifa;
 -
 -                      bifa = ifa;
 -                      ifa = ifa->lst_next;
 -                      printk(KERN_DEBUG "bug: IPv6 address leakage detected: ifa=%p\n", bifa);
 -                      /* Do not free it; something is wrong.
 -                         Now we can investigate it with debugger.
 -                       */
 -              }
 -      }
 -      write_unlock_bh(&addrconf_hash_lock);
 +      spin_lock_bh(&addrconf_hash_lock);
 +      for (i = 0; i < IN6_ADDR_HSIZE; i++)
 +              WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
 +      spin_unlock_bh(&addrconf_hash_lock);
  
        del_timer(&addr_chk_timer);
        rtnl_unlock();
diff --combined net/mac80211/tx.c
index 08e1f17a4226be1c9c564fb46eb89c7bf1162f4f,cfc473e1b0509ca1850d61a49b5dd59b23ecee55..db25fa9ef135c61ecc60f84b8b8c5da5ec09e603
@@@ -1991,6 -1991,7 +1991,7 @@@ static bool ieee80211_tx_pending_skb(st
  void ieee80211_tx_pending(unsigned long data)
  {
        struct ieee80211_local *local = (struct ieee80211_local *)data;
+       struct ieee80211_sub_if_data *sdata;
        unsigned long flags;
        int i;
        bool txok;
                while (!skb_queue_empty(&local->pending[i])) {
                        struct sk_buff *skb = __skb_dequeue(&local->pending[i]);
                        struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
 -                      struct ieee80211_sub_if_data *sdata;
  
                        if (WARN_ON(!info->control.vif)) {
                                kfree_skb(skb);
                                continue;
                        }
  
 -                      sdata = vif_to_sdata(info->control.vif);
                        spin_unlock_irqrestore(&local->queue_stop_reason_lock,
                                                flags);
  
                        if (!txok)
                                break;
                }
+               if (skb_queue_empty(&local->pending[i]))
+                       list_for_each_entry_rcu(sdata, &local->interfaces, list)
+                               netif_tx_wake_queue(
+                                       netdev_get_tx_queue(sdata->dev, i));
        }
        spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
  
diff --combined net/netlink/af_netlink.c
index 274d977166b73289de7ffa96c16dfc700ac49929,795424396aff62fba6971aaff0eb6631965ce481..6464a1972a69dc649099fbd05a7ca5d56708b790
@@@ -545,7 -545,7 +545,7 @@@ static int netlink_autobind(struct sock
        struct hlist_head *head;
        struct sock *osk;
        struct hlist_node *node;
 -      s32 pid = current->tgid;
 +      s32 pid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;
  
@@@ -683,6 -683,9 +683,9 @@@ static int netlink_connect(struct socke
        struct netlink_sock *nlk = nlk_sk(sk);
        struct sockaddr_nl *nladdr = (struct sockaddr_nl *)addr;
  
+       if (alen < sizeof(addr->sa_family))
+               return -EINVAL;
        if (addr->sa_family == AF_UNSPEC) {
                sk->sk_state    = NETLINK_UNCONNECTED;
                nlk->dst_pid    = 0;
diff --combined net/socket.c
index ae904b58d9f516ef4cca81552d0bd011f19edde1,f55ffe9f8c87f27d670ddea0da7aff762355cf2f..ad2e8153c6180116368ef97ac3c50c2182c6c4e2
@@@ -619,9 -619,10 +619,9 @@@ void __sock_recv_timestamp(struct msghd
                        put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMP,
                                 sizeof(tv), &tv);
                } else {
 -                      struct timespec ts;
 -                      skb_get_timestampns(skb, &ts);
 +                      skb_get_timestampns(skb, &ts[0]);
                        put_cmsg(msg, SOL_SOCKET, SCM_TIMESTAMPNS,
 -                               sizeof(ts), &ts);
 +                               sizeof(ts[0]), &ts[0]);
                }
        }
  
@@@ -2134,6 -2135,10 +2134,10 @@@ int __sys_recvmmsg(int fd, struct mmsgh
                        break;
                ++datagrams;
  
+               /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
+               if (flags & MSG_WAITFORONE)
+                       flags |= MSG_DONTWAIT;
                if (timeout) {
                        ktime_get_ts(timeout);
                        *timeout = timespec_sub(end_time, *timeout);