Merge remote-tracking branch 'stable/linux-3.0.y' into develop-3.0
author黄涛 <huangtao@rock-chips.com>
Mon, 15 Oct 2012 03:56:57 +0000 (11:56 +0800)
committer黄涛 <huangtao@rock-chips.com>
Mon, 15 Oct 2012 03:56:57 +0000 (11:56 +0800)
Conflicts:
Makefile

67 files changed:
arch/mn10300/Makefile
arch/x86/include/asm/pgtable.h
drivers/acpi/bus.c
drivers/block/aoe/aoecmd.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_pm.c
drivers/media/rc/ite-cir.c
drivers/mtd/maps/autcpu12-nvram.c
drivers/mtd/nand/nand_bbt.c
drivers/mtd/nand/nandsim.c
drivers/mtd/nand/omap2.c
drivers/net/davinci_cpdma.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/pppoe.c
drivers/net/r8169.c
drivers/net/tg3.c
drivers/net/usb/sierra_net.c
drivers/net/wan/ixp4xx_hss.c
drivers/pci/hotplug/acpiphp_glue.c
drivers/pci/probe.c
drivers/s390/scsi/zfcp_aux.c
drivers/s390/scsi/zfcp_ccw.c
drivers/s390/scsi/zfcp_cfdc.c
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_dbf.h
drivers/s390/scsi/zfcp_def.h
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_sysfs.c
drivers/s390/scsi/zfcp_unit.c
drivers/scsi/atp870u.c
fs/ext4/inode.c
fs/ext4/move_extent.c
fs/ext4/namei.c
include/linux/mempolicy.h
include/linux/xfrm.h
include/net/xfrm.h
kernel/cpuset.c
kernel/rcutree.c
kernel/sched.c
kernel/sys.c
kernel/workqueue.c
lib/gcd.c
mm/mempolicy.c
mm/truncate.c
net/8021q/vlan_core.c
net/core/dev.c
net/core/sock.c
net/ipv4/raw.c
net/ipv4/tcp.c
net/ipv6/mip6.c
net/ipv6/raw.c
net/ipv6/route.c
net/l2tp/l2tp_eth.c
net/netrom/af_netrom.c
net/sched/sch_cbq.c
net/sched/sch_qfq.c
net/sctp/output.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_user.c
scripts/Kbuild.include
sound/drivers/aloop.c
sound/soc/codecs/wm9712.c
sound/usb/mixer.c
sound/usb/quirks-table.h

index 7120282bf0d89c3240886cbad32b973bda1bfc6f..3eb4a52ff9a786d5352fad68213268b37307e31f 100644 (file)
@@ -26,7 +26,7 @@ CHECKFLAGS    +=
 PROCESSOR      := unset
 UNIT           := unset
 
-KBUILD_CFLAGS  += -mam33 -mmem-funcs -DCPU=AM33
+KBUILD_CFLAGS  += -mam33 -DCPU=AM33 $(call cc-option,-mmem-funcs,)
 KBUILD_AFLAGS  += -mam33 -DCPU=AM33
 
 ifeq ($(CONFIG_MN10300_CURRENT_IN_E2),y)
index 18601c86fab18700c6cf7ad9a42b20d4582f769a..884507e68ab1804c4c854c07bec6e0685751af23 100644 (file)
@@ -146,8 +146,7 @@ static inline unsigned long pmd_pfn(pmd_t pmd)
 
 static inline int pmd_large(pmd_t pte)
 {
-       return (pmd_flags(pte) & (_PAGE_PSE | _PAGE_PRESENT)) ==
-               (_PAGE_PSE | _PAGE_PRESENT);
+       return pmd_flags(pte) & _PAGE_PSE;
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -415,7 +414,13 @@ static inline int pte_hidden(pte_t pte)
 
 static inline int pmd_present(pmd_t pmd)
 {
-       return pmd_flags(pmd) & _PAGE_PRESENT;
+       /*
+        * Checking for _PAGE_PSE is needed too because
+        * split_huge_page will temporarily clear the present bit (but
+        * the _PAGE_PSE flag will remain set at all times while the
+        * _PAGE_PRESENT bit is clear).
+        */
+       return pmd_flags(pmd) & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_PSE);
 }
 
 static inline int pmd_none(pmd_t pmd)
index d1e06c182cdba8ee46d404dc7cdb95571e9fe893..1c57307c310f63ed12ea33bfeeb7795f616276a8 100644 (file)
@@ -944,14 +944,18 @@ static int __init acpi_bus_init(void)
        status = acpi_ec_ecdt_probe();
        /* Ignore result. Not having an ECDT is not fatal. */
 
-       acpi_bus_osc_support();
-
        status = acpi_initialize_objects(ACPI_FULL_INITIALIZATION);
        if (ACPI_FAILURE(status)) {
                printk(KERN_ERR PREFIX "Unable to initialize ACPI objects\n");
                goto error1;
        }
 
+       /*
+        * _OSC method may exist in module level code,
+        * so it must be run after ACPI_FULL_INITIALIZATION
+        */
+       acpi_bus_osc_support();
+
        /*
         * _PDC control method may load dynamic SSDT tables,
         * and we need to install the table handler before that.
index de0435e63b02cbd349c5dcc282682359f6f85934..887f68f6d79a9e615beba7525cb9d0e671917c4e 100644 (file)
@@ -35,6 +35,7 @@ new_skb(ulong len)
                skb_reset_mac_header(skb);
                skb_reset_network_header(skb);
                skb->protocol = __constant_htons(ETH_P_AOE);
+               skb_checksum_none_assert(skb);
        }
        return skb;
 }
index eb6fe79c691f835ac104fbafad332a7253c50300..1cfe7539fd9fad3b63322fbf9116cc8997b96e3b 100644 (file)
@@ -143,6 +143,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
            (rdev->pdev->subsystem_device == 0x01fd))
                return true;
 
+       /* Gateway RS690 only seems to work with MSIs. */
+       if ((rdev->pdev->device == 0x791f) &&
+           (rdev->pdev->subsystem_vendor == 0x107b) &&
+           (rdev->pdev->subsystem_device == 0x0185))
+               return true;
+
+       /* try and enable MSIs by default on all RS690s */
+       if (rdev->family == CHIP_RS690)
+               return true;
+
        /* RV515 seems to have MSI issues where it loses
         * MSI rearms occasionally. This leads to lockups and freezes.
         * disable it by default.
index 6fabe89fa6a18da11ce5636961b717f84b9b9491..4f88863bcc4c499e80b224908c4bdb4e48e80d64 100644 (file)
@@ -535,7 +535,9 @@ void radeon_pm_suspend(struct radeon_device *rdev)
 void radeon_pm_resume(struct radeon_device *rdev)
 {
        /* set up the default clocks if the MC ucode is loaded */
-       if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+       if ((rdev->family >= CHIP_BARTS) &&
+           (rdev->family <= CHIP_CAYMAN) &&
+           rdev->mc_fw) {
                if (rdev->pm.default_vddc)
                        radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
                                                SET_VOLTAGE_TYPE_ASIC_VDDC);
@@ -590,7 +592,9 @@ int radeon_pm_init(struct radeon_device *rdev)
                radeon_pm_print_states(rdev);
                radeon_pm_init_profile(rdev);
                /* set up the default clocks if the MC ucode is loaded */
-               if (ASIC_IS_DCE5(rdev) && rdev->mc_fw) {
+               if ((rdev->family >= CHIP_BARTS) &&
+                   (rdev->family <= CHIP_CAYMAN) &&
+                   rdev->mc_fw) {
                        if (rdev->pm.default_vddc)
                                radeon_atom_set_voltage(rdev, rdev->pm.default_vddc,
                                                        SET_VOLTAGE_TYPE_ASIC_VDDC);
index c5ca0914087b3672e0ba3248c9e0ff2ab430743c..d8e0b2d81c81eaaf09ec13b9db68e1973937cc1b 100644 (file)
@@ -1477,6 +1477,7 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
        rdev = rc_allocate_device();
        if (!rdev)
                goto failure;
+       itdev->rdev = rdev;
 
        ret = -ENODEV;
 
@@ -1608,7 +1609,6 @@ static int ite_probe(struct pnp_dev *pdev, const struct pnp_device_id
        if (ret)
                goto failure;
 
-       itdev->rdev = rdev;
        ite_pr(KERN_NOTICE, "driver has been successfully loaded\n");
 
        return 0;
index e5bfd0e093bb1e20e0336f84e69eb7058dc7a822..0598d52eaf9f983ca28efbac6ffba400e6de98b0 100644 (file)
@@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = {
 
 static int __init init_autcpu12_sram (void)
 {
-       int err, save0, save1;
+       map_word tmp, save0, save1;
+       int err;
 
        autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
        if (!autcpu12_sram_map.virt) {
@@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void)
                err = -EIO;
                goto out;
        }
-       simple_map_init(&autcpu_sram_map);
+       simple_map_init(&autcpu12_sram_map);
 
        /*
         * Check for 32K/128K
@@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void)
         * Read and check result on ofs 0x0
         * Restore contents
         */
-       save0 = map_read32(&autcpu12_sram_map,0);
-       save1 = map_read32(&autcpu12_sram_map,0x10000);
-       map_write32(&autcpu12_sram_map,~save0,0x10000);
+       save0 = map_read(&autcpu12_sram_map, 0);
+       save1 = map_read(&autcpu12_sram_map, 0x10000);
+       tmp.x[0] = ~save0.x[0];
+       map_write(&autcpu12_sram_map, tmp, 0x10000);
        /* if we find this pattern on 0x0, we have 32K size
         * restore contents and exit
         */
-       if ( map_read32(&autcpu12_sram_map,0) != save0) {
-               map_write32(&autcpu12_sram_map,save0,0x0);
+       tmp = map_read(&autcpu12_sram_map, 0);
+       if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) {
+               map_write(&autcpu12_sram_map, save0, 0x0);
                goto map;
        }
        /* We have a 128K found, restore 0x10000 and set size
         * to 128K
         */
-       map_write32(&autcpu12_sram_map,save1,0x10000);
+       map_write(&autcpu12_sram_map, save1, 0x10000);
        autcpu12_sram_map.size = SZ_128K;
 
 map:
index 22234a1fe52916e3f553f212bef12f345b3a0455..ed369b102348ff2a305f456a2f9953fd7d410109 100644 (file)
@@ -429,7 +429,7 @@ static int read_abs_bbts(struct mtd_info *mtd, uint8_t *buf,
        /* Read the mirror version, if available */
        if (md && (md->options & NAND_BBT_VERSION)) {
                scan_read_raw(mtd, buf, (loff_t)md->pages[0] << this->page_shift,
-                             mtd->writesize, td);
+                             mtd->writesize, md);
                md->version[0] = buf[bbt_get_ver_offs(mtd, md)];
                printk(KERN_DEBUG "Bad block table at page %d, version 0x%02X\n",
                       md->pages[0], md->version[0]);
index 63c8048d70b73c48bcc9f0a5cfeac510061af9a9..1f2b8803cca2fb886770d7ee7d89f6b58c0d0339 100644 (file)
@@ -2355,6 +2355,7 @@ static int __init ns_init_module(void)
                uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
                if (new_size >> overridesize != nsmtd->erasesize) {
                        NS_ERR("overridesize is too big\n");
+                       retval = -EINVAL;
                        goto err_exit;
                }
                /* N.B. This relies on nand_scan not doing anything with the size before we change it */
index 0db2c0e7656ae7abbde5536ca8c842a465482696..02897077f16a5f9816a5942ddad8e1297d146ea3 100644 (file)
@@ -1139,7 +1139,8 @@ static int omap_nand_remove(struct platform_device *pdev)
        /* Release NAND device, its internal structures and partitions */
        nand_release(&info->mtd);
        iounmap(info->nand.IO_ADDR_R);
-       kfree(&info->mtd);
+       release_mem_region(info->phys_base, NAND_IO_SIZE);
+       kfree(info);
        return 0;
 }
 
index ae47f23ba93086855c5709985c368900d2b75b99..6b67c526c4612f3bdc9061637e806928b377349e 100644 (file)
@@ -849,6 +849,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
 
                next_dma = desc_read(desc, hw_next);
                chan->head = desc_from_phys(pool, next_dma);
+               chan->count--;
                chan->stats.teardown_dequeue++;
 
                /* issue callback without locks held */
index c0788a31ff0f4a8c0ee8b637ba335e69b7cd3056..78d5b674757b91e08f51f5efacc8d1821656cbbe 100644 (file)
@@ -1288,6 +1288,10 @@ static void netxen_mask_aer_correctable(struct netxen_adapter *adapter)
        struct pci_dev *root = pdev->bus->self;
        u32 aer_pos;
 
+       /* root bus? */
+       if (!root)
+               return;
+
        if (adapter->ahw.board_type != NETXEN_BRDTYPE_P3_4_GB_MM &&
                adapter->ahw.board_type != NETXEN_BRDTYPE_P3_10G_TP)
                return;
index bc9a4bb31980f1758a38e38daebdc5a7ab8f2a12..11615842a57bd9b5211f8f23c78b2c8248f16a03 100644 (file)
@@ -576,7 +576,7 @@ static int pppoe_release(struct socket *sock)
 
        po = pppox_sk(sk);
 
-       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
                dev_put(po->pppoe_dev);
                po->pppoe_dev = NULL;
        }
index 5f838ef924945c79360fc4b51f3ab49568694a3f..f7a56f465b080a7ab4b2c856c880398d8407d334 100644 (file)
 #define R8169_MSG_DEFAULT \
        (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_IFUP | NETIF_MSG_IFDOWN)
 
-#define TX_BUFFS_AVAIL(tp) \
-       (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx - 1)
+#define TX_SLOTS_AVAIL(tp) \
+       (tp->dirty_tx + NUM_TX_DESC - tp->cur_tx)
+
+/* A skbuff with nr_frags needs nr_frags+1 entries in the tx queue */
+#define TX_FRAGS_READY_FOR(tp,nr_frags) \
+       (TX_SLOTS_AVAIL(tp) >= (nr_frags + 1))
 
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
@@ -140,82 +144,101 @@ enum rtl_tx_desc_version {
        RTL_TD_1        = 1,
 };
 
-#define _R(NAME,TD,FW) \
-       { .name = NAME, .txd_version = TD, .fw_name = FW }
+#define JUMBO_1K       ETH_DATA_LEN
+#define JUMBO_4K       (4*1024 - ETH_HLEN - 2)
+#define JUMBO_6K       (6*1024 - ETH_HLEN - 2)
+#define JUMBO_7K       (7*1024 - ETH_HLEN - 2)
+#define JUMBO_9K       (9*1024 - ETH_HLEN - 2)
+
+#define _R(NAME,TD,FW,SZ,B) {  \
+       .name = NAME,           \
+       .txd_version = TD,      \
+       .fw_name = FW,          \
+       .jumbo_max = SZ,        \
+       .jumbo_tx_csum = B      \
+}
 
 static const struct {
        const char *name;
        enum rtl_tx_desc_version txd_version;
        const char *fw_name;
+       u16 jumbo_max;
+       bool jumbo_tx_csum;
 } rtl_chip_infos[] = {
        /* PCI devices. */
        [RTL_GIGA_MAC_VER_01] =
-               _R("RTL8169",           RTL_TD_0, NULL),
+               _R("RTL8169",           RTL_TD_0, NULL, JUMBO_7K, true),
        [RTL_GIGA_MAC_VER_02] =
-               _R("RTL8169s",          RTL_TD_0, NULL),
+               _R("RTL8169s",          RTL_TD_0, NULL, JUMBO_7K, true),
        [RTL_GIGA_MAC_VER_03] =
-               _R("RTL8110s",          RTL_TD_0, NULL),
+               _R("RTL8110s",          RTL_TD_0, NULL, JUMBO_7K, true),
        [RTL_GIGA_MAC_VER_04] =
-               _R("RTL8169sb/8110sb",  RTL_TD_0, NULL),
+               _R("RTL8169sb/8110sb",  RTL_TD_0, NULL, JUMBO_7K, true),
        [RTL_GIGA_MAC_VER_05] =
-               _R("RTL8169sc/8110sc",  RTL_TD_0, NULL),
+               _R("RTL8169sc/8110sc",  RTL_TD_0, NULL, JUMBO_7K, true),
        [RTL_GIGA_MAC_VER_06] =
-               _R("RTL8169sc/8110sc",  RTL_TD_0, NULL),
+               _R("RTL8169sc/8110sc",  RTL_TD_0, NULL, JUMBO_7K, true),
        /* PCI-E devices. */
        [RTL_GIGA_MAC_VER_07] =
-               _R("RTL8102e",          RTL_TD_1, NULL),
+               _R("RTL8102e",          RTL_TD_1, NULL, JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_08] =
-               _R("RTL8102e",          RTL_TD_1, NULL),
+               _R("RTL8102e",          RTL_TD_1, NULL, JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_09] =
-               _R("RTL8102e",          RTL_TD_1, NULL),
+               _R("RTL8102e",          RTL_TD_1, NULL, JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_10] =
-               _R("RTL8101e",          RTL_TD_0, NULL),
+               _R("RTL8101e",          RTL_TD_0, NULL, JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_11] =
-               _R("RTL8168b/8111b",    RTL_TD_0, NULL),
+               _R("RTL8168b/8111b",    RTL_TD_0, NULL, JUMBO_4K, false),
        [RTL_GIGA_MAC_VER_12] =
-               _R("RTL8168b/8111b",    RTL_TD_0, NULL),
+               _R("RTL8168b/8111b",    RTL_TD_0, NULL, JUMBO_4K, false),
        [RTL_GIGA_MAC_VER_13] =
-               _R("RTL8101e",          RTL_TD_0, NULL),
+               _R("RTL8101e",          RTL_TD_0, NULL, JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_14] =
-               _R("RTL8100e",          RTL_TD_0, NULL),
+               _R("RTL8100e",          RTL_TD_0, NULL, JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_15] =
-               _R("RTL8100e",          RTL_TD_0, NULL),
+               _R("RTL8100e",          RTL_TD_0, NULL, JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_16] =
-               _R("RTL8101e",          RTL_TD_0, NULL),
+               _R("RTL8101e",          RTL_TD_0, NULL, JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_17] =
-               _R("RTL8168b/8111b",    RTL_TD_0, NULL),
+               _R("RTL8168b/8111b",    RTL_TD_1, NULL, JUMBO_4K, false),
        [RTL_GIGA_MAC_VER_18] =
-               _R("RTL8168cp/8111cp",  RTL_TD_1, NULL),
+               _R("RTL8168cp/8111cp",  RTL_TD_1, NULL, JUMBO_6K, false),
        [RTL_GIGA_MAC_VER_19] =
-               _R("RTL8168c/8111c",    RTL_TD_1, NULL),
+               _R("RTL8168c/8111c",    RTL_TD_1, NULL, JUMBO_6K, false),
        [RTL_GIGA_MAC_VER_20] =
-               _R("RTL8168c/8111c",    RTL_TD_1, NULL),
+               _R("RTL8168c/8111c",    RTL_TD_1, NULL, JUMBO_6K, false),
        [RTL_GIGA_MAC_VER_21] =
-               _R("RTL8168c/8111c",    RTL_TD_1, NULL),
+               _R("RTL8168c/8111c",    RTL_TD_1, NULL, JUMBO_6K, false),
        [RTL_GIGA_MAC_VER_22] =
-               _R("RTL8168c/8111c",    RTL_TD_1, NULL),
+               _R("RTL8168c/8111c",    RTL_TD_1, NULL, JUMBO_6K, false),
        [RTL_GIGA_MAC_VER_23] =
-               _R("RTL8168cp/8111cp",  RTL_TD_1, NULL),
+               _R("RTL8168cp/8111cp",  RTL_TD_1, NULL, JUMBO_6K, false),
        [RTL_GIGA_MAC_VER_24] =
-               _R("RTL8168cp/8111cp",  RTL_TD_1, NULL),
+               _R("RTL8168cp/8111cp",  RTL_TD_1, NULL, JUMBO_6K, false),
        [RTL_GIGA_MAC_VER_25] =
-               _R("RTL8168d/8111d",    RTL_TD_1, FIRMWARE_8168D_1),
+               _R("RTL8168d/8111d",    RTL_TD_1, FIRMWARE_8168D_1,
+                                                       JUMBO_9K, false),
        [RTL_GIGA_MAC_VER_26] =
-               _R("RTL8168d/8111d",    RTL_TD_1, FIRMWARE_8168D_2),
+               _R("RTL8168d/8111d",    RTL_TD_1, FIRMWARE_8168D_2,
+                                                       JUMBO_9K, false),
        [RTL_GIGA_MAC_VER_27] =
-               _R("RTL8168dp/8111dp",  RTL_TD_1, NULL),
+               _R("RTL8168dp/8111dp",  RTL_TD_1, NULL, JUMBO_9K, false),
        [RTL_GIGA_MAC_VER_28] =
-               _R("RTL8168dp/8111dp",  RTL_TD_1, NULL),
+               _R("RTL8168dp/8111dp",  RTL_TD_1, NULL, JUMBO_9K, false),
        [RTL_GIGA_MAC_VER_29] =
-               _R("RTL8105e",          RTL_TD_1, FIRMWARE_8105E_1),
+               _R("RTL8105e",          RTL_TD_1, FIRMWARE_8105E_1,
+                                                       JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_30] =
-               _R("RTL8105e",          RTL_TD_1, FIRMWARE_8105E_1),
+               _R("RTL8105e",          RTL_TD_1, FIRMWARE_8105E_1,
+                                                       JUMBO_1K, true),
        [RTL_GIGA_MAC_VER_31] =
-               _R("RTL8168dp/8111dp",  RTL_TD_1, NULL),
+               _R("RTL8168dp/8111dp",  RTL_TD_1, NULL, JUMBO_9K, false),
        [RTL_GIGA_MAC_VER_32] =
-               _R("RTL8168e/8111e",    RTL_TD_1, FIRMWARE_8168E_1),
+               _R("RTL8168e/8111e",    RTL_TD_1, FIRMWARE_8168E_1,
+                                                       JUMBO_9K, false),
        [RTL_GIGA_MAC_VER_33] =
-               _R("RTL8168e/8111e",    RTL_TD_1, FIRMWARE_8168E_2)
+               _R("RTL8168e/8111e",    RTL_TD_1, FIRMWARE_8168E_2,
+                                                       JUMBO_9K, false)
 };
 #undef _R
 
@@ -280,6 +303,8 @@ enum rtl_registers {
        Config0         = 0x51,
        Config1         = 0x52,
        Config2         = 0x53,
+#define PME_SIGNAL                     (1 << 5)        /* 8168c and later */
+
        Config3         = 0x54,
        Config4         = 0x55,
        Config5         = 0x56,
@@ -388,6 +413,7 @@ enum rtl_register_content {
        RxOK            = 0x0001,
 
        /* RxStatusDesc */
+       RxBOVF  = (1 << 24),
        RxFOVF  = (1 << 23),
        RxRWT   = (1 << 22),
        RxRES   = (1 << 21),
@@ -428,7 +454,6 @@ enum rtl_register_content {
        /* Config1 register p.24 */
        LEDS1           = (1 << 7),
        LEDS0           = (1 << 6),
-       MSIEnable       = (1 << 5),     /* Enable Message Signaled Interrupt */
        Speed_down      = (1 << 4),
        MEMMAP          = (1 << 3),
        IOMAP           = (1 << 2),
@@ -436,14 +461,19 @@ enum rtl_register_content {
        PMEnable        = (1 << 0),     /* Power Management Enable */
 
        /* Config2 register p. 25 */
+       MSIEnable       = (1 << 5),     /* 8169 only. Reserved in the 8168. */
        PCI_Clock_66MHz = 0x01,
        PCI_Clock_33MHz = 0x00,
 
        /* Config3 register p.25 */
        MagicPacket     = (1 << 5),     /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),     /* Wake up when the cable connection is re-established */
+       Jumbo_En0       = (1 << 2),     /* 8168 only. Reserved in the 8168b */
        Beacon_en       = (1 << 0),     /* 8168 only. Reserved in the 8168b */
 
+       /* Config4 register */
+       Jumbo_En1       = (1 << 1),     /* 8168 only. Reserved in the 8168b */
+
        /* Config5 register p.27 */
        BWF             = (1 << 6),     /* Accept Broadcast wakeup frame */
        MWF             = (1 << 5),     /* Accept Multicast wakeup frame */
@@ -652,6 +682,11 @@ struct rtl8169_private {
                void (*up)(struct rtl8169_private *);
        } pll_power_ops;
 
+       struct jumbo_ops {
+               void (*enable)(struct rtl8169_private *);
+               void (*disable)(struct rtl8169_private *);
+       } jumbo_ops;
+
        int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
        int (*get_settings)(struct net_device *, struct ethtool_cmd *);
        void (*phy_reset_enable)(struct rtl8169_private *tp);
@@ -666,6 +701,7 @@ struct rtl8169_private {
        struct mii_if_info mii;
        struct rtl8169_counters counters;
        u32 saved_wolopts;
+       u32 opts1_mask;
 
        const struct firmware *fw;
 #define RTL_FIRMWARE_UNKNOWN   ERR_PTR(-EAGAIN);
@@ -705,6 +741,21 @@ static int rtl8169_poll(struct napi_struct *napi, int budget);
 static const unsigned int rtl8169_rx_config =
        (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
 
+static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+       struct rtl8169_private *tp = netdev_priv(dev);
+       int cap = tp->pcie_cap;
+
+       if (cap) {
+               u16 ctl;
+
+               pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
+               ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
+               pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
+       }
+}
+
 static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
 {
        void __iomem *ioaddr = tp->mmio_addr;
@@ -1043,17 +1094,21 @@ static u8 rtl8168d_efuse_read(void __iomem *ioaddr, int reg_addr)
        return value;
 }
 
-static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
+static void rtl8169_irq_mask_and_ack(struct rtl8169_private *tp)
 {
-       RTL_W16(IntrMask, 0x0000);
+       void __iomem *ioaddr = tp->mmio_addr;
 
-       RTL_W16(IntrStatus, 0xffff);
+       RTL_W16(IntrMask, 0x0000);
+       RTL_W16(IntrStatus, tp->intr_event);
+       RTL_R8(ChipCmd);
 }
 
-static void rtl8169_asic_down(void __iomem *ioaddr)
+static void rtl8169_asic_down(struct rtl8169_private *tp)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
+
        RTL_W8(ChipCmd, 0x00);
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
        RTL_R16(CPlusCmd);
 }
 
@@ -1112,7 +1167,7 @@ static void __rtl8169_check_link_status(struct net_device *dev,
                netif_carrier_off(dev);
                netif_info(tp, ifdown, dev, "link down\n");
                if (pm)
-                       pm_schedule_suspend(&tp->pci_dev->dev, 100);
+                       pm_schedule_suspend(&tp->pci_dev->dev, 5000);
        }
        spin_unlock_irqrestore(&tp->lock, flags);
 }
@@ -1174,7 +1229,6 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
                u16 reg;
                u8  mask;
        } cfg[] = {
-               { WAKE_ANY,   Config1, PMEnable },
                { WAKE_PHY,   Config3, LinkUp },
                { WAKE_MAGIC, Config3, MagicPacket },
                { WAKE_UCAST, Config5, UWF },
@@ -1182,16 +1236,32 @@ static void __rtl8169_set_wol(struct rtl8169_private *tp, u32 wolopts)
                { WAKE_MCAST, Config5, MWF },
                { WAKE_ANY,   Config5, LanWake }
        };
+       u8 options;
 
        RTL_W8(Cfg9346, Cfg9346_Unlock);
 
        for (i = 0; i < ARRAY_SIZE(cfg); i++) {
-               u8 options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
+               options = RTL_R8(cfg[i].reg) & ~cfg[i].mask;
                if (wolopts & cfg[i].opt)
                        options |= cfg[i].mask;
                RTL_W8(cfg[i].reg, options);
        }
 
+       switch (tp->mac_version) {
+       case RTL_GIGA_MAC_VER_01 ... RTL_GIGA_MAC_VER_17:
+               options = RTL_R8(Config1) & ~PMEnable;
+               if (wolopts)
+                       options |= PMEnable;
+               RTL_W8(Config1, options);
+               break;
+       default:
+               options = RTL_R8(Config2) & ~PME_SIGNAL;
+               if (wolopts)
+                       options |= PME_SIGNAL;
+               RTL_W8(Config2, options);
+               break;
+       }
+
        RTL_W8(Cfg9346, Cfg9346_Lock);
 }
 
@@ -1373,9 +1443,15 @@ static int rtl8169_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 
 static u32 rtl8169_fix_features(struct net_device *dev, u32 features)
 {
+       struct rtl8169_private *tp = netdev_priv(dev);
+
        if (dev->mtu > TD_MSS_MAX)
                features &= ~NETIF_F_ALL_TSO;
 
+       if (dev->mtu > JUMBO_1K &&
+           !rtl_chip_infos[tp->mac_version].jumbo_tx_csum)
+               features &= ~NETIF_F_IP_CSUM;
+
        return features;
 }
 
@@ -2948,22 +3024,24 @@ static const struct rtl_cfg_info {
 };
 
 /* Cfg9346_Unlock assumed. */
-static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
+static unsigned rtl_try_msi(struct rtl8169_private *tp,
                            const struct rtl_cfg_info *cfg)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
        unsigned msi = 0;
        u8 cfg2;
 
        cfg2 = RTL_R8(Config2) & ~MSIEnable;
        if (cfg->features & RTL_FEATURE_MSI) {
-               if (pci_enable_msi(pdev)) {
-                       dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
+               if (pci_enable_msi(tp->pci_dev)) {
+                       netif_info(tp, hw, tp->dev, "no MSI. Back to INTx.\n");
                } else {
                        cfg2 |= MSIEnable;
                        msi = RTL_FEATURE_MSI;
                }
        }
-       RTL_W8(Config2, cfg2);
+       if (tp->mac_version <= RTL_GIGA_MAC_VER_06)
+               RTL_W8(Config2, cfg2);
        return msi;
 }
 
@@ -3126,8 +3204,10 @@ static void r8168_pll_power_down(struct rtl8169_private *tp)
                rtl_writephy(tp, 0x1f, 0x0000);
                rtl_writephy(tp, MII_BMCR, 0x0000);
 
-               RTL_W32(RxConfig, RTL_R32(RxConfig) |
-                       AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
+               if (tp->mac_version == RTL_GIGA_MAC_VER_32 ||
+                   tp->mac_version == RTL_GIGA_MAC_VER_33)
+                       RTL_W32(RxConfig, RTL_R32(RxConfig) | AcceptBroadcast |
+                               AcceptMulticast | AcceptMyPhys);
                return;
        }
 
@@ -3172,8 +3252,8 @@ static void r8168_pll_power_up(struct rtl8169_private *tp)
        r8168_phy_power_up(tp);
 }
 
-static void rtl_pll_power_op(struct rtl8169_private *tp,
-                            void (*op)(struct rtl8169_private *))
+static void rtl_generic_op(struct rtl8169_private *tp,
+                          void (*op)(struct rtl8169_private *))
 {
        if (op)
                op(tp);
@@ -3181,12 +3261,12 @@ static void rtl_pll_power_op(struct rtl8169_private *tp,
 
 static void rtl_pll_power_down(struct rtl8169_private *tp)
 {
-       rtl_pll_power_op(tp, tp->pll_power_ops.down);
+       rtl_generic_op(tp, tp->pll_power_ops.down);
 }
 
 static void rtl_pll_power_up(struct rtl8169_private *tp)
 {
-       rtl_pll_power_op(tp, tp->pll_power_ops.up);
+       rtl_generic_op(tp, tp->pll_power_ops.up);
 }
 
 static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
@@ -3233,6 +3313,149 @@ static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
        }
 }
 
+static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+       rtl_generic_op(tp, tp->jumbo_ops.enable);
+}
+
+static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+       rtl_generic_op(tp, tp->jumbo_ops.disable);
+}
+
+static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+       RTL_W8(Config4, RTL_R8(Config4) | Jumbo_En1);
+       rtl_tx_performance_tweak(tp->pci_dev, 0x2 << MAX_READ_REQUEST_SHIFT);
+}
+
+static void r8168c_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+       RTL_W8(Config4, RTL_R8(Config4) & ~Jumbo_En1);
+       rtl_tx_performance_tweak(tp->pci_dev, 0x5 << MAX_READ_REQUEST_SHIFT);
+}
+
+static void r8168dp_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+}
+
+static void r8168dp_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+}
+
+static void r8168e_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+       struct pci_dev *pdev = tp->pci_dev;
+
+       RTL_W8(MaxTxPacketSize, 0x3f);
+       RTL_W8(Config3, RTL_R8(Config3) | Jumbo_En0);
+       RTL_W8(Config4, RTL_R8(Config4) | 0x01);
+       pci_write_config_byte(pdev, 0x79, 0x20);
+}
+
+static void r8168e_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+       struct pci_dev *pdev = tp->pci_dev;
+
+       RTL_W8(MaxTxPacketSize, 0x0c);
+       RTL_W8(Config3, RTL_R8(Config3) & ~Jumbo_En0);
+       RTL_W8(Config4, RTL_R8(Config4) & ~0x01);
+       pci_write_config_byte(pdev, 0x79, 0x50);
+}
+
+static void r8168b_0_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+       rtl_tx_performance_tweak(tp->pci_dev,
+               (0x2 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+}
+
+static void r8168b_0_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+       rtl_tx_performance_tweak(tp->pci_dev,
+               (0x5 << MAX_READ_REQUEST_SHIFT) | PCI_EXP_DEVCTL_NOSNOOP_EN);
+}
+
+static void r8168b_1_hw_jumbo_enable(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       r8168b_0_hw_jumbo_enable(tp);
+
+       RTL_W8(Config4, RTL_R8(Config4) | (1 << 0));
+}
+
+static void r8168b_1_hw_jumbo_disable(struct rtl8169_private *tp)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       r8168b_0_hw_jumbo_disable(tp);
+
+       RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
+}
+
+static void __devinit rtl_init_jumbo_ops(struct rtl8169_private *tp)
+{
+       struct jumbo_ops *ops = &tp->jumbo_ops;
+
+       switch (tp->mac_version) {
+       case RTL_GIGA_MAC_VER_11:
+               ops->disable    = r8168b_0_hw_jumbo_disable;
+               ops->enable     = r8168b_0_hw_jumbo_enable;
+               break;
+       case RTL_GIGA_MAC_VER_12:
+       case RTL_GIGA_MAC_VER_17:
+               ops->disable    = r8168b_1_hw_jumbo_disable;
+               ops->enable     = r8168b_1_hw_jumbo_enable;
+               break;
+       case RTL_GIGA_MAC_VER_18: /* Wild guess. Needs info from Realtek. */
+       case RTL_GIGA_MAC_VER_19:
+       case RTL_GIGA_MAC_VER_20:
+       case RTL_GIGA_MAC_VER_21: /* Wild guess. Needs info from Realtek. */
+       case RTL_GIGA_MAC_VER_22:
+       case RTL_GIGA_MAC_VER_23:
+       case RTL_GIGA_MAC_VER_24:
+       case RTL_GIGA_MAC_VER_25:
+       case RTL_GIGA_MAC_VER_26:
+               ops->disable    = r8168c_hw_jumbo_disable;
+               ops->enable     = r8168c_hw_jumbo_enable;
+               break;
+       case RTL_GIGA_MAC_VER_27:
+       case RTL_GIGA_MAC_VER_28:
+               ops->disable    = r8168dp_hw_jumbo_disable;
+               ops->enable     = r8168dp_hw_jumbo_enable;
+               break;
+       case RTL_GIGA_MAC_VER_31: /* Wild guess. Needs info from Realtek. */
+       case RTL_GIGA_MAC_VER_32:
+       case RTL_GIGA_MAC_VER_33:
+               ops->disable    = r8168e_hw_jumbo_disable;
+               ops->enable     = r8168e_hw_jumbo_enable;
+               break;
+
+       /*
+        * No action needed for jumbo frames with 8169.
+        * No jumbo for 810x at all.
+        */
+       default:
+               ops->disable    = NULL;
+               ops->enable     = NULL;
+               break;
+       }
+}
+
 static void rtl_hw_reset(struct rtl8169_private *tp)
 {
        void __iomem *ioaddr = tp->mmio_addr;
@@ -3374,6 +3597,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        rtl_init_mdio_ops(tp);
        rtl_init_pll_power_ops(tp);
+       rtl_init_jumbo_ops(tp);
 
        rtl8169_print_mac_version(tp);
 
@@ -3387,7 +3611,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                tp->features |= RTL_FEATURE_WOL;
        if ((RTL_R8(Config5) & (UWF | BWF | MWF)) != 0)
                tp->features |= RTL_FEATURE_WOL;
-       tp->features |= rtl_try_msi(pdev, ioaddr, cfg);
+       tp->features |= rtl_try_msi(tp, cfg);
        RTL_W8(Cfg9346, Cfg9346_Lock);
 
        if ((tp->mac_version <= RTL_GIGA_MAC_VER_06) &&
@@ -3440,6 +3664,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        tp->intr_event = cfg->intr_event;
        tp->napi_event = cfg->napi_event;
 
+       tp->opts1_mask = (tp->mac_version != RTL_GIGA_MAC_VER_01) ?
+               ~(RxBOVF | RxFOVF) : ~0;
+
        init_timer(&tp->timer);
        tp->timer.data = (unsigned long) dev;
        tp->timer.function = rtl8169_phy_timer;
@@ -3455,6 +3682,12 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        netif_info(tp, probe, dev, "%s at 0x%lx, %pM, XID %08x IRQ %d\n",
                   rtl_chip_infos[chipset].name, dev->base_addr, dev->dev_addr,
                   (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
+       if (rtl_chip_infos[chipset].jumbo_max != JUMBO_1K) {
+               netif_info(tp, probe, dev, "jumbo features [frames: %d bytes, "
+                          "tx checksumming: %s]\n",
+                          rtl_chip_infos[chipset].jumbo_max,
+                          rtl_chip_infos[chipset].jumbo_tx_csum ? "ok" : "ko");
+       }
 
        if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
            tp->mac_version == RTL_GIGA_MAC_VER_28 ||
@@ -3473,6 +3706,7 @@ out:
        return rc;
 
 err_out_msi_4:
+       netif_napi_del(&tp->napi);
        rtl_disable_msi(pdev, tp);
        iounmap(ioaddr);
 err_out_free_res_3:
@@ -3498,6 +3732,8 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 
        cancel_delayed_work_sync(&tp->task);
 
+       netif_napi_del(&tp->napi);
+
        unregister_netdev(dev);
 
        rtl_release_firmware(tp);
@@ -3611,7 +3847,7 @@ static void rtl8169_hw_reset(struct rtl8169_private *tp)
        void __iomem *ioaddr = tp->mmio_addr;
 
        /* Disable interrupts */
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        if (tp->mac_version == RTL_GIGA_MAC_VER_27 ||
            tp->mac_version == RTL_GIGA_MAC_VER_28 ||
@@ -3779,21 +4015,6 @@ static void rtl_hw_start_8169(struct net_device *dev)
        RTL_W16(IntrMask, tp->intr_event);
 }
 
-static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       struct rtl8169_private *tp = netdev_priv(dev);
-       int cap = tp->pcie_cap;
-
-       if (cap) {
-               u16 ctl;
-
-               pci_read_config_word(pdev, cap + PCI_EXP_DEVCTL, &ctl);
-               ctl = (ctl & ~PCI_EXP_DEVCTL_READRQ) | force;
-               pci_write_config_word(pdev, cap + PCI_EXP_DEVCTL, ctl);
-       }
-}
-
 static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
 {
        u32 csi;
@@ -4093,8 +4314,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
        RTL_W16(IntrMitigate, 0x5151);
 
        /* Work around for RxFIFO overflow. */
-       if (tp->mac_version == RTL_GIGA_MAC_VER_11 ||
-           tp->mac_version == RTL_GIGA_MAC_VER_22) {
+       if (tp->mac_version == RTL_GIGA_MAC_VER_11) {
                tp->intr_event |= RxFIFOOver | PCSTimeout;
                tp->intr_event &= ~RxOverflow;
        }
@@ -4276,6 +4496,11 @@ static void rtl_hw_start_8101(struct net_device *dev)
        void __iomem *ioaddr = tp->mmio_addr;
        struct pci_dev *pdev = tp->pci_dev;
 
+       if (tp->mac_version >= RTL_GIGA_MAC_VER_30) {
+               tp->intr_event &= ~RxFIFOOver;
+               tp->napi_event &= ~RxFIFOOver;
+       }
+
        if (tp->mac_version == RTL_GIGA_MAC_VER_13 ||
            tp->mac_version == RTL_GIGA_MAC_VER_16) {
                int cap = tp->pcie_cap;
@@ -4336,9 +4561,17 @@ static void rtl_hw_start_8101(struct net_device *dev)
 
 static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
 {
-       if (new_mtu < ETH_ZLEN || new_mtu > SafeMtu)
+       struct rtl8169_private *tp = netdev_priv(dev);
+
+       if (new_mtu < ETH_ZLEN ||
+           new_mtu > rtl_chip_infos[tp->mac_version].jumbo_max)
                return -EINVAL;
 
+       if (new_mtu > ETH_DATA_LEN)
+               rtl_hw_jumbo_enable(tp);
+       else
+               rtl_hw_jumbo_disable(tp);
+
        dev->mtu = new_mtu;
        netdev_update_features(dev);
 
@@ -4539,7 +4772,7 @@ static void rtl8169_wait_for_quiescence(struct net_device *dev)
        /* Wait for any pending NAPI task to complete */
        napi_disable(&tp->napi);
 
-       rtl8169_irq_mask_and_ack(ioaddr);
+       rtl8169_irq_mask_and_ack(tp);
 
        tp->intr_mask = 0xffff;
        RTL_W16(IntrMask, tp->intr_event);
@@ -4698,7 +4931,7 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        u32 opts[2];
        int frags;
 
-       if (unlikely(TX_BUFFS_AVAIL(tp) < skb_shinfo(skb)->nr_frags)) {
+       if (unlikely(!TX_FRAGS_READY_FOR(tp, skb_shinfo(skb)->nr_frags))) {
                netif_err(tp, drv, dev, "BUG! Tx Ring full when queue awake!\n");
                goto err_stop_0;
        }
@@ -4746,10 +4979,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
 
        RTL_W8(TxPoll, NPQ);
 
-       if (TX_BUFFS_AVAIL(tp) < MAX_SKB_FRAGS) {
+       if (!TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
                netif_stop_queue(dev);
-               smp_rmb();
-               if (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)
+               smp_mb();
+               if (TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS))
                        netif_wake_queue(dev);
        }
 
@@ -4849,9 +5082,9 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
 
        if (tp->dirty_tx != dirty_tx) {
                tp->dirty_tx = dirty_tx;
-               smp_wmb();
+               smp_mb();
                if (netif_queue_stopped(dev) &&
-                   (TX_BUFFS_AVAIL(tp) >= MAX_SKB_FRAGS)) {
+                   TX_FRAGS_READY_FOR(tp, MAX_SKB_FRAGS)) {
                        netif_wake_queue(dev);
                }
                /*
@@ -4860,7 +5093,6 @@ static void rtl8169_tx_interrupt(struct net_device *dev,
                 * of start_xmit activity is detected (if it is not detected,
                 * it is slow enough). -- FR
                 */
-               smp_rmb();
                if (tp->cur_tx != dirty_tx)
                        RTL_W8(TxPoll, NPQ);
        }
@@ -4918,7 +5150,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                u32 status;
 
                rmb();
-               status = le32_to_cpu(desc->opts1);
+               status = le32_to_cpu(desc->opts1) & tp->opts1_mask;
 
                if (status & DescOwn)
                        break;
@@ -4938,7 +5170,7 @@ static int rtl8169_rx_interrupt(struct net_device *dev,
                } else {
                        struct sk_buff *skb;
                        dma_addr_t addr = le64_to_cpu(desc->addr);
-                       int pkt_size = (status & 0x00001FFF) - 4;
+                       int pkt_size = (status & 0x00003fff) - 4;
 
                        /*
                         * The driver does not support incoming fragmented
@@ -5001,13 +5233,17 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
         */
        status = RTL_R16(IntrStatus);
        while (status && status != 0xffff) {
+               status &= tp->intr_event;
+               if (!status)
+                       break;
+
                handled = 1;
 
                /* Handle all of the error cases first. These will reset
                 * the chip, so just exit the loop.
                 */
                if (unlikely(!netif_running(dev))) {
-                       rtl8169_asic_down(ioaddr);
+                       rtl8169_asic_down(tp);
                        break;
                }
 
@@ -5015,27 +5251,9 @@ static irqreturn_t rtl8169_interrupt(int irq, void *dev_instance)
                        switch (tp->mac_version) {
                        /* Work around for rx fifo overflow */
                        case RTL_GIGA_MAC_VER_11:
-                       case RTL_GIGA_MAC_VER_22:
-                       case RTL_GIGA_MAC_VER_26:
                                netif_stop_queue(dev);
                                rtl8169_tx_timeout(dev);
                                goto done;
-                       /* Testers needed. */
-                       case RTL_GIGA_MAC_VER_17:
-                       case RTL_GIGA_MAC_VER_19:
-                       case RTL_GIGA_MAC_VER_20:
-                       case RTL_GIGA_MAC_VER_21:
-                       case RTL_GIGA_MAC_VER_23:
-                       case RTL_GIGA_MAC_VER_24:
-                       case RTL_GIGA_MAC_VER_27:
-                       case RTL_GIGA_MAC_VER_28:
-                       case RTL_GIGA_MAC_VER_31:
-                       /* Experimental science. Pktgen proof. */
-                       case RTL_GIGA_MAC_VER_12:
-                       case RTL_GIGA_MAC_VER_25:
-                               if (status == RxFIFOOver)
-                                       goto done;
-                               break;
                        default:
                                break;
                        }
@@ -5130,7 +5348,7 @@ static void rtl8169_down(struct net_device *dev)
 
        spin_lock_irq(&tp->lock);
 
-       rtl8169_asic_down(ioaddr);
+       rtl8169_asic_down(tp);
        /*
         * At this point device interrupts can not be enabled in any function,
         * as netif_running is not true (rtl8169_interrupt, rtl8169_reset_task,
@@ -5376,6 +5594,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rtl8169_private *tp = netdev_priv(dev);
        void __iomem *ioaddr = tp->mmio_addr;
+       struct device *d = &pdev->dev;
+
+       pm_runtime_get_sync(d);
 
        rtl8169_net_suspend(dev);
 
@@ -5384,13 +5605,16 @@ static void rtl_shutdown(struct pci_dev *pdev)
 
        spin_lock_irq(&tp->lock);
 
-       rtl8169_asic_down(ioaddr);
+       rtl8169_asic_down(tp);
 
        spin_unlock_irq(&tp->lock);
 
        if (system_state == SYSTEM_POWER_OFF) {
-               /* WoL fails with some 8168 when the receiver is disabled. */
-               if (tp->features & RTL_FEATURE_WOL) {
+               /* WoL fails with 8168b when the receiver is disabled. */
+               if ((tp->mac_version == RTL_GIGA_MAC_VER_11 ||
+                    tp->mac_version == RTL_GIGA_MAC_VER_12 ||
+                    tp->mac_version == RTL_GIGA_MAC_VER_17) &&
+                   (tp->features & RTL_FEATURE_WOL)) {
                        pci_clear_master(pdev);
 
                        RTL_W8(ChipCmd, CmdRxEnb);
@@ -5401,6 +5625,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
                pci_wake_from_d3(pdev, true);
                pci_set_power_state(pdev, PCI_D3hot);
        }
+
+       pm_runtime_put_noidle(d);
 }
 
 static struct pci_driver rtl8169_pci_driver = {
index c4ab8a721b4ac1fde41d685e5df10006f5746756..85931cab22455da1110560e3d78d75d55f20f42c 100644 (file)
@@ -13647,9 +13647,13 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if (tg3_flag(tp, HW_TSO_1) ||
            tg3_flag(tp, HW_TSO_2) ||
            tg3_flag(tp, HW_TSO_3) ||
-           (tp->fw_needed && !tg3_flag(tp, ENABLE_ASF)))
+           tp->fw_needed) {
+               /* For firmware TSO, assume ASF is disabled.
+                * We'll disable TSO later if we discover ASF
+                * is enabled in tg3_get_eeprom_hw_cfg().
+                */
                tg3_flag_set(tp, TSO_CAPABLE);
-       else {
+       else {
                tg3_flag_clear(tp, TSO_CAPABLE);
                tg3_flag_clear(tp, TSO_BUG);
                tp->fw_needed = NULL;
@@ -13887,6 +13891,12 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
         */
        tg3_get_eeprom_hw_cfg(tp);
 
+       if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
+               tg3_flag_clear(tp, TSO_CAPABLE);
+               tg3_flag_clear(tp, TSO_BUG);
+               tp->fw_needed = NULL;
+       }
+
        if (tg3_flag(tp, ENABLE_APE)) {
                /* Allow reads and writes to the
                 * APE register and memory space.
index 864448b761201122252cb47aaccadea269627f8a..e7732508b8f1c1b79fdc81716e4005e200db7439 100644 (file)
@@ -678,7 +678,7 @@ static int sierra_net_get_fw_attr(struct usbnet *dev, u16 *datap)
                return -EIO;
        }
 
-       *datap = *attrdata;
+       *datap = le16_to_cpu(*attrdata);
 
        kfree(attrdata);
        return result;
index f1e1643dc3eb87c25d35c84a8aa1605718cffb8b..78c51ab2e9bad0ec90e368b944bc81fdafdce04d 100644 (file)
@@ -8,6 +8,7 @@
  * as published by the Free Software Foundation.
  */
 
+#include <linux/module.h>
 #include <linux/bitops.h>
 #include <linux/cdev.h>
 #include <linux/dma-mapping.h>
index a70fa89f76fdcbf20407747849ce75409f27f0c1..7bd36947deb3cba707d886d8012a57a4683790a2 100644 (file)
@@ -131,7 +131,12 @@ register_slot(acpi_handle handle, u32 lvl, void *context, void **rv)
        if (!acpi_pci_check_ejectable(pbus, handle) && !is_dock_device(handle))
                return AE_OK;
 
-       acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+       status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
+       if (ACPI_FAILURE(status)) {
+               warn("can't evaluate _ADR (%#x)\n", status);
+               return AE_OK;
+       }
+
        device = (adr >> 16) & 0xffff;
        function = adr & 0xffff;
 
index 5b3771a7a413a7df105cc8647b1e7e5f2e96194a..0d5d0bfcb6631060a946fdc4480a1d096392602a 100644 (file)
@@ -664,8 +664,10 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
 
        /* Check if setup is sensible at all */
        if (!pass &&
-           (primary != bus->number || secondary <= bus->number)) {
-               dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
+           (primary != bus->number || secondary <= bus->number ||
+            secondary > subordinate)) {
+               dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
+                        secondary, subordinate);
                broken = 1;
        }
 
index 645b0fcbb370aa76b5bc816e4509330b67f73146..61da2cd2250828068135b753848a0322426cdd08 100644 (file)
@@ -518,6 +518,7 @@ struct zfcp_port *zfcp_port_enqueue(struct zfcp_adapter *adapter, u64 wwpn,
 
        rwlock_init(&port->unit_list_lock);
        INIT_LIST_HEAD(&port->unit_list);
+       atomic_set(&port->units, 0);
 
        INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
        INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
index e8b7cee62046e994187499139eda8333d766bca2..de1bcfa23f3596cdba3bd95ed2265c10ffb43ffb 100644 (file)
@@ -38,17 +38,23 @@ void zfcp_ccw_adapter_put(struct zfcp_adapter *adapter)
        spin_unlock_irqrestore(&zfcp_ccw_adapter_ref_lock, flags);
 }
 
-static int zfcp_ccw_activate(struct ccw_device *cdev)
-
+/**
+ * zfcp_ccw_activate - activate adapter and wait for it to finish
+ * @cdev: pointer to belonging ccw device
+ * @clear: Status flags to clear.
+ * @tag: s390dbf trace record tag
+ */
+static int zfcp_ccw_activate(struct ccw_device *cdev, int clear, char *tag)
 {
        struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
        if (!adapter)
                return 0;
 
+       zfcp_erp_clear_adapter_status(adapter, clear);
        zfcp_erp_set_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING);
        zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
-                               "ccresu2");
+                               tag);
        zfcp_erp_wait(adapter);
        flush_work(&adapter->scan_work);
 
@@ -163,32 +169,47 @@ static int zfcp_ccw_set_online(struct ccw_device *cdev)
        BUG_ON(!zfcp_reqlist_isempty(adapter->req_list));
        adapter->req_no = 0;
 
-       zfcp_ccw_activate(cdev);
+       zfcp_ccw_activate(cdev, 0, "ccsonl1");
        zfcp_ccw_adapter_put(adapter);
        return 0;
 }
 
 /**
- * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * zfcp_ccw_offline_sync - shut down adapter and wait for it to finish
  * @cdev: pointer to belonging ccw device
+ * @set: Status flags to set.
+ * @tag: s390dbf trace record tag
  *
  * This function gets called by the common i/o layer and sets an adapter
  * into state offline.
  */
-static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+static int zfcp_ccw_offline_sync(struct ccw_device *cdev, int set, char *tag)
 {
        struct zfcp_adapter *adapter = zfcp_ccw_adapter_by_cdev(cdev);
 
        if (!adapter)
                return 0;
 
-       zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1");
+       zfcp_erp_set_adapter_status(adapter, set);
+       zfcp_erp_adapter_shutdown(adapter, 0, tag);
        zfcp_erp_wait(adapter);
 
        zfcp_ccw_adapter_put(adapter);
        return 0;
 }
 
+/**
+ * zfcp_ccw_set_offline - set_offline function of zfcp driver
+ * @cdev: pointer to belonging ccw device
+ *
+ * This function gets called by the common i/o layer and sets an adapter
+ * into state offline.
+ */
+static int zfcp_ccw_set_offline(struct ccw_device *cdev)
+{
+       return zfcp_ccw_offline_sync(cdev, 0, "ccsoff1");
+}
+
 /**
  * zfcp_ccw_notify - ccw notify function
  * @cdev: pointer to belonging ccw device
@@ -206,6 +227,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
 
        switch (event) {
        case CIO_GONE:
+               if (atomic_read(&adapter->status) &
+                   ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+                       zfcp_dbf_hba_basic("ccnigo1", adapter);
+                       break;
+               }
                dev_warn(&cdev->dev, "The FCP device has been detached\n");
                zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti1");
                break;
@@ -215,6 +241,11 @@ static int zfcp_ccw_notify(struct ccw_device *cdev, int event)
                zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti2");
                break;
        case CIO_OPER:
+               if (atomic_read(&adapter->status) &
+                   ZFCP_STATUS_ADAPTER_SUSPENDED) { /* notification ignore */
+                       zfcp_dbf_hba_basic("ccniop1", adapter);
+                       break;
+               }
                dev_info(&cdev->dev, "The FCP device is operational again\n");
                zfcp_erp_set_adapter_status(adapter,
                                            ZFCP_STATUS_COMMON_RUNNING);
@@ -250,6 +281,28 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
        zfcp_ccw_adapter_put(adapter);
 }
 
+static int zfcp_ccw_suspend(struct ccw_device *cdev)
+{
+       zfcp_ccw_offline_sync(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccsusp1");
+       return 0;
+}
+
+static int zfcp_ccw_thaw(struct ccw_device *cdev)
+{
+       /* trace records for thaw and final shutdown during suspend
+          can only be found in system dump until the end of suspend
+          but not after resume because it's based on the memory image
+          right after the very first suspend (freeze) callback */
+       zfcp_ccw_activate(cdev, 0, "ccthaw1");
+       return 0;
+}
+
+static int zfcp_ccw_resume(struct ccw_device *cdev)
+{
+       zfcp_ccw_activate(cdev, ZFCP_STATUS_ADAPTER_SUSPENDED, "ccresu1");
+       return 0;
+}
+
 struct ccw_driver zfcp_ccw_driver = {
        .driver = {
                .owner  = THIS_MODULE,
@@ -262,7 +315,7 @@ struct ccw_driver zfcp_ccw_driver = {
        .set_offline = zfcp_ccw_set_offline,
        .notify      = zfcp_ccw_notify,
        .shutdown    = zfcp_ccw_shutdown,
-       .freeze      = zfcp_ccw_set_offline,
-       .thaw        = zfcp_ccw_activate,
-       .restore     = zfcp_ccw_activate,
+       .freeze      = zfcp_ccw_suspend,
+       .thaw        = zfcp_ccw_thaw,
+       .restore     = zfcp_ccw_resume,
 };
index fab2c2592a9744e400971b73c264fd9f2c3ab7b9..8ed63aa9abeafd5328f383bae99b26962086cbc6 100644 (file)
@@ -293,7 +293,7 @@ void zfcp_cfdc_adapter_access_changed(struct zfcp_adapter *adapter)
        }
        read_unlock_irqrestore(&adapter->port_list_lock, flags);
 
-       shost_for_each_device(sdev, port->adapter->scsi_host) {
+       shost_for_each_device(sdev, adapter->scsi_host) {
                zfcp_sdev = sdev_to_zfcp(sdev);
                status = atomic_read(&zfcp_sdev->status);
                if ((status & ZFCP_STATUS_COMMON_ACCESS_DENIED) ||
index 96d1462e0bf5bddc8719e603702afb491981df3c..8b18dc04f0683263aa43de45ab306c169e411f8c 100644 (file)
@@ -163,6 +163,26 @@ void zfcp_dbf_hba_bit_err(char *tag, struct zfcp_fsf_req *req)
        spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
+/**
+ * zfcp_dbf_hba_basic - trace event for basic adapter events
+ * @adapter: pointer to struct zfcp_adapter
+ */
+void zfcp_dbf_hba_basic(char *tag, struct zfcp_adapter *adapter)
+{
+       struct zfcp_dbf *dbf = adapter->dbf;
+       struct zfcp_dbf_hba *rec = &dbf->hba_buf;
+       unsigned long flags;
+
+       spin_lock_irqsave(&dbf->hba_lock, flags);
+       memset(rec, 0, sizeof(*rec));
+
+       memcpy(rec->tag, tag, ZFCP_DBF_TAG_LEN);
+       rec->id = ZFCP_DBF_HBA_BASIC;
+
+       debug_event(dbf->hba, 1, rec, sizeof(*rec));
+       spin_unlock_irqrestore(&dbf->hba_lock, flags);
+}
+
 static void zfcp_dbf_set_common(struct zfcp_dbf_rec *rec,
                                struct zfcp_adapter *adapter,
                                struct zfcp_port *port,
index 714f087eb7a96bdf973b43665636d14df9adc7a9..3ac7a4b30dd910ef6f59c4ada70966d66694c90e 100644 (file)
@@ -154,6 +154,7 @@ enum zfcp_dbf_hba_id {
        ZFCP_DBF_HBA_RES        = 1,
        ZFCP_DBF_HBA_USS        = 2,
        ZFCP_DBF_HBA_BIT        = 3,
+       ZFCP_DBF_HBA_BASIC      = 4,
 };
 
 /**
index 527ba48eea5762563c981e6a725b7c91e191f33f..ebbf7606c13c33437d7e5133848e46101150299d 100644 (file)
@@ -76,6 +76,7 @@ struct zfcp_reqlist;
 #define ZFCP_STATUS_ADAPTER_SIOSL_ISSUED       0x00000004
 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK         0x00000008
 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT      0x00000010
+#define ZFCP_STATUS_ADAPTER_SUSPENDED          0x00000040
 #define ZFCP_STATUS_ADAPTER_ERP_PENDING                0x00000100
 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED     0x00000200
 #define ZFCP_STATUS_ADAPTER_DATA_DIV_ENABLED   0x00000400
@@ -203,6 +204,7 @@ struct zfcp_port {
        struct zfcp_adapter    *adapter;       /* adapter used to access port */
        struct list_head        unit_list;      /* head of logical unit list */
        rwlock_t                unit_list_lock; /* unit list lock */
+       atomic_t                units;         /* zfcp_unit count */
        atomic_t               status;         /* status of this remote port */
        u64                    wwnn;           /* WWNN if known */
        u64                    wwpn;           /* WWPN */
index 03627cfd81cddff765c0d4a64468ad9f44535d63..3ad6399cc8bfdbd05103b918b6902abee4d2d717 100644 (file)
@@ -53,6 +53,7 @@ extern void zfcp_dbf_hba_fsf_uss(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_fsf_res(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_bit_err(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_hba_basic(char *, struct zfcp_adapter *);
 extern void zfcp_dbf_san_req(char *, struct zfcp_fsf_req *, u32);
 extern void zfcp_dbf_san_res(char *, struct zfcp_fsf_req *);
 extern void zfcp_dbf_san_in_els(char *, struct zfcp_fsf_req *);
@@ -157,6 +158,7 @@ extern void zfcp_scsi_dif_sense_error(struct scsi_cmnd *, int);
 extern struct attribute_group zfcp_sysfs_unit_attrs;
 extern struct attribute_group zfcp_sysfs_adapter_attrs;
 extern struct attribute_group zfcp_sysfs_port_attrs;
+extern struct mutex zfcp_sysfs_port_units_mutex;
 extern struct device_attribute *zfcp_sysfs_sdev_attrs[];
 extern struct device_attribute *zfcp_sysfs_shost_attrs[];
 
index 022fb6a8cb8339a6a8854b69ffc7021a3bdce1cf..6e73bfe92daad141f454aeb3a3e2d8075fe1ebfb 100644 (file)
@@ -219,7 +219,7 @@ static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req)
                return;
        }
 
-       zfcp_dbf_hba_fsf_uss("fssrh_2", req);
+       zfcp_dbf_hba_fsf_uss("fssrh_4", req);
 
        switch (sr_buf->status_type) {
        case FSF_STATUS_READ_PORT_CLOSED:
@@ -771,12 +771,14 @@ out:
 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
 {
        struct scsi_device *sdev = req->data;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+       struct zfcp_scsi_dev *zfcp_sdev;
        union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual;
 
        if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                return;
 
+       zfcp_sdev = sdev_to_zfcp(sdev);
+
        switch (req->qtcb->header.fsf_status) {
        case FSF_PORT_HANDLE_NOT_VALID:
                if (fsq->word[0] == fsq->word[1]) {
@@ -885,7 +887,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
 
        switch (header->fsf_status) {
         case FSF_GOOD:
-               zfcp_dbf_san_res("fsscth1", req);
+               zfcp_dbf_san_res("fsscth2", req);
                ct->status = 0;
                break;
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1730,13 +1732,15 @@ static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req)
 {
        struct zfcp_adapter *adapter = req->adapter;
        struct scsi_device *sdev = req->data;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+       struct zfcp_scsi_dev *zfcp_sdev;
        struct fsf_qtcb_header *header = &req->qtcb->header;
        struct fsf_qtcb_bottom_support *bottom = &req->qtcb->bottom.support;
 
        if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                return;
 
+       zfcp_sdev = sdev_to_zfcp(sdev);
+
        atomic_clear_mask(ZFCP_STATUS_COMMON_ACCESS_DENIED |
                          ZFCP_STATUS_COMMON_ACCESS_BOXED |
                          ZFCP_STATUS_LUN_SHARED |
@@ -1847,11 +1851,13 @@ out:
 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req)
 {
        struct scsi_device *sdev = req->data;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+       struct zfcp_scsi_dev *zfcp_sdev;
 
        if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
                return;
 
+       zfcp_sdev = sdev_to_zfcp(sdev);
+
        switch (req->qtcb->header.fsf_status) {
        case FSF_PORT_HANDLE_NOT_VALID:
                zfcp_erp_adapter_reopen(zfcp_sdev->port->adapter, 0, "fscuh_1");
@@ -1941,7 +1947,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 {
        struct fsf_qual_latency_info *lat_in;
        struct latency_cont *lat = NULL;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(scsi->device);
+       struct zfcp_scsi_dev *zfcp_sdev;
        struct zfcp_blk_drv_data blktrc;
        int ticks = req->adapter->timer_ticks;
 
@@ -1956,6 +1962,7 @@ static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi)
 
        if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA &&
            !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) {
+               zfcp_sdev = sdev_to_zfcp(scsi->device);
                blktrc.flags |= ZFCP_BLK_LAT_VALID;
                blktrc.channel_lat = lat_in->channel_lat * ticks;
                blktrc.fabric_lat = lat_in->fabric_lat * ticks;
@@ -1993,12 +2000,14 @@ static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req)
 {
        struct scsi_cmnd *scmnd = req->data;
        struct scsi_device *sdev = scmnd->device;
-       struct zfcp_scsi_dev *zfcp_sdev = sdev_to_zfcp(sdev);
+       struct zfcp_scsi_dev *zfcp_sdev;
        struct fsf_qtcb_header *header = &req->qtcb->header;
 
        if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR))
                return;
 
+       zfcp_sdev = sdev_to_zfcp(sdev);
+
        switch (header->fsf_status) {
        case FSF_HANDLE_MISMATCH:
        case FSF_PORT_HANDLE_NOT_VALID:
index cdc4ff78a7baf787e36958b84c208654d97fa9c1..9e62210b294f46d460e340429931085068c8f638 100644 (file)
@@ -227,6 +227,8 @@ static ssize_t zfcp_sysfs_port_rescan_store(struct device *dev,
 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
                     zfcp_sysfs_port_rescan_store);
 
+DEFINE_MUTEX(zfcp_sysfs_port_units_mutex);
+
 static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
                                            struct device_attribute *attr,
                                            const char *buf, size_t count)
@@ -249,6 +251,16 @@ static ssize_t zfcp_sysfs_port_remove_store(struct device *dev,
        else
                retval = 0;
 
+       mutex_lock(&zfcp_sysfs_port_units_mutex);
+       if (atomic_read(&port->units) > 0) {
+               retval = -EBUSY;
+               mutex_unlock(&zfcp_sysfs_port_units_mutex);
+               goto out;
+       }
+       /* port is about to be removed, so no more unit_add */
+       atomic_set(&port->units, -1);
+       mutex_unlock(&zfcp_sysfs_port_units_mutex);
+
        write_lock_irq(&adapter->port_list_lock);
        list_del(&port->list);
        write_unlock_irq(&adapter->port_list_lock);
@@ -289,12 +301,14 @@ static ssize_t zfcp_sysfs_unit_add_store(struct device *dev,
 {
        struct zfcp_port *port = container_of(dev, struct zfcp_port, dev);
        u64 fcp_lun;
+       int retval;
 
        if (strict_strtoull(buf, 0, (unsigned long long *) &fcp_lun))
                return -EINVAL;
 
-       if (zfcp_unit_add(port, fcp_lun))
-               return -EINVAL;
+       retval = zfcp_unit_add(port, fcp_lun);
+       if (retval)
+               return retval;
 
        return count;
 }
index 20796ebc33cec3435cb5b2a2de297580e534fb8c..4e6a5356bdbd5c14708f347d7e4317518a821b58 100644 (file)
@@ -104,7 +104,7 @@ static void zfcp_unit_release(struct device *dev)
 {
        struct zfcp_unit *unit = container_of(dev, struct zfcp_unit, dev);
 
-       put_device(&unit->port->dev);
+       atomic_dec(&unit->port->units);
        kfree(unit);
 }
 
@@ -119,16 +119,27 @@ static void zfcp_unit_release(struct device *dev)
 int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
 {
        struct zfcp_unit *unit;
+       int retval = 0;
+
+       mutex_lock(&zfcp_sysfs_port_units_mutex);
+       if (atomic_read(&port->units) == -1) {
+               /* port is already gone */
+               retval = -ENODEV;
+               goto out;
+       }
 
        unit = zfcp_unit_find(port, fcp_lun);
        if (unit) {
                put_device(&unit->dev);
-               return -EEXIST;
+               retval = -EEXIST;
+               goto out;
        }
 
        unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
-       if (!unit)
-               return -ENOMEM;
+       if (!unit) {
+               retval = -ENOMEM;
+               goto out;
+       }
 
        unit->port = port;
        unit->fcp_lun = fcp_lun;
@@ -139,28 +150,33 @@ int zfcp_unit_add(struct zfcp_port *port, u64 fcp_lun)
        if (dev_set_name(&unit->dev, "0x%016llx",
                         (unsigned long long) fcp_lun)) {
                kfree(unit);
-               return -ENOMEM;
+               retval = -ENOMEM;
+               goto out;
        }
 
-       get_device(&port->dev);
-
        if (device_register(&unit->dev)) {
                put_device(&unit->dev);
-               return -ENOMEM;
+               retval = -ENOMEM;
+               goto out;
        }
 
        if (sysfs_create_group(&unit->dev.kobj, &zfcp_sysfs_unit_attrs)) {
                device_unregister(&unit->dev);
-               return -EINVAL;
+               retval = -EINVAL;
+               goto out;
        }
 
+       atomic_inc(&port->units); /* under zfcp_sysfs_port_units_mutex ! */
+
        write_lock_irq(&port->unit_list_lock);
        list_add_tail(&unit->list, &port->unit_list);
        write_unlock_irq(&port->unit_list_lock);
 
        zfcp_unit_scsi_scan(unit);
 
-       return 0;
+out:
+       mutex_unlock(&zfcp_sysfs_port_units_mutex);
+       return retval;
 }
 
 /**
index 7e6eca4a125e91f75c55280dbbc34ee3d03a9010..59fc5a1fdae0babf2f9581cacd2e7b552d26e422 100644 (file)
@@ -1174,7 +1174,16 @@ wait_io1:
        outw(val, tmport);
        outb(2, 0x80);
 TCM_SYNC:
-       udelay(0x800);
+       /*
+        * The funny division into multiple delays is to accommodate
+        * arches like ARM where udelay() multiplies its argument by
+        * a large number to initialize a loop counter.  To avoid
+        * overflow, the maximum supported udelay is 2000 microseconds.
+        *
+        * XXX it would be more polite to find a way to use msleep()
+        */
+       mdelay(2);
+       udelay(48);
        if ((inb(tmport) & 0x80) == 0x00) {     /* bsy ? */
                outw(0, tmport--);
                outb(0, tmport);
index 18fee6daecd592101d2e87f94f56467991470e13..1dbf758c49647585fe6b2bc866be4d7c966a3857 100644 (file)
@@ -5151,6 +5151,7 @@ static int ext4_do_update_inode(handle_t *handle,
        struct ext4_inode_info *ei = EXT4_I(inode);
        struct buffer_head *bh = iloc->bh;
        int err = 0, rc, block;
+       int need_datasync = 0;
 
        /* For fields not not tracking in the in-memory inode,
         * initialise them to zero for new inodes. */
@@ -5199,7 +5200,10 @@ static int ext4_do_update_inode(handle_t *handle,
                raw_inode->i_file_acl_high =
                        cpu_to_le16(ei->i_file_acl >> 32);
        raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
-       ext4_isize_set(raw_inode, ei->i_disksize);
+       if (ei->i_disksize != ext4_isize(raw_inode)) {
+               ext4_isize_set(raw_inode, ei->i_disksize);
+               need_datasync = 1;
+       }
        if (ei->i_disksize > 0x7fffffffULL) {
                struct super_block *sb = inode->i_sb;
                if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
@@ -5252,7 +5256,7 @@ static int ext4_do_update_inode(handle_t *handle,
                err = rc;
        ext4_clear_inode_state(inode, EXT4_STATE_NEW);
 
-       ext4_update_inode_fsync_trans(handle, inode, 0);
+       ext4_update_inode_fsync_trans(handle, inode, need_datasync);
 out_brelse:
        brelse(bh);
        ext4_std_error(inode->i_sb, err);
index f57455a1b1b281bdf21e12f63bc46087abe58194..72f97326baa405eaeb807b7a83b20c22838cd399 100644 (file)
@@ -1209,7 +1209,12 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
                        orig_inode->i_ino, donor_inode->i_ino);
                return -EINVAL;
        }
-
+       /* TODO: Swapping blocks between inodes with full data
+          journaling enabled is a non-obvious task */
+       if (ext4_should_journal_data(orig_inode) ||
+           ext4_should_journal_data(donor_inode)) {
+               return -EINVAL;
+       }
        /* Protect orig and donor inodes against a truncate */
        ret1 = mext_inode_double_lock(orig_inode, donor_inode);
        if (ret1 < 0)
index 3d36d5a1e19a108104252bd435a2d9a6543e226b..78585fc0a27abb66138d3ce2fd90762830e74bff 100644 (file)
@@ -1799,9 +1799,7 @@ retry:
        err = PTR_ERR(inode);
        if (!IS_ERR(inode)) {
                init_special_inode(inode, inode->i_mode, rdev);
-#ifdef CONFIG_EXT4_FS_XATTR
                inode->i_op = &ext4_special_inode_operations;
-#endif
                err = ext4_add_nondir(handle, dentry, inode);
        }
        ext4_journal_stop(handle);
index 7978eec1b7d9964420c2e854afba51a8f9cbf8f5..3e8f2f705b37f7005ccf7e5e70c7b88b571229c8 100644 (file)
@@ -188,7 +188,7 @@ struct sp_node {
 
 struct shared_policy {
        struct rb_root root;
-       spinlock_t lock;
+       struct mutex mutex;
 };
 
 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
index 22e61fdf75a2bd46a656e164e046a006acd43343..28e493b5b94c9ba6d946d728a0908c84e34294db 100644 (file)
@@ -84,6 +84,8 @@ struct xfrm_replay_state {
        __u32   bitmap;
 };
 
+#define XFRMA_REPLAY_ESN_MAX   4096
+
 struct xfrm_replay_state_esn {
        unsigned int    bmp_len;
        __u32           oseq;
index b203e14d26b7f59a5f884b87183d3ecb74447d2a..921f6270a20c845b55c902f714514ef5337ef613 100644 (file)
@@ -269,6 +269,9 @@ struct xfrm_replay {
        int     (*check)(struct xfrm_state *x,
                         struct sk_buff *skb,
                         __be32 net_seq);
+       int     (*recheck)(struct xfrm_state *x,
+                          struct sk_buff *skb,
+                          __be32 net_seq);
        void    (*notify)(struct xfrm_state *x, int event);
        int     (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
 };
index b2e84bd3ceb91e92e85050617046c3185a8b990e..6cbe0330249d13289634c520ec0643e776b85dcd 100644 (file)
@@ -2080,6 +2080,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
  * (of no affect) on systems that are actively using CPU hotplug
  * but making no active use of cpusets.
  *
+ * The only exception to this is suspend/resume, where we don't
+ * modify cpusets at all.
+ *
  * This routine ensures that top_cpuset.cpus_allowed tracks
  * cpu_active_mask on each CPU hotplug (cpuhp) event.
  *
index ba06207b1dd3bf9f9d42bc8800998b80b909c02d..fe7a9b090f96a8fdb40b70a8402d06e4c41dadb5 100644 (file)
@@ -283,7 +283,9 @@ cpu_has_callbacks_ready_to_invoke(struct rcu_data *rdp)
 static int
 cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       return *rdp->nxttail[RCU_DONE_TAIL] && !rcu_gp_in_progress(rsp);
+       return *rdp->nxttail[RCU_DONE_TAIL +
+                            ACCESS_ONCE(rsp->completed) != rdp->completed] &&
+              !rcu_gp_in_progress(rsp);
 }
 
 /*
index bbe2afd79d7b807f7f85f28eb5a402ff3a704666..c4743ee8bcbc4249e473a29de5cf7731e774e854 100644 (file)
@@ -7778,34 +7778,66 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
 }
 #endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
 
+static int num_cpus_frozen;    /* used to mark begin/end of suspend/resume */
+
 /*
  * Update cpusets according to cpu_active mask.  If cpusets are
  * disabled, cpuset_update_active_cpus() becomes a simple wrapper
  * around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore it back to its original state upon resume anyway.
  */
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
 {
-       switch (action & ~CPU_TASKS_FROZEN) {
+       switch (action) {
+       case CPU_ONLINE_FROZEN:
+       case CPU_DOWN_FAILED_FROZEN:
+
+               /*
+                * num_cpus_frozen tracks how many CPUs are involved in suspend
+                * resume sequence. As long as this is not the last online
+                * operation in the resume sequence, just build a single sched
+                * domain, ignoring cpusets.
+                */
+               num_cpus_frozen--;
+               if (likely(num_cpus_frozen)) {
+                       partition_sched_domains(1, NULL, NULL);
+                       break;
+               }
+
+               /*
+                * This is the last CPU online operation. So fall through and
+                * restore the original sched domains by considering the
+                * cpuset configurations.
+                */
+
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                cpuset_update_active_cpus();
-               return NOTIFY_OK;
+               break;
        default:
                return NOTIFY_DONE;
        }
+       return NOTIFY_OK;
 }
 
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
                               void *hcpu)
 {
-       switch (action & ~CPU_TASKS_FROZEN) {
+       switch (action) {
        case CPU_DOWN_PREPARE:
                cpuset_update_active_cpus();
-               return NOTIFY_OK;
+               break;
+       case CPU_DOWN_PREPARE_FROZEN:
+               num_cpus_frozen++;
+               partition_sched_domains(1, NULL, NULL);
+               break;
        default:
                return NOTIFY_DONE;
        }
+       return NOTIFY_OK;
 }
 
 static int update_runtime(struct notifier_block *nfb,
index 61dfac580989a550124054bbc00e32642772ecc4..1f4f15c409876253f9e8d49bda08fcc0d18045e1 100644 (file)
@@ -351,6 +351,7 @@ void kernel_restart(char *cmd)
        restart_dbg("%s->%d->cmd=%s",__FUNCTION__,__LINE__,cmd);
        
        kernel_restart_prepare(cmd);
+       disable_nonboot_cpus();
        if (!cmd)
                printk(KERN_EMERG "Restarting system.\n");
        else
index 00c0bad5060954f9fcf5a7f1c8faeb6a5b8d7d3f..aef945275953c41dd88c92fe57403787c22391c7 100644 (file)
@@ -1868,7 +1868,9 @@ __acquires(&gcwq->lock)
 
        spin_unlock_irq(&gcwq->lock);
 
+       smp_wmb();      /* paired with test_and_set_bit(PENDING) */
        work_clear_pending(work);
+
        lock_map_acquire_read(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
        trace_workqueue_execute_start(work);
index f879033d98229450a7a88d891c16319cc213f134..433d89bd9d899bb538ff1fa2546f9888937918ec 100644 (file)
--- a/lib/gcd.c
+++ b/lib/gcd.c
@@ -9,6 +9,9 @@ unsigned long gcd(unsigned long a, unsigned long b)
 
        if (a < b)
                swap(a, b);
+
+       if (!b)
+               return a;
        while ((r = a % b) != 0) {
                a = b;
                b = r;
index 3f3cc5622c015bff2c095e953eccfff23e0d5f5b..5dce7d46f799a5e1aa0d86e0eada52cd34728c86 100644 (file)
@@ -606,6 +606,42 @@ check_range(struct mm_struct *mm, unsigned long start, unsigned long end,
        return first;
 }
 
+/*
+ * Apply policy to a single VMA
+ * This must be called with the mmap_sem held for writing.
+ */
+static int vma_replace_policy(struct vm_area_struct *vma,
+                                               struct mempolicy *pol)
+{
+       int err;
+       struct mempolicy *old;
+       struct mempolicy *new;
+
+       pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
+                vma->vm_start, vma->vm_end, vma->vm_pgoff,
+                vma->vm_ops, vma->vm_file,
+                vma->vm_ops ? vma->vm_ops->set_policy : NULL);
+
+       new = mpol_dup(pol);
+       if (IS_ERR(new))
+               return PTR_ERR(new);
+
+       if (vma->vm_ops && vma->vm_ops->set_policy) {
+               err = vma->vm_ops->set_policy(vma, new);
+               if (err)
+                       goto err_out;
+       }
+
+       old = vma->vm_policy;
+       vma->vm_policy = new; /* protected by mmap_sem */
+       mpol_put(old);
+
+       return 0;
+ err_out:
+       mpol_put(new);
+       return err;
+}
+
 /* Step 2: apply policy to a range and do splits. */
 static int mbind_range(struct mm_struct *mm, unsigned long start,
                       unsigned long end, struct mempolicy *new_pol)
@@ -645,23 +681,9 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
                        if (err)
                                goto out;
                }
-
-               /*
-                * Apply policy to a single VMA. The reference counting of
-                * policy for vma_policy linkages has already been handled by
-                * vma_merge and split_vma as necessary. If this is a shared
-                * policy then ->set_policy will increment the reference count
-                * for an sp node.
-                */
-               pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
-                       vma->vm_start, vma->vm_end, vma->vm_pgoff,
-                       vma->vm_ops, vma->vm_file,
-                       vma->vm_ops ? vma->vm_ops->set_policy : NULL);
-               if (vma->vm_ops && vma->vm_ops->set_policy) {
-                       err = vma->vm_ops->set_policy(vma, new_pol);
-                       if (err)
-                               goto out;
-               }
+               err = vma_replace_policy(vma, new_pol);
+               if (err)
+                       goto out;
        }
 
  out:
@@ -1489,8 +1511,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
                                                                        addr);
                        if (vpol)
                                pol = vpol;
-               } else if (vma->vm_policy)
+               } else if (vma->vm_policy) {
                        pol = vma->vm_policy;
+
+                       /*
+                        * shmem_alloc_page() passes MPOL_F_SHARED policy with
+                        * a pseudo vma whose vma->vm_ops=NULL. Take a reference
+                        * count on these policies which will be dropped by
+                        * mpol_cond_put() later
+                        */
+                       if (mpol_needs_cond_ref(pol))
+                               mpol_get(pol);
+               }
        }
        if (!pol)
                pol = &default_policy;
@@ -1999,7 +2031,7 @@ int __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  */
 
 /* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/* Caller holds sp->mutex */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2063,36 +2095,50 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
        if (!sp->root.rb_node)
                return NULL;
-       spin_lock(&sp->lock);
+       mutex_lock(&sp->mutex);
        sn = sp_lookup(sp, idx, idx+1);
        if (sn) {
                mpol_get(sn->policy);
                pol = sn->policy;
        }
-       spin_unlock(&sp->lock);
+       mutex_unlock(&sp->mutex);
        return pol;
 }
 
+static void sp_free(struct sp_node *n)
+{
+       mpol_put(n->policy);
+       kmem_cache_free(sn_cache, n);
+}
+
 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
 {
        pr_debug("deleting %lx-l%lx\n", n->start, n->end);
        rb_erase(&n->nd, &sp->root);
-       mpol_put(n->policy);
-       kmem_cache_free(sn_cache, n);
+       sp_free(n);
 }
 
 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
                                struct mempolicy *pol)
 {
-       struct sp_node *n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
+       struct sp_node *n;
+       struct mempolicy *newpol;
 
+       n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
        if (!n)
                return NULL;
+
+       newpol = mpol_dup(pol);
+       if (IS_ERR(newpol)) {
+               kmem_cache_free(sn_cache, n);
+               return NULL;
+       }
+       newpol->flags |= MPOL_F_SHARED;
+
        n->start = start;
        n->end = end;
-       mpol_get(pol);
-       pol->flags |= MPOL_F_SHARED;    /* for unref */
-       n->policy = pol;
+       n->policy = newpol;
+
        return n;
 }
 
@@ -2100,10 +2146,10 @@ static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
                                 unsigned long end, struct sp_node *new)
 {
-       struct sp_node *n, *new2 = NULL;
+       struct sp_node *n;
+       int ret = 0;
 
-restart:
-       spin_lock(&sp->lock);
+       mutex_lock(&sp->mutex);
        n = sp_lookup(sp, start, end);
        /* Take care of old policies in the same range. */
        while (n && n->start < end) {
@@ -2116,16 +2162,14 @@ restart:
                } else {
                        /* Old policy spanning whole new range. */
                        if (n->end > end) {
+                               struct sp_node *new2;
+                               new2 = sp_alloc(end, n->end, n->policy);
                                if (!new2) {
-                                       spin_unlock(&sp->lock);
-                                       new2 = sp_alloc(end, n->end, n->policy);
-                                       if (!new2)
-                                               return -ENOMEM;
-                                       goto restart;
+                                       ret = -ENOMEM;
+                                       goto out;
                                }
                                n->end = start;
                                sp_insert(sp, new2);
-                               new2 = NULL;
                                break;
                        } else
                                n->end = start;
@@ -2136,12 +2180,9 @@ restart:
        }
        if (new)
                sp_insert(sp, new);
-       spin_unlock(&sp->lock);
-       if (new2) {
-               mpol_put(new2->policy);
-               kmem_cache_free(sn_cache, new2);
-       }
-       return 0;
+out:
+       mutex_unlock(&sp->mutex);
+       return ret;
 }
 
 /**
@@ -2159,7 +2200,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
        int ret;
 
        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
-       spin_lock_init(&sp->lock);
+       mutex_init(&sp->mutex);
 
        if (mpol) {
                struct vm_area_struct pvma;
@@ -2213,7 +2254,7 @@ int mpol_set_shared_policy(struct shared_policy *info,
        }
        err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
        if (err && new)
-               kmem_cache_free(sn_cache, new);
+               sp_free(new);
        return err;
 }
 
@@ -2225,16 +2266,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
        if (!p->root.rb_node)
                return;
-       spin_lock(&p->lock);
+       mutex_lock(&p->mutex);
        next = rb_first(&p->root);
        while (next) {
                n = rb_entry(next, struct sp_node, nd);
                next = rb_next(&n->nd);
-               rb_erase(&n->nd, &p->root);
-               mpol_put(n->policy);
-               kmem_cache_free(sn_cache, n);
+               sp_delete(p, n);
        }
-       spin_unlock(&p->lock);
+       mutex_unlock(&p->mutex);
 }
 
 /* assumes fs == KERNEL_DS */
index e13f22efaad741bfb0268ba577c3d34807769201..3e9829f398887acb01a95e0d10402a1cbfb06157 100644 (file)
@@ -398,11 +398,12 @@ invalidate_complete_page2(struct address_space *mapping, struct page *page)
        if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;
 
+       clear_page_mlock(page);
+
        spin_lock_irq(&mapping->tree_lock);
        if (PageDirty(page))
                goto failed;
 
-       clear_page_mlock(page);
        BUG_ON(page_has_private(page));
        __delete_from_page_cache(page);
        spin_unlock_irq(&mapping->tree_lock);
index 27263fb15642d884622c0567557aea45bc292020..c177f9e97639a2a08fb3fc5589946352005fd1fb 100644 (file)
@@ -106,7 +106,6 @@ static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
                return NULL;
        memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
        skb->mac_header += VLAN_HLEN;
-       skb_reset_mac_len(skb);
        return skb;
 }
 
@@ -173,6 +172,8 @@ struct sk_buff *vlan_untag(struct sk_buff *skb)
 
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
+       skb_reset_mac_len(skb);
+
        return skb;
 
 err_free:
index d8bc889b50c8189abb149a9e90704b410fe2a16c..5b84eafb1a6305189bcb25125379780f4287f38a 100644 (file)
@@ -2038,7 +2038,8 @@ static bool can_checksum_protocol(unsigned long features, __be16 protocol)
 
 static u32 harmonize_features(struct sk_buff *skb, __be16 protocol, u32 features)
 {
-       if (!can_checksum_protocol(features, protocol)) {
+       if (skb->ip_summed != CHECKSUM_NONE &&
+           !can_checksum_protocol(features, protocol)) {
                features &= ~NETIF_F_ALL_CSUM;
                features &= ~NETIF_F_SG;
        } else if (illegal_highdma(skb->dev, skb)) {
@@ -2559,16 +2560,17 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
        poff = proto_ports_offset(ip_proto);
        if (poff >= 0) {
                nhoff += ihl * 4 + poff;
-               if (pskb_may_pull(skb, nhoff + 4)) {
+               if (pskb_may_pull(skb, nhoff + 4))
                        ports.v32 = * (__force u32 *) (skb->data + nhoff);
-                       if (ports.v16[1] < ports.v16[0])
-                               swap(ports.v16[0], ports.v16[1]);
-               }
        }
 
        /* get a consistent hash (same value on both flow directions) */
-       if (addr2 < addr1)
+       if (addr2 < addr1 ||
+           (addr2 == addr1 &&
+            ports.v16[1] < ports.v16[0])) {
                swap(addr1, addr2);
+               swap(ports.v16[0], ports.v16[1]);
+       }
 
        hash = jhash_3words(addr1, addr2, ports.v32, hashrnd);
        if (!hash)
index 56623adfa4aeca7a01c080a2913eab5b27323835..3da11ba3b0a6807c17b607521f68afd041d6c013 100644 (file)
@@ -594,7 +594,8 @@ set_rcvbuf:
 
        case SO_KEEPALIVE:
 #ifdef CONFIG_INET
-               if (sk->sk_protocol == IPPROTO_TCP)
+               if (sk->sk_protocol == IPPROTO_TCP &&
+                   sk->sk_type == SOCK_STREAM)
                        tcp_set_keepalive(sk, valbool);
 #endif
                sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
index c9893d43242e52c83580f4676f20793fd5bdf48b..3d8bb189babbad2deaad518d483e80c8a50eb1e6 100644 (file)
@@ -130,18 +130,20 @@ found:
  *     0 - deliver
  *     1 - block
  */
-static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
+static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
 {
-       int type;
+       struct icmphdr _hdr;
+       const struct icmphdr *hdr;
 
-       if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
+       hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+                                sizeof(_hdr), &_hdr);
+       if (!hdr)
                return 1;
 
-       type = icmp_hdr(skb)->type;
-       if (type < 32) {
+       if (hdr->type < 32) {
                __u32 data = raw_sk(sk)->filter.data;
 
-               return ((1 << type) & data) != 0;
+               return ((1U << hdr->type) & data) != 0;
        }
 
        /* Do not block unknown ICMP types */
index 4a2d6f50be8592b4634b0612775a11829c132aeb..08d22449f563c19a0b93ace1af27acbaf032294f 100644 (file)
@@ -1602,8 +1602,14 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                }
 
 #ifdef CONFIG_NET_DMA
-               if (tp->ucopy.dma_chan)
-                       dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+               if (tp->ucopy.dma_chan) {
+                       if (tp->rcv_wnd == 0 &&
+                           !skb_queue_empty(&sk->sk_async_wait_queue)) {
+                               tcp_service_net_dma(sk, true);
+                               tcp_cleanup_rbuf(sk, copied);
+                       } else
+                               dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
+               }
 #endif
                if (copied >= target) {
                        /* Do not sleep, just process backlog. */
index 43242e6e610301c2a662932f3f910ab6ed6e573d..42853c4d7321e0cd05ea939a66ad3d9b9f733561 100644 (file)
@@ -84,28 +84,30 @@ static int mip6_mh_len(int type)
 
 static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
 {
-       struct ip6_mh *mh;
+       struct ip6_mh _hdr;
+       const struct ip6_mh *mh;
 
-       if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
-           !pskb_may_pull(skb, (skb_transport_offset(skb) +
-                                ((skb_transport_header(skb)[1] + 1) << 3))))
+       mh = skb_header_pointer(skb, skb_transport_offset(skb),
+                               sizeof(_hdr), &_hdr);
+       if (!mh)
                return -1;
 
-       mh = (struct ip6_mh *)skb_transport_header(skb);
+       if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
+               return -1;
 
        if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
                LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
                               mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
-               mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
-                                        skb_network_header(skb)));
+               mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
+                               skb_network_header_len(skb));
                return -1;
        }
 
        if (mh->ip6mh_proto != IPPROTO_NONE) {
                LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
                               mh->ip6mh_proto);
-               mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
-                                        skb_network_header(skb)));
+               mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
+                               skb_network_header_len(skb));
                return -1;
        }
 
index cc7313b8f7eaee42ec5385f9fa9999f1227f01db..fb812a638f8f87ffbec96690b91b1765745328f6 100644 (file)
@@ -106,21 +106,20 @@ found:
  *     0 - deliver
  *     1 - block
  */
-static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
+static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
 {
-       struct icmp6hdr *icmph;
-       struct raw6_sock *rp = raw6_sk(sk);
-
-       if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
-               __u32 *data = &rp->filter.data[0];
-               int bit_nr;
+       struct icmp6hdr *_hdr;
+       const struct icmp6hdr *hdr;
 
-               icmph = (struct icmp6hdr *) skb->data;
-               bit_nr = icmph->icmp6_type;
+       hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+                                sizeof(_hdr), &_hdr);
+       if (hdr) {
+               const __u32 *data = &raw6_sk(sk)->filter.data[0];
+               unsigned int type = hdr->icmp6_type;
 
-               return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
+               return (data[type >> 5] & (1U << (type & 31))) != 0;
        }
-       return 0;
+       return 1;
 }
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
index a6e78020eeb5720d68110c0f21ee18d7d5b2731d..6a96cad4b0b072170e6a993f65749ab49c469217 100644 (file)
@@ -1401,17 +1401,18 @@ static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
        struct fib6_table *table;
        struct net *net = dev_net(rt->rt6i_dev);
 
-       if (rt == net->ipv6.ip6_null_entry)
-               return -ENOENT;
+       if (rt == net->ipv6.ip6_null_entry) {
+               err = -ENOENT;
+               goto out;
+       }
 
        table = rt->rt6i_table;
        write_lock_bh(&table->tb6_lock);
-
        err = fib6_del(rt, info);
-       dst_release(&rt->dst);
-
        write_unlock_bh(&table->tb6_lock);
 
+out:
+       dst_release(&rt->dst);
        return err;
 }
 
index 3c55f633928eb86bb6daff37ee93630c4fd9f3bd..2cef50b5fc67da5f17861219468042f343600bf5 100644 (file)
@@ -132,7 +132,7 @@ static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb,
                printk("\n");
        }
 
-       if (!pskb_may_pull(skb, sizeof(ETH_HLEN)))
+       if (!pskb_may_pull(skb, ETH_HLEN))
                goto error;
 
        secpath_reset(skb);
index 732152f718e0dceb36826e7c4f36935c20ef603b..f1563823696a9f380e6e9af5aaf1cf48789313ab 100644 (file)
@@ -1170,7 +1170,12 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,
                msg->msg_flags |= MSG_TRUNC;
        }
 
-       skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       er = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
+       if (er < 0) {
+               skb_free_datagram(sk, skb);
+               release_sock(sk);
+               return er;
+       }
 
        if (sax != NULL) {
                sax->sax25_family = AF_NETROM;
index 24d94c097b35f34bfee4ff5aed2a724e8549176d..599f67ada1eda60c85beeeadca8f20a94aa41315 100644 (file)
@@ -250,10 +250,11 @@ cbq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr)
                        else if ((cl = defmap[res.classid & TC_PRIO_MAX]) == NULL)
                                cl = defmap[TC_PRIO_BESTEFFORT];
 
-                       if (cl == NULL || cl->level >= head->level)
+                       if (cl == NULL)
                                goto fallback;
                }
-
+               if (cl->level >= head->level)
+                       goto fallback;
 #ifdef CONFIG_NET_CLS_ACT
                switch (result) {
                case TC_ACT_QUEUED:
index 103343408593589e8f2343987f3f4b44afca5edb..f86bc727b98fb4d65a0f462acd3888d8c07aba17 100644 (file)
@@ -829,7 +829,10 @@ static void qfq_update_start(struct qfq_sched *q, struct qfq_class *cl)
                if (mask) {
                        struct qfq_group *next = qfq_ffs(q, mask);
                        if (qfq_gt(roundedF, next->F)) {
-                               cl->S = next->F;
+                               if (qfq_gt(limit, next->F))
+                                       cl->S = next->F;
+                               else /* preserve timestamp correctness */
+                                       cl->S = limit;
                                return;
                        }
                }
index 8fc4dcd294abdafbb669a18c190679ceccdd3015..32ba8d0e50e2c7d8042a649a1d1fa0518d17def6 100644 (file)
@@ -334,6 +334,25 @@ finish:
        return retval;
 }
 
+static void sctp_packet_release_owner(struct sk_buff *skb)
+{
+       sk_free(skb->sk);
+}
+
+static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
+{
+       skb_orphan(skb);
+       skb->sk = sk;
+       skb->destructor = sctp_packet_release_owner;
+
+       /*
+        * The data chunks have already been accounted for in sctp_sendmsg(),
+        * therefore only reserve a single byte to keep socket around until
+        * the packet has been transmitted.
+        */
+       atomic_inc(&sk->sk_wmem_alloc);
+}
+
 /* All packets are sent to the network through this function from
  * sctp_outq_tail().
  *
@@ -375,7 +394,7 @@ int sctp_packet_transmit(struct sctp_packet *packet)
        /* Set the owning socket so that we know where to get the
         * destination IP address.
         */
-       skb_set_owner_w(nskb, sk);
+       sctp_packet_set_owner_w(nskb, sk);
 
        if (!sctp_transport_dst_check(tp)) {
                sctp_transport_route(tp, NULL, sctp_sk(sk));
index 54a0dc2e2f8d45d7a842be98882969f696c07ec2..ab2bb42fe094b7390d5135ec6e37b9113ea8219b 100644 (file)
@@ -212,7 +212,7 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
-               if (async && x->repl->check(x, skb, seq)) {
+               if (async && x->repl->recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }
index 7372127bd61b53e41faaa895705b83b8ae04c769..6b9e4e10c84914472dee5e6e5ea840667e5800a9 100644 (file)
@@ -1761,7 +1761,7 @@ static struct dst_entry *make_blackhole(struct net *net, u16 family,
 
        if (!afinfo) {
                dst_release(dst_orig);
-               ret = ERR_PTR(-EINVAL);
+               return ERR_PTR(-EINVAL);
        } else {
                ret = afinfo->blackhole_route(net, dst_orig);
        }
index 3235023eaf4eef04b8c0c003850ee0a30c5b6ccb..379c1764ffafa974771847eaae1add2bf8b49b83 100644 (file)
@@ -437,6 +437,18 @@ err:
        return -EINVAL;
 }
 
+static int xfrm_replay_recheck_esn(struct xfrm_state *x,
+                                  struct sk_buff *skb, __be32 net_seq)
+{
+       if (unlikely(XFRM_SKB_CB(skb)->seq.input.hi !=
+                    htonl(xfrm_replay_seqhi(x, net_seq)))) {
+                       x->stats.replay_window++;
+                       return -EINVAL;
+       }
+
+       return xfrm_replay_check_esn(x, skb, net_seq);
+}
+
 static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 {
        unsigned int bitnr, nr, i;
@@ -508,6 +520,7 @@ static void xfrm_replay_advance_esn(struct xfrm_state *x, __be32 net_seq)
 static struct xfrm_replay xfrm_replay_legacy = {
        .advance        = xfrm_replay_advance,
        .check          = xfrm_replay_check,
+       .recheck        = xfrm_replay_check,
        .notify         = xfrm_replay_notify,
        .overflow       = xfrm_replay_overflow,
 };
@@ -515,6 +528,7 @@ static struct xfrm_replay xfrm_replay_legacy = {
 static struct xfrm_replay xfrm_replay_bmp = {
        .advance        = xfrm_replay_advance_bmp,
        .check          = xfrm_replay_check_bmp,
+       .recheck        = xfrm_replay_check_bmp,
        .notify         = xfrm_replay_notify_bmp,
        .overflow       = xfrm_replay_overflow_bmp,
 };
@@ -522,6 +536,7 @@ static struct xfrm_replay xfrm_replay_bmp = {
 static struct xfrm_replay xfrm_replay_esn = {
        .advance        = xfrm_replay_advance_esn,
        .check          = xfrm_replay_check_esn,
+       .recheck        = xfrm_replay_recheck_esn,
        .notify         = xfrm_replay_notify_bmp,
        .overflow       = xfrm_replay_overflow_esn,
 };
index c658cb3bc7c3ccf213fffee288f8b211a2c93293..05f82e62943bcdaf4e472d62c321d1a48b68e044 100644 (file)
@@ -123,9 +123,21 @@ static inline int verify_replay(struct xfrm_usersa_info *p,
                                struct nlattr **attrs)
 {
        struct nlattr *rt = attrs[XFRMA_REPLAY_ESN_VAL];
+       struct xfrm_replay_state_esn *rs;
 
-       if ((p->flags & XFRM_STATE_ESN) && !rt)
-               return -EINVAL;
+       if (p->flags & XFRM_STATE_ESN) {
+               if (!rt)
+                       return -EINVAL;
+
+               rs = nla_data(rt);
+
+               if (rs->bmp_len > XFRMA_REPLAY_ESN_MAX / sizeof(rs->bmp[0]) / 8)
+                       return -EINVAL;
+
+               if (nla_len(rt) < xfrm_replay_state_esn_len(rs) &&
+                   nla_len(rt) != sizeof(*rs))
+                       return -EINVAL;
+       }
 
        if (!rt)
                return 0;
@@ -370,14 +382,15 @@ static inline int xfrm_replay_verify_len(struct xfrm_replay_state_esn *replay_es
                                         struct nlattr *rp)
 {
        struct xfrm_replay_state_esn *up;
+       int ulen;
 
        if (!replay_esn || !rp)
                return 0;
 
        up = nla_data(rp);
+       ulen = xfrm_replay_state_esn_len(up);
 
-       if (xfrm_replay_state_esn_len(replay_esn) !=
-                       xfrm_replay_state_esn_len(up))
+       if (nla_len(rp) < ulen || xfrm_replay_state_esn_len(replay_esn) != ulen)
                return -EINVAL;
 
        return 0;
@@ -388,22 +401,28 @@ static int xfrm_alloc_replay_state_esn(struct xfrm_replay_state_esn **replay_esn
                                       struct nlattr *rta)
 {
        struct xfrm_replay_state_esn *p, *pp, *up;
+       int klen, ulen;
 
        if (!rta)
                return 0;
 
        up = nla_data(rta);
+       klen = xfrm_replay_state_esn_len(up);
+       ulen = nla_len(rta) >= klen ? klen : sizeof(*up);
 
-       p = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+       p = kzalloc(klen, GFP_KERNEL);
        if (!p)
                return -ENOMEM;
 
-       pp = kmemdup(up, xfrm_replay_state_esn_len(up), GFP_KERNEL);
+       pp = kzalloc(klen, GFP_KERNEL);
        if (!pp) {
                kfree(p);
                return -ENOMEM;
        }
 
+       memcpy(p, up, ulen);
+       memcpy(pp, up, ulen);
+
        *replay_esn = p;
        *preplay_esn = pp;
 
@@ -442,10 +461,11 @@ static void copy_from_user_state(struct xfrm_state *x, struct xfrm_usersa_info *
  * somehow made shareable and move it to xfrm_state.c - JHS
  *
 */
-static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs)
+static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs,
+                                 int update_esn)
 {
        struct nlattr *rp = attrs[XFRMA_REPLAY_VAL];
-       struct nlattr *re = attrs[XFRMA_REPLAY_ESN_VAL];
+       struct nlattr *re = update_esn ? attrs[XFRMA_REPLAY_ESN_VAL] : NULL;
        struct nlattr *lt = attrs[XFRMA_LTIME_VAL];
        struct nlattr *et = attrs[XFRMA_ETIMER_THRESH];
        struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH];
@@ -555,7 +575,7 @@ static struct xfrm_state *xfrm_state_construct(struct net *net,
                goto error;
 
        /* override default values from above */
-       xfrm_update_ae_params(x, attrs);
+       xfrm_update_ae_params(x, attrs, 0);
 
        return x;
 
@@ -689,6 +709,7 @@ out:
 
 static void copy_to_user_state(struct xfrm_state *x, struct xfrm_usersa_info *p)
 {
+       memset(p, 0, sizeof(*p));
        memcpy(&p->id, &x->id, sizeof(p->id));
        memcpy(&p->sel, &x->sel, sizeof(p->sel));
        memcpy(&p->lft, &x->lft, sizeof(p->lft));
@@ -742,7 +763,7 @@ static int copy_to_user_auth(struct xfrm_algo_auth *auth, struct sk_buff *skb)
                return -EMSGSIZE;
 
        algo = nla_data(nla);
-       strcpy(algo->alg_name, auth->alg_name);
+       strncpy(algo->alg_name, auth->alg_name, sizeof(algo->alg_name));
        memcpy(algo->alg_key, auth->alg_key, (auth->alg_key_len + 7) / 8);
        algo->alg_key_len = auth->alg_key_len;
 
@@ -862,6 +883,7 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
 {
        struct xfrm_dump_info info;
        struct sk_buff *skb;
+       int err;
 
        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
        if (!skb)
@@ -872,9 +894,10 @@ static struct sk_buff *xfrm_state_netlink(struct sk_buff *in_skb,
        info.nlmsg_seq = seq;
        info.nlmsg_flags = 0;
 
-       if (dump_one_state(x, 0, &info)) {
+       err = dump_one_state(x, 0, &info);
+       if (err) {
                kfree_skb(skb);
-               return NULL;
+               return ERR_PTR(err);
        }
 
        return skb;
@@ -1297,6 +1320,7 @@ static void copy_from_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy
 
 static void copy_to_user_policy(struct xfrm_policy *xp, struct xfrm_userpolicy_info *p, int dir)
 {
+       memset(p, 0, sizeof(*p));
        memcpy(&p->sel, &xp->selector, sizeof(p->sel));
        memcpy(&p->lft, &xp->lft, sizeof(p->lft));
        memcpy(&p->curlft, &xp->curlft, sizeof(p->curlft));
@@ -1401,6 +1425,7 @@ static int copy_to_user_tmpl(struct xfrm_policy *xp, struct sk_buff *skb)
                struct xfrm_user_tmpl *up = &vec[i];
                struct xfrm_tmpl *kp = &xp->xfrm_vec[i];
 
+               memset(up, 0, sizeof(*up));
                memcpy(&up->id, &kp->id, sizeof(up->id));
                up->family = kp->encap_family;
                memcpy(&up->saddr, &kp->saddr, sizeof(up->saddr));
@@ -1529,6 +1554,7 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
 {
        struct xfrm_dump_info info;
        struct sk_buff *skb;
+       int err;
 
        skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
        if (!skb)
@@ -1539,9 +1565,10 @@ static struct sk_buff *xfrm_policy_netlink(struct sk_buff *in_skb,
        info.nlmsg_seq = seq;
        info.nlmsg_flags = 0;
 
-       if (dump_one_policy(xp, dir, 0, &info) < 0) {
+       err = dump_one_policy(xp, dir, 0, &info);
+       if (err) {
                kfree_skb(skb);
-               return NULL;
+               return ERR_PTR(err);
        }
 
        return skb;
@@ -1794,7 +1821,7 @@ static int xfrm_new_ae(struct sk_buff *skb, struct nlmsghdr *nlh,
                goto out;
 
        spin_lock_bh(&x->lock);
-       xfrm_update_ae_params(x, attrs);
+       xfrm_update_ae_params(x, attrs, 1);
        spin_unlock_bh(&x->lock);
 
        c.event = nlh->nlmsg_type;
index ce8844f619cf15e39ddac8ed3304821848c68e29..2c18eddc0eb1cabc27acea81f7a8a4bd1989c463 100644 (file)
@@ -205,7 +205,7 @@ endif
 # >$< substitution to preserve $ when reloading .cmd file
 # note: when using inline perl scripts [perl -e '...$$t=1;...']
 # in $(cmd_xxx) double $$ your perl vars
-make-cmd = $(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1)))))
+make-cmd = $(subst \\,\\\\,$(subst \#,\\\#,$(subst $$,$$$$,$(call escsq,$(cmd_$(1))))))
 
 # Find any prerequisites that is newer than target or that does not exist.
 # PHONY targets skipped in both cases.
index a0da7755fceaad0236c5ef013164df8770e1a5df..5eab9481306f6acefff9e49f23ae7c44695e97b4 100644 (file)
@@ -119,6 +119,7 @@ struct loopback_pcm {
        unsigned int period_size_frac;
        unsigned long last_jiffies;
        struct timer_list timer;
+       spinlock_t timer_lock;
 };
 
 static struct platform_device *devices[SNDRV_CARDS];
@@ -169,6 +170,7 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
        unsigned long tick;
        unsigned int rate_shift = get_rate_shift(dpcm);
 
+       spin_lock(&dpcm->timer_lock);
        if (rate_shift != dpcm->pcm_rate_shift) {
                dpcm->pcm_rate_shift = rate_shift;
                dpcm->period_size_frac = frac_pos(dpcm, dpcm->pcm_period_size);
@@ -181,12 +183,15 @@ static void loopback_timer_start(struct loopback_pcm *dpcm)
        tick = (tick + dpcm->pcm_bps - 1) / dpcm->pcm_bps;
        dpcm->timer.expires = jiffies + tick;
        add_timer(&dpcm->timer);
+       spin_unlock(&dpcm->timer_lock);
 }
 
 static inline void loopback_timer_stop(struct loopback_pcm *dpcm)
 {
+       spin_lock(&dpcm->timer_lock);
        del_timer(&dpcm->timer);
        dpcm->timer.expires = 0;
+       spin_unlock(&dpcm->timer_lock);
 }
 
 #define CABLE_VALID_PLAYBACK   (1 << SNDRV_PCM_STREAM_PLAYBACK)
@@ -658,6 +663,7 @@ static int loopback_open(struct snd_pcm_substream *substream)
        dpcm->substream = substream;
        setup_timer(&dpcm->timer, loopback_timer_function,
                    (unsigned long)dpcm);
+       spin_lock_init(&dpcm->timer_lock);
 
        cable = loopback->cables[substream->number][dev];
        if (!cable) {
index 90e5005abdef32c1966dfdaf21e51d2d3c3f66b5..520a20e26fe921c80263bbbf6aff7484fdb405f7 100644 (file)
@@ -144,7 +144,7 @@ SOC_SINGLE("Playback Attenuate (-6dB) Switch", AC97_MASTER_TONE, 6, 1, 0),
 SOC_SINGLE("Bass Volume", AC97_MASTER_TONE, 8, 15, 1),
 SOC_SINGLE("Treble Volume", AC97_MASTER_TONE, 0, 15, 1),
 
-SOC_SINGLE("Capture ADC Switch", AC97_REC_GAIN, 15, 1, 1),
+SOC_SINGLE("Capture Switch", AC97_REC_GAIN, 15, 1, 1),
 SOC_ENUM("Capture Volume Steps", wm9712_enum[6]),
 SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 63, 1),
 SOC_SINGLE("Capture ZC Switch", AC97_REC_GAIN, 7, 1, 0),
index 0de7cbd99ea0440b63632ea97d163d78cae54f43..9363a8cb9e4683d0f95454b08b5757b32c425988 100644 (file)
@@ -1246,6 +1246,13 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void
                /* disable non-functional volume control */
                master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME);
                break;
+       case USB_ID(0x1130, 0xf211):
+               snd_printk(KERN_INFO
+                          "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n");
+               /* disable non-functional volume control */
+               channels = 0;
+               break;
+
        }
        if (channels > 0)
                first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
index 0b2ae8e1c02d6e62a3671de37915f69f7db482b4..7ccffb2c4e5b087faa072ebdc0cfcac4368823a9 100644 (file)
@@ -2581,6 +2581,59 @@ YAMAHA_DEVICE(0x7010, "UB99"),
        }
 },
 
+/* Microsoft XboxLive Headset/Xbox Communicator */
+{
+       USB_DEVICE(0x045e, 0x0283),
+       .bInterfaceClass = USB_CLASS_PER_INTERFACE,
+       .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) {
+               .vendor_name = "Microsoft",
+               .product_name = "XboxLive Headset/Xbox Communicator",
+               .ifnum = QUIRK_ANY_INTERFACE,
+               .type = QUIRK_COMPOSITE,
+               .data = &(const struct snd_usb_audio_quirk[]) {
+                       {
+                               /* playback */
+                               .ifnum = 0,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 1,
+                                       .iface = 0,
+                                       .altsetting = 0,
+                                       .altset_idx = 0,
+                                       .attributes = 0,
+                                       .endpoint = 0x04,
+                                       .ep_attr = 0x05,
+                                       .rates = SNDRV_PCM_RATE_CONTINUOUS,
+                                       .rate_min = 22050,
+                                       .rate_max = 22050
+                               }
+                       },
+                       {
+                               /* capture */
+                               .ifnum = 1,
+                               .type = QUIRK_AUDIO_FIXED_ENDPOINT,
+                               .data = &(const struct audioformat) {
+                                       .formats = SNDRV_PCM_FMTBIT_S16_LE,
+                                       .channels = 1,
+                                       .iface = 1,
+                                       .altsetting = 0,
+                                       .altset_idx = 0,
+                                       .attributes = 0,
+                                       .endpoint = 0x85,
+                                       .ep_attr = 0x05,
+                                       .rates = SNDRV_PCM_RATE_CONTINUOUS,
+                                       .rate_min = 16000,
+                                       .rate_max = 16000
+                               }
+                       },
+                       {
+                               .ifnum = -1
+                       }
+               }
+       }
+},
+
 {
        /*
         * Some USB MIDI devices don't have an audio control interface,