Merge tag 'sound-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 24 Nov 2012 18:32:11 +0000 (08:32 -1000)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 24 Nov 2012 18:32:11 +0000 (08:32 -1000)
Pull sound build error fix from Takashi Iwai:
 "Only a single commit for fixing the build error without CONFIG_PM in
  hda driver."

* tag 'sound-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tiwai/sound:
  ALSA: hda - Fix build without CONFIG_PM
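
For context, build breakage of this kind usually comes from code that only exists under CONFIG_PM being referenced unconditionally elsewhere. A minimal illustrative pattern (not the actual hda patch; example_suspend() is a made-up name) guards the real implementation and provides a stub for the !CONFIG_PM build:

    #include <linux/device.h>

    #ifdef CONFIG_PM
    static int example_suspend(struct device *dev)
    {
            /* real power-management work, compiled only when CONFIG_PM is set */
            return 0;
    }
    #else
    /* stub so callers of example_suspend() still build when CONFIG_PM is off */
    static inline int example_suspend(struct device *dev)
    {
            return 0;
    }
    #endif

The stub keeps every caller compiling regardless of the configuration, which is the usual way such build errors are resolved.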

144 files changed:
CREDITS
Documentation/devicetree/bindings/net/mdio-gpio.txt
Documentation/networking/vxlan.txt
MAINTAINERS
arch/alpha/kernel/osf_sys.c
arch/arm/mach-davinci/dm644x.c
arch/arm/mach-exynos/dma.c
arch/arm/mach-exynos/include/mach/map.h
arch/arm/mach-omap2/board-igep0020.c
arch/arm/mach-omap2/common-board-devices.c
arch/arm/mach-omap2/twl-common.c
arch/arm/plat-omap/i2c.c
arch/m68k/include/asm/signal.h
arch/parisc/kernel/signal32.c
arch/parisc/kernel/sys_parisc.c
arch/powerpc/boot/dts/mpc5200b.dtsi
arch/powerpc/boot/dts/o2d.dtsi
arch/powerpc/boot/dts/pcm030.dts
arch/powerpc/platforms/52xx/mpc52xx_pic.c
arch/sparc/include/asm/prom.h
arch/sparc/kernel/signal_64.c
arch/x86/boot/compressed/eboot.c
arch/x86/boot/header.S
arch/x86/include/asm/ptrace.h
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/mce_intel.c
arch/x86/kernel/entry_64.S
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/ptrace.c
arch/x86/mm/tlb.c
arch/x86/pci/ce4100.c
arch/x86/platform/ce4100/ce4100.c
block/blk-exec.c
drivers/ata/ahci_platform.c
drivers/ata/libata-acpi.c
drivers/ata/libata-core.c
drivers/ata/libata-scsi.c
drivers/ata/pata_arasan_cf.c
drivers/ata/sata_highbank.c
drivers/ata/sata_svw.c
drivers/base/power/qos.c
drivers/block/aoe/aoecmd.c
drivers/block/floppy.c
drivers/block/mtip32xx/mtip32xx.c
drivers/block/mtip32xx/mtip32xx.h
drivers/edac/amd64_edac.h
drivers/edac/edac_stub.c
drivers/edac/mce_amd_inj.c
drivers/firewire/sbp2.c
drivers/gpio/Kconfig
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-mvebu.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/engine/graph/ctxnv40.c
drivers/gpu/drm/nouveau/core/engine/graph/nv40.c
drivers/gpu/drm/nouveau/core/engine/graph/nv40.h
drivers/gpu/drm/nouveau/core/include/core/object.h
drivers/gpu/drm/nouveau/core/include/subdev/clock.h
drivers/gpu/drm/nouveau/core/subdev/bios/dcb.c
drivers/gpu/drm/nouveau/core/subdev/clock/nva3.c
drivers/gpu/drm/nouveau/core/subdev/clock/nvc0.c
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/radeon_agp.c
drivers/i2c/busses/i2c-at91.c
drivers/i2c/busses/i2c-mxs.c
drivers/i2c/busses/i2c-omap.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/input/input-mt.c
drivers/input/mousedev.c
drivers/input/touchscreen/ads7846.c
drivers/iommu/intel-iommu.c
drivers/iommu/tegra-smmu.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mtd/devices/slram.c
drivers/mtd/nand/nand_base.c
drivers/mtd/ofpart.c
drivers/mtd/onenand/onenand_base.c
drivers/net/bonding/bond_main.c
drivers/net/ethernet/8390/ne.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/xilinx/xilinx_axienet_main.c
drivers/net/ethernet/xscale/ixp4xx_eth.c
drivers/net/irda/sir_dev.c
drivers/net/phy/mdio-gpio.c
drivers/net/team/team_mode_broadcast.c
drivers/net/wan/ixp4xx_hss.c
drivers/net/wireless/ath/ath9k/hw.c
drivers/net/wireless/iwlwifi/dvm/mac80211.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/mwifiex/cmdevt.c
drivers/net/wireless/mwifiex/sdio.c
drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
drivers/net/xen-netfront.c
drivers/nfc/pn533.c
drivers/pinctrl/Kconfig
drivers/scsi/isci/request.c
drivers/scsi/scsi.c
drivers/scsi/scsi_lib.c
drivers/scsi/sd.c
drivers/scsi/sd.h
drivers/usb/storage/scsiglue.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/omapfb/omapfb-ioctl.c
drivers/xen/privcmd.c
fs/ext3/balloc.c
fs/file.c
fs/jffs2/file.c
fs/notify/fanotify/fanotify_user.c
fs/reiserfs/inode.c
fs/reiserfs/stree.c
fs/reiserfs/super.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_buf.c
include/drm/drm_pciids.h
include/linux/i2c-omap.h
include/linux/of_address.h
include/linux/spi/ads7846.h
include/net/xfrm.h
include/scsi/scsi_device.h
mm/page_alloc.c
net/core/net-sysfs.c
net/ipv4/route.c
net/ipv4/xfrm4_policy.c
net/ipv6/inet6_connection_sock.c
net/mac80211/ibss.c
net/netfilter/ipset/ip_set_hash_ip.c
net/netfilter/ipset/ip_set_hash_ipport.c
net/netfilter/ipset/ip_set_hash_ipportip.c
net/netfilter/ipset/ip_set_hash_ipportnet.c
net/netfilter/nfnetlink_cttimeout.c
net/nfc/llcp/llcp.c
scripts/sign-file
security/selinux/netnode.c

diff --git a/CREDITS b/CREDITS
index d8fe12a9421fa2a7145e7218cc7d664232d0d7ce..2346b09ca8bb9cfc3c4371eb5ad6058be818d990 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -1823,6 +1823,11 @@ S: Kattreinstr 38
 S: D-64295
 S: Germany
 
+N: Avi Kivity
+E: avi.kivity@gmail.com
+D: Kernel-based Virtual Machine (KVM)
+S: Ra'annana, Israel
+
 N: Andi Kleen
 E: andi@firstfloor.org
 U: http://www.halobates.de
index bc9549529014730bc6abd0cefba2d82824370e5e..c79bab025369af4bb6320ba0e90f7fb386942cc1 100644 (file)
@@ -8,9 +8,16 @@ gpios property as described in section VIII.1 in the following order:
 
 MDC, MDIO.
 
+Note: Each gpio-mdio bus should have an alias correctly numbered in "aliases"
+node.
+
 Example:
 
-mdio {
+aliases {
+       mdio-gpio0 = <&mdio0>;
+};
+
+mdio0: mdio {
        compatible = "virtual,mdio-gpio";
        #address-cells = <1>;
        #size-cells = <0>;
index 5b34b762d7d5139a8cf0a9a8eacf739cae279a5b..6d993510f091c2088877ccfc1edf15474c5c723f 100644 (file)
@@ -32,7 +32,7 @@ no entry is in the forwarding table.
   # ip link delete vxlan0
 
 3. Show vxlan info
-  # ip -d show vxlan0
+  # ip -d link show vxlan0
 
 It is possible to create, destroy and display the vxlan
 forwarding table using the new bridge command.
@@ -41,7 +41,7 @@ forwarding table using the new bridge command.
   # bridge fdb add to 00:17:42:8a:b4:05 dst 192.19.0.2 dev vxlan0
 
 2. Delete forwarding table entry
-  # bridge fdb delete 00:17:42:8a:b4:05
+  # bridge fdb delete 00:17:42:8a:b4:05 dev vxlan0
 
 3. Show forwarding table
   # bridge fdb show dev vxlan0
index bb0b27db673f3c5d0f2cbf4215dc3efbe2c17bd3..9386a63ea8f63b653b683379ea54c7bd91eb72ec 100644 (file)
@@ -526,17 +526,17 @@ F:        drivers/video/geode/
 F:     arch/x86/include/asm/geode.h
 
 AMD IOMMU (AMD-VI)
-M:     Joerg Roedel <joerg.roedel@amd.com>
+M:     Joerg Roedel <joro@8bytes.org>
 L:     iommu@lists.linux-foundation.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu.git
-S:     Supported
+S:     Maintained
 F:     drivers/iommu/amd_iommu*.[ch]
 F:     include/linux/amd-iommu.h
 
 AMD MICROCODE UPDATE SUPPORT
-M:     Andreas Herrmann <andreas.herrmann3@amd.com>
+M:     Andreas Herrmann <herrmann.der.user@googlemail.com>
 L:     amd64-microcode@amd64.org
-S:     Supported
+S:     Maintained
 F:     arch/x86/kernel/microcode_amd.c
 
 AMS (Apple Motion Sensor) DRIVER
@@ -841,6 +841,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/kristoffer/linux-hpc.git
 F:     arch/arm/mach-sa1100/jornada720.c
 F:     arch/arm/mach-sa1100/include/mach/jornada720.h
 
+ARM/IGEP MACHINE SUPPORT
+M:     Enric Balletbo i Serra <eballetbo@gmail.com>
+M:     Javier Martinez Canillas <javier@dowhile0.org>
+L:     linux-omap@vger.kernel.org
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-omap2/board-igep0020.c
+
 ARM/INCOME PXA270 SUPPORT
 M:     Marek Vasut <marek.vasut@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -2708,10 +2716,10 @@ F:      include/linux/edac.h
 
 EDAC-AMD64
 M:     Doug Thompson <dougthompson@xmission.com>
-M:     Borislav Petkov <borislav.petkov@amd.com>
+M:     Borislav Petkov <bp@alien8.de>
 L:     linux-edac@vger.kernel.org
 W:     bluesmoke.sourceforge.net
-S:     Supported
+S:     Maintained
 F:     drivers/edac/amd64_edac*
 
 EDAC-E752X
@@ -3753,7 +3761,7 @@ S:        Maintained
 F:     drivers/platform/x86/ideapad-laptop.c
 
 IDE/ATAPI DRIVERS
-M:     Borislav Petkov <petkovbb@gmail.com>
+M:     Borislav Petkov <bp@alien8.de>
 L:     linux-ide@vger.kernel.org
 S:     Maintained
 F:     Documentation/cdrom/ide-cd
@@ -4280,8 +4288,8 @@ F:        include/linux/lockd/
 F:     include/linux/sunrpc/
 
 KERNEL VIRTUAL MACHINE (KVM)
-M:     Avi Kivity <avi@redhat.com>
 M:     Marcelo Tosatti <mtosatti@redhat.com>
+M:     Gleb Natapov <gleb@redhat.com>
 L:     kvm@vger.kernel.org
 W:     http://kvm.qumranet.com
 S:     Supported
@@ -5413,7 +5421,7 @@ S:        Maintained
 F:     sound/drivers/opl4/
 
 OPROFILE
-M:     Robert Richter <robert.richter@amd.com>
+M:     Robert Richter <rric@kernel.org>
 L:     oprofile-list@lists.sf.net
 S:     Maintained
 F:     arch/*/include/asm/oprofile*.h
@@ -8198,7 +8206,7 @@ F:        drivers/platform/x86
 
 X86 MCE INFRASTRUCTURE
 M:     Tony Luck <tony.luck@intel.com>
-M:     Borislav Petkov <bp@amd64.org>
+M:     Borislav Petkov <bp@alien8.de>
 L:     linux-edac@vger.kernel.org
 S:     Maintained
 F:     arch/x86/kernel/cpu/mcheck/*
index 1e6956a9060809e5b1cfed409d376f2d1e726f43..14db93e4c8a83662d459a6ff1a818318dbc61eeb 100644 (file)
@@ -445,7 +445,7 @@ struct procfs_args {
  * unhappy with OSF UFS. [CHECKME]
  */
 static int
-osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
+osf_ufs_mount(const char *dirname, struct ufs_args __user *args, int flags)
 {
        int retval;
        struct cdfs_args tmp;
@@ -465,7 +465,7 @@ osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
 }
 
 static int
-osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
+osf_cdfs_mount(const char *dirname, struct cdfs_args __user *args, int flags)
 {
        int retval;
        struct cdfs_args tmp;
@@ -485,7 +485,7 @@ osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
 }
 
 static int
-osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
+osf_procfs_mount(const char *dirname, struct procfs_args __user *args, int flags)
 {
        struct procfs_args tmp;
 
index cd0c8b1e1ecfad4a868b9aae3f6180a1b1794ed1..14e9947bad6e028bc9c41aecb5e1f36b045fe79a 100644 (file)
@@ -713,8 +713,7 @@ static int dm644x_venc_setup_clock(enum vpbe_enc_timings_type type,
                break;
        case VPBE_ENC_CUSTOM_TIMINGS:
                if (pclock <= 27000000) {
-                       v |= DM644X_VPSS_MUXSEL_PLL2_MODE |
-                            DM644X_VPSS_DACCLKEN;
+                       v |= DM644X_VPSS_DACCLKEN;
                        writel(v, DAVINCI_SYSMOD_VIRT(SYSMOD_VPSS_CLKCTL));
                } else {
                        /*
index 21d568b3b1497c90bb79eb841f45595020351db9..87e07d6fc615769df8edfe0af8be1569859d5422 100644 (file)
@@ -275,6 +275,9 @@ static int __init exynos_dma_init(void)
                exynos_pdma1_pdata.nr_valid_peri =
                        ARRAY_SIZE(exynos4210_pdma1_peri);
                exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
+
+               if (samsung_rev() == EXYNOS4210_REV_0)
+                       exynos_mdma1_device.res.start = EXYNOS4_PA_S_MDMA1;
        } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
                exynos_pdma0_pdata.nr_valid_peri =
                        ARRAY_SIZE(exynos4212_pdma0_peri);
index 8480849affb922789d73b597c81b76c6778a16ba..ed4da4544cd2fded70c65dd7d5d5539635a94557 100644 (file)
@@ -90,6 +90,7 @@
 
 #define EXYNOS4_PA_MDMA0               0x10810000
 #define EXYNOS4_PA_MDMA1               0x12850000
+#define EXYNOS4_PA_S_MDMA1             0x12840000
 #define EXYNOS4_PA_PDMA0               0x12680000
 #define EXYNOS4_PA_PDMA1               0x12690000
 #define EXYNOS5_PA_MDMA0               0x10800000
index 48d5e41dfbfabe3da59ef00f7457f5012cf2ec1a..378590694447bdbdd3ed4d7862624ac5d000c402 100644 (file)
@@ -580,6 +580,11 @@ static void __init igep_wlan_bt_init(void)
        } else
                return;
 
+       /* Make sure that the GPIO pins are muxed correctly */
+       omap_mux_init_gpio(igep_wlan_bt_gpios[0].gpio, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(igep_wlan_bt_gpios[1].gpio, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(igep_wlan_bt_gpios[2].gpio, OMAP_PIN_OUTPUT);
+
        err = gpio_request_array(igep_wlan_bt_gpios,
                                 ARRAY_SIZE(igep_wlan_bt_gpios));
        if (err) {
index 48daac2581b4a154a768923681ac108c084a5c98..84551f205e46fd20d5ef47849d279643f80f3b18 100644 (file)
@@ -64,30 +64,36 @@ void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce,
        struct spi_board_info *spi_bi = &ads7846_spi_board_info;
        int err;
 
-       err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
-       if (err) {
-               pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
-               return;
-       }
+       /*
+        * If a board defines get_pendown_state() function, request the pendown
+        * GPIO and set the GPIO debounce time.
+        * If a board does not define the get_pendown_state() function, then
+        * the ads7846 driver will setup the pendown GPIO itself.
+        */
+       if (board_pdata && board_pdata->get_pendown_state) {
+               err = gpio_request_one(gpio_pendown, GPIOF_IN, "TSPenDown");
+               if (err) {
+                       pr_err("Couldn't obtain gpio for TSPenDown: %d\n", err);
+                       return;
+               }
 
-       if (gpio_debounce)
-               gpio_set_debounce(gpio_pendown, gpio_debounce);
+               if (gpio_debounce)
+                       gpio_set_debounce(gpio_pendown, gpio_debounce);
+
+               gpio_export(gpio_pendown, 0);
+       }
 
        spi_bi->bus_num = bus_num;
        spi_bi->irq     = gpio_to_irq(gpio_pendown);
 
+       ads7846_config.gpio_pendown = gpio_pendown;
+
        if (board_pdata) {
                board_pdata->gpio_pendown = gpio_pendown;
+               board_pdata->gpio_pendown_debounce = gpio_debounce;
                spi_bi->platform_data = board_pdata;
-               if (board_pdata->get_pendown_state)
-                       gpio_export(gpio_pendown, 0);
-       } else {
-               ads7846_config.gpio_pendown = gpio_pendown;
        }
 
-       if (!board_pdata || (board_pdata && !board_pdata->get_pendown_state))
-               gpio_free(gpio_pendown);
-
        spi_register_board_info(&ads7846_spi_board_info, 1);
 }
 #else
index 44c42057b61c2c292e42dbdcb5b47f3b9b65c401..a256135d8e48f59c0a6a103b2d449682caac9be2 100644 (file)
@@ -73,6 +73,7 @@ void __init omap4_pmic_init(const char *pmic_type,
 {
        /* PMIC part*/
        omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
+       omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
        omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
 
        /* Register additional devices on i2c1 bus if needed */
index a5683a84c6ee0e313cb8f9daeeb7c72106952a4e..6013831a043e320fafd5e718a558d422fa67a624 100644 (file)
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <linux/i2c.h>
+#include <linux/i2c-omap.h>
 #include <linux/slab.h>
 #include <linux/err.h>
 #include <linux/clk.h>
 
 #include <mach/irqs.h>
 #include <plat/i2c.h>
+#include <plat/omap-pm.h>
 #include <plat/omap_device.h>
 
 #define OMAP_I2C_SIZE          0x3f
@@ -127,6 +129,16 @@ static inline int omap1_i2c_add_bus(int bus_id)
 
 
 #ifdef CONFIG_ARCH_OMAP2PLUS
+/*
+ * XXX This function is a temporary compatibility wrapper - only
+ * needed until the I2C driver can be converted to call
+ * omap_pm_set_max_dev_wakeup_lat() and handle a return code.
+ */
+static void omap_pm_set_max_mpu_wakeup_lat_compat(struct device *dev, long t)
+{
+       omap_pm_set_max_mpu_wakeup_lat(dev, t);
+}
+
 static inline int omap2_i2c_add_bus(int bus_id)
 {
        int l;
@@ -158,6 +170,15 @@ static inline int omap2_i2c_add_bus(int bus_id)
        dev_attr = (struct omap_i2c_dev_attr *)oh->dev_attr;
        pdata->flags = dev_attr->flags;
 
+       /*
+        * When waiting for completion of a i2c transfer, we need to
+        * set a wake up latency constraint for the MPU. This is to
+        * ensure quick enough wakeup from idle, when transfer
+        * completes.
+        * Only omap3 has support for constraints
+        */
+       if (cpu_is_omap34xx())
+               pdata->set_mpu_wkup_lat = omap_pm_set_max_mpu_wakeup_lat_compat;
        pdev = omap_device_build(name, bus_id, oh, pdata,
                        sizeof(struct omap_i2c_bus_platform_data),
                        NULL, 0, 0);
index 67e489d8d1bd6450945b3b5a4320eaa4d1840953..2df26b57c26ada5cf34d6c608f75ebd7460f2063 100644 (file)
@@ -41,7 +41,7 @@ struct k_sigaction {
 static inline void sigaddset(sigset_t *set, int _sig)
 {
        asm ("bfset %0{%1,#1}"
-               : "+od" (*set)
+               : "+o" (*set)
                : "id" ((_sig - 1) ^ 31)
                : "cc");
 }
@@ -49,7 +49,7 @@ static inline void sigaddset(sigset_t *set, int _sig)
 static inline void sigdelset(sigset_t *set, int _sig)
 {
        asm ("bfclr %0{%1,#1}"
-               : "+od" (*set)
+               : "+o" (*set)
                : "id" ((_sig - 1) ^ 31)
                : "cc");
 }
@@ -65,7 +65,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig)
        int ret;
        asm ("bfextu %1{%2,#1},%0"
                : "=d" (ret)
-               : "od" (*set), "id" ((_sig-1) ^ 31)
+               : "o" (*set), "id" ((_sig-1) ^ 31)
                : "cc");
        return ret;
 }
index fd49aeda9eb8b0c4fef656e3dc00228c59251b99..5dede04f2f3ead372752fdbaaafceca5708b6902 100644 (file)
@@ -65,7 +65,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
 {
        compat_sigset_t s;
 
-       if (sz != sizeof *set) panic("put_sigset32()");
+       if (sz != sizeof *set)
+               return -EINVAL;
        sigset_64to32(&s, set);
 
        return copy_to_user(up, &s, sizeof s);
@@ -77,7 +78,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz)
        compat_sigset_t s;
        int r;
 
-       if (sz != sizeof *set) panic("put_sigset32()");
+       if (sz != sizeof *set)
+               return -EINVAL;
 
        if ((r = copy_from_user(&s, up, sz)) == 0) {
                sigset_32to64(set, &s);
index 7426e40699bdbf08575d0b69722e6594a1535076..f76c10863c6266c5a4fe6a3fbbe22d022a11e8c2 100644 (file)
@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping,
        struct vm_area_struct *vma;
        int offset = mapping ? get_offset(mapping) : 0;
 
+       offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000;
+
        addr = DCACHE_ALIGN(addr - offset) + offset;
 
        for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
index 7ab286ab5300accb706fb72a2e343153cae85d18..39ed65a44c5fac5fc996b2d5ec25effbbf9853fa 100644 (file)
                        interrupts = <2 7 0>;
                };
 
+               sclpc@3c00 {
+                       compatible = "fsl,mpc5200-lpbfifo";
+                       reg = <0x3c00 0x60>;
+                       interrupts = <2 23 0>;
+               };
+
                i2c@3d00 {
                        #address-cells = <1>;
                        #size-cells = <0>;
index 3444eb8f0ade62305d0a935ca50a02d1fca16933..24f668039295b1b95ff279ff91974a7ad65582ff 100644 (file)
                                reg = <0>;
                        };
                };
-
-               sclpc@3c00 {
-                       compatible = "fsl,mpc5200-lpbfifo";
-                       reg = <0x3c00 0x60>;
-                       interrupts = <3 23 0>;
-               };
        };
 
        localbus {
index 9e354997eb7e3ccc29ac70ee5e7a31c3ce319b01..96512c05803336d937cabcf5b06aa86e43bc538b 100644 (file)
@@ -59,7 +59,7 @@
                        #gpio-cells = <2>;
                };
 
-               psc@2000 { /* PSC1 in ac97 mode */
+               audioplatform: psc@2000 { /* PSC1 in ac97 mode */
                        compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97";
                        cell-index = <0>;
                };
        localbus {
                status = "disabled";
        };
+
+       sound {
+               compatible = "phytec,pcm030-audio-fabric";
+               asoc-platform = <&audioplatform>;
+       };
 };
index 8520b58a5e9a0f0027a95f0df2662cbab898c5cc..b89ef65392dc229b4ec026e23ddd9c5228ed05f1 100644 (file)
@@ -372,10 +372,11 @@ static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
        case MPC52xx_IRQ_L1_MAIN: irqchip = &mpc52xx_main_irqchip; break;
        case MPC52xx_IRQ_L1_PERP: irqchip = &mpc52xx_periph_irqchip; break;
        case MPC52xx_IRQ_L1_SDMA: irqchip = &mpc52xx_sdma_irqchip; break;
-       default:
-               pr_err("%s: invalid irq: virq=%i, l1=%i, l2=%i\n",
-                      __func__, virq, l1irq, l2irq);
-               return -EINVAL;
+       case MPC52xx_IRQ_L1_CRIT:
+               pr_warn("%s: Critical IRQ #%d is unsupported! Nopping it.\n",
+                       __func__, l2irq);
+               irq_set_chip(virq, &no_irq_chip);
+               return 0;
        }
 
        irq_set_chip_and_handler(virq, irqchip, handle_level_irq);
index f93003123bce01119544ef88b15b3e479fbf92cd..67c62578d17043bbff42e1880f1f7e8986cb971c 100644 (file)
@@ -63,10 +63,13 @@ extern char *of_console_options;
 extern void irq_trans_init(struct device_node *dp);
 extern char *build_path_component(struct device_node *dp);
 
-/* SPARC has a local implementation */
+/* SPARC has local implementations */
 extern int of_address_to_resource(struct device_node *dev, int index,
                                  struct resource *r);
 #define of_address_to_resource of_address_to_resource
 
+void __iomem *of_iomap(struct device_node *node, int index);
+#define of_iomap of_iomap
+
 #endif /* __KERNEL__ */
 #endif /* _SPARC_PROM_H */
index 867de2f8189c32acf359fe5575cd139ede6b7048..689e1ba628097e12ab95496d60df2225817483d3 100644 (file)
@@ -295,9 +295,7 @@ void do_rt_sigreturn(struct pt_regs *regs)
                err |= restore_fpu_state(regs, fpu_save);
 
        err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t));
-       err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf);
-
-       if (err)
+       if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT)
                goto segv;
 
        err |= __get_user(rwin_save, &sf->rwin_save);
index c760e073963ed59004f66ed65aade6355c852da9..e87b0cac14b5e5d735ab21f9f3b163db52349938 100644 (file)
@@ -12,6 +12,8 @@
 #include <asm/setup.h>
 #include <asm/desc.h>
 
+#undef memcpy                  /* Use memcpy from misc.c */
+
 #include "eboot.h"
 
 static efi_system_table_t *sys_table;
index 2a017441b8b2ebc665a24a001f5d635f7dbc9955..8c132a625b94991c179def21973bb8ad3c3a56d5 100644 (file)
@@ -476,6 +476,3 @@ die:
 setup_corrupt:
        .byte   7
        .string "No setup signature found...\n"
-
-       .data
-dummy: .long   0
index dcfde52979c3e63be0d42627cc567cf76c733090..19f16ebaf4fa826216c7858d224365b2fdb74670 100644 (file)
@@ -205,21 +205,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs)
 }
 #endif
 
-/*
- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
- * when it traps.  The previous stack will be directly underneath the saved
- * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
- *
- * This is valid only for kernel mode traps.
- */
-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
-{
 #ifdef CONFIG_X86_32
-       return (unsigned long)(&regs->sp);
+extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
 #else
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
        return regs->sp;
-#endif
 }
+#endif
 
 #define GET_IP(regs) ((regs)->ip)
 #define GET_FP(regs) ((regs)->bp)
index f7e98a2c0d123ae0ebe7f61f1521f7dd7090a335..1b7d1656a042ff5e1886c6fff40bbde1ff3a472d 100644 (file)
@@ -631,6 +631,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
                }
        }
 
+       /*
+        * The way access filter has a performance penalty on some workloads.
+        * Disable it on the affected CPUs.
+        */
+       if ((c->x86 == 0x15) &&
+           (c->x86_model >= 0x02) && (c->x86_model < 0x20)) {
+               u64 val;
+
+               if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) {
+                       val |= 0x1E;
+                       wrmsrl_safe(0xc0011021, val);
+               }
+       }
+
        cpu_detect_cache_sizes(c);
 
        /* Multi core CPU? */
index 698b6ec12e0f40f9c292b9f84206b38214d9fe7e..1ac581f38dfa2771ba054c980cd9163546e8c9db 100644 (file)
@@ -6,7 +6,7 @@
  *
  *  Written by Jacob Shin - AMD, Inc.
  *
- *  Support: borislav.petkov@amd.com
+ *  Maintained by: Borislav Petkov <bp@alien8.de>
  *
  *  April 2006
  *     - added support for AMD Family 0x10 processors
index 5f88abf07e9ccc2237b27fea29c57b4b59315a64..4f9a3cbfc4a33fb801ab1bf2c822a83e76122897 100644 (file)
@@ -285,34 +285,39 @@ void cmci_clear(void)
        raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
 }
 
+static long cmci_rediscover_work_func(void *arg)
+{
+       int banks;
+
+       /* Recheck banks in case CPUs don't all have the same */
+       if (cmci_supported(&banks))
+               cmci_discover(banks);
+
+       return 0;
+}
+
 /*
  * After a CPU went down cycle through all the others and rediscover
  * Must run in process context.
  */
 void cmci_rediscover(int dying)
 {
-       int banks;
-       int cpu;
-       cpumask_var_t old;
+       int cpu, banks;
 
        if (!cmci_supported(&banks))
                return;
-       if (!alloc_cpumask_var(&old, GFP_KERNEL))
-               return;
-       cpumask_copy(old, &current->cpus_allowed);
 
        for_each_online_cpu(cpu) {
                if (cpu == dying)
                        continue;
-               if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
+
+               if (cpu == smp_processor_id()) {
+                       cmci_rediscover_work_func(NULL);
                        continue;
-               /* Recheck banks in case CPUs don't all have the same */
-               if (cmci_supported(&banks))
-                       cmci_discover(banks);
-       }
+               }
 
-       set_cpus_allowed_ptr(current, old);
-       free_cpumask_var(old);
+               work_on_cpu(cpu, cmci_rediscover_work_func, NULL);
+       }
 }
 
 /*
index b51b2c7ee51fcbc3738ed8bd05fb4fd67d9fe1ab..1328fe49a3f14d7a33615e892efca8d110aeb6e6 100644 (file)
@@ -995,8 +995,8 @@ END(interrupt)
         */
        .p2align CONFIG_X86_L1_CACHE_SHIFT
 common_interrupt:
-       ASM_CLAC
        XCPT_FRAME
+       ASM_CLAC
        addq $-0x80,(%rsp)              /* Adjust vector to [-256,-1] range */
        interrupt do_IRQ
        /* 0(%rsp): old_rsp-ARGOFFSET */
@@ -1135,8 +1135,8 @@ END(common_interrupt)
  */
 .macro apicinterrupt num sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        INTR_FRAME
+       ASM_CLAC
        pushq_cfi $~(\num)
 .Lcommon_\sym:
        interrupt \do_sym
@@ -1190,8 +1190,8 @@ apicinterrupt IRQ_WORK_VECTOR \
  */
 .macro zeroentry sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        INTR_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
@@ -1208,8 +1208,8 @@ END(\sym)
 
 .macro paranoidzeroentry sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        INTR_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
@@ -1227,8 +1227,8 @@ END(\sym)
 #define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
 .macro paranoidzeroentry_ist sym do_sym ist
 ENTRY(\sym)
-       ASM_CLAC
        INTR_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
        subq $ORIG_RAX-R15, %rsp
@@ -1247,8 +1247,8 @@ END(\sym)
 
 .macro errorentry sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        XCPT_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
@@ -1266,8 +1266,8 @@ END(\sym)
        /* error code is on the stack already */
 .macro paranoiderrorentry sym do_sym
 ENTRY(\sym)
-       ASM_CLAC
        XCPT_FRAME
+       ASM_CLAC
        PARAVIRT_ADJUST_EXCEPTION_FRAME
        subq $ORIG_RAX-R15, %rsp
        CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
index 7720ff5a9ee292a449ad13a9347e603def50df53..efdec7cd8e01057aeee63c95b16713c4803c7e61 100644 (file)
@@ -8,8 +8,8 @@
  *  Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
  *
  *  Maintainers:
- *  Andreas Herrmann <andreas.herrmann3@amd.com>
- *  Borislav Petkov <borislav.petkov@amd.com>
+ *  Andreas Herrmann <herrmann.der.user@googlemail.com>
+ *  Borislav Petkov <bp@alien8.de>
  *
  *  This driver allows to upgrade microcode on F10h AMD
  *  CPUs and later.
@@ -190,6 +190,7 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
 #define F1XH_MPB_MAX_SIZE 2048
 #define F14H_MPB_MAX_SIZE 1824
 #define F15H_MPB_MAX_SIZE 4096
+#define F16H_MPB_MAX_SIZE 3458
 
        switch (c->x86) {
        case 0x14:
@@ -198,6 +199,9 @@ static unsigned int verify_patch_size(int cpu, u32 patch_size,
        case 0x15:
                max_size = F15H_MPB_MAX_SIZE;
                break;
+       case 0x16:
+               max_size = F16H_MPB_MAX_SIZE;
+               break;
        default:
                max_size = F1XH_MPB_MAX_SIZE;
                break;
index b00b33a183908bdb201aa38266e40ad87d89754d..5e0596b0632e244676c686bb0b338ea1e255473f 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/perf_event.h>
 #include <linux/hw_breakpoint.h>
 #include <linux/rcupdate.h>
+#include <linux/module.h>
 
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
@@ -166,6 +167,35 @@ static inline bool invalid_selector(u16 value)
 
 #define FLAG_MASK              FLAG_MASK_32
 
+/*
+ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode
+ * when it traps.  The previous stack will be directly underneath the saved
+ * registers, and 'sp/ss' won't even have been saved. Thus the '&regs->sp'.
+ *
+ * Now, if the stack is empty, '&regs->sp' is out of range. In this
+ * case we try to take the previous stack. To always return a non-null
+ * stack pointer we fall back to regs as stack if no previous stack
+ * exists.
+ *
+ * This is valid only for kernel mode traps.
+ */
+unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+       unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
+       unsigned long sp = (unsigned long)&regs->sp;
+       struct thread_info *tinfo;
+
+       if (context == (sp & ~(THREAD_SIZE - 1)))
+               return sp;
+
+       tinfo = (struct thread_info *)context;
+       if (tinfo->previous_esp)
+               return tinfo->previous_esp;
+
+       return (unsigned long)regs;
+}
+EXPORT_SYMBOL_GPL(kernel_stack_pointer);
+
 static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno)
 {
        BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0);
index 0777f042e4002e695b316c99889b594daebc5448..60f926cd8b0edbf44277a18718dd86cb4a077bd1 100644 (file)
@@ -197,7 +197,7 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
        }
 
        if (end == TLB_FLUSH_ALL || tlb_flushall_shift == -1
-                                       || vmflag == VM_HUGETLB) {
+                                       || vmflag & VM_HUGETLB) {
                local_flush_tlb();
                goto flush_all;
        }
index 41bd2a2d2c50f61a4948f954f296631c8dfd7961..b914e20b5a0033c7cb856c1e82eeea207187eb07 100644 (file)
@@ -115,6 +115,16 @@ static void sata_revid_read(struct sim_dev_reg *reg, u32 *value)
        reg_read(reg, value);
 }
 
+static void reg_noirq_read(struct sim_dev_reg *reg, u32 *value)
+{
+       unsigned long flags;
+
+       raw_spin_lock_irqsave(&pci_config_lock, flags);
+       /* force interrupt pin value to 0 */
+       *value = reg->sim_reg.value & 0xfff00ff;
+       raw_spin_unlock_irqrestore(&pci_config_lock, flags);
+}
+
 static struct sim_dev_reg bus1_fixups[] = {
        DEFINE_REG(2, 0, 0x10, (16*MB), reg_init, reg_read, reg_write)
        DEFINE_REG(2, 0, 0x14, (256), reg_init, reg_read, reg_write)
@@ -144,6 +154,7 @@ static struct sim_dev_reg bus1_fixups[] = {
        DEFINE_REG(11, 5, 0x10, (64*KB), reg_init, reg_read, reg_write)
        DEFINE_REG(11, 6, 0x10, (256), reg_init, reg_read, reg_write)
        DEFINE_REG(11, 7, 0x10, (64*KB), reg_init, reg_read, reg_write)
+       DEFINE_REG(11, 7, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
        DEFINE_REG(12, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
        DEFINE_REG(12, 0, 0x14, (256), reg_init, reg_read, reg_write)
        DEFINE_REG(12, 1, 0x10, (1024), reg_init, reg_read, reg_write)
@@ -161,8 +172,10 @@ static struct sim_dev_reg bus1_fixups[] = {
        DEFINE_REG(16, 0, 0x10, (64*KB), reg_init, reg_read, reg_write)
        DEFINE_REG(16, 0, 0x14, (64*MB), reg_init, reg_read, reg_write)
        DEFINE_REG(16, 0, 0x18, (64*MB), reg_init, reg_read, reg_write)
+       DEFINE_REG(16, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
        DEFINE_REG(17, 0, 0x10, (128*KB), reg_init, reg_read, reg_write)
        DEFINE_REG(18, 0, 0x10, (1*KB), reg_init, reg_read, reg_write)
+       DEFINE_REG(18, 0, 0x3c, 256, reg_init, reg_noirq_read, reg_write)
 };
 
 static void __init init_sim_regs(void)
index 4c61b52191eb293bcaef2cac2380c6114cdaefcf..92525cb8e54c84c0ebe6b2caeacb47f4f8ab299b 100644 (file)
 #include <asm/i8259.h>
 #include <asm/io.h>
 #include <asm/io_apic.h>
+#include <asm/emergency-restart.h>
 
 static int ce4100_i8042_detect(void)
 {
        return 0;
 }
 
+/*
+ * The CE4100 platform has an internal 8051 Microcontroller which is
+ * responsible for signaling to the external Power Management Unit the
+ * intention to reset, reboot or power off the system. This 8051 device has
+ * its command register mapped at I/O port 0xcf9 and the value 0x4 is used
+ * to power off the system.
+ */
+static void ce4100_power_off(void)
+{
+       outb(0x4, 0xcf9);
+}
+
 #ifdef CONFIG_SERIAL_8250
 
 static unsigned int mem_serial_in(struct uart_port *p, int offset)
@@ -139,8 +152,19 @@ void __init x86_ce4100_early_setup(void)
        x86_init.mpparse.find_smp_config = x86_init_noop;
        x86_init.pci.init = ce4100_pci_init;
 
+       /*
+        * By default, the reboot method is ACPI which is supported by the
+        * CE4100 bootloader CEFDK using FADT.ResetReg Address and ResetValue
+        * the bootloader will however issue a system power off instead of
+        * reboot. By using BOOT_KBD we ensure proper system reboot as
+        * expected.
+        */
+       reboot_type = BOOT_KBD;
+
 #ifdef CONFIG_X86_IO_APIC
        x86_init.pci.init_irq = sdv_pci_init;
        x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
 #endif
+
+       pm_power_off = ce4100_power_off;
 }
index 8b6dc5bd4dd061336172b144ae8c09da6bbc56f8..f71eac35c1b942005f3973fd9c919091b9b08bbd 100644 (file)
@@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
                           rq_end_io_fn *done)
 {
        int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+       bool is_pm_resume;
 
        WARN_ON(irqs_disabled());
 
        rq->rq_disk = bd_disk;
        rq->end_io = done;
+       /*
+        * need to check this before __blk_run_queue(), because rq can
+        * be freed before that returns.
+        */
+       is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
 
        spin_lock_irq(q->queue_lock);
 
@@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
        __elv_add_request(q, rq, where);
        __blk_run_queue(q);
        /* the queue is stopped so it won't be run */
-       if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+       if (is_pm_resume)
                q->request_fn(q);
        spin_unlock_irq(q->queue_lock);
 }
index b1ae48054dc5773eab42917d5e9d10f9764602ae..b7078afddb74fe91c403642cf4679beec06c0ef9 100644 (file)
@@ -238,7 +238,7 @@ static int __devexit ahci_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int ahci_suspend(struct device *dev)
 {
        struct ahci_platform_data *pdata = dev_get_platdata(dev);
index fd9ecf74e631afc800e56927069d97bd61774862..5b0ba3f20edcfd4603538ce7505d69dd93f8bb29 100644 (file)
@@ -1105,10 +1105,15 @@ static int ata_acpi_bind_device(struct ata_port *ap, struct scsi_device *sdev,
        struct acpi_device *acpi_dev;
        struct acpi_device_power_state *states;
 
-       if (ap->flags & ATA_FLAG_ACPI_SATA)
-               ata_dev = &ap->link.device[sdev->channel];
-       else
+       if (ap->flags & ATA_FLAG_ACPI_SATA) {
+               if (!sata_pmp_attached(ap))
+                       ata_dev = &ap->link.device[sdev->id];
+               else
+                       ata_dev = &ap->pmp_link[sdev->channel].device[sdev->id];
+       }
+       else {
                ata_dev = &ap->link.device[sdev->id];
+       }
 
        *handle = ata_dev_acpi_handle(ata_dev);
 
index 3cc7096cfda758e9b3ffd514aaf6e70928ec7f8d..f46fbd3bd3fb6b4456032a0255bad894e8fc9cdb 100644 (file)
@@ -2942,6 +2942,10 @@ const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
 
        if (xfer_mode == t->mode)
                return t;
+
+       WARN_ONCE(true, "%s: unable to find timing for xfer_mode 0x%x\n",
+                       __func__, xfer_mode);
+
        return NULL;
 }
 
index e3bda074fa12f59381f9e8911338c49a88611f36..a6df6a351d6e4ce263f717ad52bf1ebb72a1aae0 100644 (file)
@@ -1052,6 +1052,8 @@ static void ata_scsi_sdev_config(struct scsi_device *sdev)
 {
        sdev->use_10_for_rw = 1;
        sdev->use_10_for_ms = 1;
+       sdev->no_report_opcodes = 1;
+       sdev->no_write_same = 1;
 
        /* Schedule policy is determined by ->qc_defer() callback and
         * it needs to see every deferred qc.  Set dev_blocked to 1 to
index 26201ebef3ca652a16c12c6719eae2127ed212ad..371fd2c698b70ce1b283672c0c2472f8ce6ac263 100644 (file)
@@ -317,6 +317,12 @@ static int cf_init(struct arasan_cf_dev *acdev)
                return ret;
        }
 
+       ret = clk_set_rate(acdev->clk, 166000000);
+       if (ret) {
+               dev_warn(acdev->host->dev, "clock set rate failed");
+               return ret;
+       }
+
        spin_lock_irqsave(&acdev->host->lock, flags);
        /* configure CF interface clock */
        writel((pdata->cf_if_clk <= CF_IF_CLK_200M) ? pdata->cf_if_clk :
@@ -908,7 +914,7 @@ static int __devexit arasan_cf_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int arasan_cf_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
index 0d7c4c2cd26fed3d81ecf2bfe78a1a4939588da9..400bf1c3e982eac392652d086358858ef903ca82 100644 (file)
@@ -260,7 +260,7 @@ static const struct of_device_id ahci_of_match[] = {
 };
 MODULE_DEVICE_TABLE(of, ahci_of_match);
 
-static int __init ahci_highbank_probe(struct platform_device *pdev)
+static int __devinit ahci_highbank_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
        struct ahci_host_priv *hpriv;
@@ -378,7 +378,7 @@ static int __devexit ahci_highbank_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
 static int ahci_highbank_suspend(struct device *dev)
 {
        struct ata_host *host = dev_get_drvdata(dev);
index 44a4256533e1bd4e8aef978abdfc7e34361ad053..08608de87e4e6a971e9a36ef701f343a6982d5b7 100644 (file)
@@ -142,6 +142,39 @@ static int k2_sata_scr_write(struct ata_link *link,
        return 0;
 }
 
+static int k2_sata_softreset(struct ata_link *link,
+                            unsigned int *class, unsigned long deadline)
+{
+       u8 dmactl;
+       void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+       dmactl = readb(mmio + ATA_DMA_CMD);
+
+       /* Clear the start bit */
+       if (dmactl & ATA_DMA_START) {
+               dmactl &= ~ATA_DMA_START;
+               writeb(dmactl, mmio + ATA_DMA_CMD);
+       }
+
+       return ata_sff_softreset(link, class, deadline);
+}
+
+static int k2_sata_hardreset(struct ata_link *link,
+                            unsigned int *class, unsigned long deadline)
+{
+       u8 dmactl;
+       void __iomem *mmio = link->ap->ioaddr.bmdma_addr;
+
+       dmactl = readb(mmio + ATA_DMA_CMD);
+
+       /* Clear the start bit */
+       if (dmactl & ATA_DMA_START) {
+               dmactl &= ~ATA_DMA_START;
+               writeb(dmactl, mmio + ATA_DMA_CMD);
+       }
+
+       return sata_sff_hardreset(link, class, deadline);
+}
 
 static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
 {
@@ -346,6 +379,8 @@ static struct scsi_host_template k2_sata_sht = {
 
 static struct ata_port_operations k2_sata_ops = {
        .inherits               = &ata_bmdma_port_ops,
+       .softreset              = k2_sata_softreset,
+       .hardreset              = k2_sata_hardreset,
        .sff_tf_load            = k2_sata_tf_load,
        .sff_tf_read            = k2_sata_tf_read,
        .sff_check_status       = k2_stat_check_status,
index 74a67e0019a2d246820ccb2858399bca193ff789..fbbd4ed2edf288318f8d165760c91ee420f1812c 100644 (file)
@@ -451,7 +451,7 @@ int dev_pm_qos_add_ancestor_request(struct device *dev,
        if (ancestor)
                error = dev_pm_qos_add_request(ancestor, req, value);
 
-       if (error)
+       if (error < 0)
                req->dev = NULL;
 
        return error;
index 3804a0af3ef192c441643eee425d06f747b86d97..9fe4f1865558abce9ccd7ebf1ee10a56284caf61 100644 (file)
@@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 
        /* cf. http://lkml.org/lkml/2006/10/31/28 */
        if (!fastfail)
-               q->request_fn(q);
+               __blk_run_queue(q);
 }
 
 static void
index 1c49d7173966ae52754cd75776ff50fa48f30d60..2ddd64a9ffdee43429653806be3262a80c372240 100644 (file)
@@ -4330,6 +4330,7 @@ out_unreg_region:
 out_unreg_blkdev:
        unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
+       destroy_workqueue(floppy_wq);
        for (drive = 0; drive < N_DRIVE; drive++) {
                if (!disks[drive])
                        break;
@@ -4340,7 +4341,6 @@ out_put_disk:
                }
                put_disk(disks[drive]);
        }
-       destroy_workqueue(floppy_wq);
        return err;
 }
 
@@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void)
        unregister_blkdev(FLOPPY_MAJOR, "fd");
        platform_driver_unregister(&floppy_driver);
 
+       destroy_workqueue(floppy_wq);
+
        for (drive = 0; drive < N_DRIVE; drive++) {
                del_timer_sync(&motor_off_timer[drive]);
 
@@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void)
 
        cancel_delayed_work_sync(&fd_timeout);
        cancel_delayed_work_sync(&fd_timer);
-       destroy_workqueue(floppy_wq);
 
        if (atomic_read(&usage_count))
                floppy_release_irq_and_dma();
index adc6f36564cf3c9f214ca37c9b3cf8faffbe6cf8..9694dd99bbbc7253af1c3626823899f985b1afc2 100644 (file)
@@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data)
        struct mtip_cmd *command;
        int tag, cmdto_cnt = 0;
        unsigned int bit, group;
-       unsigned int num_command_slots = port->dd->slot_groups * 32;
+       unsigned int num_command_slots;
        unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
 
        if (unlikely(!port))
@@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data)
        }
        /* clear the tag accumulator */
        memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+       num_command_slots = port->dd->slot_groups * 32;
 
        for (tag = 0; tag < num_command_slots; tag++) {
                /*
@@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
                fis.device);
 
        /* check for erase mode support during secure erase.*/
-       if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
-                                       && (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+       if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
+                                       (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
                erasemode = 1;
        }
 
@@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
  * return value
  *     None
  */
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
                              int nsect, int nents, int tag, void *callback,
                              void *data, int dir)
 {
@@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
        struct mtip_port *port = dd->port;
        struct mtip_cmd *command = &port->commands[tag];
        int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+       u64 start = sector;
 
        /* Map the scatter list for DMA access */
        nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
@@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
        fis->opts        = 1 << 7;
        fis->command     =
                (dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
-       *((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
-       *((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+       fis->lba_low     = start & 0xFF;
+       fis->lba_mid     = (start >> 8) & 0xFF;
+       fis->lba_hi      = (start >> 16) & 0xFF;
+       fis->lba_low_ex  = (start >> 24) & 0xFF;
+       fis->lba_mid_ex  = (start >> 32) & 0xFF;
+       fis->lba_hi_ex   = (start >> 40) & 0xFF;
        fis->device      = 1 << 6;
        fis->features    = nsect & 0xFF;
        fis->features_ex = (nsect >> 8) & 0xFF;
index 5f4a917bd8bbcfe88283b3509e365a0faefbb3b5..b1742640556a782cee77701c78c4ddf7e961591a 100644 (file)
@@ -34,7 +34,7 @@
 #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET  0x48
 
 /* check for erase mode support during secure erase */
-#define MTIP_SEC_ERASE_MODE     0x3
+#define MTIP_SEC_ERASE_MODE     0x2
 
 /* # of times to retry timed out/failed IOs */
 #define MTIP_MAX_RETRIES       2
@@ -155,14 +155,14 @@ enum {
        MTIP_DDF_REBUILD_FAILED_BIT = 8,
 };
 
-__packed struct smart_attr{
+struct smart_attr {
        u8 attr_id;
        u16 flags;
        u8 cur;
        u8 worst;
        u32 data;
        u8 res[3];
-};
+} __packed;
 
 /* Register Frame Information Structure (FIS), host to device. */
 struct host_to_dev_fis {
index 8d4804732bacb77be17f07465e48f24ea513ab0c..8c4139647efc06acf50528a8d66f1e09995cc4cc 100644 (file)
@@ -33,7 +33,7 @@
  *             detection. The mods to Rev F required more family
  *             information detection.
  *
- *     Changes/Fixes by Borislav Petkov <borislav.petkov@amd.com>:
+ *     Changes/Fixes by Borislav Petkov <bp@alien8.de>:
  *             - misc fixes and code cleanups
  *
  * This module is based on the following documents
index 6c86f6e545587202d3a9f729457ef38314490f70..351945fa2ecdca3f59a691db38206a0f13795797 100644 (file)
@@ -5,7 +5,7 @@
  *
  * 2007 (c) MontaVista Software, Inc.
  * 2010 (c) Advanced Micro Devices Inc.
- *         Borislav Petkov <borislav.petkov@amd.com>
+ *         Borislav Petkov <bp@alien8.de>
  *
  * This file is licensed under the terms of the GNU General Public
  * License version 2. This program is licensed "as is" without any
index 66b5151c10807083e1e724cb58cc384cdde2f11c..2ae78f20cc28f3850cc26a9156927c7b4e8b085e 100644 (file)
@@ -6,7 +6,7 @@
  * This file may be distributed under the terms of the GNU General Public
  * License version 2.
  *
- * Copyright (c) 2010:  Borislav Petkov <borislav.petkov@amd.com>
+ * Copyright (c) 2010:  Borislav Petkov <bp@alien8.de>
  *                     Advanced Micro Devices Inc.
  */
 
@@ -168,6 +168,6 @@ module_init(edac_init_mce_inject);
 module_exit(edac_exit_mce_inject);
 
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Borislav Petkov <borislav.petkov@amd.com>");
+MODULE_AUTHOR("Borislav Petkov <bp@alien8.de>");
 MODULE_AUTHOR("AMD Inc.");
 MODULE_DESCRIPTION("MCE injection facility for testing MCE decoding");
index 1162d6b3bf8561d6ed1cfe399643dff6deb4027b..bb1b392f5cdacd1194e9af1772469c834e4192dd 100644 (file)
@@ -1546,6 +1546,8 @@ static int sbp2_scsi_slave_configure(struct scsi_device *sdev)
        struct sbp2_logical_unit *lu = sdev->hostdata;
 
        sdev->use_10_for_rw = 1;
+       sdev->no_report_opcodes = 1;
+       sdev->no_write_same = 1;
 
        if (sbp2_param_exclusive_login)
                sdev->manage_start_stop = 1;
index f11d8e3b4041b780c8c9f9ec154c3f0878025ec6..47150f5ded04ea4df13c14434fc3fa553141abaf 100644 (file)
@@ -466,7 +466,7 @@ config GPIO_ADP5588_IRQ
 
 config GPIO_ADNP
        tristate "Avionic Design N-bit GPIO expander"
-       depends on I2C && OF
+       depends on I2C && OF_GPIO
        help
          This option enables support for N GPIOs found on Avionic Design
          I2C GPIO expanders. The register space will be extended by powers
index 0f425189de11b3b5b8003a9c9abb330d9debd116..ce1c847600764602f568994f08167b8fc044dd9a 100644 (file)
@@ -77,7 +77,7 @@ struct mcp23s08_driver_data {
 
 /*----------------------------------------------------------------------*/
 
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
 
 static int mcp23008_read(struct mcp23s08 *mcp, unsigned reg)
 {
@@ -399,7 +399,7 @@ static int mcp23s08_probe_one(struct mcp23s08 *mcp, struct device *dev,
                break;
 #endif /* CONFIG_SPI_MASTER */
 
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
        case MCP_TYPE_008:
                mcp->ops = &mcp23008_ops;
                mcp->chip.ngpio = 8;
@@ -473,7 +473,7 @@ fail:
 
 /*----------------------------------------------------------------------*/
 
-#ifdef CONFIG_I2C
+#if IS_ENABLED(CONFIG_I2C)
 
 static int __devinit mcp230xx_probe(struct i2c_client *client,
                                    const struct i2c_device_id *id)
index cf7afb9eb61ab02c4060532dbdd39e2c2c6e9561..be65c0451ad556e174b961f3542cc8d7b40e201c 100644 (file)
@@ -92,6 +92,11 @@ static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
        return mvchip->membase + GPIO_OUT_OFF;
 }
 
+static inline void __iomem *mvebu_gpioreg_blink(struct mvebu_gpio_chip *mvchip)
+{
+       return mvchip->membase + GPIO_BLINK_EN_OFF;
+}
+
 static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
 {
        return mvchip->membase + GPIO_IO_CONF_OFF;
@@ -206,6 +211,23 @@ static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
        return (u >> pin) & 1;
 }
 
+static void mvebu_gpio_blink(struct gpio_chip *chip, unsigned pin, int value)
+{
+       struct mvebu_gpio_chip *mvchip =
+               container_of(chip, struct mvebu_gpio_chip, chip);
+       unsigned long flags;
+       u32 u;
+
+       spin_lock_irqsave(&mvchip->lock, flags);
+       u = readl_relaxed(mvebu_gpioreg_blink(mvchip));
+       if (value)
+               u |= 1 << pin;
+       else
+               u &= ~(1 << pin);
+       writel_relaxed(u, mvebu_gpioreg_blink(mvchip));
+       spin_unlock_irqrestore(&mvchip->lock, flags);
+}
+
 static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
 {
        struct mvebu_gpio_chip *mvchip =
@@ -244,6 +266,7 @@ static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
        if (ret)
                return ret;
 
+       mvebu_gpio_blink(chip, pin, 0);
        mvebu_gpio_set(chip, pin, value);
 
        spin_lock_irqsave(&mvchip->lock, flags);
index 05a909a17ceee67590195f0e9c60fd4a10857ed1..15b182c84ce8b74b55f0cb1073c452a410329f6e 100644 (file)
@@ -49,13 +49,7 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
                if (chan->vblank.crtc != crtc)
                        continue;
 
-               if (nv_device(priv)->chipset == 0x50) {
-                       nv_wr32(priv, 0x001704, chan->vblank.channel);
-                       nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
-                       bar->flush(bar);
-                       nv_wr32(priv, 0x001570, chan->vblank.offset);
-                       nv_wr32(priv, 0x001574, chan->vblank.value);
-               } else {
+               if (nv_device(priv)->chipset >= 0xc0) {
                        nv_wr32(priv, 0x001718, 0x80000000 | chan->vblank.channel);
                        bar->flush(bar);
                        nv_wr32(priv, 0x06000c,
@@ -63,6 +57,17 @@ nv50_disp_intr_vblank(struct nv50_disp_priv *priv, int crtc)
                        nv_wr32(priv, 0x060010,
                                lower_32_bits(chan->vblank.offset));
                        nv_wr32(priv, 0x060014, chan->vblank.value);
+               } else {
+                       nv_wr32(priv, 0x001704, chan->vblank.channel);
+                       nv_wr32(priv, 0x001710, 0x80000000 | chan->vblank.ctxdma);
+                       bar->flush(bar);
+                       if (nv_device(priv)->chipset == 0x50) {
+                               nv_wr32(priv, 0x001570, chan->vblank.offset);
+                               nv_wr32(priv, 0x001574, chan->vblank.value);
+                       } else {
+                               nv_wr32(priv, 0x060010, chan->vblank.offset);
+                               nv_wr32(priv, 0x060014, chan->vblank.value);
+                       }
                }
 
                list_del(&chan->vblank.head);
index e45035efb8ca084c37785ffd4a0ca789523d4a2b..7bbb1e1b7a8d1ebbeeb275dd38e27caaa7a1400c 100644 (file)
@@ -669,21 +669,27 @@ nv40_grctx_fill(struct nouveau_device *device, struct nouveau_gpuobj *mem)
                           });
 }
 
-void
+int
 nv40_grctx_init(struct nouveau_device *device, u32 *size)
 {
-       u32 ctxprog[256], i;
+       u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL), i;
        struct nouveau_grctx ctx = {
                .device = device,
                .mode = NOUVEAU_GRCTX_PROG,
                .data = ctxprog,
-               .ctxprog_max = ARRAY_SIZE(ctxprog)
+               .ctxprog_max = 256,
        };
 
+       if (!ctxprog)
+               return -ENOMEM;
+
        nv40_grctx_generate(&ctx);
 
        nv_wr32(device, 0x400324, 0);
        for (i = 0; i < ctx.ctxprog_len; i++)
                nv_wr32(device, 0x400328, ctxprog[i]);
        *size = ctx.ctxvals_pos * 4;
+
+       kfree(ctxprog);
+       return 0;
 }
index 425001204a89b6133648d6752fb362159fca4cef..cc6574eeb80e6459166224185a5bd81ce41e9aa4 100644 (file)
@@ -346,7 +346,9 @@ nv40_graph_init(struct nouveau_object *object)
                return ret;
 
        /* generate and upload context program */
-       nv40_grctx_init(nv_device(priv), &priv->size);
+       ret = nv40_grctx_init(nv_device(priv), &priv->size);
+       if (ret)
+               return ret;
 
        /* No context present currently */
        nv_wr32(priv, NV40_PGRAPH_CTXCTL_CUR, 0x00000000);
index d2ac975afc2e8f46feff64c6be4c16e04faf73b3..7da35a4e7970d1684433f3fd036c60492a5ddb0d 100644 (file)
@@ -15,7 +15,7 @@ nv44_graph_class(void *priv)
        return !(0x0baf & (1 << (device->chipset & 0x0f)));
 }
 
-void nv40_grctx_init(struct nouveau_device *, u32 *size);
+int  nv40_grctx_init(struct nouveau_device *, u32 *size);
 void nv40_grctx_fill(struct nouveau_device *, struct nouveau_gpuobj *);
 
 #endif
index 818feabbf4a0969f090f897852fe4476a422b1b6..486f1a9217fd6cd51dc2c5530d455eaa155c86b4 100644 (file)
@@ -175,14 +175,18 @@ nv_mo32(void *obj, u32 addr, u32 mask, u32 data)
        return temp;
 }
 
-static inline bool
-nv_strncmp(void *obj, u32 addr, u32 len, const char *str)
+static inline int
+nv_memcmp(void *obj, u32 addr, const char *str, u32 len)
 {
+       unsigned char c1, c2;
+
        while (len--) {
-               if (nv_ro08(obj, addr++) != *(str++))
-                       return false;
+               c1 = nv_ro08(obj, addr++);
+               c2 = *(str++);
+               if (c1 != c2)
+                       return c1 - c2;
        }
-       return true;
+       return 0;
 }
 
 #endif
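
The object.h hunk swaps the bool-returning nv_strncmp() for nv_memcmp(), which takes its arguments in memcmp() order and returns the signed difference of the first mismatching bytes, so callers such as the dcb_table() check below keep the familiar !nv_memcmp(...) idiom. A small userspace analogue; acc_read() is a hypothetical stand-in for nv_ro08():

#include <stdio.h>

/* Hypothetical byte accessor standing in for nv_ro08(obj, addr). */
static unsigned char acc_read(const void *obj, unsigned int addr)
{
        return ((const unsigned char *)obj)[addr];
}

/* memcmp()-style comparison: 0 on match, signed byte difference otherwise. */
static int acc_memcmp(const void *obj, unsigned int addr,
                      const char *str, unsigned int len)
{
        while (len--) {
                unsigned char c1 = acc_read(obj, addr++);
                unsigned char c2 = (unsigned char)*(str++);

                if (c1 != c2)
                        return c1 - c2;
        }
        return 0;
}

int main(void)
{
        const char bios[] = "xxDEV_REC";

        /* Mirrors the DCB 1.5 signature check in dcb_table() above. */
        printf("%d\n", acc_memcmp(bios, 2, "DEV_REC", 7));      /* prints 0 */
        return 0;
}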
index 39e73b91d360a3753dbcc3c210ca1e77f00332ec..41b7a6a76f1981ae3427278bacaf8d2c99edbe09 100644 (file)
@@ -54,6 +54,7 @@ int nv04_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
                        int clk, struct nouveau_pll_vals *);
 int nv04_clock_pll_prog(struct nouveau_clock *, u32 reg1,
                        struct nouveau_pll_vals *);
-
+int nva3_clock_pll_calc(struct nouveau_clock *, struct nvbios_pll *,
+                       int clk, struct nouveau_pll_vals *);
 
 #endif
index 7d750382a833c33b64fbbf40c98a1d8f936399e2..c511971577490499002426ed04087a517d8e0dab 100644 (file)
@@ -64,7 +64,7 @@ dcb_table(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len)
                }
        } else
        if (*ver >= 0x15) {
-               if (!nv_strncmp(bios, dcb - 7, 7, "DEV_REC")) {
+               if (!nv_memcmp(bios, dcb - 7, "DEV_REC", 7)) {
                        u16 i2c = nv_ro16(bios, dcb + 2);
                        *hdr = 4;
                        *cnt = (i2c - dcb) / 10;
index cc8d7d162d7c348da6770d7404f195e9191d41e1..9068c98b96f64f2f8c0c5a18d478400d09855553 100644 (file)
@@ -66,6 +66,24 @@ nva3_clock_pll_set(struct nouveau_clock *clk, u32 type, u32 freq)
        return ret;
 }
 
+int
+nva3_clock_pll_calc(struct nouveau_clock *clock, struct nvbios_pll *info,
+                   int clk, struct nouveau_pll_vals *pv)
+{
+       int ret, N, M, P;
+
+       ret = nva3_pll_calc(clock, info, clk, &N, NULL, &M, &P);
+
+       if (ret > 0) {
+               pv->refclk = info->refclk;
+               pv->N1 = N;
+               pv->M1 = M;
+               pv->log2P = P;
+       }
+       return ret;
+}
+
+
 static int
 nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                struct nouveau_oclass *oclass, void *data, u32 size,
@@ -80,6 +98,7 @@ nva3_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        priv->base.pll_set = nva3_clock_pll_set;
+       priv->base.pll_calc = nva3_clock_pll_calc;
        return 0;
 }
 
index 5ccce0b17bf3dd209bde225486b41bab55fcdf79..f6962c9b6c36b6f00db2715287dedf1c3adb9f4e 100644 (file)
@@ -79,6 +79,7 @@ nvc0_clock_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
                return ret;
 
        priv->base.pll_set = nvc0_clock_pll_set;
+       priv->base.pll_calc = nva3_clock_pll_calc;
        return 0;
 }
 
index cc79c796afee316cd810125b93dd86fbef940d5e..cbf1fc60a38682be197dc1bb1f0f15c8853a06e6 100644 (file)
@@ -241,6 +241,10 @@ nouveau_abi16_ioctl_channel_alloc(ABI16_IOCTL_ARGS)
 
        if (unlikely(!abi16))
                return -ENOMEM;
+
+       if (!drm->channel)
+               return nouveau_abi16_put(abi16, -ENODEV);
+
        client = nv_client(abi16->client);
 
        if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0)
index 0910125cbbc3be3cfa757f0976f3afea8cac6a21..8503b2ea570a0616234e409c524593f37a98648e 100644 (file)
@@ -129,7 +129,8 @@ nouveau_accel_init(struct nouveau_drm *drm)
 
        /* initialise synchronisation routines */
        if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
-       else if (device->chipset   <  0x84) ret = nv10_fence_create(drm);
+       else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
+       else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
        else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
        else                                ret = nvc0_fence_create(drm);
        if (ret) {
index af31f829f4a8bd6910b8e19ec5bff001b660d3e6..219942c660d7d2129e7c21ac375761dcd5309c9f 100644 (file)
@@ -1330,6 +1330,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        break;
                                udelay(1);
                        }
+               } else {
+                       save->crtc_enabled[i] = false;
                }
        }
 
index 10ea17a6b2a6edfae76955f7e8f2ccd08ea7b1fe..42433344cb1b24860998067fe7c96baf332c1ece 100644 (file)
@@ -69,9 +69,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = {
        /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/
        { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59,
                PCI_VENDOR_ID_DELL, 0x00e3, 2},
-       /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */
+       /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */
        { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66,
                PCI_VENDOR_ID_DELL, 0x0149, 1},
+       /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */
+       { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66,
+               PCI_VENDOR_ID_IBM, 0x0531, 1},
        /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */
        { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50,
                0x1025, 0x0061, 1},
index aa59a254be2c3e3755a377d0c0c1f02dfa6c797d..c02bf208084f0a52cbb08548a36cece9c0ca3fb9 100644 (file)
@@ -39,6 +39,7 @@
 #define        AT91_TWI_STOP           0x0002  /* Send a Stop Condition */
 #define        AT91_TWI_MSEN           0x0004  /* Master Transfer Enable */
 #define        AT91_TWI_SVDIS          0x0020  /* Slave Transfer Disable */
+#define        AT91_TWI_QUICK          0x0040  /* SMBus quick command */
 #define        AT91_TWI_SWRST          0x0080  /* Software Reset */
 
 #define        AT91_TWI_MMR            0x0004  /* Master Mode Register */
@@ -212,7 +213,11 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
 
        INIT_COMPLETION(dev->cmd_complete);
        dev->transfer_status = 0;
-       if (dev->msg->flags & I2C_M_RD) {
+
+       if (!dev->buf_len) {
+               at91_twi_write(dev, AT91_TWI_CR, AT91_TWI_QUICK);
+               at91_twi_write(dev, AT91_TWI_IER, AT91_TWI_TXCOMP);
+       } else if (dev->msg->flags & I2C_M_RD) {
                unsigned start_flags = AT91_TWI_START;
 
                if (at91_twi_read(dev, AT91_TWI_SR) & AT91_TWI_RXRDY) {
index 286ca191782098fe44f8a9c995b0015e2e3693ef..0670da79ee5e53cd0682dbdc1e7b311a196f73e1 100644 (file)
@@ -287,12 +287,14 @@ read_init_dma_fail:
 select_init_dma_fail:
        dma_unmap_sg(i2c->dev, &i2c->sg_io[0], 1, DMA_TO_DEVICE);
 select_init_pio_fail:
+       dmaengine_terminate_all(i2c->dmach);
        return -EINVAL;
 
 /* Write failpath. */
 write_init_dma_fail:
        dma_unmap_sg(i2c->dev, i2c->sg_io, 2, DMA_TO_DEVICE);
 write_init_pio_fail:
+       dmaengine_terminate_all(i2c->dmach);
        return -EINVAL;
 }
 
index db31eaed6ea59b32083df49f757c1fc949c60303..3525c9e62cb0f9bf23f47705fd86d3bba9707b45 100644 (file)
@@ -43,7 +43,6 @@
 #include <linux/slab.h>
 #include <linux/i2c-omap.h>
 #include <linux/pm_runtime.h>
-#include <linux/pm_qos.h>
 
 /* I2C controller revisions */
 #define OMAP_I2C_OMAP1_REV_2           0x20
@@ -187,8 +186,9 @@ struct omap_i2c_dev {
        int                     reg_shift;      /* bit shift for I2C register addresses */
        struct completion       cmd_complete;
        struct resource         *ioarea;
-       u32                     latency;        /* maximum MPU wkup latency */
-       struct pm_qos_request   pm_qos_request;
+       u32                     latency;        /* maximum mpu wkup latency */
+       void                    (*set_mpu_wkup_lat)(struct device *dev,
+                                                   long latency);
        u32                     speed;          /* Speed of bus in kHz */
        u32                     dtrev;          /* extra revision from DT */
        u32                     flags;
@@ -494,7 +494,9 @@ static void omap_i2c_resize_fifo(struct omap_i2c_dev *dev, u8 size, bool is_rx)
                dev->b_hw = 1; /* Enable hardware fixes */
 
        /* calculate wakeup latency constraint for MPU */
-       dev->latency = (1000000 * dev->threshold) / (1000 * dev->speed / 8);
+       if (dev->set_mpu_wkup_lat != NULL)
+               dev->latency = (1000000 * dev->threshold) /
+                       (1000 * dev->speed / 8);
 }
 
 /*
@@ -522,6 +524,9 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
        dev->buf = msg->buf;
        dev->buf_len = msg->len;
 
+       /* make sure writes to dev->buf_len are ordered */
+       barrier();
+
        omap_i2c_write_reg(dev, OMAP_I2C_CNT_REG, dev->buf_len);
 
        /* Clear the FIFO Buffers */
@@ -579,7 +584,6 @@ static int omap_i2c_xfer_msg(struct i2c_adapter *adap,
         */
        timeout = wait_for_completion_timeout(&dev->cmd_complete,
                                                OMAP_I2C_TIMEOUT);
-       dev->buf_len = 0;
        if (timeout == 0) {
                dev_err(dev->dev, "controller timed out\n");
                omap_i2c_init(dev);
@@ -629,16 +633,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
        if (r < 0)
                goto out;
 
-       /*
-        * When waiting for completion of a i2c transfer, we need to
-        * set a wake up latency constraint for the MPU. This is to
-        * ensure quick enough wakeup from idle, when transfer
-        * completes.
-        */
-       if (dev->latency)
-               pm_qos_add_request(&dev->pm_qos_request,
-                                  PM_QOS_CPU_DMA_LATENCY,
-                                  dev->latency);
+       if (dev->set_mpu_wkup_lat != NULL)
+               dev->set_mpu_wkup_lat(dev->dev, dev->latency);
 
        for (i = 0; i < num; i++) {
                r = omap_i2c_xfer_msg(adap, &msgs[i], (i == (num - 1)));
@@ -646,8 +642,8 @@ omap_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
                        break;
        }
 
-       if (dev->latency)
-               pm_qos_remove_request(&dev->pm_qos_request);
+       if (dev->set_mpu_wkup_lat != NULL)
+               dev->set_mpu_wkup_lat(dev->dev, -1);
 
        if (r == 0)
                r = num;
@@ -1104,6 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
        } else if (pdata != NULL) {
                dev->speed = pdata->clkrate;
                dev->flags = pdata->flags;
+               dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
                dev->dtrev = pdata->rev;
        }
 
@@ -1159,8 +1156,9 @@ omap_i2c_probe(struct platform_device *pdev)
                        dev->b_hw = 1; /* Enable hardware fixes */
 
                /* calculate wakeup latency constraint for MPU */
-               dev->latency = (1000000 * dev->fifo_size) /
-                              (1000 * dev->speed / 8);
+               if (dev->set_mpu_wkup_lat != NULL)
+                       dev->latency = (1000000 * dev->fifo_size) /
+                                      (1000 * dev->speed / 8);
        }
 
        /* reset ASAP, clearing any IRQs */
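
The wakeup-latency expression kept in the omap hunks above, (1000000 * bytes) / (1000 * speed / 8), is the time in microseconds needed to shift the FIFO threshold across the bus at roughly eight bit-times per byte (ACK bits ignored). A standalone sketch of that arithmetic with one worked value; the helper name is illustrative:

#include <stdio.h>

/* Wakeup-latency budget in microseconds for draining `threshold` bytes at
 * an I2C bus speed of `speed_khz` kHz (~8 bit-times per byte, ACK bit
 * ignored), mirroring the expression used by the driver above. */
static unsigned int wkup_latency_us(unsigned int threshold, unsigned int speed_khz)
{
        return (1000000u * threshold) / (1000u * speed_khz / 8u);
}

int main(void)
{
        /* 8-byte FIFO threshold on a 400 kHz bus -> 160 us budget. */
        printf("%u us\n", wkup_latency_us(8, 400));
        return 0;
}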
index 3e0335f1fc60df8b78ce50c7a4e5284e82ad432f..9d902725bac94f28c91aa26ec57615988cd96e77 100644 (file)
@@ -806,6 +806,7 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
                        dev_err(i2c->dev, "invalid gpio[%d]: %d\n", idx, gpio);
                        goto free_gpio;
                }
+               i2c->gpios[idx] = gpio;
 
                ret = gpio_request(gpio, "i2c-bus");
                if (ret) {
index c0ec7d42c3be05ffd574daec1870d44d560ed15a..1abbc170d8b77f302154323a0ea944474bfd5d37 100644 (file)
@@ -26,10 +26,14 @@ static void copy_abs(struct input_dev *dev, unsigned int dst, unsigned int src)
  * input_mt_init_slots() - initialize MT input slots
  * @dev: input device supporting MT events and finger tracking
  * @num_slots: number of slots used by the device
+ * @flags: mt tasks to handle in core
  *
  * This function allocates all necessary memory for MT slot handling
  * in the input device, prepares the ABS_MT_SLOT and
  * ABS_MT_TRACKING_ID events for use and sets up appropriate buffers.
+ * Depending on the flags set, it also performs pointer emulation and
+ * frame synchronization.
+ *
  * May be called repeatedly. Returns -EINVAL if attempting to
  * reinitialize with a different number of slots.
  */
index 8f02e3d0e712789dc2cccbb0cb52fe40fc0620db..4c842c320c2ede9f2bf42d507ee5c91f00eef7e3 100644 (file)
@@ -12,8 +12,8 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #define MOUSEDEV_MINOR_BASE    32
-#define MOUSEDEV_MINORS                32
-#define MOUSEDEV_MIX           31
+#define MOUSEDEV_MINORS                31
+#define MOUSEDEV_MIX           63
 
 #include <linux/sched.h>
 #include <linux/slab.h>
index f02028ec3db6a6384dc194e7633ee12e8bbb7685..78e5d9ab0ba7fd991e55cff240257248f42b7ba1 100644 (file)
@@ -955,7 +955,8 @@ static int ads7846_resume(struct device *dev)
 
 static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume);
 
-static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads7846 *ts)
+static int __devinit ads7846_setup_pendown(struct spi_device *spi,
+                                          struct ads7846 *ts)
 {
        struct ads7846_platform_data *pdata = spi->dev.platform_data;
        int err;
@@ -981,6 +982,9 @@ static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct ads784
 
                ts->gpio_pendown = pdata->gpio_pendown;
 
+               if (pdata->gpio_pendown_debounce)
+                       gpio_set_debounce(pdata->gpio_pendown,
+                                         pdata->gpio_pendown_debounce);
        } else {
                dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n");
                return -EINVAL;
index d4a4cd445cabb40504bd2a1c7d5300656feceaff..0badfa48b32b7e84a16449ebc8804e461529c8b9 100644 (file)
@@ -4108,7 +4108,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 static int intel_iommu_add_device(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
-       struct pci_dev *bridge, *dma_pdev;
+       struct pci_dev *bridge, *dma_pdev = NULL;
        struct iommu_group *group;
        int ret;
 
@@ -4122,7 +4122,7 @@ static int intel_iommu_add_device(struct device *dev)
                        dma_pdev = pci_get_domain_bus_and_slot(
                                                pci_domain_nr(pdev->bus),
                                                bridge->subordinate->number, 0);
-               else
+               if (!dma_pdev)
                        dma_pdev = pci_dev_get(bridge);
        } else
                dma_pdev = pci_dev_get(pdev);
index a649f146d17bad0b62d15a1d174c57147a9a2a2f..c0f7a42662635a288fc8c98fabe3d8f1b35f1141 100644 (file)
@@ -1054,6 +1054,7 @@ static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
                        stats[i], val, offs);
        }
        seq_printf(s, "\n");
+       dput(dent);
 
        return 0;
 }
index 02db9183ca01e8b64ce54a649dacc943c883f38f..77e6eff41cae9bb7c693aac363c509f65c44251d 100644 (file)
@@ -740,8 +740,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
        if (!md_in_flight(md))
                wake_up(&md->wait);
 
+       /*
+        * Run this off this callpath, as drivers could invoke end_io while
+        * inside their request_fn (and holding the queue lock). Calling
+        * back into ->request_fn() could deadlock attempting to grab the
+        * queue lock again.
+        */
        if (run_queue)
-               blk_run_queue(md->queue);
+               blk_run_queue_async(md->queue);
 
        /*
         * dm_put() must be at the end of this function. See the comment above
index 9ab768acfb623f8bbb13870e5f65150a60ecad07..61200717687b85b3fed040f9599ffd1987b8842d 100644 (file)
@@ -1817,10 +1817,10 @@ retry:
                        memset(bbp, 0xff, PAGE_SIZE);
 
                        for (i = 0 ; i < bb->count ; i++) {
-                               u64 internal_bb = *p++;
+                               u64 internal_bb = p[i];
                                u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
                                                | BB_LEN(internal_bb));
-                               *bbp++ = cpu_to_le64(store_bb);
+                               bbp[i] = cpu_to_le64(store_bb);
                        }
                        bb->changed = 0;
                        if (read_seqretry(&bb->lock, seq))
@@ -5294,7 +5294,7 @@ void md_stop_writes(struct mddev *mddev)
 }
 EXPORT_SYMBOL_GPL(md_stop_writes);
 
-void md_stop(struct mddev *mddev)
+static void __md_stop(struct mddev *mddev)
 {
        mddev->ready = 0;
        mddev->pers->stop(mddev);
@@ -5304,6 +5304,18 @@ void md_stop(struct mddev *mddev)
        mddev->pers = NULL;
        clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 }
+
+void md_stop(struct mddev *mddev)
+{
+       /* stop the array and free any attached data structures.
+        * This is called from dm-raid
+        */
+       __md_stop(mddev);
+       bitmap_destroy(mddev);
+       if (mddev->bio_set)
+               bioset_free(mddev->bio_set);
+}
+
 EXPORT_SYMBOL_GPL(md_stop);
 
 static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
@@ -5364,7 +5376,7 @@ static int do_md_stop(struct mddev * mddev, int mode,
                        set_disk_ro(disk, 0);
 
                __md_stop_writes(mddev);
-               md_stop(mddev);
+               __md_stop(mddev);
                mddev->queue->merge_bvec_fn = NULL;
                mddev->queue->backing_dev_info.congested_fn = NULL;
 
@@ -7936,9 +7948,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
                   sector_t *first_bad, int *bad_sectors)
 {
        int hi;
-       int lo = 0;
+       int lo;
        u64 *p = bb->page;
-       int rv = 0;
+       int rv;
        sector_t target = s + sectors;
        unsigned seq;
 
@@ -7953,7 +7965,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
 
 retry:
        seq = read_seqbegin(&bb->lock);
-
+       lo = 0;
+       rv = 0;
        hi = bb->count;
 
        /* Binary search between lo and hi for 'target'
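
Both md.c hunks above follow from the same seqlock rule: a read_seqbegin()/read_seqretry() section can run more than once, so the first hunk indexes the badblocks table with p[i]/bbp[i] instead of advancing the pointers, and the second re-initialises lo and rv inside the retry loop of md_is_badblock(). A compact, self-contained sketch of that retry shape, with fake_read_seqbegin()/fake_read_seqretry() standing in for the real seqlock primitives and forcing a single retry:

#include <stdio.h>

static int retries_left = 1;

static unsigned int fake_read_seqbegin(void)
{
        return 0;
}

static int fake_read_seqretry(unsigned int seq)
{
        (void)seq;
        return retries_left-- > 0;      /* force exactly one retry */
}

/* Everything derived from the protected data (lo, hi, rv) is reset inside
 * the loop, so a retried pass cannot inherit stale search state. */
static int search(const int *table, int count, int target)
{
        unsigned int seq;
        int lo, hi, rv;

        do {
                seq = fake_read_seqbegin();
                lo = 0;
                rv = 0;
                hi = count;

                while (lo < hi) {
                        int mid = (lo + hi) / 2;

                        if (table[mid] < target)
                                lo = mid + 1;
                        else
                                hi = mid;
                }
                if (lo < count && table[lo] == target)
                        rv = 1;
        } while (fake_read_seqretry(seq));

        return rv;
}

int main(void)
{
        const int table[] = { 1, 3, 5, 7, 9 };

        printf("%d\n", search(table, 5, 7));    /* prints 1 */
        return 0;
}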
index d1295aff41739eea048f0e43513dfba6ade4c8f1..0d5d0ff2c0f7beb47a02bd4692273b31369b2bc5 100644 (file)
@@ -499,7 +499,7 @@ static void raid10_end_write_request(struct bio *bio, int error)
         */
        one_write_done(r10_bio);
        if (dec_rdev)
-               rdev_dec_pending(conf->mirrors[dev].rdev, conf->mddev);
+               rdev_dec_pending(rdev, conf->mddev);
 }
 
 /*
@@ -1334,18 +1334,21 @@ retry_write:
                        blocked_rdev = rrdev;
                        break;
                }
+               if (rdev && (test_bit(Faulty, &rdev->flags)
+                            || test_bit(Unmerged, &rdev->flags)))
+                       rdev = NULL;
                if (rrdev && (test_bit(Faulty, &rrdev->flags)
                              || test_bit(Unmerged, &rrdev->flags)))
                        rrdev = NULL;
 
                r10_bio->devs[i].bio = NULL;
                r10_bio->devs[i].repl_bio = NULL;
-               if (!rdev || test_bit(Faulty, &rdev->flags) ||
-                   test_bit(Unmerged, &rdev->flags)) {
+
+               if (!rdev && !rrdev) {
                        set_bit(R10BIO_Degraded, &r10_bio->state);
                        continue;
                }
-               if (test_bit(WriteErrorSeen, &rdev->flags)) {
+               if (rdev && test_bit(WriteErrorSeen, &rdev->flags)) {
                        sector_t first_bad;
                        sector_t dev_sector = r10_bio->devs[i].addr;
                        int bad_sectors;
@@ -1387,8 +1390,10 @@ retry_write:
                                        max_sectors = good_sectors;
                        }
                }
-               r10_bio->devs[i].bio = bio;
-               atomic_inc(&rdev->nr_pending);
+               if (rdev) {
+                       r10_bio->devs[i].bio = bio;
+                       atomic_inc(&rdev->nr_pending);
+               }
                if (rrdev) {
                        r10_bio->devs[i].repl_bio = bio;
                        atomic_inc(&rrdev->nr_pending);
@@ -1444,69 +1449,71 @@ retry_write:
        for (i = 0; i < conf->copies; i++) {
                struct bio *mbio;
                int d = r10_bio->devs[i].devnum;
-               if (!r10_bio->devs[i].bio)
-                       continue;
+               if (r10_bio->devs[i].bio) {
+                       struct md_rdev *rdev = conf->mirrors[d].rdev;
+                       mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+                       md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                                   max_sectors);
+                       r10_bio->devs[i].bio = mbio;
+
+                       mbio->bi_sector = (r10_bio->devs[i].addr+
+                                          choose_data_offset(r10_bio,
+                                                             rdev));
+                       mbio->bi_bdev = rdev->bdev;
+                       mbio->bi_end_io = raid10_end_write_request;
+                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_private = r10_bio;
 
-               mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-                           max_sectors);
-               r10_bio->devs[i].bio = mbio;
+                       atomic_inc(&r10_bio->remaining);
 
-               mbio->bi_sector = (r10_bio->devs[i].addr+
-                                  choose_data_offset(r10_bio,
-                                                     conf->mirrors[d].rdev));
-               mbio->bi_bdev = conf->mirrors[d].rdev->bdev;
-               mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
-               mbio->bi_private = r10_bio;
+                       cb = blk_check_plugged(raid10_unplug, mddev,
+                                              sizeof(*plug));
+                       if (cb)
+                               plug = container_of(cb, struct raid10_plug_cb,
+                                                   cb);
+                       else
+                               plug = NULL;
+                       spin_lock_irqsave(&conf->device_lock, flags);
+                       if (plug) {
+                               bio_list_add(&plug->pending, mbio);
+                               plug->pending_cnt++;
+                       } else {
+                               bio_list_add(&conf->pending_bio_list, mbio);
+                               conf->pending_count++;
+                       }
+                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       if (!plug)
+                               md_wakeup_thread(mddev->thread);
+               }
 
-               atomic_inc(&r10_bio->remaining);
+               if (r10_bio->devs[i].repl_bio) {
+                       struct md_rdev *rdev = conf->mirrors[d].replacement;
+                       if (rdev == NULL) {
+                               /* Replacement just got moved to main 'rdev' */
+                               smp_mb();
+                               rdev = conf->mirrors[d].rdev;
+                       }
+                       mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+                       md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
+                                   max_sectors);
+                       r10_bio->devs[i].repl_bio = mbio;
+
+                       mbio->bi_sector = (r10_bio->devs[i].addr +
+                                          choose_data_offset(
+                                                  r10_bio, rdev));
+                       mbio->bi_bdev = rdev->bdev;
+                       mbio->bi_end_io = raid10_end_write_request;
+                       mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
+                       mbio->bi_private = r10_bio;
 
-               cb = blk_check_plugged(raid10_unplug, mddev, sizeof(*plug));
-               if (cb)
-                       plug = container_of(cb, struct raid10_plug_cb, cb);
-               else
-                       plug = NULL;
-               spin_lock_irqsave(&conf->device_lock, flags);
-               if (plug) {
-                       bio_list_add(&plug->pending, mbio);
-                       plug->pending_cnt++;
-               } else {
+                       atomic_inc(&r10_bio->remaining);
+                       spin_lock_irqsave(&conf->device_lock, flags);
                        bio_list_add(&conf->pending_bio_list, mbio);
                        conf->pending_count++;
+                       spin_unlock_irqrestore(&conf->device_lock, flags);
+                       if (!mddev_check_plugged(mddev))
+                               md_wakeup_thread(mddev->thread);
                }
-               spin_unlock_irqrestore(&conf->device_lock, flags);
-               if (!plug)
-                       md_wakeup_thread(mddev->thread);
-
-               if (!r10_bio->devs[i].repl_bio)
-                       continue;
-
-               mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-               md_trim_bio(mbio, r10_bio->sector - bio->bi_sector,
-                           max_sectors);
-               r10_bio->devs[i].repl_bio = mbio;
-
-               /* We are actively writing to the original device
-                * so it cannot disappear, so the replacement cannot
-                * become NULL here
-                */
-               mbio->bi_sector = (r10_bio->devs[i].addr +
-                                  choose_data_offset(
-                                          r10_bio,
-                                          conf->mirrors[d].replacement));
-               mbio->bi_bdev = conf->mirrors[d].replacement->bdev;
-               mbio->bi_end_io = raid10_end_write_request;
-               mbio->bi_rw = WRITE | do_sync | do_fua | do_discard;
-               mbio->bi_private = r10_bio;
-
-               atomic_inc(&r10_bio->remaining);
-               spin_lock_irqsave(&conf->device_lock, flags);
-               bio_list_add(&conf->pending_bio_list, mbio);
-               conf->pending_count++;
-               spin_unlock_irqrestore(&conf->device_lock, flags);
-               if (!mddev_check_plugged(mddev))
-                       md_wakeup_thread(mddev->thread);
        }
 
        /* Don't remove the bias on 'remaining' (one_write_done) until
index c5439dce0295078ecf82094af5474a649284ce61..a4502686e7a8763fb5aeded988f026b2a9dd1c99 100644 (file)
@@ -2774,10 +2774,12 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                        dev = &sh->dev[i];
                        if (!test_bit(R5_LOCKED, &dev->flags) &&
                            (test_bit(R5_UPTODATE, &dev->flags) ||
-                            test_and_clear_bit(R5_Discard, &dev->flags))) {
+                            test_bit(R5_Discard, &dev->flags))) {
                                /* We can return any write requests */
                                struct bio *wbi, *wbi2;
                                pr_debug("Return write for disc %d\n", i);
+                               if (test_and_clear_bit(R5_Discard, &dev->flags))
+                                       clear_bit(R5_UPTODATE, &dev->flags);
                                wbi = dev->written;
                                dev->written = NULL;
                                while (wbi && wbi->bi_sector <
@@ -2795,7 +2797,8 @@ static void handle_stripe_clean_event(struct r5conf *conf,
                                         !test_bit(STRIPE_DEGRADED, &sh->state),
                                                0);
                        }
-               }
+               } else if (test_bit(R5_Discard, &sh->dev[i].flags))
+                       clear_bit(R5_Discard, &sh->dev[i].flags);
 
        if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state))
                if (atomic_dec_and_test(&conf->pending_full_writes))
@@ -3490,40 +3493,6 @@ static void handle_stripe(struct stripe_head *sh)
                        handle_failed_sync(conf, sh, &s);
        }
 
-       /*
-        * might be able to return some write requests if the parity blocks
-        * are safe, or on a failed drive
-        */
-       pdev = &sh->dev[sh->pd_idx];
-       s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
-               || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
-       qdev = &sh->dev[sh->qd_idx];
-       s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
-               || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
-               || conf->level < 6;
-
-       if (s.written &&
-           (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
-                            && !test_bit(R5_LOCKED, &pdev->flags)
-                            && (test_bit(R5_UPTODATE, &pdev->flags) ||
-                                test_bit(R5_Discard, &pdev->flags))))) &&
-           (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
-                            && !test_bit(R5_LOCKED, &qdev->flags)
-                            && (test_bit(R5_UPTODATE, &qdev->flags) ||
-                                test_bit(R5_Discard, &qdev->flags))))))
-               handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
-
-       /* Now we might consider reading some blocks, either to check/generate
-        * parity, or to satisfy requests
-        * or to load a block that is being partially written.
-        */
-       if (s.to_read || s.non_overwrite
-           || (conf->level == 6 && s.to_write && s.failed)
-           || (s.syncing && (s.uptodate + s.compute < disks))
-           || s.replacing
-           || s.expanding)
-               handle_stripe_fill(sh, &s, disks);
-
        /* Now we check to see if any write operations have recently
         * completed
         */
@@ -3561,6 +3530,40 @@ static void handle_stripe(struct stripe_head *sh)
                        s.dec_preread_active = 1;
        }
 
+       /*
+        * might be able to return some write requests if the parity blocks
+        * are safe, or on a failed drive
+        */
+       pdev = &sh->dev[sh->pd_idx];
+       s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx)
+               || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx);
+       qdev = &sh->dev[sh->qd_idx];
+       s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx)
+               || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx)
+               || conf->level < 6;
+
+       if (s.written &&
+           (s.p_failed || ((test_bit(R5_Insync, &pdev->flags)
+                            && !test_bit(R5_LOCKED, &pdev->flags)
+                            && (test_bit(R5_UPTODATE, &pdev->flags) ||
+                                test_bit(R5_Discard, &pdev->flags))))) &&
+           (s.q_failed || ((test_bit(R5_Insync, &qdev->flags)
+                            && !test_bit(R5_LOCKED, &qdev->flags)
+                            && (test_bit(R5_UPTODATE, &qdev->flags) ||
+                                test_bit(R5_Discard, &qdev->flags))))))
+               handle_stripe_clean_event(conf, sh, disks, &s.return_bi);
+
+       /* Now we might consider reading some blocks, either to check/generate
+        * parity, or to satisfy requests
+        * or to load a block that is being partially written.
+        */
+       if (s.to_read || s.non_overwrite
+           || (conf->level == 6 && s.to_write && s.failed)
+           || (s.syncing && (s.uptodate + s.compute < disks))
+           || s.replacing
+           || s.expanding)
+               handle_stripe_fill(sh, &s, disks);
+
        /* Now to consider new write requests and what else, if anything
         * should be read.  We do not handle new writes when:
         * 1/ A 'write' operation (copy+xor) is already in flight.
@@ -5529,6 +5532,10 @@ static int run(struct mddev *mddev)
                 * discard data disk but write parity disk
                 */
                stripe = stripe * PAGE_SIZE;
+               /* Round up to power of 2, as discard handling
+                * currently assumes that */
+               while ((stripe-1) & stripe)
+                       stripe = (stripe | (stripe-1)) + 1;
                mddev->queue->limits.discard_alignment = stripe;
                mddev->queue->limits.discard_granularity = stripe;
                /*
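
The run() hunk rounds the discard granularity up to a power of two with the stripe = (stripe | (stripe - 1)) + 1 loop: x | (x - 1) sets every bit below the lowest set bit of x, the +1 then clears that bit and carries upward, and repeating until a single bit remains yields the next power of two. A quick standalone check of the idiom:

#include <assert.h>
#include <stdio.h>

/* Round a non-zero value up to the next power of two, using the same idiom
 * as the raid5 discard setup above: each pass clears the lowest set bit and
 * carries it upward until only one bit is left. */
static unsigned long roundup_pow2(unsigned long x)
{
        while ((x - 1) & x)
                x = (x | (x - 1)) + 1;
        return x;
}

int main(void)
{
        assert(roundup_pow2(4096) == 4096);             /* already a power of two */
        assert(roundup_pow2(3 * 4096) == 4 * 4096);     /* 12 KiB -> 16 KiB */
        assert(roundup_pow2(5 * 4096) == 8 * 4096);     /* 20 KiB -> 32 KiB */
        printf("ok\n");
        return 0;
}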
index 8f52fc858e48bec08f900963996c0d37bd4601cb..5a5cd2ace4a6b05126e642e06d04950f52108ab8 100644 (file)
@@ -240,7 +240,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength)
 
        if (*(szlength) != '+') {
                devlength = simple_strtoul(szlength, &buffer, 0);
-               devlength = handle_unit(devlength, buffer) - devstart;
+               devlength = handle_unit(devlength, buffer);
                if (devlength < devstart)
                        goto err_out;
 
index ec6841d8e956f74f5224363392632900a39e1f8e..1a03b7f673ce0d59c8613144fd51d1ffb299e4fd 100644 (file)
@@ -2983,13 +2983,15 @@ static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
        /*
         * Field definitions are in the following datasheets:
         * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
-        * New style   (6 byte ID): Samsung K9GAG08U0F (p.44)
+        * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
         * Hynix MLC   (6 byte ID): Hynix H27UBG8T2B (p.22)
         *
-        * Check for ID length, cell type, and Hynix/Samsung ID to decide what
-        * to do.
+        * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
+        * ID to decide what to do.
         */
-       if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG) {
+       if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
+                       (chip->cellinfo & NAND_CI_CELLTYPE_MSK) &&
+                       id_data[5] != 0x00) {
                /* Calc pagesize */
                mtd->writesize = 2048 << (extid & 0x03);
                extid >>= 2;
index 64be8f0848b075cdccb647309ff88d12b3fa547d..d9127e2ed808eab6ddfe5835c68cfa88ef490cf0 100644 (file)
@@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master,
        nr_parts = plen / sizeof(part[0]);
 
        *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL);
-       if (!pparts)
+       if (!*pparts)
                return -ENOMEM;
 
        names = of_get_property(dp, "partition-names", &plen);
index 7153e0d27101e3eed06ac17addcdbd4734c7ccc3..b3f41f200622b39f5ef7d46ab6b772ed2eb9e97e 100644 (file)
@@ -3694,7 +3694,7 @@ static int flexonenand_check_blocks_erased(struct mtd_info *mtd, int start, int
  * flexonenand_set_boundary    - Writes the SLC boundary
  * @param mtd                  - mtd info structure
  */
-int flexonenand_set_boundary(struct mtd_info *mtd, int die,
+static int flexonenand_set_boundary(struct mtd_info *mtd, int die,
                                    int boundary, int lock)
 {
        struct onenand_chip *this = mtd->priv;
index b2530b00212558411f917abbe077ec75cb985f5e..5f5b69f37d2e50d4a6ebbe91f4c3bc1346e042e3 100644 (file)
@@ -1379,6 +1379,8 @@ static void bond_compute_features(struct bonding *bond)
        struct net_device *bond_dev = bond->dev;
        netdev_features_t vlan_features = BOND_VLAN_FEATURES;
        unsigned short max_hard_header_len = ETH_HLEN;
+       unsigned int gso_max_size = GSO_MAX_SIZE;
+       u16 gso_max_segs = GSO_MAX_SEGS;
        int i;
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
 
@@ -1394,11 +1396,16 @@ static void bond_compute_features(struct bonding *bond)
                dst_release_flag &= slave->dev->priv_flags;
                if (slave->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = slave->dev->hard_header_len;
+
+               gso_max_size = min(gso_max_size, slave->dev->gso_max_size);
+               gso_max_segs = min(gso_max_segs, slave->dev->gso_max_segs);
        }
 
 done:
        bond_dev->vlan_features = vlan_features;
        bond_dev->hard_header_len = max_hard_header_len;
+       bond_dev->gso_max_segs = gso_max_segs;
+       netif_set_gso_max_size(bond_dev, gso_max_size);
 
        flags = bond_dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
        bond_dev->priv_flags = flags | dst_release_flag;
index d04911d33b647080278f81678869c032e1a3c853..47618e505355ae91fc0ade4e50231ec780bcc56f 100644 (file)
@@ -813,6 +813,7 @@ static int __init ne_drv_probe(struct platform_device *pdev)
                dev->irq = irq[this_dev];
                dev->mem_end = bad[this_dev];
        }
+       SET_NETDEV_DEV(dev, &pdev->dev);
        err = do_ne_probe(dev);
        if (err) {
                free_netdev(dev);
index bd1fd3d87c24d3979f01a0e9864af9866c62b9aa..01611b33a93de82c8b735b2e1c3e115a9b8e807f 100644 (file)
@@ -9545,10 +9545,13 @@ static int __devinit bnx2x_prev_unload_common(struct bnx2x *bp)
  */
 static void __devinit bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
 {
-       u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
-       if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
-               BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
-               REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, 1 << BP_FUNC(bp));
+       if (!CHIP_IS_E1x(bp)) {
+               u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
+               if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
+                       BNX2X_ERR("was error bit was found to be set in pglueb upon startup. Clearing");
+                       REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
+                              1 << BP_FUNC(bp));
+               }
        }
 }
 
index 1c818254b7bec650da02fc352625570a3937b6f8..b01f83a044c4d77f70141c81fee3815cd050f258 100644 (file)
@@ -979,17 +979,6 @@ static void cp_init_hw (struct cp_private *cp)
        cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
        cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
 
-       cpw32_f(HiTxRingAddr, 0);
-       cpw32_f(HiTxRingAddr + 4, 0);
-
-       ring_dma = cp->ring_dma;
-       cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
-       cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
-
-       ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
-       cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
-       cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
-
        cp_start_hw(cp);
        cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
 
@@ -1003,6 +992,17 @@ static void cp_init_hw (struct cp_private *cp)
 
        cpw8(Config5, cpr8(Config5) & PMEStatus);
 
+       cpw32_f(HiTxRingAddr, 0);
+       cpw32_f(HiTxRingAddr + 4, 0);
+
+       ring_dma = cp->ring_dma;
+       cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
+       cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
+
+       ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
+       cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
+       cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
+
        cpw16(MultiIntr, 0);
 
        cpw8_f(Cfg9346, Cfg9346_Lock);
index fb9f6b38511f97beddbff380c2ae7d31878e4ff4..edf5edb13140fb5908f0197298d17cf3b1cb7503 100644 (file)
@@ -2479,7 +2479,7 @@ static int sis900_resume(struct pci_dev *pci_dev)
        netif_start_queue(net_dev);
 
        /* Workaround for EDB */
-       sis900_set_mode(ioaddr, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
+       sis900_set_mode(sis_priv, HW_SPEED_10_MBPS, FDX_CAPABLE_HALF_SELECTED);
 
        /* Enable all known interrupts by setting the interrupt mask. */
        sw32(imr, RxSOVR | RxORN | RxERR | RxOK | TxURN | TxERR | TxIDLE);
index 77e6db9dcfedb2514da5e9a939f60313ceabbfb7..a788501e978e116908cf0e7a0c92745c275b2009 100644 (file)
@@ -894,6 +894,8 @@ out:
        return IRQ_HANDLED;
 }
 
+static void axienet_dma_err_handler(unsigned long data);
+
 /**
  * axienet_open - Driver open routine.
  * @ndev:      Pointer to net_device structure
index 98934bdf6acffc053fcd317721e8d92cf649ba14..477d6729b17f7f391c34a520bc08f8031bfbd4ae 100644 (file)
@@ -1102,10 +1102,12 @@ static int init_queues(struct port *port)
 {
        int i;
 
-       if (!ports_open)
-               if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
-                                                POOL_ALLOC_SIZE, 32, 0)))
+       if (!ports_open) {
+               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+                                          POOL_ALLOC_SIZE, 32, 0);
+               if (!dma_pool)
                        return -ENOMEM;
+       }
 
        if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
                                              &port->desc_tab_phys)))
index 5039f08f5a5b23cacd0f145cd790a30fce47806e..43e9ab4f4d7edc0b6771c1664d2064c164e37ec3 100644 (file)
@@ -222,7 +222,7 @@ static void sirdev_config_fsm(struct work_struct *work)
                        break;
 
                case SIRDEV_STATE_DONGLE_SPEED:
-                       if (dev->dongle_drv->reset) {
+                       if (dev->dongle_drv->set_speed) {
                                ret = dev->dongle_drv->set_speed(dev, fsm->param);
                                if (ret < 0) {
                                        fsm->result = ret;
index 899274f2f9b1dd1da0aac0442807b6e42b0cddc2..2ed1140df3e9fc0afbc25bce03e5a6eba7ceb228 100644 (file)
@@ -185,17 +185,20 @@ static int __devinit mdio_gpio_probe(struct platform_device *pdev)
 {
        struct mdio_gpio_platform_data *pdata;
        struct mii_bus *new_bus;
-       int ret;
+       int ret, bus_id;
 
-       if (pdev->dev.of_node)
+       if (pdev->dev.of_node) {
                pdata = mdio_gpio_of_get_data(pdev);
-       else
+               bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
+       } else {
                pdata = pdev->dev.platform_data;
+               bus_id = pdev->id;
+       }
 
        if (!pdata)
                return -ENODEV;
 
-       new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, pdev->id);
+       new_bus = mdio_gpio_bus_init(&pdev->dev, pdata, bus_id);
        if (!new_bus)
                return -ENODEV;
 
index 9db0171e93669f483eeae19791ed4e090c333bb0..c5db428e73fa2200f3ef8a5ea5124ceeebb6ef78 100644 (file)
@@ -29,8 +29,8 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
                        if (last) {
                                skb2 = skb_clone(skb, GFP_ATOMIC);
                                if (skb2) {
-                                       ret = team_dev_queue_xmit(team, last,
-                                                                 skb2);
+                                       ret = !team_dev_queue_xmit(team, last,
+                                                                  skb2);
                                        if (!sum_ret)
                                                sum_ret = ret;
                                }
@@ -39,7 +39,7 @@ static bool bc_transmit(struct team *team, struct sk_buff *skb)
                }
        }
        if (last) {
-               ret = team_dev_queue_xmit(team, last, skb);
+               ret = !team_dev_queue_xmit(team, last, skb);
                if (!sum_ret)
                        sum_ret = ret;
        }
index 3f575afd8cfcb03f283df0932f6fb33bb1495cca..e9a3da588e954b1ae38b8becf465b132dbbcd22f 100644 (file)
@@ -969,10 +969,12 @@ static int init_hdlc_queues(struct port *port)
 {
        int i;
 
-       if (!ports_open)
-               if (!(dma_pool = dma_pool_create(DRV_NAME, NULL,
-                                                POOL_ALLOC_SIZE, 32, 0)))
+       if (!ports_open) {
+               dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
+                                          POOL_ALLOC_SIZE, 32, 0);
+               if (!dma_pool)
                        return -ENOMEM;
+       }
 
        if (!(port->desc_tab = dma_pool_alloc(dma_pool, GFP_KERNEL,
                                              &port->desc_tab_phys)))
index 8e1559aba495a0bd7447ac2c79703d49caf70abd..1829b445d0b01852ea11111692174c766b3c7e55 100644 (file)
@@ -1456,7 +1456,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
        switch (type) {
        case ATH9K_RESET_POWER_ON:
                ret = ath9k_hw_set_reset_power_on(ah);
-               if (!ret)
+               if (ret)
                        ah->reset_power_on = true;
                break;
        case ATH9K_RESET_WARM:
index fa4d1b8cd9f6fb62ab56e4bdc9f1811efde7d30d..2d9eee93c743aa0f7bfe6d98330ccacead4fe201 100644 (file)
@@ -1354,6 +1354,20 @@ static int iwlagn_mac_add_interface(struct ieee80211_hw *hw,
        vif_priv->ctx = ctx;
        ctx->vif = vif;
 
+       /*
+        * In SNIFFER device type, the firmware reports the FCS to
+        * the host, rather than snipping it off. Unfortunately,
+        * mac80211 doesn't (yet) provide a per-packet flag for
+        * this, so that we have to set the hardware flag based
+        * on the interfaces added. As the monitor interface can
+        * only be present by itself, and will be removed before
+        * other interfaces are added, this is safe.
+        */
+       if (vif->type == NL80211_IFTYPE_MONITOR)
+               priv->hw->flags |= IEEE80211_HW_RX_INCLUDES_FCS;
+       else
+               priv->hw->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS;
+
        err = iwl_setup_interface(priv, ctx);
        if (!err || reset)
                goto out;
index 105e3af3c621b0b9e335fbe42d1ea1e444bfa579..79a4ddc002d3dac1ce4aa9182bd2a2a0cf76630b 100644 (file)
@@ -480,20 +480,12 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       u16 rd_ptr, wr_ptr;
-       int n_bd = trans_pcie->txq[txq_id].q.n_bd;
 
        if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
                WARN_ONCE(1, "queue %d not used", txq_id);
                return;
        }
 
-       rd_ptr = iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & (n_bd - 1);
-       wr_ptr = iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id));
-
-       WARN_ONCE(rd_ptr != wr_ptr, "queue %d isn't empty: [%d,%d]",
-                 txq_id, rd_ptr, wr_ptr);
-
        iwl_txq_set_inactive(trans, txq_id);
        IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }
index 8d465107f52b2c5073acad20d8d0acbe0485be5e..ae9010ed58debd4a8dc5001e03cbe866b2c9a8a2 100644 (file)
@@ -890,9 +890,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
                return;
        }
        cmd_node = adapter->curr_cmd;
-       if (cmd_node->wait_q_enabled)
-               adapter->cmd_wait_q.status = -ETIMEDOUT;
-
        if (cmd_node) {
                adapter->dbg.timeout_cmd_id =
                        adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index];
@@ -938,6 +935,14 @@ mwifiex_cmd_timeout_func(unsigned long function_context)
 
                dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n",
                        adapter->ps_mode, adapter->ps_state);
+
+               if (cmd_node->wait_q_enabled) {
+                       adapter->cmd_wait_q.status = -ETIMEDOUT;
+                       wake_up_interruptible(&adapter->cmd_wait_q.wait);
+                       mwifiex_cancel_pending_ioctl(adapter);
+                       /* reset cmd_sent flag to unblock new commands */
+                       adapter->cmd_sent = false;
+               }
        }
        if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING)
                mwifiex_init_fw_complete(adapter);
index fc8a9bfa1248305fababa37b59ca90110a57afaa..82cf0fa2d9f683a391bb2215f94447d53d09ef40 100644 (file)
@@ -161,7 +161,6 @@ static int mwifiex_sdio_suspend(struct device *dev)
        struct sdio_mmc_card *card;
        struct mwifiex_adapter *adapter;
        mmc_pm_flag_t pm_flag = 0;
-       int hs_actived = 0;
        int i;
        int ret = 0;
 
@@ -188,12 +187,14 @@ static int mwifiex_sdio_suspend(struct device *dev)
        adapter = card->adapter;
 
        /* Enable the Host Sleep */
-       hs_actived = mwifiex_enable_hs(adapter);
-       if (hs_actived) {
-               pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n");
-               ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+       if (!mwifiex_enable_hs(adapter)) {
+               dev_err(adapter->dev, "cmd: failed to suspend\n");
+               return -EFAULT;
        }
 
+       dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n");
+       ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
+
        /* Indicate device suspended */
        adapter->is_suspended = true;
 
index 9970c2b1b19979dc3577472c7c21e27703a38a76..b7e6607e6b6d038bd604657bb4fdcc883b171d7a 100644 (file)
@@ -297,6 +297,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = {
        /*=== Customer ID ===*/
        /****** 8188CU ********/
        {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/
+       {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/
        {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/
        {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/
        {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/
index caa011008cd0c9fe11b38507e7f550552bd93170..fc24eb9b3948666b0819156539138958dd6888f1 100644 (file)
@@ -452,29 +452,85 @@ static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
        /* Grant backend access to each skb fragment page. */
        for (i = 0; i < frags; i++) {
                skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+               struct page *page = skb_frag_page(frag);
 
-               tx->flags |= XEN_NETTXF_more_data;
+               len = skb_frag_size(frag);
+               offset = frag->page_offset;
 
-               id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
-               np->tx_skbs[id].skb = skb_get(skb);
-               tx = RING_GET_REQUEST(&np->tx, prod++);
-               tx->id = id;
-               ref = gnttab_claim_grant_reference(&np->gref_tx_head);
-               BUG_ON((signed short)ref < 0);
+               /* Data must not cross a page boundary. */
+               BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
 
-               mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
-               gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
-                                               mfn, GNTMAP_readonly);
+               /* Skip unused frames from start of page */
+               page += offset >> PAGE_SHIFT;
+               offset &= ~PAGE_MASK;
 
-               tx->gref = np->grant_tx_ref[id] = ref;
-               tx->offset = frag->page_offset;
-               tx->size = skb_frag_size(frag);
-               tx->flags = 0;
+               while (len > 0) {
+                       unsigned long bytes;
+
+                       BUG_ON(offset >= PAGE_SIZE);
+
+                       bytes = PAGE_SIZE - offset;
+                       if (bytes > len)
+                               bytes = len;
+
+                       tx->flags |= XEN_NETTXF_more_data;
+
+                       id = get_id_from_freelist(&np->tx_skb_freelist,
+                                                 np->tx_skbs);
+                       np->tx_skbs[id].skb = skb_get(skb);
+                       tx = RING_GET_REQUEST(&np->tx, prod++);
+                       tx->id = id;
+                       ref = gnttab_claim_grant_reference(&np->gref_tx_head);
+                       BUG_ON((signed short)ref < 0);
+
+                       mfn = pfn_to_mfn(page_to_pfn(page));
+                       gnttab_grant_foreign_access_ref(ref,
+                                                       np->xbdev->otherend_id,
+                                                       mfn, GNTMAP_readonly);
+
+                       tx->gref = np->grant_tx_ref[id] = ref;
+                       tx->offset = offset;
+                       tx->size = bytes;
+                       tx->flags = 0;
+
+                       offset += bytes;
+                       len -= bytes;
+
+                       /* Next frame */
+                       if (offset == PAGE_SIZE && len) {
+                               BUG_ON(!PageCompound(page));
+                               page++;
+                               offset = 0;
+                       }
+               }
        }
 
        np->tx.req_prod_pvt = prod;
 }
 
+/*
+ * Count how many ring slots are required to send the frags of this
+ * skb. Each frag might be a compound page.
+ */
+static int xennet_count_skb_frag_slots(struct sk_buff *skb)
+{
+       int i, frags = skb_shinfo(skb)->nr_frags;
+       int pages = 0;
+
+       for (i = 0; i < frags; i++) {
+               skb_frag_t *frag = skb_shinfo(skb)->frags + i;
+               unsigned long size = skb_frag_size(frag);
+               unsigned long offset = frag->page_offset;
+
+               /* Skip unused frames from start of page */
+               offset &= ~PAGE_MASK;
+
+               pages += PFN_UP(offset + size);
+       }
+
+       return pages;
+}
+
 static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        unsigned short id;
@@ -487,23 +543,23 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        grant_ref_t ref;
        unsigned long mfn;
        int notify;
-       int frags = skb_shinfo(skb)->nr_frags;
+       int slots;
        unsigned int offset = offset_in_page(data);
        unsigned int len = skb_headlen(skb);
        unsigned long flags;
 
-       frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
-       if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
-               printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
-                      frags);
-               dump_stack();
+       slots = DIV_ROUND_UP(offset + len, PAGE_SIZE) +
+               xennet_count_skb_frag_slots(skb);
+       if (unlikely(slots > MAX_SKB_FRAGS + 1)) {
+               net_alert_ratelimited(
+                       "xennet: skb rides the rocket: %d slots\n", slots);
                goto drop;
        }
 
        spin_lock_irqsave(&np->tx_lock, flags);
 
        if (unlikely(!netif_carrier_ok(dev) ||
-                    (frags > 1 && !xennet_can_sg(dev)) ||
+                    (slots > 1 && !xennet_can_sg(dev)) ||
                     netif_needs_gso(skb, netif_skb_features(skb)))) {
                spin_unlock_irqrestore(&np->tx_lock, flags);
                goto drop;
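
xennet_count_skb_frag_slots() above reduces each frag offset to its in-page part and then counts PFN_UP(offset + size) pages, since a frag of a compound page may start several pages in and still straddle page boundaries. A standalone sketch of that arithmetic, with PAGE_SIZE and PFN_UP defined locally for illustration:

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PFN_UP(x)       (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

/* Ring slots needed for one frag: keep only the in-page part of the
 * offset, then count how many pages [offset, offset + size) touches. */
static unsigned long frag_slots(unsigned long offset, unsigned long size)
{
        offset &= ~PAGE_MASK;
        return PFN_UP(offset + size);
}

int main(void)
{
        assert(frag_slots(0, 100) == 1);                /* fits in one page */
        assert(frag_slots(4000, 200) == 2);             /* crosses one boundary */
        assert(frag_slots(3 * 4096 + 4000, 200) == 2);  /* same, compound page */
        assert(frag_slots(0, 3 * 4096) == 3);           /* three full pages */
        printf("ok\n");
        return 0;
}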
index 97c440a8cd615798a1e61250628e0030ede37694..30ae18a03a9ccc650a195547086866a66646cd98 100644 (file)
@@ -698,13 +698,14 @@ static void pn533_wq_cmd(struct work_struct *work)
 
        cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue);
 
+       list_del(&cmd->queue);
+
        mutex_unlock(&dev->cmd_lock);
 
        __pn533_send_cmd_frame_async(dev, cmd->out_frame, cmd->in_frame,
                                     cmd->in_frame_len, cmd->cmd_complete,
                                     cmd->arg, cmd->flags);
 
-       list_del(&cmd->queue);
        kfree(cmd);
 }
 
@@ -1678,11 +1679,14 @@ static void pn533_deactivate_target(struct nfc_dev *nfc_dev,
 static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
                                                u8 *params, int params_len)
 {
-       struct pn533_cmd_jump_dep *cmd;
        struct pn533_cmd_jump_dep_response *resp;
        struct nfc_target nfc_target;
        u8 target_gt_len;
        int rc;
+       struct pn533_cmd_jump_dep *cmd = (struct pn533_cmd_jump_dep *)arg;
+       u8 active = cmd->active;
+
+       kfree(arg);
 
        if (params_len == -ENOENT) {
                nfc_dev_dbg(&dev->interface->dev, "");
@@ -1704,7 +1708,6 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
        }
 
        resp = (struct pn533_cmd_jump_dep_response *) params;
-       cmd = (struct pn533_cmd_jump_dep *) arg;
        rc = resp->status & PN533_CMD_RET_MASK;
        if (rc != PN533_CMD_RET_SUCCESS) {
                nfc_dev_err(&dev->interface->dev,
@@ -1734,7 +1737,7 @@ static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg,
        if (rc == 0)
                rc = nfc_dep_link_is_up(dev->nfc_dev,
                                                dev->nfc_dev->targets[0].idx,
-                                               !cmd->active, NFC_RF_INITIATOR);
+                                               !active, NFC_RF_INITIATOR);
 
        return 0;
 }
@@ -1819,12 +1822,8 @@ static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target,
        rc = pn533_send_cmd_frame_async(dev, dev->out_frame, dev->in_frame,
                                dev->in_maxlen, pn533_in_dep_link_up_complete,
                                cmd, GFP_KERNEL);
-       if (rc)
-               goto out;
-
-
-out:
-       kfree(cmd);
+       if (rc < 0)
+               kfree(cmd);
 
        return rc;
 }
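The pn533 changes above (and the pn533_tm_send ones that follow) enforce one ownership rule: a buffer handed to pn533_send_cmd_frame_async() is freed by its completion callback, and the submitter frees it only when submission itself fails. A generic sketch of that rule, with invented names standing in for the driver's helpers:

#include <linux/slab.h>

/* invented stand-ins: an asynchronous submit that calls 'complete' exactly
 * once on success, and never calls it when it returns an error */
typedef void (*done_fn)(void *arg);
int async_submit(void *arg, done_fn complete);

static void my_complete(void *arg)
{
	kfree(arg);			/* the completion callback owns the argument */
}

static int my_send(void *payload)
{
	int rc = async_submit(payload, my_complete);

	if (rc < 0)
		kfree(payload);		/* submission failed: ownership never transferred */
	return rc;
}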
@@ -2078,8 +2077,12 @@ error:
 static int pn533_tm_send_complete(struct pn533 *dev, void *arg,
                                  u8 *params, int params_len)
 {
+       struct sk_buff *skb_out = arg;
+
        nfc_dev_dbg(&dev->interface->dev, "%s", __func__);
 
+       dev_kfree_skb(skb_out);
+
        if (params_len < 0) {
                nfc_dev_err(&dev->interface->dev,
                            "Error %d when sending data",
@@ -2117,7 +2120,7 @@ static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb)
 
        rc = pn533_send_cmd_frame_async(dev, out_frame, dev->in_frame,
                                        dev->in_maxlen, pn533_tm_send_complete,
-                                       NULL, GFP_KERNEL);
+                                       skb, GFP_KERNEL);
        if (rc) {
                nfc_dev_err(&dev->interface->dev,
                            "Error %d when trying to send data", rc);
index d96caefd914a90d2db11f05278f565a7e7cc79ee..aeecf0f72cad50c633953e4ffd9617709239095b 100644 (file)
@@ -178,7 +178,7 @@ config PINCTRL_COH901
          ports of 8 GPIO pins each.
 
 config PINCTRL_SAMSUNG
-       bool "Samsung pinctrl driver"
+       bool
        depends on OF && GPIOLIB
        select PINMUX
        select PINCONF
index c1bafc3f3fb19bad3a1587c85345ef2c4fa9545f..9594ab62702b1786d51d7ac80f7e10b365b6ce61 100644 (file)
@@ -1972,7 +1972,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
                                                                      frame_index,
                                                                      (void **)&frame_buffer);
 
-                       sci_controller_copy_sata_response(&ireq->stp.req,
+                       sci_controller_copy_sata_response(&ireq->stp.rsp,
                                                               frame_header,
                                                               frame_buffer);
 
index 2936b447cae93bc544fc054c862ecd6d1abb92c6..2c0d0ec8150b62d62d8ca3ca9ecc777a950a7c0a 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 #include <linux/async.h>
+#include <asm/unaligned.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_cmnd.h>
@@ -1061,6 +1062,50 @@ int scsi_get_vpd_page(struct scsi_device *sdev, u8 page, unsigned char *buf,
 }
 EXPORT_SYMBOL_GPL(scsi_get_vpd_page);
 
+/**
+ * scsi_report_opcode - Find out if a given command opcode is supported
+ * @sdev:      scsi device to query
+ * @buffer:    scratch buffer (must be at least 20 bytes long)
+ * @len:       length of buffer
+ * @opcode:    opcode for command to look up
+ *
+ * Uses the REPORT SUPPORTED OPERATION CODES command to look up the given
+ * opcode. Returns 0 if RSOC fails or if the command opcode is
+ * unsupported. Returns 1 if the device claims to support the command.
+ */
+int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+                      unsigned int len, unsigned char opcode)
+{
+       unsigned char cmd[16];
+       struct scsi_sense_hdr sshdr;
+       int result;
+
+       if (sdev->no_report_opcodes || sdev->scsi_level < SCSI_SPC_3)
+               return 0;
+
+       memset(cmd, 0, 16);
+       cmd[0] = MAINTENANCE_IN;
+       cmd[1] = MI_REPORT_SUPPORTED_OPERATION_CODES;
+       cmd[2] = 1;             /* One command format */
+       cmd[3] = opcode;
+       put_unaligned_be32(len, &cmd[6]);
+       memset(buffer, 0, len);
+
+       result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
+                                 &sshdr, 30 * HZ, 3, NULL);
+
+       if (result && scsi_sense_valid(&sshdr) &&
+           sshdr.sense_key == ILLEGAL_REQUEST &&
+           (sshdr.asc == 0x20 || sshdr.asc == 0x24) && sshdr.ascq == 0x00)
+               return 0;
+
+       if ((buffer[1] & 3) == 3) /* Command supported */
+               return 1;
+
+       return 0;
+}
+EXPORT_SYMBOL(scsi_report_opcode);
+
 /**
  * scsi_device_get  -  get an additional reference to a scsi_device
  * @sdev:      device to get a reference to
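A caller probing for a single opcode only needs a scratch buffer and the 0/1 return value. A minimal sketch of such a probe, in the spirit of the sd_read_write_same() hunk further down; my_device_supports() and its buffer arguments are invented for illustration, not part of the patch:

#include <linux/types.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>

/*
 * Illustrative only: 'buf' is a caller-provided scratch buffer of at least
 * 20 bytes, as required by scsi_report_opcode().
 */
static bool my_device_supports(struct scsi_device *sdev, unsigned char *buf,
			       unsigned int buf_len, unsigned char opcode)
{
	/* 1 only when the device explicitly claims support; 0 covers both
	 * "opcode unsupported" and "RSOC itself not usable" */
	return scsi_report_opcode(sdev, buf, buf_len, opcode) == 1;
}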
index da36a3a81a9ee2206f753a04d4fbbe91b883e00a..9032e910bca3044055ed23310b38c126318736bb 100644 (file)
@@ -900,11 +900,23 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                                action = ACTION_FAIL;
                                error = -EILSEQ;
                        /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
-                       } else if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
-                                  (cmd->cmnd[0] == UNMAP ||
-                                   cmd->cmnd[0] == WRITE_SAME_16 ||
-                                   cmd->cmnd[0] == WRITE_SAME)) {
-                               description = "Discard failure";
+                       } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+                               switch (cmd->cmnd[0]) {
+                               case UNMAP:
+                                       description = "Discard failure";
+                                       break;
+                               case WRITE_SAME:
+                               case WRITE_SAME_16:
+                                       if (cmd->cmnd[1] & 0x8)
+                                               description = "Discard failure";
+                                       else
+                                               description =
+                                                       "Write same failure";
+                                       break;
+                               default:
+                                       description = "Invalid command failure";
+                                       break;
+                               }
                                action = ACTION_FAIL;
                                error = -EREMOTEIO;
                        } else
index 12f6fdfc11474a9363ae6c641959832d2c1d6440..352bc77b7c886fb80c057019c3edc2d5b95b7d7e 100644 (file)
@@ -99,6 +99,7 @@ MODULE_ALIAS_SCSI_DEVICE(TYPE_RBC);
 #endif
 
 static void sd_config_discard(struct scsi_disk *, unsigned int);
+static void sd_config_write_same(struct scsi_disk *);
 static int  sd_revalidate_disk(struct gendisk *);
 static void sd_unlock_native_capacity(struct gendisk *disk);
 static int  sd_probe(struct device *);
@@ -395,6 +396,45 @@ sd_store_max_medium_access_timeouts(struct device *dev,
        return err ? err : count;
 }
 
+static ssize_t
+sd_show_write_same_blocks(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+       return snprintf(buf, 20, "%u\n", sdkp->max_ws_blocks);
+}
+
+static ssize_t
+sd_store_write_same_blocks(struct device *dev, struct device_attribute *attr,
+                          const char *buf, size_t count)
+{
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+       struct scsi_device *sdp = sdkp->device;
+       unsigned long max;
+       int err;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       if (sdp->type != TYPE_DISK)
+               return -EINVAL;
+
+       err = kstrtoul(buf, 10, &max);
+
+       if (err)
+               return err;
+
+       if (max == 0)
+               sdp->no_write_same = 1;
+       else if (max <= SD_MAX_WS16_BLOCKS)
+               sdkp->max_ws_blocks = max;
+
+       sd_config_write_same(sdkp);
+
+       return count;
+}
+
 static struct device_attribute sd_disk_attrs[] = {
        __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
               sd_store_cache_type),
@@ -410,6 +450,8 @@ static struct device_attribute sd_disk_attrs[] = {
        __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
        __ATTR(provisioning_mode, S_IRUGO|S_IWUSR, sd_show_provisioning_mode,
               sd_store_provisioning_mode),
+       __ATTR(max_write_same_blocks, S_IRUGO|S_IWUSR,
+              sd_show_write_same_blocks, sd_store_write_same_blocks),
        __ATTR(max_medium_access_timeouts, S_IRUGO|S_IWUSR,
               sd_show_max_medium_access_timeouts,
               sd_store_max_medium_access_timeouts),
@@ -561,19 +603,23 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
                return;
 
        case SD_LBP_UNMAP:
-               max_blocks = min_not_zero(sdkp->max_unmap_blocks, 0xffffffff);
+               max_blocks = min_not_zero(sdkp->max_unmap_blocks,
+                                         (u32)SD_MAX_WS16_BLOCKS);
                break;
 
        case SD_LBP_WS16:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks, 0xffffffff);
+               max_blocks = min_not_zero(sdkp->max_ws_blocks,
+                                         (u32)SD_MAX_WS16_BLOCKS);
                break;
 
        case SD_LBP_WS10:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+               max_blocks = min_not_zero(sdkp->max_ws_blocks,
+                                         (u32)SD_MAX_WS10_BLOCKS);
                break;
 
        case SD_LBP_ZERO:
-               max_blocks = min_not_zero(sdkp->max_ws_blocks, (u32)0xffff);
+               max_blocks = min_not_zero(sdkp->max_ws_blocks,
+                                         (u32)SD_MAX_WS10_BLOCKS);
                q->limits.discard_zeroes_data = 1;
                break;
        }
@@ -583,29 +629,26 @@ static void sd_config_discard(struct scsi_disk *sdkp, unsigned int mode)
 }
 
 /**
- * scsi_setup_discard_cmnd - unmap blocks on thinly provisioned device
+ * sd_setup_discard_cmnd - unmap blocks on thinly provisioned device
  * @sdp: scsi device to operate on
  * @rq: Request to prepare
  *
  * Will issue either UNMAP or WRITE SAME(16) depending on preference
  * indicated by target device.
  **/
-static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
+static int sd_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
 {
        struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
-       struct bio *bio = rq->bio;
-       sector_t sector = bio->bi_sector;
-       unsigned int nr_sectors = bio_sectors(bio);
+       sector_t sector = blk_rq_pos(rq);
+       unsigned int nr_sectors = blk_rq_sectors(rq);
+       unsigned int nr_bytes = blk_rq_bytes(rq);
        unsigned int len;
        int ret;
        char *buf;
        struct page *page;
 
-       if (sdkp->device->sector_size == 4096) {
-               sector >>= 3;
-               nr_sectors >>= 3;
-       }
-
+       sector >>= ilog2(sdp->sector_size) - 9;
+       nr_sectors >>= ilog2(sdp->sector_size) - 9;
        rq->timeout = SD_TIMEOUT;
 
        memset(rq->cmd, 0, rq->cmd_len);
@@ -660,6 +703,7 @@ static int scsi_setup_discard_cmnd(struct scsi_device *sdp, struct request *rq)
        blk_add_request_payload(rq, page, len);
        ret = scsi_setup_blk_pc_cmnd(sdp, rq);
        rq->buffer = page_address(page);
+       rq->__data_len = nr_bytes;
 
 out:
        if (ret != BLKPREP_OK) {
@@ -669,6 +713,83 @@ out:
        return ret;
 }
 
+static void sd_config_write_same(struct scsi_disk *sdkp)
+{
+       struct request_queue *q = sdkp->disk->queue;
+       unsigned int logical_block_size = sdkp->device->sector_size;
+       unsigned int blocks = 0;
+
+       if (sdkp->device->no_write_same) {
+               sdkp->max_ws_blocks = 0;
+               goto out;
+       }
+
+       /* Some devices can not handle block counts above 0xffff despite
+        * supporting WRITE SAME(16). Consequently we default to 64k
+        * blocks per I/O unless the device explicitly advertises a
+        * bigger limit.
+        */
+       if (sdkp->max_ws_blocks == 0)
+               sdkp->max_ws_blocks = SD_MAX_WS10_BLOCKS;
+
+       if (sdkp->ws16 || sdkp->max_ws_blocks > SD_MAX_WS10_BLOCKS)
+               blocks = min_not_zero(sdkp->max_ws_blocks,
+                                     (u32)SD_MAX_WS16_BLOCKS);
+       else
+               blocks = min_not_zero(sdkp->max_ws_blocks,
+                                     (u32)SD_MAX_WS10_BLOCKS);
+
+out:
+       blk_queue_max_write_same_sectors(q, blocks * (logical_block_size >> 9));
+}
+
+/**
+ * sd_setup_write_same_cmnd - write the same data to multiple blocks
+ * @sdp: scsi device to operate on
+ * @rq: Request to prepare
+ *
+ * Will issue either WRITE SAME(10) or WRITE SAME(16) depending on
+ * preference indicated by target device.
+ **/
+static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
+{
+       struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+       struct bio *bio = rq->bio;
+       sector_t sector = blk_rq_pos(rq);
+       unsigned int nr_sectors = blk_rq_sectors(rq);
+       unsigned int nr_bytes = blk_rq_bytes(rq);
+       int ret;
+
+       if (sdkp->device->no_write_same)
+               return BLKPREP_KILL;
+
+       BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size);
+
+       sector >>= ilog2(sdp->sector_size) - 9;
+       nr_sectors >>= ilog2(sdp->sector_size) - 9;
+
+       rq->__data_len = sdp->sector_size;
+       rq->timeout = SD_WRITE_SAME_TIMEOUT;
+       memset(rq->cmd, 0, rq->cmd_len);
+
+       if (sdkp->ws16 || sector > 0xffffffff || nr_sectors > 0xffff) {
+               rq->cmd_len = 16;
+               rq->cmd[0] = WRITE_SAME_16;
+               put_unaligned_be64(sector, &rq->cmd[2]);
+               put_unaligned_be32(nr_sectors, &rq->cmd[10]);
+       } else {
+               rq->cmd_len = 10;
+               rq->cmd[0] = WRITE_SAME;
+               put_unaligned_be32(sector, &rq->cmd[2]);
+               put_unaligned_be16(nr_sectors, &rq->cmd[7]);
+       }
+
+       ret = scsi_setup_blk_pc_cmnd(sdp, rq);
+       rq->__data_len = nr_bytes;
+
+       return ret;
+}
+
 static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq)
 {
        rq->timeout = SD_FLUSH_TIMEOUT;
@@ -712,7 +833,10 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
         * block PC requests to make life easier.
         */
        if (rq->cmd_flags & REQ_DISCARD) {
-               ret = scsi_setup_discard_cmnd(sdp, rq);
+               ret = sd_setup_discard_cmnd(sdp, rq);
+               goto out;
+       } else if (rq->cmd_flags & REQ_WRITE_SAME) {
+               ret = sd_setup_write_same_cmnd(sdp, rq);
                goto out;
        } else if (rq->cmd_flags & REQ_FLUSH) {
                ret = scsi_setup_flush_cmnd(sdp, rq);
@@ -1482,12 +1606,21 @@ static int sd_done(struct scsi_cmnd *SCpnt)
        unsigned int good_bytes = result ? 0 : scsi_bufflen(SCpnt);
        struct scsi_sense_hdr sshdr;
        struct scsi_disk *sdkp = scsi_disk(SCpnt->request->rq_disk);
+       struct request *req = SCpnt->request;
        int sense_valid = 0;
        int sense_deferred = 0;
        unsigned char op = SCpnt->cmnd[0];
+       unsigned char unmap = SCpnt->cmnd[1] & 8;
 
-       if ((SCpnt->request->cmd_flags & REQ_DISCARD) && !result)
-               scsi_set_resid(SCpnt, 0);
+       if (req->cmd_flags & REQ_DISCARD || req->cmd_flags & REQ_WRITE_SAME) {
+               if (!result) {
+                       good_bytes = blk_rq_bytes(req);
+                       scsi_set_resid(SCpnt, 0);
+               } else {
+                       good_bytes = 0;
+                       scsi_set_resid(SCpnt, blk_rq_bytes(req));
+               }
+       }
 
        if (result) {
                sense_valid = scsi_command_normalize_sense(SCpnt, &sshdr);
@@ -1536,9 +1669,25 @@ static int sd_done(struct scsi_cmnd *SCpnt)
                if (sshdr.asc == 0x10)  /* DIX: Host detected corruption */
                        good_bytes = sd_completed_bytes(SCpnt);
                /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */
-               if ((sshdr.asc == 0x20 || sshdr.asc == 0x24) &&
-                   (op == UNMAP || op == WRITE_SAME_16 || op == WRITE_SAME))
-                       sd_config_discard(sdkp, SD_LBP_DISABLE);
+               if (sshdr.asc == 0x20 || sshdr.asc == 0x24) {
+                       switch (op) {
+                       case UNMAP:
+                               sd_config_discard(sdkp, SD_LBP_DISABLE);
+                               break;
+                       case WRITE_SAME_16:
+                       case WRITE_SAME:
+                               if (unmap)
+                                       sd_config_discard(sdkp, SD_LBP_DISABLE);
+                               else {
+                                       sdkp->device->no_write_same = 1;
+                                       sd_config_write_same(sdkp);
+
+                                       good_bytes = 0;
+                                       req->__data_len = blk_rq_bytes(req);
+                                       req->cmd_flags |= REQ_QUIET;
+                               }
+                       }
+               }
                break;
        default:
                break;
@@ -2374,9 +2523,7 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
        if (buffer[3] == 0x3c) {
                unsigned int lba_count, desc_count;
 
-               sdkp->max_ws_blocks =
-                       (u32) min_not_zero(get_unaligned_be64(&buffer[36]),
-                                          (u64)0xffffffff);
+               sdkp->max_ws_blocks = (u32)get_unaligned_be64(&buffer[36]);
 
                if (!sdkp->lbpme)
                        goto out;
@@ -2469,6 +2616,13 @@ static void sd_read_block_provisioning(struct scsi_disk *sdkp)
        kfree(buffer);
 }
 
+static void sd_read_write_same(struct scsi_disk *sdkp, unsigned char *buffer)
+{
+       if (scsi_report_opcode(sdkp->device, buffer, SD_BUF_SIZE,
+                              WRITE_SAME_16))
+               sdkp->ws16 = 1;
+}
+
 static int sd_try_extended_inquiry(struct scsi_device *sdp)
 {
        /*
@@ -2528,6 +2682,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
                sd_read_write_protect_flag(sdkp, buffer);
                sd_read_cache_type(sdkp, buffer);
                sd_read_app_tag_own(sdkp, buffer);
+               sd_read_write_same(sdkp, buffer);
        }
 
        sdkp->first_scan = 0;
@@ -2545,6 +2700,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
        blk_queue_flush(sdkp->disk->queue, flush);
 
        set_capacity(disk, sdkp->capacity);
+       sd_config_write_same(sdkp);
        kfree(buffer);
 
  out:
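blk_queue_max_write_same_sectors() takes 512-byte sectors, so sd_config_write_same() scales the block count by the logical block size. A standalone sketch of that conversion for an invented 4 KiB-sector device that advertises no limit of its own:

#include <stdio.h>

#define SD_MAX_WS10_BLOCKS 0xffff
#define SD_MAX_WS16_BLOCKS 0x7fffff

int main(void)
{
	/* hypothetical device: 4096-byte logical blocks, nothing advertised */
	unsigned int logical_block_size = 4096;
	unsigned int blocks = SD_MAX_WS10_BLOCKS;	/* the conservative default */

	/* same scaling as blk_queue_max_write_same_sectors(q, blocks * (lbs >> 9)) */
	unsigned int max_sectors = blocks * (logical_block_size >> 9);

	printf("max WRITE SAME per command: %u sectors (%u bytes)\n",
	       max_sectors, max_sectors * 512);	/* 524280 sectors, ~256 MiB */
	return 0;
}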
index 47c52a6d733c0f757b4ef42236e89fba172b857c..74a1e4ca5401f9a58e6e705bb2fc8a99fcd04a0f 100644 (file)
@@ -14,6 +14,7 @@
 #define SD_TIMEOUT             (30 * HZ)
 #define SD_MOD_TIMEOUT         (75 * HZ)
 #define SD_FLUSH_TIMEOUT       (60 * HZ)
+#define SD_WRITE_SAME_TIMEOUT  (120 * HZ)
 
 /*
  * Number of allowed retries
@@ -38,6 +39,11 @@ enum {
        SD_MEMPOOL_SIZE = 2,    /* CDB pool size */
 };
 
+enum {
+       SD_MAX_WS10_BLOCKS = 0xffff,
+       SD_MAX_WS16_BLOCKS = 0x7fffff,
+};
+
 enum {
        SD_LBP_FULL = 0,        /* Full logical block provisioning */
        SD_LBP_UNMAP,           /* Use UNMAP command */
@@ -77,6 +83,7 @@ struct scsi_disk {
        unsigned        lbpws : 1;
        unsigned        lbpws10 : 1;
        unsigned        lbpvpd : 1;
+       unsigned        ws16 : 1;
 };
 #define to_scsi_disk(obj) container_of(obj,struct scsi_disk,dev)
 
index a3d54366afccde598a4ba5f3240909044f975266..92f35abee92d281990e17ccdb4f388d33ed67f0e 100644 (file)
@@ -186,6 +186,12 @@ static int slave_configure(struct scsi_device *sdev)
                /* Some devices don't handle VPD pages correctly */
                sdev->skip_vpd_pages = 1;
 
+               /* Do not attempt to use REPORT SUPPORTED OPERATION CODES */
+               sdev->no_report_opcodes = 1;
+
+               /* Do not attempt to use WRITE SAME */
+               sdev->no_write_same = 1;
+
                /* Some disks return the total number of blocks in response
                 * to READ CAPACITY rather than the highest block number.
                 * If this device makes that mistake, tell the sd driver. */
index d64ac3842884f4e9eac994dd03fdc311646d27b9..bee92846cfab930f6304a9ebb4316b25b8780bc7 100644 (file)
@@ -365,11 +365,20 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
        struct omap_dss_output *out;
        enum omap_dss_output_id id;
 
-       id = module == 0 ? OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
+       switch (module) {
+       case 0:
+               id = OMAP_DSS_OUTPUT_DSI1;
+               break;
+       case 1:
+               id = OMAP_DSS_OUTPUT_DSI2;
+               break;
+       default:
+               return NULL;
+       }
 
        out = omap_dss_get_output(id);
 
-       return out->pdev;
+       return out ? out->pdev : NULL;
 }
 
 static inline void dsi_write_reg(struct platform_device *dsidev,
index 2ab1c3e96553d81297ad5f942eed2cfb76a5064b..5f6eea801b064a37949b3cc66bf9ca5f2dde3431 100644 (file)
@@ -697,11 +697,15 @@ static int dss_get_clocks(void)
 
        dss.dss_clk = clk;
 
-       clk = clk_get(NULL, dss.feat->clk_name);
-       if (IS_ERR(clk)) {
-               DSSERR("Failed to get %s\n", dss.feat->clk_name);
-               r = PTR_ERR(clk);
-               goto err;
+       if (dss.feat->clk_name) {
+               clk = clk_get(NULL, dss.feat->clk_name);
+               if (IS_ERR(clk)) {
+                       DSSERR("Failed to get %s\n", dss.feat->clk_name);
+                       r = PTR_ERR(clk);
+                       goto err;
+               }
+       } else {
+               clk = NULL;
        }
 
        dss.dpll4_m4_ck = clk;
@@ -805,10 +809,10 @@ static int __init dss_init_features(struct device *dev)
 
        if (cpu_is_omap24xx())
                src = &omap24xx_dss_feats;
-       else if (cpu_is_omap34xx())
-               src = &omap34xx_dss_feats;
        else if (cpu_is_omap3630())
                src = &omap3630_dss_feats;
+       else if (cpu_is_omap34xx())
+               src = &omap34xx_dss_feats;
        else if (cpu_is_omap44xx())
                src = &omap44xx_dss_feats;
        else if (soc_is_omap54xx())
index a48a7dd75b3303a3bba28e10b01e4c1f133c00d3..8c9b8b3b7f773cd41c00f78b923070f87c325f70 100644 (file)
@@ -644,8 +644,10 @@ static void hdmi_dump_regs(struct seq_file *s)
 {
        mutex_lock(&hdmi.lock);
 
-       if (hdmi_runtime_get())
+       if (hdmi_runtime_get()) {
+               mutex_unlock(&hdmi.lock);
                return;
+       }
 
        hdmi.ip_data.ops->dump_wrapper(&hdmi.ip_data, s);
        hdmi.ip_data.ops->dump_pll(&hdmi.ip_data, s);
index 606b89f12351d4e1cc2cb234fd4e5c481bf82cda..d630b26a005c7ae2e0bea55b1c0a644fc64ad1e9 100644 (file)
@@ -787,7 +787,7 @@ int omapfb_ioctl(struct fb_info *fbi, unsigned int cmd, unsigned long arg)
 
        case OMAPFB_WAITFORVSYNC:
                DBG("ioctl WAITFORVSYNC\n");
-               if (!display && !display->output && !display->output->manager) {
+               if (!display || !display->output || !display->output->manager) {
                        r = -EINVAL;
                        break;
                }
index 8adb9cc267f96e201ade041441ef8a317ce336d8..71f5c459b088aa5e21066b77adbcb5f34f7b9843 100644 (file)
@@ -361,13 +361,13 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
        down_write(&mm->mmap_sem);
 
        vma = find_vma(mm, m.addr);
-       ret = -EINVAL;
        if (!vma ||
            vma->vm_ops != &privcmd_vm_ops ||
            (m.addr != vma->vm_start) ||
            ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
            !privcmd_enforce_singleshot_mapping(vma)) {
                up_write(&mm->mmap_sem);
+               ret = -EINVAL;
                goto out;
        }
 
@@ -383,12 +383,16 @@ static long privcmd_ioctl_mmap_batch(void __user *udata, int version)
 
        up_write(&mm->mmap_sem);
 
-       if (state.global_error && (version == 1)) {
-               /* Write back errors in second pass. */
-               state.user_mfn = (xen_pfn_t *)m.arr;
-               state.err      = err_array;
-               ret = traverse_pages(m.num, sizeof(xen_pfn_t),
-                                    &pagelist, mmap_return_errors_v1, &state);
+       if (version == 1) {
+               if (state.global_error) {
+                       /* Write back errors in second pass. */
+                       state.user_mfn = (xen_pfn_t *)m.arr;
+                       state.err      = err_array;
+                       ret = traverse_pages(m.num, sizeof(xen_pfn_t),
+                                            &pagelist, mmap_return_errors_v1, &state);
+               } else
+                       ret = 0;
+
        } else if (version == 2) {
                ret = __copy_to_user(m.err, err_array, m.num * sizeof(int));
                if (ret)
index 7320a66e958f8d45e796c796ee150b786c3f7f8e..22548f56197b52cf06b343285b7e7beb7d74cb10 100644 (file)
@@ -2101,8 +2101,9 @@ int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
        end = start + (range->len >> sb->s_blocksize_bits) - 1;
        minlen = range->minlen >> sb->s_blocksize_bits;
 
-       if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) ||
-           unlikely(start >= max_blks))
+       if (minlen > EXT3_BLOCKS_PER_GROUP(sb) ||
+           start >= max_blks ||
+           range->len < sb->s_blocksize)
                return -EINVAL;
        if (end >= max_blks)
                end = max_blks - 1;
index 708d997a77485989d53583084a3e1b99354fb407..7cb71b99260340fe7302d2a8c5dc4f84f8029aff 100644 (file)
--- a/fs/file.c
+++ b/fs/file.c
@@ -685,7 +685,6 @@ void do_close_on_exec(struct files_struct *files)
        struct fdtable *fdt;
 
        /* exec unshares first */
-       BUG_ON(atomic_read(&files->count) != 1);
        spin_lock(&files->file_lock);
        for (i = 0; ; i++) {
                unsigned long set;
index 60ef3fb707ffbfc6a77f1257efc82544cf0f9489..1506673c087e11ae820245baadc8ae74cb9f01b2 100644 (file)
@@ -138,33 +138,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
        struct page *pg;
        struct inode *inode = mapping->host;
        struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode);
+       struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
+       struct jffs2_raw_inode ri;
+       uint32_t alloc_len = 0;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
        uint32_t pageofs = index << PAGE_CACHE_SHIFT;
        int ret = 0;
 
+       jffs2_dbg(1, "%s()\n", __func__);
+
+       if (pageofs > inode->i_size) {
+               ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
+                                         ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
+               if (ret)
+                       return ret;
+       }
+
+       mutex_lock(&f->sem);
        pg = grab_cache_page_write_begin(mapping, index, flags);
-       if (!pg)
+       if (!pg) {
+               if (alloc_len)
+                       jffs2_complete_reservation(c);
+               mutex_unlock(&f->sem);
                return -ENOMEM;
+       }
        *pagep = pg;
 
-       jffs2_dbg(1, "%s()\n", __func__);
-
-       if (pageofs > inode->i_size) {
+       if (alloc_len) {
                /* Make new hole frag from old EOF to new page */
-               struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb);
-               struct jffs2_raw_inode ri;
                struct jffs2_full_dnode *fn;
-               uint32_t alloc_len;
 
                jffs2_dbg(1, "Writing new hole frag 0x%x-0x%x between current EOF and new page\n",
                          (unsigned int)inode->i_size, pageofs);
 
-               ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len,
-                                         ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE);
-               if (ret)
-                       goto out_page;
-
-               mutex_lock(&f->sem);
                memset(&ri, 0, sizeof(ri));
 
                ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK);
@@ -191,7 +197,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                if (IS_ERR(fn)) {
                        ret = PTR_ERR(fn);
                        jffs2_complete_reservation(c);
-                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                ret = jffs2_add_full_dnode_to_inode(c, f, fn);
@@ -206,12 +211,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
                        jffs2_mark_node_obsolete(c, fn->raw);
                        jffs2_free_full_dnode(fn);
                        jffs2_complete_reservation(c);
-                       mutex_unlock(&f->sem);
                        goto out_page;
                }
                jffs2_complete_reservation(c);
                inode->i_size = pageofs;
-               mutex_unlock(&f->sem);
        }
 
        /*
@@ -220,18 +223,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping,
         * case of a short-copy.
         */
        if (!PageUptodate(pg)) {
-               mutex_lock(&f->sem);
                ret = jffs2_do_readpage_nolock(inode, pg);
-               mutex_unlock(&f->sem);
                if (ret)
                        goto out_page;
        }
+       mutex_unlock(&f->sem);
        jffs2_dbg(1, "end write_begin(). pg->flags %lx\n", pg->flags);
        return ret;
 
 out_page:
        unlock_page(pg);
        page_cache_release(pg);
+       mutex_unlock(&f->sem);
        return ret;
 }
 
index 721d692fa8d4a20dd5bf593d3f732d8b3f913261..6fcaeb8c902e4c3bc862c6c81db0a884068274b7 100644 (file)
@@ -258,7 +258,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
        if (ret)
                goto out_close_fd;
 
-       fd_install(fd, f);
+       if (fd != FAN_NOFD)
+               fd_install(fd, f);
        return fanotify_event_metadata.event_len;
 
 out_close_fd:
index f27f01a98aa2c9573c6cc61fe093c821ce6e107c..d83736fbc26cb4679cf074ee33263f125f13afa7 100644 (file)
@@ -1782,8 +1782,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 
        BUG_ON(!th->t_trans_id);
 
-       dquot_initialize(inode);
+       reiserfs_write_unlock(inode->i_sb);
        err = dquot_alloc_inode(inode);
+       reiserfs_write_lock(inode->i_sb);
        if (err)
                goto out_end_trans;
        if (!dir->i_nlink) {
@@ -1979,8 +1980,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 
       out_end_trans:
        journal_end(th, th->t_super, th->t_blocks_allocated);
+       reiserfs_write_unlock(inode->i_sb);
        /* Drop can be outside and it needs more credits so it's better to have it outside */
        dquot_drop(inode);
+       reiserfs_write_lock(inode->i_sb);
        inode->i_flags |= S_NOQUOTA;
        make_bad_inode(inode);
 
@@ -3103,10 +3106,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
        /* must be turned off for recursive notify_change calls */
        ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID);
 
-       depth = reiserfs_write_lock_once(inode->i_sb);
        if (is_quota_modification(inode, attr))
                dquot_initialize(inode);
-
+       depth = reiserfs_write_lock_once(inode->i_sb);
        if (attr->ia_valid & ATTR_SIZE) {
                /* version 2 items will be caught by the s_maxbytes check
                 ** done for us in vmtruncate
@@ -3170,7 +3172,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr)
                error = journal_begin(&th, inode->i_sb, jbegin_count);
                if (error)
                        goto out;
+               reiserfs_write_unlock_once(inode->i_sb, depth);
                error = dquot_transfer(inode, attr);
+               depth = reiserfs_write_lock_once(inode->i_sb);
                if (error) {
                        journal_end(&th, inode->i_sb, jbegin_count);
                        goto out;
index f8afa4b162b8f026da18b1cf2364e7d1cd6f5116..2f40a4c70a4d9667ca34e00ef2b31888255276c2 100644 (file)
@@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree
                       key2type(&(key->on_disk_key)));
 #endif
 
+       reiserfs_write_unlock(inode->i_sb);
        retval = dquot_alloc_space_nodirty(inode, pasted_size);
+       reiserfs_write_lock(inode->i_sb);
        if (retval) {
                pathrelse(search_path);
                return retval;
@@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
                               "reiserquota insert_item(): allocating %u id=%u type=%c",
                               quota_bytes, inode->i_uid, head2type(ih));
 #endif
+               reiserfs_write_unlock(inode->i_sb);
                /* We can't dirty inode here. It would be immediately written but
                 * appropriate stat item isn't inserted yet... */
                retval = dquot_alloc_space_nodirty(inode, quota_bytes);
+               reiserfs_write_lock(inode->i_sb);
                if (retval) {
                        pathrelse(path);
                        return retval;
index 1078ae179993bb12f105d39fb1d847eb84c12414..418bdc3a57da4e11092477fa1b3cbf2e37f71422 100644 (file)
@@ -298,7 +298,9 @@ static int finish_unfinished(struct super_block *s)
                        retval = remove_save_link_only(s, &save_link_key, 0);
                        continue;
                }
+               reiserfs_write_unlock(s);
                dquot_initialize(inode);
+               reiserfs_write_lock(s);
 
                if (truncate && S_ISDIR(inode->i_mode)) {
                        /* We got a truncate request for a dir which is impossible.
@@ -1335,7 +1337,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                                kfree(qf_names[i]);
 #endif
                err = -EINVAL;
-               goto out_err;
+               goto out_unlock;
        }
 #ifdef CONFIG_QUOTA
        handle_quota_files(s, qf_names, &qfmt);
@@ -1379,7 +1381,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        if (blocks) {
                err = reiserfs_resize(s, blocks);
                if (err != 0)
-                       goto out_err;
+                       goto out_unlock;
        }
 
        if (*mount_flags & MS_RDONLY) {
@@ -1389,9 +1391,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                        /* it is read-only already */
                        goto out_ok;
 
+               /*
+                * Drop write lock. Quota will retake it when needed and lock
+                * ordering requires calling dquot_suspend() without it.
+                */
+               reiserfs_write_unlock(s);
                err = dquot_suspend(s, -1);
                if (err < 0)
                        goto out_err;
+               reiserfs_write_lock(s);
 
                /* try to remount file system with read-only permissions */
                if (sb_umount_state(rs) == REISERFS_VALID_FS
@@ -1401,7 +1409,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 
                err = journal_begin(&th, s, 10);
                if (err)
-                       goto out_err;
+                       goto out_unlock;
 
                /* Mounting a rw partition read-only. */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1416,7 +1424,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
 
                if (reiserfs_is_journal_aborted(journal)) {
                        err = journal->j_errno;
-                       goto out_err;
+                       goto out_unlock;
                }
 
                handle_data_mode(s, mount_options);
@@ -1425,7 +1433,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
                s->s_flags &= ~MS_RDONLY;       /* now it is safe to call journal_begin */
                err = journal_begin(&th, s, 10);
                if (err)
-                       goto out_err;
+                       goto out_unlock;
 
                /* Mount a partition which is read-only, read-write */
                reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1);
@@ -1442,10 +1450,16 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
        SB_JOURNAL(s)->j_must_wait = 1;
        err = journal_end(&th, s, 10);
        if (err)
-               goto out_err;
+               goto out_unlock;
 
        if (!(*mount_flags & MS_RDONLY)) {
+               /*
+                * Drop write lock. Quota will retake it when needed and lock
+                * ordering requires calling dquot_resume() without it.
+                */
+               reiserfs_write_unlock(s);
                dquot_resume(s, -1);
+               reiserfs_write_lock(s);
                finish_unfinished(s);
                reiserfs_xattr_init(s, *mount_flags);
        }
@@ -1455,9 +1469,10 @@ out_ok:
        reiserfs_write_unlock(s);
        return 0;
 
+out_unlock:
+       reiserfs_write_unlock(s);
 out_err:
        kfree(new_opts);
-       reiserfs_write_unlock(s);
        return err;
 }
 
@@ -2095,13 +2110,15 @@ static int reiserfs_write_dquot(struct dquot *dquot)
                          REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
        if (ret)
                goto out;
+       reiserfs_write_unlock(dquot->dq_sb);
        ret = dquot_commit(dquot);
+       reiserfs_write_lock(dquot->dq_sb);
        err =
            journal_end(&th, dquot->dq_sb,
                        REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
        if (!ret && err)
                ret = err;
-      out:
+out:
        reiserfs_write_unlock(dquot->dq_sb);
        return ret;
 }
@@ -2117,13 +2134,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot)
                          REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
        if (ret)
                goto out;
+       reiserfs_write_unlock(dquot->dq_sb);
        ret = dquot_acquire(dquot);
+       reiserfs_write_lock(dquot->dq_sb);
        err =
            journal_end(&th, dquot->dq_sb,
                        REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb));
        if (!ret && err)
                ret = err;
-      out:
+out:
        reiserfs_write_unlock(dquot->dq_sb);
        return ret;
 }
@@ -2137,19 +2156,21 @@ static int reiserfs_release_dquot(struct dquot *dquot)
        ret =
            journal_begin(&th, dquot->dq_sb,
                          REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
+       reiserfs_write_unlock(dquot->dq_sb);
        if (ret) {
                /* Release dquot anyway to avoid endless cycle in dqput() */
                dquot_release(dquot);
                goto out;
        }
        ret = dquot_release(dquot);
+       reiserfs_write_lock(dquot->dq_sb);
        err =
            journal_end(&th, dquot->dq_sb,
                        REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb));
        if (!ret && err)
                ret = err;
-      out:
        reiserfs_write_unlock(dquot->dq_sb);
+out:
        return ret;
 }
 
@@ -2174,11 +2195,13 @@ static int reiserfs_write_info(struct super_block *sb, int type)
        ret = journal_begin(&th, sb, 2);
        if (ret)
                goto out;
+       reiserfs_write_unlock(sb);
        ret = dquot_commit_info(sb, type);
+       reiserfs_write_lock(sb);
        err = journal_end(&th, sb, 2);
        if (!ret && err)
                ret = err;
-      out:
+out:
        reiserfs_write_unlock(sb);
        return ret;
 }
@@ -2203,8 +2226,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
        struct reiserfs_transaction_handle th;
        int opt = type == USRQUOTA ? REISERFS_USRQUOTA : REISERFS_GRPQUOTA;
 
-       if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt)))
-               return -EINVAL;
+       reiserfs_write_lock(sb);
+       if (!(REISERFS_SB(sb)->s_mount_opt & (1 << opt))) {
+               err = -EINVAL;
+               goto out;
+       }
 
        /* Quotafile not on the same filesystem? */
        if (path->dentry->d_sb != sb) {
@@ -2246,8 +2272,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id,
                if (err)
                        goto out;
        }
-       err = dquot_quota_on(sb, type, format_id, path);
+       reiserfs_write_unlock(sb);
+       return dquot_quota_on(sb, type, format_id, path);
 out:
+       reiserfs_write_unlock(sb);
        return err;
 }
 
@@ -2320,7 +2348,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
                tocopy = sb->s_blocksize - offset < towrite ?
                    sb->s_blocksize - offset : towrite;
                tmp_bh.b_state = 0;
+               reiserfs_write_lock(sb);
                err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE);
+               reiserfs_write_unlock(sb);
                if (err)
                        goto out;
                if (offset || tocopy != sb->s_blocksize)
@@ -2336,10 +2366,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
                flush_dcache_page(bh->b_page);
                set_buffer_uptodate(bh);
                unlock_buffer(bh);
+               reiserfs_write_lock(sb);
                reiserfs_prepare_for_journal(sb, bh, 1);
                journal_mark_dirty(current->journal_info, sb, bh);
                if (!journal_quota)
                        reiserfs_add_ordered_list(inode, bh);
+               reiserfs_write_unlock(sb);
                brelse(bh);
                offset = 0;
                towrite -= tocopy;
index e562dd43f41fe762eb4d49c7814f1f002fe965c9..e57e2daa357c34fc9634871a1d41dd76569cf871 100644 (file)
@@ -481,11 +481,17 @@ static inline int bio_add_buffer(struct bio *bio, struct buffer_head *bh)
  *
  * The fix is two passes across the ioend list - one to start writeback on the
  * buffer_heads, and then submit them for I/O on the second pass.
+ *
+ * If @fail is non-zero, it means that we have a situation where some part of
+ * the submission process has failed after we have marked pages for writeback
+ * and unlocked them. In this situation, we need to fail the ioend chain rather
+ * than submit it to IO. This typically only happens on a filesystem shutdown.
  */
 STATIC void
 xfs_submit_ioend(
        struct writeback_control *wbc,
-       xfs_ioend_t             *ioend)
+       xfs_ioend_t             *ioend,
+       int                     fail)
 {
        xfs_ioend_t             *head = ioend;
        xfs_ioend_t             *next;
@@ -506,6 +512,18 @@ xfs_submit_ioend(
                next = ioend->io_list;
                bio = NULL;
 
+               /*
+                * If we are failing the IO now, just mark the ioend with an
+                * error and finish it. This will run IO completion immediately
+                * as there is only one reference to the ioend at this point in
+                * time.
+                */
+               if (fail) {
+                       ioend->io_error = -fail;
+                       xfs_finish_ioend(ioend);
+                       continue;
+               }
+
                for (bh = ioend->io_buffer_head; bh; bh = bh->b_private) {
 
                        if (!bio) {
@@ -1060,7 +1078,18 @@ xfs_vm_writepage(
 
        xfs_start_page_writeback(page, 1, count);
 
-       if (ioend && imap_valid) {
+       /* if there is no IO to be submitted for this page, we are done */
+       if (!ioend)
+               return 0;
+
+       ASSERT(iohead);
+
+       /*
+        * Any errors from this point onwards need to be reported through the IO
+        * completion path as we have marked the initial page as under writeback
+        * and unlocked it.
+        */
+       if (imap_valid) {
                xfs_off_t               end_index;
 
                end_index = imap.br_startoff + imap.br_blockcount;
@@ -1079,20 +1108,15 @@ xfs_vm_writepage(
                                  wbc, end_index);
        }
 
-       if (iohead) {
-               /*
-                * Reserve log space if we might write beyond the on-disk
-                * inode size.
-                */
-               if (ioend->io_type != XFS_IO_UNWRITTEN &&
-                   xfs_ioend_is_append(ioend)) {
-                       err = xfs_setfilesize_trans_alloc(ioend);
-                       if (err)
-                               goto error;
-               }
 
-               xfs_submit_ioend(wbc, iohead);
-       }
+       /*
+        * Reserve log space if we might write beyond the on-disk inode size.
+        */
+       err = 0;
+       if (ioend->io_type != XFS_IO_UNWRITTEN && xfs_ioend_is_append(ioend))
+               err = xfs_setfilesize_trans_alloc(ioend);
+
+       xfs_submit_ioend(wbc, iohead, err);
 
        return 0;
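The fail path added to xfs_submit_ioend() walks the ioend chain and completes each entry with the error instead of building bios for it, since the pages were already marked for writeback. Reduced to its bare control flow (the type and helpers below are invented stand-ins, not the XFS ones):

#include <stdio.h>
#include <stddef.h>

struct chain_item {
	struct chain_item *next;
	int error;			/* 0 = not failed */
};

static void complete_item(struct chain_item *item)
{
	printf("completed %p with error %d\n", (void *)item, item->error);
}

static void submit_item(struct chain_item *item)
{
	printf("submitted %p for IO\n", (void *)item);
}

/* mirrors the shape of xfs_submit_ioend(): a non-zero 'fail' short-circuits */
static void submit_chain(struct chain_item *head, int fail)
{
	struct chain_item *item, *next;

	for (item = head; item; item = next) {
		next = item->next;
		if (fail) {
			item->error = -fail;	/* record why this IO never ran */
			complete_item(item);
			continue;
		}
		submit_item(item);
	}
}

int main(void)
{
	struct chain_item b = { NULL, 0 }, a = { &b, 0 };

	submit_chain(&a, 0);	/* normal case: both submitted */
	submit_chain(&a, 5);	/* failure case: both completed with error -5 */
	return 0;
}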
 
index d330111ca738eef9ff86b5fd28723c0b63a9b379..70eec1829776309b3cba83e9a660aa6de5ad9693 100644 (file)
@@ -1291,6 +1291,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
        leaf2 = blk2->bp->b_addr;
        ASSERT(leaf1->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
        ASSERT(leaf2->hdr.info.magic == cpu_to_be16(XFS_ATTR_LEAF_MAGIC));
+       ASSERT(leaf2->hdr.count == 0);
        args = state->args;
 
        trace_xfs_attr_leaf_rebalance(args);
@@ -1361,6 +1362,7 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
                 * I assert that since all callers pass in an empty
                 * second buffer, this code should never execute.
                 */
+               ASSERT(0);
 
                /*
                 * Figure the total bytes to be added to the destination leaf.
@@ -1422,10 +1424,24 @@ xfs_attr_leaf_rebalance(xfs_da_state_t *state, xfs_da_state_blk_t *blk1,
                        args->index2 = 0;
                        args->blkno2 = blk2->blkno;
                } else {
+                       /*
+                        * On a double leaf split, the original attr location
+                        * is already stored in blkno2/index2, so don't
+                        * overwrite it, otherwise we corrupt the tree.
+                        */
                        blk2->index = blk1->index
                                    - be16_to_cpu(leaf1->hdr.count);
-                       args->index = args->index2 = blk2->index;
-                       args->blkno = args->blkno2 = blk2->blkno;
+                       args->index = blk2->index;
+                       args->blkno = blk2->blkno;
+                       if (!state->extravalid) {
+                               /*
+                                * set the new attr location to match the old
+                                * one and let the higher level split code
+                                * decide where in the leaf to place it.
+                                */
+                               args->index2 = blk2->index;
+                               args->blkno2 = blk2->blkno;
+                       }
                }
        } else {
                ASSERT(state->inleaf == 1);
index 933b7930b8636da970a627d571388b11e8dabde7..4b0b8dd1b7b0ea2bbc431ef523ec5412eb4fd584 100644 (file)
@@ -1197,9 +1197,14 @@ xfs_buf_bio_end_io(
 {
        xfs_buf_t               *bp = (xfs_buf_t *)bio->bi_private;
 
-       xfs_buf_ioerror(bp, -error);
+       /*
+        * don't overwrite existing errors - otherwise we can lose errors on
+        * buffers that require multiple bios to complete.
+        */
+       if (!bp->b_error)
+               xfs_buf_ioerror(bp, -error);
 
-       if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
+       if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ))
                invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp));
 
        _xfs_buf_ioend(bp, 1);
@@ -1279,6 +1284,11 @@ next_chunk:
                if (size)
                        goto next_chunk;
        } else {
+               /*
+                * This is guaranteed not to be the last io reference count
+                * because the caller (xfs_buf_iorequest) holds a count itself.
+                */
+               atomic_dec(&bp->b_io_remaining);
                xfs_buf_ioerror(bp, EIO);
                bio_put(bio);
        }
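The guard added in xfs_buf_bio_end_io() is the usual first-error-wins idiom for a buffer completed by several bios. The same idea in isolation, with an invented completion structure rather than the real xfs_buf:

/* illustrative only: keep the first error reported by any fragment */
struct multi_bio_completion {
	int first_error;	/* 0 until the first failing fragment completes */
};

static void record_fragment_error(struct multi_bio_completion *c, int error)
{
	/*
	 * A later fragment finishing with 0, or with a different errno, must
	 * not clobber the error an earlier fragment already recorded.
	 */
	if (error && !c->first_error)
		c->first_error = error;
}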
index af1cbaf535edeb395954583d1d1c1cd0c255c64c..c5c35e629426b506d2e7d00fe876f2e528a48f80 100644 (file)
        {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x679B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x679E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x679F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index df804ba73e0b2ae97d935ba2c3797cd36b1499de..92a0dc75bc74661541e5b5c15f4cdd095c922b38 100644 (file)
@@ -34,6 +34,7 @@ struct omap_i2c_bus_platform_data {
        u32             clkrate;
        u32             rev;
        u32             flags;
+       void            (*set_mpu_wkup_lat)(struct device *dev, long set);
 };
 
 #endif
index e20e3af68fb6acbaf6a51c5aec465bb753a54968..0506eb53519b635286be2b2c54b8a4c7e524a3d7 100644 (file)
@@ -42,10 +42,12 @@ static inline struct device_node *of_find_matching_node_by_address(
 {
        return NULL;
 }
+#ifndef of_iomap
 static inline void __iomem *of_iomap(struct device_node *device, int index)
 {
        return NULL;
 }
+#endif
 static inline const __be32 *of_get_address(struct device_node *dev, int index,
                                        u64 *size, unsigned int *flags)
 {
index c64de9dd7631bb44232eeca7d8080c62521c5b1e..2f694f3846a9ad1a5d052e96d5f68cd9a152c313 100644 (file)
@@ -46,8 +46,9 @@ struct ads7846_platform_data {
        u16     debounce_rep;           /* additional consecutive good readings
                                         * required after the first two */
        int     gpio_pendown;           /* the GPIO used to decide the pendown
-                                        * state if get_pendown_state == NULL
-                                        */
+                                        * state if get_pendown_state == NULL */
+       int     gpio_pendown_debounce;  /* platform specific debounce time for
+                                        * the gpio_pendown */
        int     (*get_pendown_state)(void);
        int     (*filter_init)  (const struct ads7846_platform_data *pdata,
                                 void **filter_data);
index 6f0ba01afe7315d443a0aad152a9df2d5a22abf6..63445ede48bb7fd04b175c111935fb77464568b6 100644 (file)
@@ -1351,7 +1351,7 @@ struct xfrm6_tunnel {
 };
 
 extern void xfrm_init(void);
-extern void xfrm4_init(int rt_hash_size);
+extern void xfrm4_init(void);
 extern int xfrm_state_init(struct net *net);
 extern void xfrm_state_fini(struct net *net);
 extern void xfrm4_state_init(void);
index 88fae8d2015471885972da8407d0077106b527cc..55367b04dc9445e142369e98d2134d459eb5ef83 100644 (file)
@@ -135,6 +135,8 @@ struct scsi_device {
                                     * because we did a bus reset. */
        unsigned use_10_for_rw:1; /* first try 10-byte read / write */
        unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+       unsigned no_report_opcodes:1;   /* no REPORT SUPPORTED OPERATION CODES */
+       unsigned no_write_same:1;       /* no WRITE SAME command */
        unsigned skip_ms_page_8:1;      /* do not use MODE SENSE page 0x08 */
        unsigned skip_ms_page_3f:1;     /* do not use MODE SENSE page 0x3f */
        unsigned skip_vpd_pages:1;      /* do not read VPD pages */
@@ -362,6 +364,8 @@ extern int scsi_test_unit_ready(struct scsi_device *sdev, int timeout,
                                int retries, struct scsi_sense_hdr *sshdr);
 extern int scsi_get_vpd_page(struct scsi_device *, u8 page, unsigned char *buf,
                             int buf_len);
+extern int scsi_report_opcode(struct scsi_device *sdev, unsigned char *buffer,
+                             unsigned int len, unsigned char opcode);
 extern int scsi_device_set_state(struct scsi_device *sdev,
                                 enum scsi_device_state state);
 extern struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
index 7bb35ac0964aaa425b3888e0963e3d7fa2ac1e53..bcb72c6e2b2d8b5ec498c5250d11a58f71e2f06a 100644 (file)
@@ -1405,7 +1405,7 @@ int capture_free_page(struct page *page, int alloc_order, int migratetype)
 
        mt = get_pageblock_migratetype(page);
        if (unlikely(mt != MIGRATE_ISOLATE))
-               __mod_zone_freepage_state(zone, -(1UL << order), mt);
+               __mod_zone_freepage_state(zone, -(1UL << alloc_order), mt);
 
        if (alloc_order != order)
                expand(zone, page, alloc_order, order,
index bcf02f608cbfa76ad06da490f2dbf423e728fc17..017a8bacfb2773483c97c8fa3b162e03f09f454a 100644 (file)
@@ -429,6 +429,17 @@ static struct attribute_group netstat_group = {
        .name  = "statistics",
        .attrs  = netstat_attrs,
 };
+
+#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
+static struct attribute *wireless_attrs[] = {
+       NULL
+};
+
+static struct attribute_group wireless_group = {
+       .name = "wireless",
+       .attrs = wireless_attrs,
+};
+#endif
 #endif /* CONFIG_SYSFS */
 
 #ifdef CONFIG_RPS
@@ -1409,6 +1420,15 @@ int netdev_register_kobject(struct net_device *net)
                groups++;
 
        *groups++ = &netstat_group;
+
+#if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
+       if (net->ieee80211_ptr)
+               *groups++ = &wireless_group;
+#if IS_ENABLED(CONFIG_WIRELESS_EXT)
+       else if (net->wireless_handlers)
+               *groups++ = &wireless_group;
+#endif
+#endif
 #endif /* CONFIG_SYSFS */
 
        error = device_add(dev);
index a8c651216fa62a44d9226eed3294e3e5f3484a3d..df251424d816b460ae268f21d337c57e64452fc6 100644 (file)
@@ -1785,6 +1785,7 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        if (dev_out->flags & IFF_LOOPBACK)
                flags |= RTCF_LOCAL;
 
+       do_cache = true;
        if (type == RTN_BROADCAST) {
                flags |= RTCF_BROADCAST | RTCF_LOCAL;
                fi = NULL;
@@ -1793,6 +1794,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
                if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
                                     fl4->flowi4_proto))
                        flags &= ~RTCF_LOCAL;
+               else
+                       do_cache = false;
                /* If multicast route do not exist use
                 * default one, but do not gateway in this case.
                 * Yes, it is hack.
@@ -1802,8 +1805,8 @@ static struct rtable *__mkroute_output(const struct fib_result *res,
        }
 
        fnhe = NULL;
-       do_cache = fi != NULL;
-       if (fi) {
+       do_cache &= fi != NULL;
+       if (do_cache) {
                struct rtable __rcu **prth;
                struct fib_nh *nh = &FIB_RES_NH(*res);
 
@@ -2597,7 +2600,7 @@ int __init ip_rt_init(void)
                pr_err("Unable to create route proc files\n");
 #ifdef CONFIG_XFRM
        xfrm_init();
-       xfrm4_init(ip_rt_max_size);
+       xfrm4_init();
 #endif
        rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
 
index 05c5ab8d983c462f75ab6143ca653669d36c1005..3be0ac2c1920e668be3f53f44a3df731a44204fc 100644 (file)
@@ -279,19 +279,8 @@ static void __exit xfrm4_policy_fini(void)
        xfrm_policy_unregister_afinfo(&xfrm4_policy_afinfo);
 }
 
-void __init xfrm4_init(int rt_max_size)
+void __init xfrm4_init(void)
 {
-       /*
-        * Select a default value for the gc_thresh based on the main route
-        * table hash size.  It seems to me the worst case scenario is when
-        * we have ipsec operating in transport mode, in which we create a
-        * dst_entry per socket.  The xfrm gc algorithm starts trying to remove
-        * entries at gc_thresh, and prevents new allocations as 2*gc_thresh
-        * so lets set an initial xfrm gc_thresh value at the rt_max_size/2.
-        * That will let us store an ipsec connection per route table entry,
-        * and start cleaning when were 1/2 full
-        */
-       xfrm4_dst_ops.gc_thresh = rt_max_size/2;
        dst_entries_init(&xfrm4_dst_ops);
 
        xfrm4_state_init();
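
Taken together with the route.c diff above, this removes the gc_thresh heuristic derived from rt_max_size, so xfrm4_init() no longer needs an argument. The earlier route.c hunks also rework the output-route caching decision: do_cache now starts out true, is cleared for local multicast, and is then ANDed with the fi != NULL test. A tiny sketch of the resulting predicate (illustrative; only the flag names come from the hunk):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative only: cache the route unless it is local multicast,
     * and never without fib info (fi). */
    static bool should_cache(bool fi_present, bool local_multicast)
    {
        bool do_cache = true;
        if (local_multicast)
            do_cache = false;
        do_cache &= fi_present;   /* mirrors: do_cache &= fi != NULL */
        return do_cache;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               should_cache(true,  false),   /* 1: unicast with fib info       */
               should_cache(true,  true),    /* 0: local multicast, skip cache */
               should_cache(false, false));  /* 0: no fib info                 */
        return 0;
    }
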
index c4f934176cabd92ebfb4bba774335427b9f8932a..30647857a375bce469c50071636b5aef6c5a33d7 100644 (file)
@@ -252,6 +252,7 @@ struct dst_entry *inet6_csk_update_pmtu(struct sock *sk, u32 mtu)
                return NULL;
        dst->ops->update_pmtu(dst, sk, NULL, mtu);
 
-       return inet6_csk_route_socket(sk, &fl6);
+       dst = inet6_csk_route_socket(sk, &fl6);
+       return IS_ERR(dst) ? NULL : dst;
 }
 EXPORT_SYMBOL_GPL(inet6_csk_update_pmtu);
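
inet6_csk_route_socket() can return an error pointer, while callers of inet6_csk_update_pmtu() only test for NULL, so the fix converts IS_ERR() results to NULL instead of passing them through. A standalone sketch of the error-pointer convention, using simplified re-implementations of the kernel's ERR_PTR()/IS_ERR() helpers (illustration only):

    #include <errno.h>
    #include <stdio.h>

    /* Simplified userspace versions of the kernel's error-pointer helpers. */
    #define MAX_ERRNO 4095
    static void *ERR_PTR(long error) { return (void *)error; }
    static int IS_ERR(const void *ptr)
    {
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *route_socket(int fail)
    {
        static int dst_object;
        return fail ? ERR_PTR(-ENETUNREACH) : &dst_object;
    }

    /* Callers only test for NULL, so error pointers must be converted,
     * as the hunk above does. */
    static void *update_pmtu(int fail)
    {
        void *dst = route_socket(fail);
        return IS_ERR(dst) ? NULL : dst;
    }

    int main(void)
    {
        printf("ok path:   %p\n", update_pmtu(0));
        printf("fail path: %p\n", update_pmtu(1));
        return 0;
    }
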
index bf87c70ac6c5fe1e2b920c6db827e1e9da6abe36..c21e33d1abd0d945ba041182dff52ad3f9ed4dc6 100644 (file)
@@ -1151,10 +1151,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&sdata->u.ibss.mtx);
 
-       sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH;
-       memset(sdata->u.ibss.bssid, 0, ETH_ALEN);
-       sdata->u.ibss.ssid_len = 0;
-
        active_ibss = ieee80211_sta_active_ibss(sdata);
 
        if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
@@ -1175,6 +1171,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata)
                }
        }
 
+       ifibss->state = IEEE80211_IBSS_MLME_SEARCH;
+       memset(ifibss->bssid, 0, ETH_ALEN);
+       ifibss->ssid_len = 0;
+
        sta_info_flush(sdata->local, sdata);
 
        spin_lock_bh(&ifibss->incomplete_lock);
index ec3dba5dcd62f081c1749fb60f2aa71cd31d11ad..5c0b78528e55b1af2288aa2e1a398202131413d5 100644 (file)
@@ -173,6 +173,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                return adtfn(set, &nip, timeout, flags);
        }
 
+       ip_to = ip;
        if (tb[IPSET_ATTR_IP_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
@@ -185,8 +186,7 @@ hash_ip4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
-       } else
-               ip_to = ip;
+       }
 
        hosts = h->netmask == 32 ? 1 : 2 << (32 - h->netmask - 1);
 
index 0171f7502fa58d035fcda2361a6c4968acf09b7b..6283351f4eebee920c2939613ff05676b16f91ad 100644 (file)
@@ -162,7 +162,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipport4_elem data = { };
-       u32 ip, ip_to = 0, p = 0, port, port_to;
+       u32 ip, ip_to, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
@@ -210,7 +210,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ip_set_eexist(ret, flags) ? 0 : ret;
        }
 
-       ip = ntohl(data.ip);
+       ip_to = ip = ntohl(data.ip);
        if (tb[IPSET_ATTR_IP_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
@@ -223,8 +223,7 @@ hash_ipport4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
-       } else
-               ip_to = ip;
+       }
 
        port_to = port = ntohs(data.port);
        if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
index 6344ef551ec811208b79ddc54c89a1270c2419cd..6a21271c8d5a9f5da822c9789d55b761924d6da5 100644 (file)
@@ -166,7 +166,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportip4_elem data = { };
-       u32 ip, ip_to = 0, p = 0, port, port_to;
+       u32 ip, ip_to, p = 0, port, port_to;
        u32 timeout = h->timeout;
        bool with_ports = false;
        int ret;
@@ -218,7 +218,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ip_set_eexist(ret, flags) ? 0 : ret;
        }
 
-       ip = ntohl(data.ip);
+       ip_to = ip = ntohl(data.ip);
        if (tb[IPSET_ATTR_IP_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
@@ -231,8 +231,7 @@ hash_ipportip4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (!cidr || cidr > 32)
                        return -IPSET_ERR_INVALID_CIDR;
                ip_set_mask_from_to(ip, ip_to, cidr);
-       } else
-               ip_to = ip;
+       }
 
        port_to = port = ntohs(data.port);
        if (with_ports && tb[IPSET_ATTR_PORT_TO]) {
index cb71f9a774e7d50d67998aaaadc199a563f43f40..2d5cd4ee30eb4d5872b4aa9be7199a3d8999f6c7 100644 (file)
@@ -215,8 +215,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
        const struct ip_set_hash *h = set->data;
        ipset_adtfn adtfn = set->variant->adt[adt];
        struct hash_ipportnet4_elem data = { .cidr = HOST_MASK - 1 };
-       u32 ip, ip_to = 0, p = 0, port, port_to;
-       u32 ip2_from = 0, ip2_to, ip2_last, ip2;
+       u32 ip, ip_to, p = 0, port, port_to;
+       u32 ip2_from, ip2_to, ip2_last, ip2;
        u32 timeout = h->timeout;
        bool with_ports = false;
        u8 cidr;
@@ -286,6 +286,7 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                return ip_set_eexist(ret, flags) ? 0 : ret;
        }
 
+       ip_to = ip;
        if (tb[IPSET_ATTR_IP_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP_TO], &ip_to);
                if (ret)
@@ -306,6 +307,8 @@ hash_ipportnet4_uadt(struct ip_set *set, struct nlattr *tb[],
                if (port > port_to)
                        swap(port, port_to);
        }
+
+       ip2_to = ip2_from;
        if (tb[IPSET_ATTR_IP2_TO]) {
                ret = ip_set_get_hostipaddr4(tb[IPSET_ATTR_IP2_TO], &ip2_to);
                if (ret)
index 8847b4d8be06b9ad536c2bb32d9bbd3d34a7c289..701c88a20fea4c2a3ca6c9b4b2189521e13f96fe 100644 (file)
@@ -41,7 +41,8 @@ MODULE_DESCRIPTION("cttimeout: Extended Netfilter Connection Tracking timeout tu
 static LIST_HEAD(cttimeout_list);
 
 static const struct nla_policy cttimeout_nla_policy[CTA_TIMEOUT_MAX+1] = {
-       [CTA_TIMEOUT_NAME]      = { .type = NLA_NUL_STRING },
+       [CTA_TIMEOUT_NAME]      = { .type = NLA_NUL_STRING,
+                                   .len  = CTNL_TIMEOUT_NAME_MAX - 1},
        [CTA_TIMEOUT_L3PROTO]   = { .type = NLA_U16 },
        [CTA_TIMEOUT_L4PROTO]   = { .type = NLA_U8 },
        [CTA_TIMEOUT_DATA]      = { .type = NLA_NESTED },
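
The policy update caps CTA_TIMEOUT_NAME at CTNL_TIMEOUT_NAME_MAX - 1 bytes so the NUL-terminated name is guaranteed to fit the fixed-size buffer it is later copied into. A generic userspace sketch of the same validate-before-copy idea (the constant and struct below are illustrative, not the netfilter code):

    #include <stdio.h>
    #include <string.h>

    #define TIMEOUT_NAME_MAX 32      /* stands in for CTNL_TIMEOUT_NAME_MAX */

    struct timeout_obj { char name[TIMEOUT_NAME_MAX]; };

    /* Accept the attribute only if it fits the destination, terminator included. */
    static int set_name(struct timeout_obj *t, const char *attr)
    {
        if (strlen(attr) > TIMEOUT_NAME_MAX - 1)
            return -1;               /* would not fit: reject at validation time */
        strcpy(t->name, attr);       /* safe: length already checked */
        return 0;
    }

    int main(void)
    {
        struct timeout_obj t;
        printf("short name: %d\n", set_name(&t, "tcp-loose"));
        printf("long name:  %d\n",
               set_name(&t, "this-name-is-far-too-long-to-fit-in-the-buffer"));
        return 0;
    }
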
index cc10d073c3381179671ef709b58baf4600e51a2b..9e8f4b2801f6099ebd5afcff20d2b5ef19e16589 100644 (file)
@@ -1210,7 +1210,7 @@ int nfc_llcp_register_device(struct nfc_dev *ndev)
        local->remote_miu = LLCP_DEFAULT_MIU;
        local->remote_lto = LLCP_DEFAULT_LTO;
 
-       list_add(&llcp_devices, &local->list);
+       list_add(&local->list, &llcp_devices);
 
        return 0;
 }
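
list_add() takes the new entry first and the list head second; with the arguments swapped, the global llcp_devices head was being spliced into the device's own list instead of the device being linked onto the global list. A minimal userspace re-implementation of the circular-list insert shows why the order matters (simplified, not <linux/list.h> itself):

    #include <stdio.h>
    #include <stddef.h>

    struct list_head { struct list_head *next, *prev; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }

    /* Same semantics as the kernel helper: insert 'new' right after 'head'. */
    static void list_add(struct list_head *new, struct list_head *head)
    {
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
    }

    struct device { int id; struct list_head list; };

    int main(void)
    {
        struct list_head devices = LIST_HEAD_INIT(devices);
        struct device a = { .id = 1 };
        struct device b = { .id = 2 };

        list_add(&a.list, &devices);     /* correct order: new entry, then head */
        list_add(&b.list, &devices);

        for (struct list_head *p = devices.next; p != &devices; p = p->next) {
            struct device *d = (struct device *)
                               ((char *)p - offsetof(struct device, list));
            printf("device %d\n", d->id);
        }
        return 0;
    }

Entries are inserted at the head, so the walk prints device 2 before device 1.
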
index 87ca59d36e7ef64754892be3d842ac614bf2a9d5..974a20b661b79c6385f5fbc0cd40cd57e2a7ef94 100755 (executable)
@@ -156,12 +156,12 @@ sub asn1_extract($$@)
 
        if ($l == 0x1) {
            $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1));
-       } elsif ($l = 0x2) {
+       } elsif ($l == 0x2) {
            $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0], 2));
-       } elsif ($l = 0x3) {
+       } elsif ($l == 0x3) {
            $len = unpack("C", substr(${$cursor->[2]}, $cursor->[0], 1)) << 16;
            $len = unpack("n", substr(${$cursor->[2]}, $cursor->[0] + 1, 2));
-       } elsif ($l = 0x4) {
+       } elsif ($l == 0x4) {
            $len = unpack("N", substr(${$cursor->[2]}, $cursor->[0], 4));
        } else {
            die $x509, ": ", $cursor->[0], ": ASN.1 element too long (", $l, ")\n";
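
The script fix turns assignments into comparisons in the elsif chain: "$l = 0x2" always assigns and evaluates as true, so any multi-byte length field would have been decoded by the first branch. The same pitfall exists in C, where a compiler run with -Wall flags it; a short illustration (deliberately containing the bug in its first test):

    #include <stdio.h>

    int main(void)
    {
        int l = 4;

        if (l = 2)                 /* BUG: assignment, always true, clobbers l */
            printf("taken with l == %d\n", l);   /* prints 2, not 4 */

        l = 4;
        if (l == 2)                /* correct comparison: branch not taken */
            printf("not reached\n");
        else
            printf("l is still %d\n", l);
        return 0;
    }
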
index 28f911cdd7c79dc292171fea10385adad51a27eb..c5454c0477c346e4d814f5ff209feba86e5b86ad 100644 (file)
@@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node)
        if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
                struct sel_netnode *tail;
                tail = list_entry(
-                       rcu_dereference(sel_netnode_hash[idx].list.prev),
+                       rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
+                                                 lockdep_is_held(&sel_netnode_lock)),
                        struct sel_netnode, list);
                list_del_rcu(&tail->list);
                kfree_rcu(tail, rcu);