Merge tag 'dm-3.3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm
author		Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 9 Mar 2012 01:21:51 +0000 (17:21 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Fri, 9 Mar 2012 01:21:51 +0000 (17:21 -0800)
Pull device-mapper fixes for 3.3 from Alasdair Kergon

Eight small device-mapper bug fixes.

* tag 'dm-3.3-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/agk/linux-dm:
  dm raid: fix flush support
  dm raid: set MD_CHANGE_DEVS when rebuilding
  dm thin metadata: decrement counter after removing mapped block
  dm thin metadata: unlock superblock in init_pmd error path
  dm thin metadata: remove incorrect close_device on creation error paths
  dm flakey: fix crash on read when corrupt_bio_byte not set
  dm io: fix discard support
  dm ioctl: do not leak argv if target message only contains whitespace

166 files changed:
Documentation/input/alps.txt
Documentation/kernel-parameters.txt
MAINTAINERS
arch/alpha/include/asm/futex.h
arch/arm/Kconfig
arch/arm/include/asm/pmu.h
arch/arm/kernel/ecard.c
arch/arm/kernel/perf_event.c
arch/arm/kernel/perf_event_v6.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/perf_event_xscale.c
arch/arm/mach-at91/at91sam9g45_devices.c
arch/arm/mach-at91/at91sam9rl_devices.c
arch/arm/mach-ep93xx/vision_ep9307.c
arch/arm/mach-exynos/mach-universal_c210.c
arch/arm/mach-pxa/generic.h
arch/arm/mach-pxa/mfp-pxa2xx.c
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-pxa/pxa3xx.c
arch/arm/mach-pxa/pxa95x.c
arch/arm/mach-s3c2440/common.h
arch/arm/mach-s3c2440/mach-anubis.c
arch/arm/mach-s3c2440/mach-at2440evb.c
arch/arm/mach-s3c2440/mach-gta02.c
arch/arm/mach-s3c2440/mach-mini2440.c
arch/arm/mach-s3c2440/mach-nexcoder.c
arch/arm/mach-s3c2440/mach-osiris.c
arch/arm/mach-s3c2440/mach-rx1950.c
arch/arm/mach-s3c2440/mach-rx3715.c
arch/arm/mach-s3c2440/mach-smdk2440.c
arch/arm/mach-s3c2440/s3c2440.c
arch/arm/mach-s3c2440/s3c244x.c
arch/arm/mach-ux500/Kconfig
arch/arm/mach-vexpress/Kconfig
arch/arm/mm/proc-v7.S
arch/arm/plat-s3c24xx/dma.c
arch/arm/plat-samsung/devs.c
arch/arm/plat-spear/time.c
arch/mips/alchemy/common/time.c
arch/mips/ath79/dev-wmac.c
arch/mips/configs/nlm_xlp_defconfig
arch/mips/configs/nlm_xlr_defconfig
arch/mips/configs/powertv_defconfig
arch/mips/include/asm/mach-au1x00/gpio-au1300.h
arch/mips/include/asm/page.h
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/traps.c
arch/mips/kernel/vmlinux.lds.S
arch/mips/mm/fault.c
arch/mips/pci/pci.c
arch/mips/pmc-sierra/yosemite/ht-irq.c
arch/mips/txx9/generic/7segled.c
arch/x86/ia32/ia32_aout.c
arch/x86/mm/hugetlbpage.c
arch/x86/pci/acpi.c
drivers/block/floppy.c
drivers/gpu/drm/gma500/cdv_device.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/gtt.c
drivers/hid/hid-ids.h
drivers/hid/hid-input.c
drivers/hid/usbhid/hid-quirks.c
drivers/input/evdev.c
drivers/input/misc/twl4030-vibra.c
drivers/input/mouse/alps.c
drivers/input/tablet/Kconfig
drivers/input/tablet/wacom_wac.c
drivers/md/raid1.c
drivers/md/raid10.c
drivers/mfd/ab8500-core.c
drivers/mfd/mfd-core.c
drivers/mfd/s5m-core.c
drivers/mfd/tps65910.c
drivers/mfd/tps65912-core.c
drivers/mfd/wm8350-irq.c
drivers/mfd/wm8994-core.c
drivers/mfd/wm8994-regmap.c
drivers/misc/c2port/core.c
drivers/mmc/core/core.c
drivers/mmc/core/host.c
drivers/mmc/core/mmc.c
drivers/mmc/core/sd.c
drivers/mmc/core/sdio.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/mmci.c
drivers/mmc/host/sdhci-esdhc-imx.c
drivers/net/caif/caif_hsi.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_param.c
drivers/net/ethernet/packetengines/Kconfig
drivers/net/ethernet/qlogic/qla3xxx.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/wireless/ath/ath9k/ar5008_phy.c
drivers/net/wireless/ath/ath9k/ar9002_hw.c
drivers/net/wireless/ath/ath9k/hw.h
drivers/net/wireless/brcm80211/brcmsmac/ampdu.c
drivers/net/wireless/iwlwifi/iwl-agn-sta.c
drivers/net/wireless/rt2x00/rt2x00dev.c
drivers/pps/pps.c
drivers/rapidio/devices/tsi721.c
drivers/regulator/da9052-regulator.c
drivers/rtc/rtc-r9701.c
drivers/s390/cio/qdio_main.c
drivers/scsi/sd_dif.c
drivers/tty/Kconfig
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-fsl.h
fs/aio.c
fs/binfmt_aout.c
fs/cifs/dir.c
fs/cifs/inode.c
fs/dcache.c
fs/exec.c
include/linux/amba/serial.h
include/linux/dcache.h
include/linux/kmsg_dump.h
include/linux/memcontrol.h
include/linux/percpu.h
include/linux/sched.h
include/linux/tcp.h
include/net/tcp.h
kernel/fork.c
kernel/hung_task.c
kernel/irq/manage.c
kernel/kprobes.c
kernel/printk.c
kernel/sched/core.c
lib/debugobjects.c
lib/vsprintf.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memcontrol.c
mm/mempolicy.c
mm/migrate.c
mm/mlock.c
mm/mmap.c
mm/mprotect.c
mm/page_cgroup.c
mm/percpu-vm.c
mm/swap.c
mm/swap_state.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_stp.c
net/bridge/netfilter/ebtables.c
net/core/rtnetlink.c
net/ipv4/tcp_input.c
net/ipv6/addrconf.c
net/mac80211/iface.c
net/mac80211/rate.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_netlink.c
tools/perf/builtin-record.c
tools/perf/builtin-top.c
tools/perf/perf.h
tools/perf/util/top.h
tools/perf/util/util.c

index f274c28b510355dde7c2bfca3c9fc5a4ecd0b400..2f95308251d4162ac21d4ccd3fc9454c371cb217 100644 (file)
@@ -13,7 +13,8 @@ Detection
 
 All ALPS touchpads should respond to the "E6 report" command sequence:
 E8-E6-E6-E6-E9. An ALPS touchpad should respond with either 00-00-0A or
-00-00-64.
+00-00-64 if no buttons are pressed. The bits 0-2 of the first byte will be 1s
+if some buttons are pressed.
 
 If the E6 report is successful, the touchpad model is identified using the "E7
 report" sequence: E8-E7-E7-E7-E9. The response is the model signature and is
index 033d4e69b43b107d9780ec959105823e6cefd07c..d99fd9c0ec0e16900663e1378eac9911d4afc845 100644 (file)
@@ -2211,6 +2211,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 
                        default: off.
 
+       printk.always_kmsg_dump=
+                       Trigger kmsg_dump for cases other than kernel oops or
+                       panics
+                       Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
+                       default: disabled
+
        printk.time=    Show timing data prefixed to each printk message line
                        Format: <bool>  (1/Y/y=enable, 0/N/n=disable)
 
index 4e41d5255d7235662633f1e16af2b82a744dc85c..b087b3bca2901b5df233b805a5fb037bcfa9c74c 100644 (file)
@@ -1310,7 +1310,7 @@ F:        drivers/atm/
 F:     include/linux/atm*
 
 ATMEL AT91 MCI DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@atmel.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 W:     http://www.atmel.com/products/AT91/
 W:     http://www.at91.com/
@@ -1318,7 +1318,7 @@ S:        Maintained
 F:     drivers/mmc/host/at91_mci.c
 
 ATMEL AT91 / AT32 MCI DRIVER
-M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Ludovic Desroches <ludovic.desroches@atmel.com>
 S:     Maintained
 F:     drivers/mmc/host/atmel-mci.c
 F:     drivers/mmc/host/atmel-mci-regs.h
index e8a761aee088a9bc5a8d2ce62d6bf1fde2183a92..f939794363ac6f94ad82a192ede6b9410f7bc002 100644 (file)
@@ -108,7 +108,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        "       lda     $31,3b-2b(%0)\n"
        "       .previous\n"
        :       "+r"(ret), "=&r"(prev), "=&r"(cmp)
-       :       "r"(uaddr), "r"((long)oldval), "r"(newval)
+       :       "r"(uaddr), "r"((long)(int)oldval), "r"(newval)
        :       "memory");
 
        *uval = prev;
index a48aecc17eacc2e3d3f5cf4b0ff4183f29b33440..dfb0312f4e73024f14eb0181bccf9e40870cbd3a 100644 (file)
@@ -1280,7 +1280,7 @@ config ARM_ERRATA_743622
        depends on CPU_V7
        help
          This option enables the workaround for the 743622 Cortex-A9
-         (r2p0..r2p2) erratum. Under very rare conditions, a faulty
+         (r2p*) erratum. Under very rare conditions, a faulty
          optimisation in the Cortex-A9 Store Buffer may lead to data
          corruption. This workaround sets a specific bit in the diagnostic
          register of the Cortex-A9 which disables the Store Buffer
index b5a5be2536c1158acf5a9c979eccb0f5588bb97c..90114faa9f3c7c087f6fce6b871b16ab5e433b44 100644 (file)
@@ -134,7 +134,7 @@ int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type);
 
 u64 armpmu_event_update(struct perf_event *event,
                        struct hw_perf_event *hwc,
-                       int idx, int overflow);
+                       int idx);
 
 int armpmu_event_set_period(struct perf_event *event,
                            struct hw_perf_event *hwc,
index 4dd0edab6a658880ed505145fff2c5ce001ae639..1651d49507444267e1d5bf5783837e0f1d483420 100644 (file)
@@ -242,6 +242,7 @@ static void ecard_init_pgtables(struct mm_struct *mm)
 
        memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
 
+       vma.vm_flags = VM_EXEC;
        vma.vm_mm = mm;
 
        flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
index 5bb91bf3d47f24c9c6fb75324535c39e54b2c62e..b2abfa18f1378d9cea0b790e3933c60999c4eaa0 100644 (file)
@@ -180,7 +180,7 @@ armpmu_event_set_period(struct perf_event *event,
 u64
 armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
-                   int idx, int overflow)
+                   int idx)
 {
        struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
        u64 delta, prev_raw_count, new_raw_count;
@@ -193,13 +193,7 @@ again:
                             new_raw_count) != prev_raw_count)
                goto again;
 
-       new_raw_count &= armpmu->max_period;
-       prev_raw_count &= armpmu->max_period;
-
-       if (overflow)
-               delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
-       else
-               delta = new_raw_count - prev_raw_count;
+       delta = (new_raw_count - prev_raw_count) & armpmu->max_period;
 
        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);
@@ -216,7 +210,7 @@ armpmu_read(struct perf_event *event)
        if (hwc->idx < 0)
                return;
 
-       armpmu_event_update(event, hwc, hwc->idx, 0);
+       armpmu_event_update(event, hwc, hwc->idx);
 }
 
 static void
@@ -232,7 +226,7 @@ armpmu_stop(struct perf_event *event, int flags)
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                barrier(); /* why? */
-               armpmu_event_update(event, hwc, hwc->idx, 0);
+               armpmu_event_update(event, hwc, hwc->idx);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
 }
@@ -518,7 +512,13 @@ __hw_perf_event_init(struct perf_event *event)
        hwc->config_base            |= (unsigned long)mapping;
 
        if (!hwc->sample_period) {
-               hwc->sample_period  = armpmu->max_period;
+               /*
+                * For non-sampling runs, limit the sample_period to half
+                * of the counter width. That way, the new counter value
+                * is far less likely to overtake the previous one unless
+                * you have some serious IRQ latency issues.
+                */
+               hwc->sample_period  = armpmu->max_period >> 1;
                hwc->last_period    = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }
@@ -679,6 +679,28 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
        armpmu->type = ARM_PMU_DEVICE_CPU;
 }
 
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int __cpuinit pmu_cpu_notify(struct notifier_block *b,
+                                       unsigned long action, void *hcpu)
+{
+       if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+               return NOTIFY_DONE;
+
+       if (cpu_pmu && cpu_pmu->reset)
+               cpu_pmu->reset(NULL);
+
+       return NOTIFY_OK;
+}
+
+static struct notifier_block __cpuinitdata pmu_cpu_notifier = {
+       .notifier_call = pmu_cpu_notify,
+};
+
 /*
  * CPU PMU identification and registration.
  */
@@ -730,6 +752,7 @@ init_hw_perf_events(void)
                pr_info("enabled with %s PMU driver, %d counters available\n",
                        cpu_pmu->name, cpu_pmu->num_events);
                cpu_pmu_init(cpu_pmu);
+               register_cpu_notifier(&pmu_cpu_notifier);
                armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW);
        } else {
                pr_info("no hardware support available\n");
index 533be9930ec22f803e672a2e57217b8c65bfc40b..b78af0cc6ef36ddf8b371c9b57541b19caab5677 100644 (file)
@@ -467,23 +467,6 @@ armv6pmu_enable_event(struct hw_perf_event *hwc,
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int counter_is_active(unsigned long pmcr, int idx)
-{
-       unsigned long mask = 0;
-       if (idx == ARMV6_CYCLE_COUNTER)
-               mask = ARMV6_PMCR_CCOUNT_IEN;
-       else if (idx == ARMV6_COUNTER0)
-               mask = ARMV6_PMCR_COUNT0_IEN;
-       else if (idx == ARMV6_COUNTER1)
-               mask = ARMV6_PMCR_COUNT1_IEN;
-
-       if (mask)
-               return pmcr & mask;
-
-       WARN_ONCE(1, "invalid counter number (%d)\n", idx);
-       return 0;
-}
-
 static irqreturn_t
 armv6pmu_handle_irq(int irq_num,
                    void *dev)
@@ -513,7 +496,8 @@ armv6pmu_handle_irq(int irq_num,
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
 
-               if (!counter_is_active(pmcr, idx))
+               /* Ignore if we don't have an event. */
+               if (!event)
                        continue;
 
                /*
@@ -524,7 +508,7 @@ armv6pmu_handle_irq(int irq_num,
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
index 6933244c68f964ffed73f11cfbdaac1ca13b2f74..4d7095af2ab3a55075f080b3d4fee66b84d39f17 100644 (file)
@@ -809,6 +809,11 @@ static inline int armv7_pmnc_disable_intens(int idx)
 
        counter = ARMV7_IDX_TO_COUNTER(idx);
        asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
+       isb();
+       /* Clear the overflow flag in case an interrupt is pending. */
+       asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
+       isb();
+
        return idx;
 }
 
@@ -955,6 +960,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
 
+               /* Ignore if we don't have an event. */
+               if (!event)
+                       continue;
+
                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
@@ -963,7 +972,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
index 3b99d8269829b971db3d8f0618629c77d8317c5a..71a21e6712f5356daa77aa134796701101ce1a13 100644 (file)
@@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev)
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
 
+               if (!event)
+                       continue;
+
                if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
@@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
 
-               if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+               if (!event)
+                       continue;
+
+               if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
                        continue;
 
                hwc = &event->hw;
-               armpmu_event_update(event, hwc, idx, 1);
+               armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
@@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-       unsigned long flags, ien, evtsel;
+       unsigned long flags, ien, evtsel, of_flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
        ien = xscale2pmu_read_int_enable();
@@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
        switch (idx) {
        case XSCALE_CYCLE_COUNTER:
                ien &= ~XSCALE2_CCOUNT_INT_EN;
+               of_flags = XSCALE2_CCOUNT_OVERFLOW;
                break;
        case XSCALE_COUNTER0:
                ien &= ~XSCALE2_COUNT0_INT_EN;
                evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+               of_flags = XSCALE2_COUNT0_OVERFLOW;
                break;
        case XSCALE_COUNTER1:
                ien &= ~XSCALE2_COUNT1_INT_EN;
                evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+               of_flags = XSCALE2_COUNT1_OVERFLOW;
                break;
        case XSCALE_COUNTER2:
                ien &= ~XSCALE2_COUNT2_INT_EN;
                evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+               of_flags = XSCALE2_COUNT2_OVERFLOW;
                break;
        case XSCALE_COUNTER3:
                ien &= ~XSCALE2_COUNT3_INT_EN;
                evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
                evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+               of_flags = XSCALE2_COUNT3_OVERFLOW;
                break;
        default:
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        xscale2pmu_write_event_select(evtsel);
        xscale2pmu_write_int_enable(ien);
+       xscale2pmu_write_overflow_flags(of_flags);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
index b7582dd10dc3bd86c577a59b5ef3c1be3269dd23..96e2adcd5a841907be15beda7bdbb67dd87b5259 100644 (file)
 #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
 static u64 hdmac_dmamask = DMA_BIT_MASK(32);
 
-static struct at_dma_platform_data atdma_pdata = {
-       .nr_channels    = 8,
-};
-
 static struct resource hdmac_resources[] = {
        [0] = {
                .start  = AT91SAM9G45_BASE_DMA,
@@ -56,12 +52,11 @@ static struct resource hdmac_resources[] = {
 };
 
 static struct platform_device at_hdmac_device = {
-       .name           = "at_hdmac",
+       .name           = "at91sam9g45_dma",
        .id             = -1,
        .dev            = {
                                .dma_mask               = &hdmac_dmamask,
                                .coherent_dma_mask      = DMA_BIT_MASK(32),
-                               .platform_data          = &atdma_pdata,
        },
        .resource       = hdmac_resources,
        .num_resources  = ARRAY_SIZE(hdmac_resources),
@@ -69,9 +64,15 @@ static struct platform_device at_hdmac_device = {
 
 void __init at91_add_device_hdmac(void)
 {
-       dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
-       dma_cap_set(DMA_SLAVE, atdma_pdata.cap_mask);
-       platform_device_register(&at_hdmac_device);
+#if defined(CONFIG_OF)
+       struct device_node *of_node =
+               of_find_node_by_name(NULL, "dma-controller");
+
+       if (of_node)
+               of_node_put(of_node);
+       else
+#endif
+               platform_device_register(&at_hdmac_device);
 }
 #else
 void __init at91_add_device_hdmac(void) {}
index 61908dce978447156f557866055fe93da689fe52..9be71c11d0f098ddaadf4528cc2cdc1d17b52a3c 100644 (file)
 #if defined(CONFIG_AT_HDMAC) || defined(CONFIG_AT_HDMAC_MODULE)
 static u64 hdmac_dmamask = DMA_BIT_MASK(32);
 
-static struct at_dma_platform_data atdma_pdata = {
-       .nr_channels    = 2,
-};
-
 static struct resource hdmac_resources[] = {
        [0] = {
                .start  = AT91SAM9RL_BASE_DMA,
@@ -51,12 +47,11 @@ static struct resource hdmac_resources[] = {
 };
 
 static struct platform_device at_hdmac_device = {
-       .name           = "at_hdmac",
+       .name           = "at91sam9rl_dma",
        .id             = -1,
        .dev            = {
                                .dma_mask               = &hdmac_dmamask,
                                .coherent_dma_mask      = DMA_BIT_MASK(32),
-                               .platform_data          = &atdma_pdata,
        },
        .resource       = hdmac_resources,
        .num_resources  = ARRAY_SIZE(hdmac_resources),
@@ -64,7 +59,6 @@ static struct platform_device at_hdmac_device = {
 
 void __init at91_add_device_hdmac(void)
 {
-       dma_cap_set(DMA_MEMCPY, atdma_pdata.cap_mask);
        platform_device_register(&at_hdmac_device);
 }
 #else
index d5fb44f16d317e4796e9c2c08fae5cd16344f4d3..d67d0b4feb6f3fa2266e66cb878444724d97c069 100644 (file)
@@ -34,6 +34,7 @@
 #include <mach/ep93xx_spi.h>
 #include <mach/gpio-ep93xx.h>
 
+#include <asm/hardware/vic.h>
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
 #include <asm/mach/arch.h>
@@ -361,6 +362,7 @@ MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
        .atag_offset    = 0x100,
        .map_io         = vision_map_io,
        .init_irq       = ep93xx_init_irq,
+       .handle_irq     = vic_handle_irq,
        .timer          = &ep93xx_timer,
        .init_machine   = vision_init_machine,
        .restart        = ep93xx_restart,
index 0fc65ffde8ff7526f0f9362c8e18e3e27e3c6539..38939956c34f7097958eda9fcd8690feac4e84ba 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/i2c.h>
 #include <linux/gpio_keys.h>
 #include <linux/gpio.h>
+#include <linux/interrupt.h>
 #include <linux/fb.h>
 #include <linux/mfd/max8998.h>
 #include <linux/regulator/machine.h>
@@ -595,6 +596,7 @@ static struct mxt_platform_data qt602240_platform_data = {
        .threshold      = 0x28,
        .voltage        = 2800000,              /* 2.8V */
        .orient         = MXT_DIAGONAL,
+       .irqflags       = IRQF_TRIGGER_FALLING,
 };
 
 static struct i2c_board_info i2c3_devs[] __initdata = {
index 0d729e6619dfb411c6b4a5e875cc4e75a1928731..42d5cca66257bd1be97cb0c0b37c8569476d0795 100644 (file)
@@ -49,7 +49,6 @@ extern unsigned pxa3xx_get_clk_frequency_khz(int);
 #endif
 
 extern struct syscore_ops pxa_irq_syscore_ops;
-extern struct syscore_ops pxa_gpio_syscore_ops;
 extern struct syscore_ops pxa2xx_mfp_syscore_ops;
 extern struct syscore_ops pxa3xx_mfp_syscore_ops;
 
index f14775536b8385172e929791a627c3a689fb94ac..29b62afc6f7ca3d219b4f4498c4b62b3a6fee223 100644 (file)
@@ -226,6 +226,12 @@ static void __init pxa25x_mfp_init(void)
 {
        int i;
 
+       /* running before pxa_gpio_probe() */
+#ifdef CONFIG_CPU_PXA26x
+       pxa_last_gpio = 89;
+#else
+       pxa_last_gpio = 84;
+#endif
        for (i = 0; i <= pxa_last_gpio; i++)
                gpio_desc[i].valid = 1;
 
@@ -295,6 +301,7 @@ static void __init pxa27x_mfp_init(void)
 {
        int i, gpio;
 
+       pxa_last_gpio = 120;    /* running before pxa_gpio_probe() */
        for (i = 0; i <= pxa_last_gpio; i++) {
                /* skip GPIO2, 5, 6, 7, 8, they are not
                 * valid pins allow configuration
index 00d6eacab8e4dbb7f765a04a2fbaa44c6348f320..3352b37b60cf8218025ab54df4af1358ed62ae1f 100644 (file)
@@ -208,6 +208,7 @@ static struct clk_lookup pxa25x_clkregs[] = {
        INIT_CLKREG(&clk_pxa25x_gpio11, NULL, "GPIO11_CLK"),
        INIT_CLKREG(&clk_pxa25x_gpio12, NULL, "GPIO12_CLK"),
        INIT_CLKREG(&clk_pxa25x_mem, "pxa2xx-pcmcia", NULL),
+       INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL),
 };
 
 static struct clk_lookup pxa25x_hwuart_clkreg =
@@ -367,7 +368,6 @@ static int __init pxa25x_init(void)
 
                register_syscore_ops(&pxa_irq_syscore_ops);
                register_syscore_ops(&pxa2xx_mfp_syscore_ops);
-               register_syscore_ops(&pxa_gpio_syscore_ops);
                register_syscore_ops(&pxa2xx_clock_syscore_ops);
 
                ret = platform_add_devices(pxa25x_devices,
index c1673b3441d4c9c3db04e50ac9f6a2ba4660b871..6bce78edce7a3c070b197b57f1fcb16197c3ba7f 100644 (file)
@@ -229,6 +229,7 @@ static struct clk_lookup pxa27x_clkregs[] = {
        INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"),
        INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"),
        INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL),
+       INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL),
 };
 
 #ifdef CONFIG_PM
@@ -455,7 +456,6 @@ static int __init pxa27x_init(void)
 
                register_syscore_ops(&pxa_irq_syscore_ops);
                register_syscore_ops(&pxa2xx_mfp_syscore_ops);
-               register_syscore_ops(&pxa_gpio_syscore_ops);
                register_syscore_ops(&pxa2xx_clock_syscore_ops);
 
                ret = platform_add_devices(devices, ARRAY_SIZE(devices));
index 4f402afa6609c0ed584100d951347ec1272f8531..3918a672238e4a368f0474c699ebc1c2cb680bb7 100644 (file)
@@ -462,7 +462,6 @@ static int __init pxa3xx_init(void)
 
                register_syscore_ops(&pxa_irq_syscore_ops);
                register_syscore_ops(&pxa3xx_mfp_syscore_ops);
-               register_syscore_ops(&pxa_gpio_syscore_ops);
                register_syscore_ops(&pxa3xx_clock_syscore_ops);
 
                ret = platform_add_devices(devices, ARRAY_SIZE(devices));
index d082a583df78a14c0bc4074db270bb8ce2393ca1..5ce434b95e87a52941f38ca8e023753b940802e5 100644 (file)
@@ -283,7 +283,6 @@ static int __init pxa95x_init(void)
                        return ret;
 
                register_syscore_ops(&pxa_irq_syscore_ops);
-               register_syscore_ops(&pxa_gpio_syscore_ops);
                register_syscore_ops(&pxa3xx_clock_syscore_ops);
 
                ret = platform_add_devices(devices, ARRAY_SIZE(devices));
index db8a98ac68c54dea4e51e526fa0f7ed809e4e8c7..0c1eb1dfc534ac3d564d660d855d8ab32bea4a73 100644 (file)
@@ -12,6 +12,6 @@
 #ifndef __ARCH_ARM_MACH_S3C2440_COMMON_H
 #define __ARCH_ARM_MACH_S3C2440_COMMON_H
 
-void s3c2440_restart(char mode, const char *cmd);
+void s3c244x_restart(char mode, const char *cmd);
 
 #endif /* __ARCH_ARM_MACH_S3C2440_COMMON_H */
index 24569550de1aa4967d9c2748310496c7425500b6..19b577bc09b80cbc3789c511c9f0c9d19c618c1e 100644 (file)
@@ -487,5 +487,5 @@ MACHINE_START(ANUBIS, "Simtec-Anubis")
        .init_machine   = anubis_init,
        .init_irq       = s3c24xx_init_irq,
        .timer          = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index d6a9763110cdccaa22e543eeea09d1d8faa6a321..d7ae49c90118443cf0cc218f04a687945b6d1ab4 100644 (file)
@@ -222,5 +222,5 @@ MACHINE_START(AT2440EVB, "AT2440EVB")
        .init_machine   = at2440evb_init,
        .init_irq       = s3c24xx_init_irq,
        .timer          = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index 5859e609d28c51382009a01a9ced278054985987..9a4a5bc008e66149433ca0edc9147f14cfdc96f6 100644 (file)
@@ -601,5 +601,5 @@ MACHINE_START(NEO1973_GTA02, "GTA02")
        .init_irq       = s3c24xx_init_irq,
        .init_machine   = gta02_machine_init,
        .timer          = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index adbbb85bc4cdd636f4196c4bf50468a21d7f0974..5d66fb218a41bcb121c5845e201009ffe0a0bbab 100644 (file)
@@ -701,5 +701,5 @@ MACHINE_START(MINI2440, "MINI2440")
        .init_machine   = mini2440_init,
        .init_irq       = s3c24xx_init_irq,
        .timer          = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index 40eaf844bc1f61aa92b4dd33250850025bf380e8..5198e3e1c5bedb1fcf5d60820d6ffd4ef30675a1 100644 (file)
@@ -158,5 +158,5 @@ MACHINE_START(NEXCODER_2440, "NexVision - Nexcoder 2440")
        .init_machine   = nexcoder_init,
        .init_irq       = s3c24xx_init_irq,
        .timer          = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index 4c480ef734f64ed97d5a8953f632ceb877464147..c5daeb612a88a35235c0f879e6789e0a94f0c1c1 100644 (file)
@@ -436,5 +436,5 @@ MACHINE_START(OSIRIS, "Simtec-OSIRIS")
        .init_irq       = s3c24xx_init_irq,
        .init_machine   = osiris_init,
        .timer          = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index 80077f6472ee7d7d4acfc1084a10ca94a85b0bbf..6f68abf44fab75156f6be5b69f7b32ba07582780 100644 (file)
@@ -822,5 +822,5 @@ MACHINE_START(RX1950, "HP iPAQ RX1950")
        .init_irq = s3c24xx_init_irq,
        .init_machine = rx1950_init_machine,
        .timer = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index 20103bafbd4bfa34b2e6d58e14b0d10d4486b3cd..56af354475984edf7af488359991fb6041baef9b 100644 (file)
@@ -213,5 +213,5 @@ MACHINE_START(RX3715, "IPAQ-RX3715")
        .init_irq       = rx3715_init_irq,
        .init_machine   = rx3715_init_machine,
        .timer          = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index 1deb60d12a60612284c3e1c0e6c141f5509dd9e4..83a1036d7dcbe5a7d329ba1f12856ccf4d657795 100644 (file)
@@ -183,5 +183,5 @@ MACHINE_START(S3C2440, "SMDK2440")
        .map_io         = smdk2440_map_io,
        .init_machine   = smdk2440_machine_init,
        .timer          = &s3c24xx_timer,
-       .restart        = s3c2440_restart,
+       .restart        = s3c244x_restart,
 MACHINE_END
index 517623a09fc5464f9c7209baa7ef3d8e0aa9fcce..2b3dddb49af75939d397d1f395cf2cc068ded504 100644 (file)
@@ -35,7 +35,6 @@
 #include <plat/cpu.h>
 #include <plat/s3c244x.h>
 #include <plat/pm.h>
-#include <plat/watchdog-reset.h>
 
 #include <plat/gpio-core.h>
 #include <plat/gpio-cfg.h>
@@ -74,15 +73,3 @@ void __init s3c2440_map_io(void)
        s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1up;
        s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1up;
 }
-
-void s3c2440_restart(char mode, const char *cmd)
-{
-       if (mode == 's') {
-               soft_restart(0);
-       }
-
-       arch_wdt_reset();
-
-       /* we'll take a jump through zero as a poor second */
-       soft_restart(0);
-}
index 36bc60f61d0a46b9dfa9d74fd4a91d5be0425485..744930a870ebef2260dbe6fc85e152fd6947a85f 100644 (file)
@@ -46,6 +46,7 @@
 #include <plat/pm.h>
 #include <plat/pll.h>
 #include <plat/nand-core.h>
+#include <plat/watchdog-reset.h>
 
 static struct map_desc s3c244x_iodesc[] __initdata = {
        IODESC_ENT(CLKPWR),
@@ -196,3 +197,14 @@ struct syscore_ops s3c244x_pm_syscore_ops = {
        .suspend        = s3c244x_suspend,
        .resume         = s3c244x_resume,
 };
+
+void s3c244x_restart(char mode, const char *cmd)
+{
+       if (mode == 's')
+               soft_restart(0);
+
+       arch_wdt_reset();
+
+       /* we'll take a jump through zero as a poor second */
+       soft_restart(0);
+}
index 52af00446a6335ff9fd5bb1fccbd9a3d8805caae..c59e8b892d6b9af1781f62666be3992885c6cfcf 100644 (file)
@@ -5,7 +5,7 @@ config UX500_SOC_COMMON
        default y
        select ARM_GIC
        select HAS_MTU
-       select ARM_ERRATA_753970
+       select PL310_ERRATA_753970
        select ARM_ERRATA_754322
        select ARM_ERRATA_764369
 
index 9b3d0fbaee729564f34a11749795d59577961bec..88c3ba151e8716c93abbabebc8265f81db806ea8 100644 (file)
@@ -7,7 +7,7 @@ config ARCH_VEXPRESS_CA9X4
        select ARM_GIC
        select ARM_ERRATA_720789
        select ARM_ERRATA_751472
-       select ARM_ERRATA_753970
+       select PL310_ERRATA_753970
        select HAVE_SMP
        select MIGHT_HAVE_CACHE_L2X0
 
index 0404ccbb8aa3ee39f0a8084c55879fb464115ca2..f1c8486f750169b0ffe8974b35e1b5555e53ad32 100644 (file)
@@ -230,9 +230,7 @@ __v7_setup:
        mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
 #endif
 #ifdef CONFIG_ARM_ERRATA_743622
-       teq     r6, #0x20                       @ present in r2p0
-       teqne   r6, #0x21                       @ present in r2p1
-       teqne   r6, #0x22                       @ present in r2p2
+       teq     r5, #0x00200000                 @ only present in r2p*
        mrceq   p15, 0, r10, c15, c0, 1         @ read diagnostic register
        orreq   r10, r10, #1 << 6               @ set bit #6
        mcreq   p15, 0, r10, c15, c0, 1         @ write diagnostic register
index 9fe35348e03b433d5294cfcd02c1742c268f3391..2bab4c99a2342218fff937cbe5bc701149f76d0c 100644 (file)
@@ -1249,7 +1249,7 @@ static void s3c2410_dma_resume(void)
        struct s3c2410_dma_chan *cp = s3c2410_chans + dma_channels - 1;
        int channel;
 
-       for (channel = dma_channels - 1; channel >= 0; cp++, channel--)
+       for (channel = dma_channels - 1; channel >= 0; cp--, channel--)
                s3c2410_dma_resume_chan(cp);
 }
 
index f10768e988d480d8e49758d900ba890a3649099a..d21d744e4d99d9d9623bb9d26cfb628e87c82a63 100644 (file)
@@ -1409,7 +1409,7 @@ void __init s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd)
 
 #ifdef CONFIG_S3C_DEV_USB_HSOTG
 static struct resource s3c_usb_hsotg_resources[] = {
-       [0] = DEFINE_RES_MEM(S3C_PA_USB_HSOTG, SZ_16K),
+       [0] = DEFINE_RES_MEM(S3C_PA_USB_HSOTG, SZ_128K),
        [1] = DEFINE_RES_IRQ(IRQ_OTG),
 };
 
index 0c77e42986758554bc25931cc6ed88287353a9b9..abb5bdecd509acbe13f0b70c78b65a2607becd1d 100644 (file)
@@ -145,11 +145,13 @@ static void clockevent_set_mode(enum clock_event_mode mode,
 static int clockevent_next_event(unsigned long cycles,
                                 struct clock_event_device *clk_event_dev)
 {
-       u16 val;
+       u16 val = readw(gpt_base + CR(CLKEVT));
+
+       if (val & CTRL_ENABLE)
+               writew(val & ~CTRL_ENABLE, gpt_base + CR(CLKEVT));
 
        writew(cycles, gpt_base + LOAD(CLKEVT));
 
-       val = readw(gpt_base + CR(CLKEVT));
        val |= CTRL_ENABLE | CTRL_INT_ENABLE;
        writew(val, gpt_base + CR(CLKEVT));
 
index 7da4d0081487b73f341cd4a51f6ef5efe33141ae..a7193ae13a5d2d6ab8df643a76c6b33cd9621421 100644 (file)
@@ -146,7 +146,7 @@ static int __init alchemy_time_init(unsigned int m2int)
        cd->shift = 32;
        cd->mult = div_sc(32768, NSEC_PER_SEC, cd->shift);
        cd->max_delta_ns = clockevent_delta2ns(0xffffffff, cd);
-       cd->min_delta_ns = clockevent_delta2ns(8, cd);  /* ~0.25ms */
+       cd->min_delta_ns = clockevent_delta2ns(9, cd);  /* ~0.28ms */
        clockevents_register_device(cd);
        setup_irq(m2int, &au1x_rtcmatch2_irqaction);
 
index 24f546985b69b5b572e60dfc60be97a92a726483..e21507052066fbfe5aaec7185286376f2eaf4d02 100644 (file)
@@ -96,7 +96,7 @@ void __init ath79_register_wmac(u8 *cal_data)
 {
        if (soc_is_ar913x())
                ar913x_wmac_setup();
-       if (soc_is_ar933x())
+       else if (soc_is_ar933x())
                ar933x_wmac_setup();
        else
                BUG();
index 4479fd669ac1877a4d84a65284c714bd68310633..28c6b276c21624a4550c2411042065936464d19c 100644 (file)
@@ -8,7 +8,7 @@ CONFIG_HIGH_RES_TIMERS=y
 # CONFIG_SECCOMP is not set
 CONFIG_USE_OF=y
 CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE="mips-linux-gnu-"
+CONFIG_CROSS_COMPILE=""
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -22,7 +22,7 @@ CONFIG_AUDIT=y
 CONFIG_CGROUPS=y
 CONFIG_NAMESPACES=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs.xlp"
+CONFIG_INITRAMFS_SOURCE=""
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
 CONFIG_INITRAMFS_COMPRESSION_LZMA=y
index 7c68666fdd646c3354f872ffc91eeeecfa4785d3..d0b857d98c91703b01950f9d1a705c1048cd7ac0 100644 (file)
@@ -8,7 +8,7 @@ CONFIG_HIGH_RES_TIMERS=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_KEXEC=y
 CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE="mips-linux-gnu-"
+CONFIG_CROSS_COMPILE=""
 # CONFIG_LOCALVERSION_AUTO is not set
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
@@ -22,7 +22,7 @@ CONFIG_AUDIT=y
 CONFIG_NAMESPACES=y
 CONFIG_SCHED_AUTOGROUP=y
 CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="usr/dev_file_list usr/rootfs.xlr"
+CONFIG_INITRAMFS_SOURCE=""
 CONFIG_RD_BZIP2=y
 CONFIG_RD_LZMA=y
 CONFIG_INITRAMFS_COMPRESSION_GZIP=y
index 3b0b6e8c85334d610099d554f5a7a72b941ffee9..7fda0ce5f692cd51ed8b06f788bffc6a14f866f7 100644 (file)
@@ -6,7 +6,7 @@ CONFIG_HZ_1000=y
 CONFIG_PREEMPT=y
 # CONFIG_SECCOMP is not set
 CONFIG_EXPERIMENTAL=y
-CONFIG_CROSS_COMPILE="mips-linux-"
+CONFIG_CROSS_COMPILE=""
 # CONFIG_SWAP is not set
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=16
index 556e1be20bf63862bacf3fcb464b6b87418c50eb..fb9975c74c571a01526f42667a8f0d7cbedf235e 100644 (file)
@@ -11,6 +11,9 @@
 #include <asm/io.h>
 #include <asm/mach-au1x00/au1000.h>
 
+struct gpio;
+struct gpio_chip;
+
 /* with the current GPIC design, up to 128 GPIOs are possible.
  * The only implementation so far is in the Au1300, which has 75 externally
  * available GPIOs.
@@ -203,7 +206,22 @@ static inline int gpio_request(unsigned int gpio, const char *label)
        return 0;
 }
 
-static inline void gpio_free(unsigned int gpio)
+static inline int gpio_request_one(unsigned gpio,
+                                       unsigned long flags, const char *label)
+{
+       return 0;
+}
+
+static inline int gpio_request_array(struct gpio *array, size_t num)
+{
+       return 0;
+}
+
+static inline void gpio_free(unsigned gpio)
+{
+}
+
+static inline void gpio_free_array(struct gpio *array, size_t num)
 {
 }
 
index d41790928c648de70fb02a7404517f1fc7a94fba..da9bd7d270d18a761f74f6168653d2eb16da7118 100644 (file)
@@ -39,9 +39,6 @@
 #define HPAGE_MASK     (~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER     (HPAGE_SHIFT - PAGE_SHIFT)
 #else /* !CONFIG_HUGETLB_PAGE */
-# ifndef BUILD_BUG
-#  define BUILD_BUG() do { extern void __build_bug(void); __build_bug(); } while (0)
-# endif
 #define HPAGE_SHIFT    ({BUILD_BUG(); 0; })
 #define HPAGE_SIZE     ({BUILD_BUG(); 0; })
 #define HPAGE_MASK     ({BUILD_BUG(); 0; })
index 58fe71afd8797dd37e1cf0442f1b360082af63a2..d5e950ab852792b15c2226f193f55061719f0e20 100644 (file)
@@ -8,7 +8,6 @@
  * SMP support for BMIPS
  */
 
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
index cc4a3f120f54d6f036e66812cf47e5c35e7dbff9..d79ae5437b5871efda13f052c5f7511ccf9587b9 100644 (file)
@@ -1135,7 +1135,7 @@ asmlinkage void do_mt(struct pt_regs *regs)
                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
                break;
        case 5:
-               printk(KERN_DEBUG "Gating Storage Schedulier Exception\n");
+               printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
                break;
        default:
                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
index a81176f44c74d4e39be2eaf8d939ccdf1aa5157b..924da5eb7031498ea93dc050a7492aa78f5bb0ec 100644 (file)
@@ -69,7 +69,6 @@ SECTIONS
        RODATA
 
        /* writeable */
-       _sdata = .;                             /* Start of data section */
        .data : {       /* Data */
                . = . + DATAOFFSET;             /* for CONFIG_MAPPED_KERNEL */
 
index 937cf3368164c6f6d4a6db4b1867ca5866a36ed7..69ebd586d7ffbef5029b8540ae3d021135035295 100644 (file)
@@ -42,6 +42,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
        const int field = sizeof(unsigned long) * 2;
        siginfo_t info;
        int fault;
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
+                                                (write ? FAULT_FLAG_WRITE : 0);
 
 #if 0
        printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -91,6 +93,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
+retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
@@ -144,7 +147,11 @@ good_area:
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, address, flags);
+
+       if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+               return;
+
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
@@ -153,12 +160,27 @@ good_area:
                        goto do_sigbus;
                BUG();
        }
-       if (fault & VM_FAULT_MAJOR) {
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
-               tsk->maj_flt++;
-       } else {
-               perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
-               tsk->min_flt++;
+       if (flags & FAULT_FLAG_ALLOW_RETRY) {
+               if (fault & VM_FAULT_MAJOR) {
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
+                                                 regs, address);
+                       tsk->maj_flt++;
+               } else {
+                       perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
+                                                 regs, address);
+                       tsk->min_flt++;
+               }
+               if (fault & VM_FAULT_RETRY) {
+                       flags &= ~FAULT_FLAG_ALLOW_RETRY;
+
+                       /*
+                        * No need to up_read(&mm->mmap_sem) as we would
+                        * have already released it in __lock_page_or_retry
+                        * in mm/filemap.c.
+                        */
+
+                       goto retry;
+               }
        }
 
        up_read(&mm->mmap_sem);
index aec2b111d35b0131f805e736bd80c2ca19d59cbf..15521505ebe80a4a6975f11905e4694111474a08 100644 (file)
@@ -279,7 +279,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
 {
        /* Propagate hose info into the subordinate devices.  */
 
-       struct list_head *ln;
        struct pci_dev *dev = bus->self;
 
        if (pci_probe_only && dev &&
@@ -288,9 +287,7 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus)
                pcibios_fixup_device_resources(dev, bus);
        }
 
-       for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) {
-               dev = pci_dev_b(ln);
-
+       list_for_each_entry(dev, &bus->devices, bus_list) {
                if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
                        pcibios_fixup_device_resources(dev, bus);
        }
index 86b98e98fb4f2f09906a8ee271550e1ab9b961c9..62ead6601c69437aeaafe5dcd3bc7c895fcee5f1 100644 (file)
  */
 void __init titan_ht_pcibios_fixup_bus(struct pci_bus *bus)
 {
-       struct pci_bus *current_bus = bus;
-       struct pci_dev *devices;
-       struct list_head *devices_link;
-
-       list_for_each(devices_link, &(current_bus->devices)) {
-               devices = pci_dev_b(devices_link);
-               if (devices == NULL)
-                       continue;
-       }
-
        /*
         * PLX and SPKT related changes go here
         */
index 8e93b21225249fe489b8666b94a4ac407b43406b..4642f56e70e54c38aca1d98df27fee53b7761402 100644 (file)
@@ -102,7 +102,7 @@ static int __init tx_7segled_init_sysfs(void)
                        break;
                }
                dev->id = i;
-               dev->dev = &tx_7segled_subsys;
+               dev->bus = &tx_7segled_subsys;
                error = device_register(dev);
                if (!error) {
                        device_create_file(dev, &dev_attr_ascii);
index fd843877e84152d87d437fca5173b858336a104a..39e49091f64841873ee1d79d768aa677832c500f 100644 (file)
@@ -315,6 +315,13 @@ static int load_aout_binary(struct linux_binprm *bprm, struct pt_regs *regs)
        current->mm->free_area_cache = TASK_UNMAPPED_BASE;
        current->mm->cached_hole_size = 0;
 
+       retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
+       if (retval < 0) {
+               /* Someone check-me: is this error path enough? */
+               send_sig(SIGKILL, current, 0);
+               return retval;
+       }
+
        install_exec_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
 
@@ -410,13 +417,6 @@ beyond_if:
 
        set_brk(current->mm->start_brk, current->mm->brk);
 
-       retval = setup_arg_pages(bprm, IA32_STACK_TOP, EXSTACK_DEFAULT);
-       if (retval < 0) {
-               /* Someone check-me: is this error path enough? */
-               send_sig(SIGKILL, current, 0);
-               return retval;
-       }
-
        current->mm->start_stack =
                (unsigned long)create_aout_tables((char __user *)bprm->p, bprm);
        /* start thread */
index f581a18c0d4d7d07aac5cdfa8fb443106e3d5e92..8ecbb4bba4b3b44241b64b46079476ad90f7aad4 100644 (file)
@@ -333,13 +333,15 @@ try_again:
                 * Lookup failure means no vma is above this address,
                 * i.e. return with success:
                 */
-               if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
+               vma = find_vma(mm, addr);
+               if (!vma)
                        return addr;
 
                /*
                 * new region fits between prev_vma->vm_end and
                 * vma->vm_start, use it:
                 */
+               prev_vma = vma->vm_prev;
                if (addr + len <= vma->vm_start &&
                            (!prev_vma || (addr >= prev_vma->vm_end))) {
                        /* remember the address as a hint for next time */
index a312e76063a7c4b1320eb80718cf81c33e7361dc..49a5cb55429b6ae51696f43c5a05c63a07df5566 100644 (file)
@@ -60,6 +60,16 @@ static const struct dmi_system_id pci_use_crs_table[] __initconst = {
                        DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
                },
        },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=42619 */
+       {
+               .callback = set_use_crs,
+               .ident = "MSI MS-7253",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"),
+                       DMI_MATCH(DMI_BOARD_NAME, "MS-7253"),
+                       DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
+               },
+       },
 
        /* Now for the blacklist.. */
 
@@ -282,9 +292,6 @@ static void add_resources(struct pci_root_info *info)
        int i;
        struct resource *res, *root, *conflict;
 
-       if (!pci_use_crs)
-               return;
-
        coalesce_windows(info, IORESOURCE_MEM);
        coalesce_windows(info, IORESOURCE_IO);
 
@@ -336,8 +343,13 @@ get_current_resources(struct acpi_device *device, int busnum,
        acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
                                &info);
 
-       add_resources(&info);
-       return;
+       if (pci_use_crs) {
+               add_resources(&info);
+
+               return;
+       }
+
+       kfree(info.name);
 
 name_alloc_fail:
        kfree(info.res);
index 9baf11e8636205c413bd5c00498baab628657b44..744f078f4dd8311caed330939afbcdc29c2da913 100644 (file)
@@ -3832,7 +3832,7 @@ static int __floppy_read_block_0(struct block_device *bdev)
        bio.bi_size = size;
        bio.bi_bdev = bdev;
        bio.bi_sector = 0;
-       bio.bi_flags = BIO_QUIET;
+       bio.bi_flags = (1 << BIO_QUIET);
        init_completion(&complete);
        bio.bi_private = &complete;
        bio.bi_end_io = floppy_rb0_complete;
index 4a5b099c3bc5ac9430c9b24a651f326f0295c748..53404af2e748b74a6801f5d5f70079e4ce556a88 100644 (file)
@@ -321,6 +321,8 @@ static int cdv_chip_setup(struct drm_device *dev)
        cdv_get_core_freq(dev);
        gma_intel_opregion_init(dev);
        psb_intel_init_bios(dev);
+       REG_WRITE(PORT_HOTPLUG_EN, 0);
+       REG_WRITE(PORT_HOTPLUG_STAT, REG_READ(PORT_HOTPLUG_STAT));
        return 0;
 }
 
index 830dfdd6bf154a473e15357654cf4d19be8d6b81..be616735ec918502238df831ad59510513358996 100644 (file)
@@ -247,7 +247,6 @@ static struct fb_ops psbfb_roll_ops = {
        .fb_imageblit = cfb_imageblit,
        .fb_pan_display = psbfb_pan,
        .fb_mmap = psbfb_mmap,
-       .fb_sync = psbfb_sync,
        .fb_ioctl = psbfb_ioctl,
 };
 
index 5d5330f667f14031512c022e12ed3d8eb61bc530..aff194fbe9f3ee00c627e012f8d364af3caa2f8d 100644 (file)
@@ -446,10 +446,9 @@ int psb_gtt_init(struct drm_device *dev, int resume)
        pg->gtt_start = pci_resource_start(dev->pdev, PSB_GTT_RESOURCE);
        gtt_pages = pci_resource_len(dev->pdev, PSB_GTT_RESOURCE)
                                                                >> PAGE_SHIFT;
-       /* Some CDV firmware doesn't report this currently. In which case the
-          system has 64 gtt pages */
+       /* CDV doesn't report this. In which case the system has 64 gtt pages */
        if (pg->gtt_start == 0 || gtt_pages == 0) {
-               dev_err(dev->dev, "GTT PCI BAR not initialized.\n");
+               dev_dbg(dev->dev, "GTT PCI BAR not initialized.\n");
                gtt_pages = 64;
                pg->gtt_start = dev_priv->pge_ctl;
        }
@@ -461,10 +460,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 
        if (pg->gatt_pages == 0 || pg->gatt_start == 0) {
                static struct resource fudge;   /* Preferably peppermint */
-               /* This can occur on CDV SDV systems. Fudge it in this case.
+               /* This can occur on CDV systems. Fudge it in this case.
                   We really don't care what imaginary space is being allocated
                   at this point */
-               dev_err(dev->dev, "GATT PCI BAR not initialized.\n");
+               dev_dbg(dev->dev, "GATT PCI BAR not initialized.\n");
                pg->gatt_start = 0x40000000;
                pg->gatt_pages = (128 * 1024 * 1024) >> PAGE_SHIFT;
                /* This is a little confusing but in fact the GTT is providing
index b8574cddd95352a360ef7ff67200527185b3c1fb..63552e30d0c38986308ad2a385bccd8ab6bb9218 100644 (file)
@@ -59,6 +59,9 @@
 #define USB_VENDOR_ID_AIRCABLE         0x16CA
 #define USB_DEVICE_ID_AIRCABLE1                0x1502
 
+#define USB_VENDOR_ID_AIREN            0x1a2c
+#define USB_DEVICE_ID_AIREN_SLIMPLUS   0x0002
+
 #define USB_VENDOR_ID_ALCOR            0x058f
 #define USB_DEVICE_ID_ALCOR_USBRS232   0x9720
 
index 9333d692a786b03df115ebe87d920e748b607dc6..627850a54d34ecd6da97f281324b1eff98a368fc 100644 (file)
@@ -986,8 +986,13 @@ void hidinput_hid_event(struct hid_device *hid, struct hid_field *field, struct
                return;
        }
 
-       /* Ignore out-of-range values as per HID specification, section 5.10 */
-       if (value < field->logical_minimum || value > field->logical_maximum) {
+       /*
+        * Ignore out-of-range values as per HID specification,
+        * section 5.10 and 6.2.25
+        */
+       if ((field->flags & HID_MAIN_ITEM_VARIABLE) &&
+           (value < field->logical_minimum ||
+            value > field->logical_maximum)) {
                dbg_hid("Ignoring out-of-range value %x\n", value);
                return;
        }
index c831af937481c66123965743f62aaf5f8a634b3c..57d4e1e1df48df061461681449f5e98f49c20a1e 100644 (file)
@@ -54,6 +54,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_TOUCHPACK, USB_DEVICE_ID_TOUCHPACK_RTS, HID_QUIRK_MULTI_INPUT },
 
+       { USB_VENDOR_ID_AIREN, USB_DEVICE_ID_AIREN_SLIMPLUS, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_UC100KM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS124U, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
index afc166fcc3d9c41ad6654bae17d5c893382c3705..7df5bfef2624d5c1e49bd39208135c29aaed8653 100644 (file)
@@ -332,7 +332,7 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
        struct evdev_client *client = file->private_data;
        struct evdev *evdev = client->evdev;
        struct input_event event;
-       int retval;
+       int retval = 0;
 
        if (count < input_event_size())
                return -EINVAL;
index 37651373a95b0ebb19d25ca32cfcd62599ee4a9b..f3bc4189a7ba136154d418b49028cccb680fb970 100644 (file)
@@ -172,7 +172,7 @@ static void twl4030_vibra_close(struct input_dev *input)
 }
 
 /*** Module ***/
-#if CONFIG_PM
+#if CONFIG_PM_SLEEP
 static int twl4030_vibra_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -189,10 +189,10 @@ static int twl4030_vibra_resume(struct device *dev)
        vibra_disable_leds();
        return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops,
                         twl4030_vibra_suspend, twl4030_vibra_resume);
-#endif
 
 static int __devinit twl4030_vibra_probe(struct platform_device *pdev)
 {
@@ -273,9 +273,7 @@ static struct platform_driver twl4030_vibra_driver = {
        .driver         = {
                .name   = "twl4030-vibra",
                .owner  = THIS_MODULE,
-#ifdef CONFIG_PM
                .pm     = &twl4030_vibra_pm_ops,
-#endif
        },
 };
 module_platform_driver(twl4030_vibra_driver);
index bd87380bd879edcfd9a278d53c887c444a14b9d6..4c6a72d3d48c33c875efe6b969652d0f45961270 100644 (file)
@@ -952,7 +952,9 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
 
        /*
         * First try "E6 report".
-        * ALPS should return 0,0,10 or 0,0,100
+        * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed.
+        * The bits 0-2 of the first byte will be 1s if some buttons are
+        * pressed.
         */
        param[0] = 0;
        if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRES) ||
@@ -968,7 +970,8 @@ static const struct alps_model_info *alps_get_model(struct psmouse *psmouse, int
        psmouse_dbg(psmouse, "E6 report: %2.2x %2.2x %2.2x",
                    param[0], param[1], param[2]);
 
-       if (param[0] != 0 || param[1] != 0 || (param[2] != 10 && param[2] != 100))
+       if ((param[0] & 0xf8) != 0 || param[1] != 0 ||
+           (param[2] != 10 && param[2] != 100))
                return NULL;
 
        /*
index 58a87755b936eee386811396b9ba0ae418a3e0ea..e53f4081a586956974bea8731d366366515ff6f8 100644 (file)
@@ -77,6 +77,8 @@ config TABLET_USB_WACOM
        tristate "Wacom Intuos/Graphire tablet support (USB)"
        depends on USB_ARCH_HAS_HCD
        select USB
+       select NEW_LEDS
+       select LEDS_CLASS
        help
          Say Y here if you want to use the USB version of the Wacom Intuos
          or Graphire tablet.  Make sure to say Y to "Mouse support"
index 88672ec296c116e10d7340c7f07397e10e09afe1..cd3ed29e0801347f20fb9b5da9e2fbbddfbd14b5 100644 (file)
@@ -926,7 +926,7 @@ static int wacom_bpt3_touch(struct wacom_wac *wacom)
 {
        struct input_dev *input = wacom->input;
        unsigned char *data = wacom->data;
-       int count = data[1] & 0x03;
+       int count = data[1] & 0x07;
        int i;
 
        if (data[0] != 0x02)
index a368db2431a596020a98a3e64f67685ccef9c6cc..a0b225eb4ac449b0ae4b74349be9dbb1e0874fa7 100644 (file)
@@ -624,7 +624,7 @@ int md_raid1_congested(struct mddev *mddev, int bits)
                return 1;
 
        rcu_read_lock();
-       for (i = 0; i < conf->raid_disks; i++) {
+       for (i = 0; i < conf->raid_disks * 2; i++) {
                struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
                if (rdev && !test_bit(Faulty, &rdev->flags)) {
                        struct request_queue *q = bdev_get_queue(rdev->bdev);
index 6e8aa213f0d5208d917b8222a56980ab3582b4d6..58c44d6453a0bfa0fb40f155c0357e4392731999 100644 (file)
@@ -67,6 +67,7 @@ static int max_queued_requests = 1024;
 
 static void allow_barrier(struct r10conf *conf);
 static void lower_barrier(struct r10conf *conf);
+static int enough(struct r10conf *conf, int ignore);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
@@ -347,6 +348,19 @@ static void raid10_end_read_request(struct bio *bio, int error)
                 * wait for the 'master' bio.
                 */
                set_bit(R10BIO_Uptodate, &r10_bio->state);
+       } else {
+               /* If all other devices that store this block have
+                * failed, we want to return the error upwards rather
+                * than fail the last device.  Here we redefine
+                * "uptodate" to mean "Don't want to retry"
+                */
+               unsigned long flags;
+               spin_lock_irqsave(&conf->device_lock, flags);
+               if (!enough(conf, rdev->raid_disk))
+                       uptodate = 1;
+               spin_unlock_irqrestore(&conf->device_lock, flags);
+       }
+       if (uptodate) {
                raid_end_bio_io(r10_bio);
                rdev_dec_pending(rdev, conf->mddev);
        } else {
@@ -2052,6 +2066,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                       "md/raid10:%s: %s: Failing raid device\n",
                       mdname(mddev), b);
                md_error(mddev, conf->mirrors[d].rdev);
+               r10_bio->devs[r10_bio->read_slot].bio = IO_BLOCKED;
                return;
        }
 
@@ -2105,8 +2120,11 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
                                    rdev,
                                    r10_bio->devs[r10_bio->read_slot].addr
                                    + sect,
-                                   s, 0))
+                                   s, 0)) {
                                md_error(mddev, rdev);
+                               r10_bio->devs[r10_bio->read_slot].bio
+                                       = IO_BLOCKED;
+                       }
                        break;
                }
 
@@ -2299,17 +2317,20 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
         * This is all done synchronously while the array is
         * frozen.
         */
+       bio = r10_bio->devs[slot].bio;
+       bdevname(bio->bi_bdev, b);
+       bio_put(bio);
+       r10_bio->devs[slot].bio = NULL;
+
        if (mddev->ro == 0) {
                freeze_array(conf);
                fix_read_error(conf, mddev, r10_bio);
                unfreeze_array(conf);
-       }
+       } else
+               r10_bio->devs[slot].bio = IO_BLOCKED;
+
        rdev_dec_pending(rdev, mddev);
 
-       bio = r10_bio->devs[slot].bio;
-       bdevname(bio->bi_bdev, b);
-       r10_bio->devs[slot].bio =
-               mddev->ro ? IO_BLOCKED : NULL;
 read_more:
        rdev = read_balance(conf, r10_bio, &max_sectors);
        if (rdev == NULL) {
@@ -2318,13 +2339,10 @@ read_more:
                       mdname(mddev), b,
                       (unsigned long long)r10_bio->sector);
                raid_end_bio_io(r10_bio);
-               bio_put(bio);
                return;
        }
 
        do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
-       if (bio)
-               bio_put(bio);
        slot = r10_bio->read_slot;
        printk_ratelimited(
                KERN_ERR
@@ -2360,7 +2378,6 @@ read_more:
                        mbio->bi_phys_segments++;
                spin_unlock_irq(&conf->device_lock);
                generic_make_request(bio);
-               bio = NULL;
 
                r10_bio = mempool_alloc(conf->r10bio_pool,
                                        GFP_NOIO);
@@ -3243,7 +3260,6 @@ static int run(struct mddev *mddev)
                        disk->rdev = rdev;
                }
 
-               disk->rdev = rdev;
                disk_stack_limits(mddev->gendisk, rdev->bdev,
                                  rdev->data_offset << 9);
                /* as we don't honour merge_bvec_fn, we must never risk
index 53e2a80f42facb931f56c4adcde9221f5b5c69e1..d295941c9a3db63ab75408470ac8edfefbdb2cb2 100644 (file)
@@ -956,11 +956,12 @@ int __devinit ab8500_init(struct ab8500 *ab8500)
        return ret;
 
 out_freeirq:
-       if (ab8500->irq_base) {
+       if (ab8500->irq_base)
                free_irq(ab8500->irq, ab8500);
 out_removeirq:
+       if (ab8500->irq_base)
                ab8500_irq_remove(ab8500);
-       }
+
        return ret;
 }
 
index 0f5922812bffdee8e3e892cab978052548d92114..411f523d4878bd1cec6f8783ca6c99d6b57b669e 100644 (file)
@@ -123,7 +123,7 @@ static int mfd_add_device(struct device *parent, int id,
                }
 
                if (!cell->ignore_resource_conflicts) {
-                       ret = acpi_check_resource_conflict(res);
+                       ret = acpi_check_resource_conflict(&res[r]);
                        if (ret)
                                goto fail_res;
                }
index e075c113eec6f7d77a67b5ca1eb145abe8eebbdf..caadabeed8e94d59ae55fd8dbde84b37014769bb 100644 (file)
@@ -105,7 +105,7 @@ static int s5m87xx_i2c_probe(struct i2c_client *i2c,
        s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
        i2c_set_clientdata(s5m87xx->rtc, s5m87xx);
 
-       if (pdata->cfg_pmic_irq)
+       if (pdata && pdata->cfg_pmic_irq)
                pdata->cfg_pmic_irq();
 
        s5m_irq_init(s5m87xx);
index 01cf5012a08fb26c3b561fde99341d6ab84749e2..4392f6bca156e20a71d7acb8cce0b9316db42753 100644 (file)
@@ -168,7 +168,7 @@ static int tps65910_i2c_probe(struct i2c_client *i2c,
                goto err;
 
        init_data->irq = pmic_plat_data->irq;
-       init_data->irq_base = pmic_plat_data->irq;
+       init_data->irq_base = pmic_plat_data->irq_base;
 
        tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
 
index 5fec23a9ac039f34eaf6cf902acbfb6521aa7ab4..74fd8cb5f37224e576f58398c20fda9f4884d9e7 100644 (file)
@@ -151,7 +151,7 @@ int tps65912_device_init(struct tps65912 *tps65912)
                goto err;
 
        init_data->irq = pmic_plat_data->irq;
-       init_data->irq_base = pmic_plat_data->irq;
+       init_data->irq_base = pmic_plat_data->irq_base;
        ret = tps65912_irq_init(tps65912, init_data->irq, init_data);
        if (ret < 0)
                goto err;
index 8a1fafd0bf7d129184e8910b73a0848954dcf42c..9fd01bf63c510eafab2c1efc987728576aa54f5e 100644 (file)
@@ -496,7 +496,6 @@ int wm8350_irq_init(struct wm8350 *wm8350, int irq,
 
        mutex_init(&wm8350->irq_lock);
        wm8350->chip_irq = irq;
-       wm8350->irq_base = pdata->irq_base;
 
        if (pdata && pdata->irq_base > 0)
                irq_base = pdata->irq_base;
index f117e7fb932194fc174ed52439dadcd57e81684c..a04b3c108c8ca38a06f54ac946f77ce4ec920f0c 100644 (file)
@@ -256,6 +256,20 @@ static int wm8994_suspend(struct device *dev)
                break;
        }
 
+       switch (wm8994->type) {
+       case WM1811:
+               ret = wm8994_reg_read(wm8994, WM8994_ANTIPOP_2);
+               if (ret < 0) {
+                       dev_err(dev, "Failed to read jackdet: %d\n", ret);
+               } else if (ret & WM1811_JACKDET_MODE_MASK) {
+                       dev_dbg(dev, "CODEC still active, ignoring suspend\n");
+                       return 0;
+               }
+               break;
+       default:
+               break;
+       }
+
        /* Disable LDO pulldowns while the device is suspended if we
         * don't know that something will be driving them. */
        if (!wm8994->ldo_ena_always_driven)
index c598ae69b8ff052584c94e75b44db9f07ea8d694..bc0c5096539a5bdbd874ef99babdd60dbf17a0c0 100644 (file)
@@ -806,6 +806,7 @@ static bool wm1811_readable_register(struct device *dev, unsigned int reg)
        case WM8994_DC_SERVO_2:
        case WM8994_DC_SERVO_READBACK:
        case WM8994_DC_SERVO_4:
+       case WM8994_DC_SERVO_4E:
        case WM8994_ANALOGUE_HP_1:
        case WM8958_MIC_DETECT_1:
        case WM8958_MIC_DETECT_2:
index 19fc7c1cb428c47f822254c2d748b7fe572b2da3..f428d86bfc103256ccd48f50436d23d6bd4d48a7 100644 (file)
@@ -984,9 +984,9 @@ static int __init c2port_init(void)
                " - (C) 2007 Rodolfo Giometti\n");
 
        c2port_class = class_create(THIS_MODULE, "c2port");
-       if (!c2port_class) {
+       if (IS_ERR(c2port_class)) {
                printk(KERN_ERR "c2port: failed to allocate class\n");
-               return -ENOMEM;
+               return PTR_ERR(c2port_class);
        }
        c2port_class->dev_attrs = c2port_attrs;
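
class_create() reports failure with an ERR_PTR-encoded errno rather than NULL, so the old "!c2port_class" test could never trigger; the pps_init() hunk further down makes the same correction. The sketch below models the IS_ERR()/PTR_ERR() convention in plain userspace C; fake_class_create() and the pointer values are invented for the demo.

    #include <stdio.h>

    /*
     * Userspace model of the kernel's ERR_PTR convention: class_create()
     * never returns NULL on failure, it returns a small negative errno
     * encoded in the pointer, so the test must be IS_ERR()/PTR_ERR().
     */
    #define MAX_ERRNO 4095

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *fake_class_create(int fail)
    {
            return fail ? ERR_PTR(-12 /* -ENOMEM */) : (void *)0x1000;
    }

    int main(void)
    {
            void *cls = fake_class_create(1);

            if (IS_ERR(cls))        /* "if (!cls)" would miss this case */
                    printf("create failed: %ld\n", PTR_ERR(cls));
            return 0;
    }
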
 
index 690255c7d4dcc8c407e7b0852216edab75f4ef31..132378b89d76a0072fe31b7f436975525d04e353 100644 (file)
@@ -2068,6 +2068,9 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq)
         */
        mmc_hw_reset_for_init(host);
 
+       /* Initialization should be done at 3.3 V I/O voltage. */
+       mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
        /*
         * sdio_reset sends CMD52 to reset card.  Since we do not know
         * if the card is being re-initialized, just send it.  CMD52
index 30055f2b0d445b3e0c08ed131f939146fef68626..c3704e293a7b30d52c9cc1893da88a0e4844131b 100644 (file)
@@ -238,10 +238,10 @@ static inline void mmc_host_clk_init(struct mmc_host *host)
        /* Hold MCI clock for 8 cycles by default */
        host->clk_delay = 8;
        /*
-        * Default clock gating delay is 200ms.
+        * Default clock gating delay is 0ms to avoid wasting power.
         * This value can be tuned by writing into sysfs entry.
         */
-       host->clkgate_delay = 200;
+       host->clkgate_delay = 0;
        host->clk_gated = false;
        INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work);
        spin_lock_init(&host->clk_lock);
index a48066344fa87316997b0bcca501d0842ea7e8a1..2b9ed1401dc439bf1dd29e4aa751f74e0175bc93 100644 (file)
@@ -816,6 +816,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
        if (!mmc_host_is_spi(host))
                mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);
 
+       /* Initialization should be done at 3.3 V I/O voltage. */
+       mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
        /*
         * Since we're changing the OCR value, we seem to
         * need to tell some cards to go back to the idle
index 5017f9354ce28af6a67418d1ce0a2c438ed07edd..c272c6868ecf6d11a39c3cf2be99f1257fccd026 100644 (file)
@@ -911,6 +911,9 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,
        BUG_ON(!host);
        WARN_ON(!host->claimed);
 
+       /* The initialization should be done at 3.3 V I/O voltage. */
+       mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
        err = mmc_sd_get_cid(host, ocr, cid, &rocr);
        if (err)
                return err;
@@ -1156,11 +1159,6 @@ int mmc_attach_sd(struct mmc_host *host)
        BUG_ON(!host);
        WARN_ON(!host->claimed);
 
-       /* Make sure we are at 3.3V signalling voltage */
-       err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, false);
-       if (err)
-               return err;
-
        /* Disable preset value enable if already set since last time */
        if (host->ops->enable_preset_value) {
                mmc_host_clk_hold(host);
index 12cde6ee17f50732ac5cd05f6843928abf87d0c9..2c7c83f832d289c25eaf83cf4d7ef32d420fb002 100644 (file)
@@ -585,6 +585,9 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,
         * Inform the card of the voltage
         */
        if (!powered_resume) {
+               /* The initialization should be done at 3.3 V I/O voltage. */
+               mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
                err = mmc_send_io_op_cond(host, host->ocr, &ocr);
                if (err)
                        goto err;
@@ -996,6 +999,11 @@ static int mmc_sdio_power_restore(struct mmc_host *host)
         * With these steps taken, mmc_select_voltage() is also required to
         * restore the correct voltage setting of the card.
         */
+
+       /* The initialization should be done at 3.3 V I/O voltage. */
+       if (!mmc_card_keep_power(host))
+               mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0);
+
        sdio_reset(host);
        mmc_go_idle(host);
        mmc_send_if_cond(host, host->ocr_avail);
index 6985cdb0bb26e641fb340c0218d331e48844614a..e4449a54ae8f9fc68c3d9f719fe3a09e3e028adf 100644 (file)
@@ -1948,12 +1948,12 @@ static bool atmci_filter(struct dma_chan *chan, void *slave)
        }
 }
 
-static void atmci_configure_dma(struct atmel_mci *host)
+static bool atmci_configure_dma(struct atmel_mci *host)
 {
        struct mci_platform_data        *pdata;
 
        if (host == NULL)
-               return;
+               return false;
 
        pdata = host->pdev->dev.platform_data;
 
@@ -1970,12 +1970,15 @@ static void atmci_configure_dma(struct atmel_mci *host)
                host->dma.chan =
                        dma_request_channel(mask, atmci_filter, pdata->dma_slave);
        }
-       if (!host->dma.chan)
-               dev_notice(&host->pdev->dev, "DMA not available, using PIO\n");
-       else
+       if (!host->dma.chan) {
+               dev_warn(&host->pdev->dev, "no DMA channel available\n");
+               return false;
+       } else {
                dev_info(&host->pdev->dev,
                                        "Using %s for DMA transfers\n",
                                        dma_chan_name(host->dma.chan));
+               return true;
+       }
 }
 
 static inline unsigned int atmci_get_version(struct atmel_mci *host)
@@ -2085,8 +2088,7 @@ static int __init atmci_probe(struct platform_device *pdev)
 
        /* Get MCI capabilities and set operations according to it */
        atmci_get_cap(host);
-       if (host->caps.has_dma) {
-               dev_info(&pdev->dev, "using DMA\n");
+       if (host->caps.has_dma && atmci_configure_dma(host)) {
                host->prepare_data = &atmci_prepare_data_dma;
                host->submit_data = &atmci_submit_data_dma;
                host->stop_transfer = &atmci_stop_transfer_dma;
@@ -2096,15 +2098,12 @@ static int __init atmci_probe(struct platform_device *pdev)
                host->submit_data = &atmci_submit_data_pdc;
                host->stop_transfer = &atmci_stop_transfer_pdc;
        } else {
-               dev_info(&pdev->dev, "no DMA, no PDC\n");
+               dev_info(&pdev->dev, "using PIO\n");
                host->prepare_data = &atmci_prepare_data;
                host->submit_data = &atmci_submit_data;
                host->stop_transfer = &atmci_stop_transfer;
        }
 
-       if (host->caps.has_dma)
-               atmci_configure_dma(host);
-
        platform_set_drvdata(pdev, host);
 
        /* We need at least one slot to succeed */
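
After the rework above, atmci_configure_dma() reports whether a channel was actually obtained, and probe selects the DMA, PDC or PIO callback set from that answer instead of announcing "using DMA" before knowing. The fallback-chain pattern, reduced to standalone C with invented backend names:

    #include <stdbool.h>
    #include <stdio.h>

    struct xfer_ops {
            const char *name;
            void (*submit)(void);
    };

    static void submit_dma(void) { puts("submit via DMA"); }
    static void submit_pdc(void) { puts("submit via PDC"); }
    static void submit_pio(void) { puts("submit via PIO"); }

    /* Like atmci_configure_dma(): report success instead of leaving the
     * caller to guess from side effects. */
    static bool configure_dma(bool channel_available)
    {
            return channel_available;
    }

    int main(void)
    {
            bool has_dma = true, has_pdc = true;
            struct xfer_ops ops;

            if (has_dma && configure_dma(false))    /* pretend no channel */
                    ops = (struct xfer_ops){ "dma", submit_dma };
            else if (has_pdc)
                    ops = (struct xfer_ops){ "pdc", submit_pdc };
            else
                    ops = (struct xfer_ops){ "pio", submit_pio };

            printf("using %s\n", ops.name);
            ops.submit();
            return 0;
    }
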
index 0d955ffaf44e2c3ec5961f966687da3e675819d9..11e589cd8233e5f7438f7240d14c8f7897fbf8d7 100644 (file)
@@ -1271,12 +1271,13 @@ static int __devinit mmci_probe(struct amba_device *dev,
        /*
         * Block size can be up to 2048 bytes, but must be a power of two.
         */
-       mmc->max_blk_size = 2048;
+       mmc->max_blk_size = 1 << 11;
 
        /*
-        * No limit on the number of blocks transferred.
+        * Limit the number of blocks transferred so that we don't overflow
+        * the maximum request size.
         */
-       mmc->max_blk_count = mmc->max_req_size;
+       mmc->max_blk_count = mmc->max_req_size >> 11;
 
        spin_lock_init(&host->lock);
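
The two mmci limits are now derived from the same shift, so max_blk_count * max_blk_size cannot exceed max_req_size (previously max_blk_count was set to the byte count itself, overstating it by a factor of the block size). A quick arithmetic check with an assumed request size:

    #include <stdio.h>

    int main(void)
    {
            /* Illustrative request limit; the real value comes from the
             * MMCI variant data, not from this constant. */
            unsigned int max_req_size  = 65536;
            unsigned int max_blk_size  = 1 << 11;             /* 2048 bytes */
            unsigned int max_blk_count = max_req_size >> 11;  /* 32 blocks  */

            /* Deriving both limits from the same shift guarantees that a
             * maximal transfer never exceeds the request size. */
            printf("%u blocks * %u bytes = %u\n", max_blk_count, max_blk_size,
                   max_blk_count * max_blk_size);
            return 0;
    }
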
 
index d601e41af282105ad5c3a86d30d569dfef3def8c..0be4e2013632f97ce4dddc489212c2ed6f7c4985 100644 (file)
@@ -269,8 +269,9 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)
                imx_data->scratchpad = val;
                return;
        case SDHCI_COMMAND:
-               if ((host->cmd->opcode == MMC_STOP_TRANSMISSION)
-                       && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
+               if ((host->cmd->opcode == MMC_STOP_TRANSMISSION ||
+                    host->cmd->opcode == MMC_SET_BLOCK_COUNT) &&
+                   (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT))
                        val |= SDHCI_CMD_ABORTCMD;
 
                if (is_imx6q_usdhc(imx_data)) {
index 0a4fc62a381dfb80652220e896f973e1618a5901..c998e1afebc6831be282088a379d98ee8978c491 100644 (file)
@@ -978,7 +978,7 @@ static void cfhsi_setup(struct net_device *dev)
        dev->netdev_ops = &cfhsi_ops;
        dev->type = ARPHRD_CAIF;
        dev->flags = IFF_POINTOPOINT | IFF_NOARP;
-       dev->mtu = CFHSI_MAX_PAYLOAD_SZ;
+       dev->mtu = CFHSI_MAX_CAIF_FRAME_SZ;
        dev->tx_queue_len = 0;
        dev->destructor = free_netdev;
        skb_queue_head_init(&cfhsi->qhead);
index a1f2e0fed78bc2b23b7caa2c1457255e6eae9b89..35c2a202d67aa643a452e245293d5e7fdd748e39 100644 (file)
@@ -5352,7 +5352,7 @@ static void tg3_tx(struct tg3_napi *tnapi)
                }
        }
 
-       netdev_completed_queue(tp->dev, pkts_compl, bytes_compl);
+       netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
 
        tnapi->tx_cons = sw_idx;
 
@@ -6793,7 +6793,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
        }
 
        skb_tx_timestamp(skb);
-       netdev_sent_queue(tp->dev, skb->len);
+       netdev_tx_sent_queue(txq, skb->len);
 
        /* Packets are ready, update Tx producer idx local and on card. */
        tw32_tx_mbox(tnapi->prodmbox, entry);
@@ -7275,8 +7275,8 @@ static void tg3_free_rings(struct tg3 *tp)
 
                        dev_kfree_skb_any(skb);
                }
+               netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
        }
-       netdev_reset_queue(tp->dev);
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -7886,10 +7886,8 @@ static int tg3_chip_reset(struct tg3 *tp)
        return 0;
 }
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *,
-                                                struct rtnl_link_stats64 *);
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *,
-                                               struct tg3_ethtool_stats *);
+static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
+static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
 
 /* tp->lock is held. */
 static int tg3_halt(struct tg3 *tp, int kind, int silent)
@@ -7910,7 +7908,7 @@ static int tg3_halt(struct tg3 *tp, int kind, int silent)
 
        if (tp->hw_stats) {
                /* Save the stats across chip resets... */
-               tg3_get_stats64(tp->dev, &tp->net_stats_prev),
+               tg3_get_nstats(tp, &tp->net_stats_prev),
                tg3_get_estats(tp, &tp->estats_prev);
 
                /* And make sure the next sample is new data */
@@ -9847,7 +9845,7 @@ static inline u64 get_stat64(tg3_stat64_t *val)
        return ((u64)val->high << 32) | ((u64)val->low);
 }
 
-static u64 calc_crc_errors(struct tg3 *tp)
+static u64 tg3_calc_crc_errors(struct tg3 *tp)
 {
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
@@ -9856,14 +9854,12 @@ static u64 calc_crc_errors(struct tg3 *tp)
             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
                u32 val;
 
-               spin_lock_bh(&tp->lock);
                if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
                        tg3_writephy(tp, MII_TG3_TEST1,
                                     val | MII_TG3_TEST1_CRC_EN);
                        tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
                } else
                        val = 0;
-               spin_unlock_bh(&tp->lock);
 
                tp->phy_crc_errors += val;
 
@@ -9877,14 +9873,13 @@ static u64 calc_crc_errors(struct tg3 *tp)
        estats->member =        old_estats->member + \
                                get_stat64(&hw_stats->member)
 
-static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
-                                              struct tg3_ethtool_stats *estats)
+static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
 {
        struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
        if (!hw_stats)
-               return old_estats;
+               return;
 
        ESTAT_ADD(rx_octets);
        ESTAT_ADD(rx_fragments);
@@ -9963,20 +9958,13 @@ static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp,
        ESTAT_ADD(nic_tx_threshold_hit);
 
        ESTAT_ADD(mbuf_lwm_thresh_hit);
-
-       return estats;
 }
 
-static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
-                                                struct rtnl_link_stats64 *stats)
+static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
 {
-       struct tg3 *tp = netdev_priv(dev);
        struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
        struct tg3_hw_stats *hw_stats = tp->hw_stats;
 
-       if (!hw_stats)
-               return old_stats;
-
        stats->rx_packets = old_stats->rx_packets +
                get_stat64(&hw_stats->rx_ucast_packets) +
                get_stat64(&hw_stats->rx_mcast_packets) +
@@ -10019,15 +10007,13 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
                get_stat64(&hw_stats->tx_carrier_sense_errors);
 
        stats->rx_crc_errors = old_stats->rx_crc_errors +
-               calc_crc_errors(tp);
+               tg3_calc_crc_errors(tp);
 
        stats->rx_missed_errors = old_stats->rx_missed_errors +
                get_stat64(&hw_stats->rx_discards);
 
        stats->rx_dropped = tp->rx_dropped;
        stats->tx_dropped = tp->tx_dropped;
-
-       return stats;
 }
 
 static inline u32 calc_crc(unsigned char *buf, int len)
@@ -15409,6 +15395,21 @@ static void __devinit tg3_init_coal(struct tg3 *tp)
        }
 }
 
+static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+                                               struct rtnl_link_stats64 *stats)
+{
+       struct tg3 *tp = netdev_priv(dev);
+
+       if (!tp->hw_stats)
+               return &tp->net_stats_prev;
+
+       spin_lock_bh(&tp->lock);
+       tg3_get_nstats(tp, stats);
+       spin_unlock_bh(&tp->lock);
+
+       return stats;
+}
+
 static const struct net_device_ops tg3_netdev_ops = {
        .ndo_open               = tg3_open,
        .ndo_stop               = tg3_close,
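
The tg3 changes move locking out of the stats helpers: tg3_calc_crc_errors() no longer takes tp->lock itself, and the new tg3_get_stats64() wrapper takes it once around tg3_get_nstats(). A generic standalone sketch of that "lock in the entry point, lock-free helpers" split, using pthreads and an invented counter:

    #include <pthread.h>
    #include <stdio.h>

    /* Build with -pthread.  The counter and values are invented. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static long hw_counter = 41;

    /* Caller must hold "lock" (the role of tg3_get_nstats() and
     * tg3_calc_crc_errors() after the change above). */
    static long get_nstats_locked(void)
    {
            return ++hw_counter;
    }

    /* Single public entry point takes the lock once (tg3_get_stats64()). */
    static long get_stats(void)
    {
            long v;

            pthread_mutex_lock(&lock);
            v = get_nstats_locked();
            pthread_mutex_unlock(&lock);
            return v;
    }

    int main(void)
    {
            printf("%ld\n", get_stats());
            return 0;
    }
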
index e83d12c7bf20cde0b1c2becba254329b0955b2aa..9d76e59d9526164ca9cfc247769315d04b8c4fca 100644 (file)
@@ -196,6 +196,8 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0x4408, 4),
        CH_DEVICE(0x4409, 4),
        CH_DEVICE(0x440a, 4),
+       CH_DEVICE(0x440d, 4),
+       CH_DEVICE(0x440e, 4),
        { 0, }
 };
 
index e53365a71484baabc0f9251ef100eea7f12f027d..d963c1d57f71590afef3aabc94b81a686c8ed061 100644 (file)
@@ -2892,6 +2892,8 @@ static struct pci_device_id cxgb4vf_pci_tbl[] = {
        CH_DEVICE(0x4808, 0),   /* T420-cx */
        CH_DEVICE(0x4809, 0),   /* T420-bt */
        CH_DEVICE(0x480a, 0),   /* T404-bt */
+       CH_DEVICE(0x480d, 0),   /* T480-cr */
+       CH_DEVICE(0x480e, 0),   /* T440-lp-cr */
        { 0, }
 };
 
index ee93a2087fe6c0cfd116dcf3f4536185480ba362..c52295cd05ef009b368555e8b716bc082ad6caf5 100644 (file)
@@ -94,7 +94,7 @@ struct enic {
        u32 rx_coalesce_usecs;
        u32 tx_coalesce_usecs;
 #ifdef CONFIG_PCI_IOV
-       u32 num_vfs;
+       u16 num_vfs;
 #endif
        struct enic_port_profile *pp;
 
index ab3f67f980d8c3f7554d19bf3fdbaf275deefed3..0e4edd3b6bee0275cf3ca0c01ffd1f1f60ec9d49 100644 (file)
@@ -2370,7 +2370,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
        pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
        if (pos) {
                pci_read_config_word(pdev, pos + PCI_SRIOV_TOTAL_VF,
-                       (u16 *)&enic->num_vfs);
+                       &enic->num_vfs);
                if (enic->num_vfs) {
                        err = pci_enable_sriov(pdev, enic->num_vfs);
                        if (err) {
index 738f950a1ce59e69c6297517882913d90f22f784..fb2b36759cbf657d66dfa7c245d49306d5a06c58 100644 (file)
@@ -151,11 +151,6 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                context->log_page_size   = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
        }
 
-       port = ((context->pri_path.sched_queue >> 6) & 1) + 1;
-       if (dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH)
-               context->pri_path.sched_queue = (context->pri_path.sched_queue &
-                                               0xc3);
-
        *(__be32 *) mailbox->buf = cpu_to_be32(optpar);
        memcpy(mailbox->buf + 8, context, sizeof *context);
 
index 9cb5f912e4891f5b832bb941bce6916c5248bdb6..29e23bec809c495104b08d009943ba2b10155660 100644 (file)
@@ -321,10 +321,10 @@ static void pch_gbe_check_copper_options(struct pch_gbe_adapter *adapter)
                        pr_debug("AutoNeg specified along with Speed or Duplex, AutoNeg parameter ignored\n");
                        hw->phy.autoneg_advertised = opt.def;
                } else {
-                       hw->phy.autoneg_advertised = AutoNeg;
-                       pch_gbe_validate_option(
-                               (int *)(&hw->phy.autoneg_advertised),
-                               &opt, adapter);
+                       int tmp = AutoNeg;
+
+                       pch_gbe_validate_option(&tmp, &opt, adapter);
+                       hw->phy.autoneg_advertised = tmp;
                }
        }
 
@@ -495,9 +495,10 @@ void pch_gbe_check_options(struct pch_gbe_adapter *adapter)
                        .arg  = { .l = { .nr = (int)ARRAY_SIZE(fc_list),
                                         .p = fc_list } }
                };
-               hw->mac.fc = FlowControl;
-               pch_gbe_validate_option((int *)(&hw->mac.fc),
-                                               &opt, adapter);
+               int tmp = FlowControl;
+
+               pch_gbe_validate_option(&tmp, &opt, adapter);
+               hw->mac.fc = tmp;
        }
 
        pch_gbe_check_copper_options(adapter);
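
pch_gbe_validate_option() writes through an int pointer, so handing it a cast pointer to a field of some other type risks storing past the field; the fix round-trips through a real int instead. The sketch below shows the safe pattern with illustrative field widths; it does not reproduce the actual pch_gbe structures.

    #include <stdio.h>

    /*
     * Writing through "(int *)&field" would store sizeof(int) bytes and
     * could clobber whatever sits next to a narrower field; a temporary
     * int avoids that.
     */
    struct phy_model {
            unsigned short autoneg_advertised;
            unsigned short neighbour;
    };

    static void validate_option(int *value)
    {
            *value = 0x2f;          /* pretend the option was clamped */
    }

    int main(void)
    {
            struct phy_model phy = { 0, 0x1234 };
            int tmp = phy.autoneg_advertised;

            validate_option(&tmp);                  /* full-width int: safe */
            phy.autoneg_advertised = (unsigned short)tmp;

            printf("advertised=0x%x neighbour=0x%x\n",
                   phy.autoneg_advertised, phy.neighbour);
            return 0;
    }
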
index b97132d9dff0c09fdd804ad63335e10a8225343d..8f29feb35548273c56d6ea2247031214eb820a22 100644 (file)
@@ -4,6 +4,7 @@
 
 config NET_PACKET_ENGINE
        bool "Packet Engine devices"
+       default y
        depends on PCI
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
index 7931531c3a40be12f1d4f838fd854878c16b9470..e61560e16385e855164a2062aa56ae254212721d 100644 (file)
@@ -3017,7 +3017,6 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
                (void __iomem *)port_regs;
        u32 delay = 10;
        int status = 0;
-       unsigned long hw_flags = 0;
 
        if (ql_mii_setup(qdev))
                return -1;
@@ -3228,9 +3227,9 @@ static int ql_adapter_initialize(struct ql3_adapter *qdev)
                value = ql_read_page0_reg(qdev, &port_regs->portStatus);
                if (value & PORT_STATUS_IC)
                        break;
-               spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
+               spin_unlock_irq(&qdev->hw_lock);
                msleep(500);
-               spin_lock_irqsave(&qdev->hw_lock, hw_flags);
+               spin_lock_irq(&qdev->hw_lock);
        } while (--delay);
 
        if (delay == 0) {
index 7a0c800b50adc90051ed008548a754f1af6b5695..bbacb3741ec029e3c4fbc9f74c616955f56e4fc8 100644 (file)
@@ -3781,12 +3781,20 @@ static void rtl8169_init_ring_indexes(struct rtl8169_private *tp)
 
 static void rtl_hw_jumbo_enable(struct rtl8169_private *tp)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       RTL_W8(Cfg9346, Cfg9346_Unlock);
        rtl_generic_op(tp, tp->jumbo_ops.enable);
+       RTL_W8(Cfg9346, Cfg9346_Lock);
 }
 
 static void rtl_hw_jumbo_disable(struct rtl8169_private *tp)
 {
+       void __iomem *ioaddr = tp->mmio_addr;
+
+       RTL_W8(Cfg9346, Cfg9346_Unlock);
        rtl_generic_op(tp, tp->jumbo_ops.disable);
+       RTL_W8(Cfg9346, Cfg9346_Lock);
 }
 
 static void r8168c_hw_jumbo_enable(struct rtl8169_private *tp)
@@ -6186,6 +6194,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
        struct rtl8169_private *tp = netdev_priv(dev);
+       struct device *d = &pdev->dev;
+
+       pm_runtime_get_sync(d);
 
        rtl8169_net_suspend(dev);
 
@@ -6207,6 +6218,8 @@ static void rtl_shutdown(struct pci_dev *pdev)
                pci_wake_from_d3(pdev, true);
                pci_set_power_state(pdev, PCI_D3hot);
        }
+
+       pm_runtime_put_noidle(d);
 }
 
 static struct pci_driver rtl8169_pci_driver = {
index 3dcd3857a36cb8346c82c8131c1131da40a53063..756c0f5565a5b185250668c5617b93ee357ff335 100644 (file)
@@ -830,13 +830,8 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                                        ctx->l4_hdr_size = ((struct tcphdr *)
                                           skb_transport_header(skb))->doff * 4;
                                else if (iph->protocol == IPPROTO_UDP)
-                                       /*
-                                        * Use tcp header size so that bytes to
-                                        * be copied are more than required by
-                                        * the device.
-                                        */
                                        ctx->l4_hdr_size =
-                                                       sizeof(struct tcphdr);
+                                                       sizeof(struct udphdr);
                                else
                                        ctx->l4_hdr_size = 0;
                        } else {
index ed54797db1916d717fe3141559bf95bca2013547..fc46a81ad5384969b1670114765e3df12799a256 100644 (file)
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.1.18.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.1.29.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01011200
+#define VMXNET3_DRIVER_VERSION_NUM      0x01011D00
 
 #if defined(CONFIG_PCI_MSI)
        /* RSS only makes sense if MSI-X is supported. */
index f901a17f76baf6da3a2ad3e55880a8aa25df35e8..86a891f93fc9dc70939088e70080af874343f4b4 100644 (file)
@@ -489,8 +489,6 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
        ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
        ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
        ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
-       ATH_ALLOC_BANK(ah->addac5416_21,
-                      ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
        ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
 
        return 0;
@@ -519,7 +517,6 @@ static void ar5008_hw_rf_free_ext_banks(struct ath_hw *ah)
        ATH_FREE_BANK(ah->analogBank6Data);
        ATH_FREE_BANK(ah->analogBank6TPCData);
        ATH_FREE_BANK(ah->analogBank7Data);
-       ATH_FREE_BANK(ah->addac5416_21);
        ATH_FREE_BANK(ah->bank6Temp);
 
 #undef ATH_FREE_BANK
@@ -805,27 +802,7 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
        if (ah->eep_ops->set_addac)
                ah->eep_ops->set_addac(ah, chan);
 
-       if (AR_SREV_5416_22_OR_LATER(ah)) {
-               REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
-       } else {
-               struct ar5416IniArray temp;
-               u32 addacSize =
-                       sizeof(u32) * ah->iniAddac.ia_rows *
-                       ah->iniAddac.ia_columns;
-
-               /* For AR5416 2.0/2.1 */
-               memcpy(ah->addac5416_21,
-                      ah->iniAddac.ia_array, addacSize);
-
-               /* override CLKDRV value at [row, column] = [31, 1] */
-               (ah->addac5416_21)[31 * ah->iniAddac.ia_columns + 1] = 0;
-
-               temp.ia_array = ah->addac5416_21;
-               temp.ia_columns = ah->iniAddac.ia_columns;
-               temp.ia_rows = ah->iniAddac.ia_rows;
-               REG_WRITE_ARRAY(&temp, 1, regWrites);
-       }
-
+       REG_WRITE_ARRAY(&ah->iniAddac, 1, regWrites);
        REG_WRITE(ah, AR_PHY_ADC_SERIAL_CTL, AR_PHY_SEL_INTERNAL_ADDAC);
 
        ENABLE_REGWRITE_BUFFER(ah);
index 11f192a1ceb72d332ccde32cf98f0ef7c3ff41c3..d190411ac8f51e5003fb2c639b7818be8df34b4c 100644 (file)
@@ -180,6 +180,25 @@ static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
                INIT_INI_ARRAY(&ah->iniAddac, ar5416Addac,
                               ARRAY_SIZE(ar5416Addac), 2);
        }
+
+       /* iniAddac needs to be modified for these chips */
+       if (AR_SREV_9160(ah) || !AR_SREV_5416_22_OR_LATER(ah)) {
+               struct ar5416IniArray *addac = &ah->iniAddac;
+               u32 size = sizeof(u32) * addac->ia_rows * addac->ia_columns;
+               u32 *data;
+
+               data = kmalloc(size, GFP_KERNEL);
+               if (!data)
+                       return;
+
+               memcpy(data, addac->ia_array, size);
+               addac->ia_array = data;
+
+               if (!AR_SREV_5416_22_OR_LATER(ah)) {
+                       /* override CLKDRV value */
+                       INI_RA(addac, 31,1) = 0;
+               }
+       }
 }
 
 /* Support for Japan ch.14 (2484) spread */
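
Instead of keeping a second bank and re-patching it on every reset (the ar5008 code removed earlier in this series), the iniAddac table is duplicated once here and the single CLKDRV cell overridden in the private copy. A plain-C sketch of that copy-then-patch step, with arbitrary table dimensions and values:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            /* Arbitrary demo geometry; the real table size comes from
             * iniAddac.ia_rows/ia_columns. */
            enum { ROWS = 32, COLS = 2 };
            static const unsigned int rom_table[ROWS][COLS] = { [31][1] = 7 };
            size_t size = sizeof(unsigned int) * ROWS * COLS;
            unsigned int *patched;

            patched = malloc(size);
            if (!patched)
                    return 1;
            memcpy(patched, rom_table, size);

            /* Equivalent of "INI_RA(addac, 31, 1) = 0": override a single
             * [row][column] cell in the private copy only. */
            patched[31 * COLS + 1] = 0;

            printf("rom %u, patched %u\n",
                   rom_table[31][1], patched[31 * COLS + 1]);
            free(patched);
            return 0;
    }
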
index 6a29004a71b0864afe06f6c5a4b82b77f8c3d050..c8261d4fc780aa3b4db12586e2738a1065376604 100644 (file)
@@ -940,7 +940,6 @@ struct ath_hw {
        u32 *analogBank6Data;
        u32 *analogBank6TPCData;
        u32 *analogBank7Data;
-       u32 *addac5416_21;
        u32 *bank6Temp;
 
        u8 txpower_limit;
index 90911eec0cf55948fddecffb225eb1543412cc6a..30b58870b1b68455b09adecfd127b354dbcc975c 100644 (file)
@@ -1051,17 +1051,13 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
                }
                /* either retransmit or send bar if ack not recd */
                if (!ack_recd) {
-                       struct ieee80211_tx_rate *txrate =
-                           tx_info->status.rates;
-                       if (retry && (txrate[0].count < (int)retry_limit)) {
+                       if (retry && (ini->txretry[index] < (int)retry_limit)) {
                                ini->txretry[index]++;
                                ini->tx_in_transit--;
                                /*
                                 * Use high prededence for retransmit to
                                 * give some punch
                                 */
-                               /* brcms_c_txq_enq(wlc, scb, p,
-                                * BRCMS_PRIO_TO_PREC(tid)); */
                                brcms_c_txq_enq(wlc, scb, p,
                                                BRCMS_PRIO_TO_HI_PREC(tid));
                        } else {
@@ -1074,9 +1070,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb,
                                    IEEE80211_TX_STAT_AMPDU_NO_BACK;
                                skb_pull(p, D11_PHY_HDR_LEN);
                                skb_pull(p, D11_TXH_LEN);
-                               wiphy_err(wiphy, "%s: BA Timeout, seq %d, in_"
-                                       "transit %d\n", "AMPDU status", seq,
-                                       ini->tx_in_transit);
+                               BCMMSG(wiphy,
+                                      "BA Timeout, seq %d, in_transit %d\n",
+                                      seq, ini->tx_in_transit);
                                ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw,
                                                            p);
                        }
index 7353826095f110a8766d6c1a4425a71c49ba8a91..e483cfa8d14e175aac3432f77951d3641989f116 100644 (file)
@@ -1187,6 +1187,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
        unsigned long flags;
        struct iwl_addsta_cmd sta_cmd;
        u8 sta_id = iwlagn_key_sta_id(priv, ctx->vif, sta);
+       __le16 key_flags;
 
        /* if station isn't there, neither is the key */
        if (sta_id == IWL_INVALID_STATION)
@@ -1212,7 +1213,14 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
                IWL_ERR(priv, "offset %d not used in uCode key table.\n",
                        keyconf->hw_key_idx);
 
-       sta_cmd.key.key_flags = STA_KEY_FLG_NO_ENC | STA_KEY_FLG_INVALID;
+       key_flags = cpu_to_le16(keyconf->keyidx << STA_KEY_FLG_KEYID_POS);
+       key_flags |= STA_KEY_FLG_MAP_KEY_MSK | STA_KEY_FLG_NO_ENC |
+                    STA_KEY_FLG_INVALID;
+
+       if (!(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE))
+               key_flags |= STA_KEY_MULTICAST_MSK;
+
+       sta_cmd.key.key_flags = key_flags;
        sta_cmd.key.key_offset = WEP_INVALID_OFFSET;
        sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
        sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
index c3e1aa7c1a8057b7eb19270f8aacb3f718b86577..d2a1ea98d0f23a144653ed7fceefeb62a2191e75 100644 (file)
@@ -1220,7 +1220,8 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
                cancel_work_sync(&rt2x00dev->rxdone_work);
                cancel_work_sync(&rt2x00dev->txdone_work);
        }
-       destroy_workqueue(rt2x00dev->workqueue);
+       if (rt2x00dev->workqueue)
+               destroy_workqueue(rt2x00dev->workqueue);
 
        /*
         * Free the tx status fifo.
index 2baadd21b7a664dea71cce5a237d66b0b8e36a25..98fbe62694d4eddbead50fde0de22fe834ce67a4 100644 (file)
@@ -369,9 +369,9 @@ static int __init pps_init(void)
        int err;
 
        pps_class = class_create(THIS_MODULE, "pps");
-       if (!pps_class) {
+       if (IS_ERR(pps_class)) {
                pr_err("failed to allocate class\n");
-               return -ENOMEM;
+               return PTR_ERR(pps_class);
        }
        pps_class->dev_attrs = pps_attrs;
 
index 691b1ab1a3d0499d85815bd03551e9d22518e494..30d2072f480b72947c74401d5522fbb9d697d313 100644 (file)
@@ -410,13 +410,14 @@ static void tsi721_db_dpc(struct work_struct *work)
         */
        mport = priv->mport;
 
-       wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE));
-       rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE));
+       wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE;
+       rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE;
 
        while (wr_ptr != rd_ptr) {
                idb_entry = (u64 *)(priv->idb_base +
                                        (TSI721_IDB_ENTRY_SIZE * rd_ptr));
                rd_ptr++;
+               rd_ptr %= IDB_QSIZE;
                idb.msg = *idb_entry;
                *idb_entry = 0;
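
Both doorbell-queue indices read from the hardware are reduced modulo IDB_QSIZE, and the read pointer now wraps as it advances, so the drain loop terminates even if the raw register value lies at or beyond the end of the ring. The same walk, reduced to a standalone loop with a made-up queue size:

    #include <stdio.h>

    int main(void)
    {
            enum { QSIZE = 8 };                /* stand-in for IDB_QSIZE */
            unsigned int wr_ptr = 10 % QSIZE;  /* raw register value 10  */
            unsigned int rd_ptr = 6 % QSIZE;

            /* Wrapping rd_ptr keeps "wr_ptr != rd_ptr" terminating even
             * when the raw hardware index lies outside the ring. */
            while (wr_ptr != rd_ptr) {
                    printf("handle entry %u\n", rd_ptr);
                    rd_ptr = (rd_ptr + 1) % QSIZE;
            }
            return 0;
    }
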
 
index 3767364452fdd890785da5276f1f9b648be89aea..ea4d8f575ac6524676550789e93beb143cdb7cff 100644 (file)
@@ -260,8 +260,8 @@ static int da9052_set_ldo5_6_voltage(struct regulator_dev *rdev,
         * the LDO activate bit to implment the changes on the
         * LDO output.
        */
-       return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
-                                info->activate_bit);
+       return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+                                info->activate_bit, info->activate_bit);
 }
 
 static int da9052_set_dcdc_voltage(struct regulator_dev *rdev,
@@ -280,8 +280,8 @@ static int da9052_set_dcdc_voltage(struct regulator_dev *rdev,
         * the DCDC activate bit to implment the changes on the
         * DCDC output.
        */
-       return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG, 0,
-                                info->activate_bit);
+       return da9052_reg_update(regulator->da9052, DA9052_SUPPLY_REG,
+                                info->activate_bit, info->activate_bit);
 }
 
 static int da9052_get_regulator_voltage_sel(struct regulator_dev *rdev)
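
da9052_reg_update() is a masked read-modify-write, so passing 0 as the mask (the old calls above) could never set the activate bit; the fix supplies the bit as both mask and value. Assuming the usual update = (old & ~mask) | (val & mask) semantics, a small numeric check:

    #include <stdio.h>

    /* With mask == 0 nothing in the register ever changes, which is why
     * the old calls were effectively no-ops. */
    static unsigned int reg_update(unsigned int old, unsigned int mask,
                                   unsigned int val)
    {
            return (old & ~mask) | (val & mask);
    }

    int main(void)
    {
            unsigned int reg = 0x40, activate = 0x04;

            printf("mask=0:   0x%02x\n", reg_update(reg, 0, activate));        /* 0x40 */
            printf("mask=bit: 0x%02x\n", reg_update(reg, activate, activate)); /* 0x44 */
            return 0;
    }
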
index 9beba49c3c5b48c2abcca65c916eabc0d782a49a..2853c2a6f10f4a055d2aadb8d3fac0380bff7f97 100644 (file)
@@ -125,6 +125,13 @@ static int __devinit r9701_probe(struct spi_device *spi)
        unsigned char tmp;
        int res;
 
+       tmp = R100CNT;
+       res = read_regs(&spi->dev, &tmp, 1);
+       if (res || tmp != 0x20) {
+               dev_err(&spi->dev, "cannot read RTC register\n");
+               return -ENODEV;
+       }
+
        rtc = rtc_device_register("r9701",
                                &spi->dev, &r9701_rtc_ops, THIS_MODULE);
        if (IS_ERR(rtc))
@@ -132,13 +139,6 @@ static int __devinit r9701_probe(struct spi_device *spi)
 
        dev_set_drvdata(&spi->dev, rtc);
 
-       tmp = R100CNT;
-       res = read_regs(&spi->dev, &tmp, 1);
-       if (res || tmp != 0x20) {
-               rtc_device_unregister(rtc);
-               return res;
-       }
-
        return 0;
 }
 
index 3ef8d071c64a683316a01b6e12ecd68f3d6f1d9d..770a740a393cd57bd24de3e0e19c1b852d9cd771 100644 (file)
@@ -167,7 +167,7 @@ again:
        DBF_ERROR("%4x EQBS ERROR", SCH_NO(q));
        DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
-                  0, -1, -1, q->irq_ptr->int_parm);
+                  q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
        return 0;
 }
 
@@ -215,7 +215,7 @@ again:
        DBF_ERROR("%4x SQBS ERROR", SCH_NO(q));
        DBF_ERROR("%3d%3d%2d", count, tmp_count, nr);
        q->handler(q->irq_ptr->cdev, QDIO_ERROR_ACTIVATE_CHECK_CONDITION,
-                  0, -1, -1, q->irq_ptr->int_parm);
+                  q->nr, q->first_to_kick, count, q->irq_ptr->int_parm);
        return 0;
 }
 
index 0cb39ff21171b323c3df916db675285420822136..f8fb2d691c0a69e6aed9385ba1a81d2cb893967c 100644 (file)
@@ -408,7 +408,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
                        kunmap_atomic(sdt, KM_USER0);
                }
 
-               bio->bi_flags |= BIO_MAPPED_INTEGRITY;
+               bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
        }
 
        return 0;
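
BIO_MAPPED_INTEGRITY is a bit number, not a mask, so it has to be shifted before being OR-ed into bi_flags; the unshifted OR was quietly setting unrelated low flag bits. A standalone before/after of the two forms (bit positions are illustrative, not the real blk_types.h values):

    #include <stdio.h>

    enum { BIO_UPTODATE = 0, BIO_MAPPED_INTEGRITY = 9 };

    int main(void)
    {
            unsigned long flags = 0;

            flags |= BIO_MAPPED_INTEGRITY;          /* wrong: ORs in 9 = bits 0 and 3 */
            printf("wrong: 0x%lx\n", flags);

            flags = 0;
            flags |= 1UL << BIO_MAPPED_INTEGRITY;   /* right: sets bit 9 */
            printf("right: 0x%lx\n", flags);
            return 0;
    }
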
index b3d17416d86a393d75bc518b9198fa7277b165d8..830cd62d84925e548cd7480b2d6fb3a4e582a08d 100644 (file)
@@ -365,7 +365,7 @@ config PPC_EPAPR_HV_BYTECHAN
 
 config PPC_EARLY_DEBUG_EHV_BC
        bool "Early console (udbg) support for ePAPR hypervisors"
-       depends on PPC_EPAPR_HV_BYTECHAN
+       depends on PPC_EPAPR_HV_BYTECHAN=y
        help
          Select this option to enable early console (a.k.a. "udbg") support
          via an ePAPR byte channel.  You also need to choose the byte channel
index c26a82e83f6e6d11b78f002dc3982df9a3b0a27b..b556a72264d1084b5e6809ea5b4a3f59c5583b82 100644 (file)
@@ -239,7 +239,7 @@ static void ehci_fsl_setup_phy(struct ehci_hcd *ehci,
        ehci_writel(ehci, portsc, &ehci->regs->port_status[port_offset]);
 }
 
-static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
+static void ehci_fsl_usb_setup(struct ehci_hcd *ehci)
 {
        struct usb_hcd *hcd = ehci_to_hcd(ehci);
        struct fsl_usb2_platform_data *pdata;
@@ -299,19 +299,12 @@ static int ehci_fsl_usb_setup(struct ehci_hcd *ehci)
 #endif
                out_be32(non_ehci + FSL_SOC_USB_SICTRL, 0x00000001);
        }
-
-       if (!(in_be32(non_ehci + FSL_SOC_USB_CTRL) & CTRL_PHY_CLK_VALID)) {
-               printk(KERN_WARNING "fsl-ehci: USB PHY clock invalid\n");
-               return -ENODEV;
-       }
-       return 0;
 }
 
 /* called after powerup, by probe or system-pm "wakeup" */
 static int ehci_fsl_reinit(struct ehci_hcd *ehci)
 {
-       if (ehci_fsl_usb_setup(ehci))
-               return -ENODEV;
+       ehci_fsl_usb_setup(ehci);
        ehci_port_power(ehci, 0);
 
        return 0;
index bdf43e2adc51a4fdb4f12ca4e70b1e5c42c62f2c..4918062211656c276f5904cc50d45605076ed026 100644 (file)
@@ -45,6 +45,5 @@
 #define FSL_SOC_USB_PRICTRL    0x40c   /* NOTE: big-endian */
 #define FSL_SOC_USB_SICTRL     0x410   /* NOTE: big-endian */
 #define FSL_SOC_USB_CTRL       0x500   /* NOTE: big-endian */
-#define CTRL_PHY_CLK_VALID     (1 << 17)
 #define SNOOP_SIZE_2GB         0x1e
 #endif                         /* _EHCI_FSL_H */
index 969beb0e22311a4f7e7f4155524709d5dd8269bf..67e4b9047cc944eb63beca468f77e1194956f931 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -490,6 +490,8 @@ static void kiocb_batch_free(struct kioctx *ctx, struct kiocb_batch *batch)
                kmem_cache_free(kiocb_cachep, req);
                ctx->reqs_active--;
        }
+       if (unlikely(!ctx->reqs_active && ctx->dead))
+               wake_up_all(&ctx->wait);
        spin_unlock_irq(&ctx->ctx_lock);
 }
 
index a6395bdb26aeb13b7b98c74df4f77780c1c95412..1ff94054d35aba8a5c0748585006641010a322af 100644 (file)
@@ -259,6 +259,13 @@ static int load_aout_binary(struct linux_binprm * bprm, struct pt_regs * regs)
        current->mm->free_area_cache = current->mm->mmap_base;
        current->mm->cached_hole_size = 0;
 
+       retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
+       if (retval < 0) {
+               /* Someone check-me: is this error path enough? */
+               send_sig(SIGKILL, current, 0);
+               return retval;
+       }
+
        install_exec_creds(bprm);
        current->flags &= ~PF_FORKNOEXEC;
 
@@ -352,13 +359,6 @@ beyond_if:
                return retval;
        }
 
-       retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
-       if (retval < 0) { 
-               /* Someone check-me: is this error path enough? */ 
-               send_sig(SIGKILL, current, 0); 
-               return retval;
-       }
-
        current->mm->start_stack =
                (unsigned long) create_aout_tables((char __user *) bprm->p, bprm);
 #ifdef __alpha__
index 63a196b97d5094a02e0f68b4fe5084051e7ad33a..bc7e24420ac0bee0ac2cab03e96224d1edf4adc7 100644 (file)
@@ -584,10 +584,26 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
                         * If either that or op not supported returned, follow
                         * the normal lookup.
                         */
-                       if ((rc == 0) || (rc == -ENOENT))
+                       switch (rc) {
+                       case 0:
+                               /*
+                                * The server may allow us to open things like
+                                * FIFOs, but the client isn't set up to deal
+                                * with that. If it's not a regular file, just
+                                * close it and proceed as if it were a normal
+                                * lookup.
+                                */
+                               if (newInode && !S_ISREG(newInode->i_mode)) {
+                                       CIFSSMBClose(xid, pTcon, fileHandle);
+                                       break;
+                               }
+                       case -ENOENT:
                                posix_open = true;
-                       else if ((rc == -EINVAL) || (rc != -EOPNOTSUPP))
+                       case -EOPNOTSUPP:
+                               break;
+                       default:
                                pTcon->broken_posix_open = true;
+                       }
                }
                if (!posix_open)
                        rc = cifs_get_inode_info_unix(&newInode, full_path,
index a5f54b7d98224dd613efaa3c89edfdebdc8817e6..745da3d0653e38a0f48db4f0741f3414e44d974e 100644 (file)
@@ -534,6 +534,11 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
        if (fattr->cf_cifsattrs & ATTR_DIRECTORY) {
                fattr->cf_mode = S_IFDIR | cifs_sb->mnt_dir_mode;
                fattr->cf_dtype = DT_DIR;
+               /*
+                * Server can return wrong NumberOfLinks value for directories
+                * when Unix extensions are disabled - fake it.
+                */
+               fattr->cf_nlink = 2;
        } else {
                fattr->cf_mode = S_IFREG | cifs_sb->mnt_file_mode;
                fattr->cf_dtype = DT_REG;
@@ -541,9 +546,9 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
                /* clear write bits if ATTR_READONLY is set */
                if (fattr->cf_cifsattrs & ATTR_READONLY)
                        fattr->cf_mode &= ~(S_IWUGO);
-       }
 
-       fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+               fattr->cf_nlink = le32_to_cpu(info->NumberOfLinks);
+       }
 
        fattr->cf_uid = cifs_sb->mnt_uid;
        fattr->cf_gid = cifs_sb->mnt_gid;
@@ -1322,7 +1327,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
                        }
 /*BB check (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID ) to see if need
        to set uid/gid */
-                       inc_nlink(inode);
 
                        cifs_unix_basic_to_fattr(&fattr, pInfo, cifs_sb);
                        cifs_fill_uniqueid(inode->i_sb, &fattr);
@@ -1355,7 +1359,6 @@ mkdir_retry_old:
                d_drop(direntry);
        } else {
 mkdir_get_info:
-               inc_nlink(inode);
                if (pTcon->unix_ext)
                        rc = cifs_get_inode_info_unix(&newinode, full_path,
                                                      inode->i_sb, xid);
@@ -1436,6 +1439,11 @@ mkdir_get_info:
                }
        }
 mkdir_out:
+       /*
+        * Force revalidate to get parent dir info when needed since cached
+        * attributes are invalid now.
+        */
+       CIFS_I(inode)->time = 0;
        kfree(full_path);
        FreeXid(xid);
        cifs_put_tlink(tlink);
@@ -1475,7 +1483,6 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
        cifs_put_tlink(tlink);
 
        if (!rc) {
-               drop_nlink(inode);
                spin_lock(&direntry->d_inode->i_lock);
                i_size_write(direntry->d_inode, 0);
                clear_nlink(direntry->d_inode);
@@ -1483,12 +1490,15 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
        }
 
        cifsInode = CIFS_I(direntry->d_inode);
-       cifsInode->time = 0;    /* force revalidate to go get info when
-                                  needed */
+       /* force revalidate to go get info when needed */
+       cifsInode->time = 0;
 
        cifsInode = CIFS_I(inode);
-       cifsInode->time = 0;    /* force revalidate to get parent dir info
-                                  since cached search results now invalid */
+       /*
+        * Force revalidate to get parent dir info when needed since cached
+        * attributes are invalid now.
+        */
+       cifsInode->time = 0;
 
        direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
                current_fs_time(inode->i_sb);
index 138be96e25b674b6e6c92f1fe926f2c6fc24f605..bcbdb33fcc205aad37110be50752131aae894062 100644 (file)
@@ -137,6 +137,26 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
 }
 #endif
 
+/*
+ * Compare 2 name strings, return 0 if they match, otherwise non-zero.
+ * The strings are both count bytes long, and count is non-zero.
+ */
+static inline int dentry_cmp(const unsigned char *cs, size_t scount,
+                               const unsigned char *ct, size_t tcount)
+{
+       if (scount != tcount)
+               return 1;
+
+       do {
+               if (*cs != *ct)
+                       return 1;
+               cs++;
+               ct++;
+               tcount--;
+       } while (tcount);
+       return 0;
+}
+
 static void __d_free(struct rcu_head *head)
 {
        struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
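
This hunk, together with the include/linux/dcache.h hunk further down, moves dentry_cmp() out of the public header and into dcache.c. For reference, the comparison logic behaves like this when copied verbatim into a standalone program; the sample names and lengths are arbitrary:

    #include <stddef.h>
    #include <stdio.h>

    /* Comparison logic copied verbatim from the hunk above; count is
     * assumed non-zero, as the original comment states. */
    static int dentry_cmp(const unsigned char *cs, size_t scount,
                          const unsigned char *ct, size_t tcount)
    {
            if (scount != tcount)
                    return 1;

            do {
                    if (*cs != *ct)
                            return 1;
                    cs++;
                    ct++;
                    tcount--;
            } while (tcount);
            return 0;
    }

    int main(void)
    {
            const unsigned char a[] = "vmlinux", b[] = "vmlinuz";

            printf("%d\n", dentry_cmp(a, 7, a, 7)); /* 0: identical         */
            printf("%d\n", dentry_cmp(a, 7, b, 7)); /* 1: last byte differs */
            printf("%d\n", dentry_cmp(a, 7, a, 6)); /* 1: lengths differ    */
            return 0;
    }
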
index 92ce83a11e90acbb0bceda45b682a83535089342..153dee14fe559b9180a2f79b4b1f8534e25c76f5 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1915,7 +1915,6 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
 {
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
-       struct completion *vfork_done;
        int core_waiters = -EBUSY;
 
        init_completion(&core_state->startup);
@@ -1927,22 +1926,9 @@ static int coredump_wait(int exit_code, struct core_state *core_state)
                core_waiters = zap_threads(tsk, mm, core_state, exit_code);
        up_write(&mm->mmap_sem);
 
-       if (unlikely(core_waiters < 0))
-               goto fail;
-
-       /*
-        * Make sure nobody is waiting for us to release the VM,
-        * otherwise we can deadlock when we wait on each other
-        */
-       vfork_done = tsk->vfork_done;
-       if (vfork_done) {
-               tsk->vfork_done = NULL;
-               complete(vfork_done);
-       }
-
-       if (core_waiters)
+       if (core_waiters > 0)
                wait_for_completion(&core_state->startup);
-fail:
+
        return core_waiters;
 }
 
index 514ed45c462eaf0516cbdd9f07d3f96bfada4cc7..d117b29d106227b1036520f870b25b9df316c4f3 100644 (file)
@@ -23,6 +23,8 @@
 #ifndef ASM_ARM_HARDWARE_SERIAL_AMBA_H
 #define ASM_ARM_HARDWARE_SERIAL_AMBA_H
 
+#include <linux/types.h>
+
 /* -------------------------------------------------------------------------------
  *  From AMBA UART (PL010) Block Specification
  * -------------------------------------------------------------------------------
index 4270bedd230849c48a1e5387eb2d8a6b6e530a80..ff5f5256d175c8dde1345394f8fedd4e701b03e0 100644 (file)
@@ -47,26 +47,6 @@ struct dentry_stat_t {
 };
 extern struct dentry_stat_t dentry_stat;
 
-/*
- * Compare 2 name strings, return 0 if they match, otherwise non-zero.
- * The strings are both count bytes long, and count is non-zero.
- */
-static inline int dentry_cmp(const unsigned char *cs, size_t scount,
-                               const unsigned char *ct, size_t tcount)
-{
-       if (scount != tcount)
-               return 1;
-
-       do {
-               if (*cs != *ct)
-                       return 1;
-               cs++;
-               ct++;
-               tcount--;
-       } while (tcount);
-       return 0;
-}
-
 /* Name hashing routines. Initial hash value */
 /* Hash courtesy of the R5 hash in reiserfs modulo sign bits */
 #define init_name_hash()               0
index fee66317e071547a4a75e0b90b61399b97249ed2..35f7237ec972bef4ce1ba24cd617571a193ce806 100644 (file)
 #include <linux/errno.h>
 #include <linux/list.h>
 
+/*
+ * Keep this list arranged in rough order of priority. Anything listed after
+ * KMSG_DUMP_OOPS will not be logged by default unless printk.always_kmsg_dump
+ * is passed to the kernel.
+ */
 enum kmsg_dump_reason {
-       KMSG_DUMP_OOPS,
        KMSG_DUMP_PANIC,
+       KMSG_DUMP_OOPS,
+       KMSG_DUMP_EMERG,
        KMSG_DUMP_RESTART,
        KMSG_DUMP_HALT,
        KMSG_DUMP_POWEROFF,
-       KMSG_DUMP_EMERG,
 };
 
 /**
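
The reorder matters because, as the new comment says, everything listed after KMSG_DUMP_OOPS is skipped by default unless printk.always_kmsg_dump is passed: PANIC and OOPS move ahead of the cut-off and EMERG drops below it. A sketch of that threshold rule with a mirrored enum; should_dump() is an invented helper, not the kernel function:

    #include <stdbool.h>
    #include <stdio.h>

    /* Same relative ordering as the reworked enum above. */
    enum reason { R_PANIC, R_OOPS, R_EMERG, R_RESTART, R_HALT, R_POWEROFF };

    static bool should_dump(enum reason r, bool always_kmsg_dump)
    {
            return always_kmsg_dump || r <= R_OOPS;
    }

    int main(void)
    {
            printf("panic:            %d\n", should_dump(R_PANIC, false));   /* 1 */
            printf("restart:          %d\n", should_dump(R_RESTART, false)); /* 0 */
            printf("restart (forced): %d\n", should_dump(R_RESTART, true));  /* 1 */
            return 0;
    }
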
index 4d34356fe644ee5dca447071829ae06ab68eeb60..b80de520670b64a67dc2ceb8b4e4e11c1d623071 100644 (file)
@@ -129,7 +129,6 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);
 
-extern void mem_cgroup_reset_owner(struct page *page);
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
 extern int do_swap_account;
 #endif
@@ -392,10 +391,6 @@ static inline void mem_cgroup_replace_page_cache(struct page *oldpage,
                                struct page *newpage)
 {
 }
-
-static inline void mem_cgroup_reset_owner(struct page *page)
-{
-}
 #endif /* CONFIG_CGROUP_MEM_CONT */
 
 #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM)
index 32cd1f67462e9b4a80f602474878c6ff632d6976..21638ae14e07c71a005d6cfcea61958e8cd8f970 100644 (file)
@@ -348,9 +348,9 @@ do {                                                                        \
 #define _this_cpu_generic_to_op(pcp, val, op)                          \
 do {                                                                   \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        *__this_cpu_ptr(&(pcp)) op val;                                 \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
 } while (0)
 
 #ifndef this_cpu_write
@@ -449,10 +449,10 @@ do {                                                                      \
 ({                                                                     \
        typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        __this_cpu_add(pcp, val);                                       \
        ret__ = __this_cpu_read(pcp);                                   \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
 })
 
@@ -479,10 +479,10 @@ do {                                                                      \
 #define _this_cpu_generic_xchg(pcp, nval)                              \
 ({     typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_read(pcp);                                   \
        __this_cpu_write(pcp, nval);                                    \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
 })
 
@@ -507,11 +507,11 @@ do {                                                                      \
 ({                                                                     \
        typeof(pcp) ret__;                                              \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_read(pcp);                                   \
        if (ret__ == (oval))                                            \
                __this_cpu_write(pcp, nval);                            \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
 })
 
@@ -544,10 +544,10 @@ do {                                                                      \
 ({                                                                     \
        int ret__;                                                      \
        unsigned long flags;                                            \
-       local_irq_save(flags);                                          \
+       raw_local_irq_save(flags);                                      \
        ret__ = __this_cpu_generic_cmpxchg_double(pcp1, pcp2,           \
                        oval1, oval2, nval1, nval2);                    \
-       local_irq_restore(flags);                                       \
+       raw_local_irq_restore(flags);                                   \
        ret__;                                                          \
 })
 
@@ -718,12 +718,13 @@ do {                                                                      \
 # ifndef __this_cpu_add_return_8
 #  define __this_cpu_add_return_8(pcp, val)    __this_cpu_generic_add_return(pcp, val)
 # endif
-# define __this_cpu_add_return(pcp, val)       __pcpu_size_call_return2(this_cpu_add_return_, pcp, val)
+# define __this_cpu_add_return(pcp, val)       \
+       __pcpu_size_call_return2(__this_cpu_add_return_, pcp, val)
 #endif
 
-#define __this_cpu_sub_return(pcp, val)        this_cpu_add_return(pcp, -(val))
-#define __this_cpu_inc_return(pcp)     this_cpu_add_return(pcp, 1)
-#define __this_cpu_dec_return(pcp)     this_cpu_add_return(pcp, -1)
+#define __this_cpu_sub_return(pcp, val)        __this_cpu_add_return(pcp, -(val))
+#define __this_cpu_inc_return(pcp)     __this_cpu_add_return(pcp, 1)
+#define __this_cpu_dec_return(pcp)     __this_cpu_add_return(pcp, -1)
 
 #define __this_cpu_generic_xchg(pcp, nval)                             \
 ({     typeof(pcp) ret__;                                              \
index 7d379a6bfd886679fcfefee2ed90fb636d6cd028..0657368bd78fb9207369b484fa2aa294f90ce5dc 100644 (file)
@@ -1777,7 +1777,6 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *
 /*
  * Per process flags
  */
-#define PF_STARTING    0x00000002      /* being created */
 #define PF_EXITING     0x00000004      /* getting shut down */
 #define PF_EXITPIDONE  0x00000008      /* pi exit done on shut down */
 #define PF_VCPU                0x00000010      /* I'm a virtual CPU */
@@ -2371,7 +2370,7 @@ static inline int thread_group_empty(struct task_struct *p)
  * Protects ->fs, ->files, ->mm, ->group_info, ->comm, keyring
  * subscriptions and synchronises with wait4().  Also used in procfs.  Also
  * pins the final release of task.io_context.  Also protects ->cpuset and
- * ->cgroup.subsys[].
+ * ->cgroup.subsys[]. And ->vfork_done.
  *
  * Nests both inside and outside of read_lock(&tasklist_lock).
  * It must not be nested with write_lock_irq(&tasklist_lock),
index 46a85c9e1f25bebddd3a1887b30fb182dcf5b5e4..3c7ffdb40dc64ba62cd8f9586fd04d668c4a469c 100644 (file)
@@ -412,7 +412,8 @@ struct tcp_sock {
 
        struct tcp_sack_block recv_sack_cache[4];
 
-       struct sk_buff *highest_sack;   /* highest skb with SACK received
+       struct sk_buff *highest_sack;   /* skb just after the highest
+                                        * skb with SACKed bit set
                                         * (validity guaranteed only if
                                         * sacked_out > 0)
                                         */
index 42c29bfbcee34d8d6ada5d18533850d82973f352..2d80c291fffbad01828648c46df1da0f69763e49 100644 (file)
@@ -1364,8 +1364,9 @@ static inline void tcp_push_pending_frames(struct sock *sk)
        }
 }
 
-/* Start sequence of the highest skb with SACKed bit, valid only if
- * sacked > 0 or when the caller has ensured validity by itself.
+/* Start sequence of the skb just after the highest skb with SACKed
+ * bit, valid only if sacked_out > 0 or when the caller has ensured
+ * validity by itself.
  */
 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
 {
index e2cd3e2a5ae8b73dac244bd4ee98fbe70d5d5364..26a7a6707fa738e181a6b397d8ba9bea47269deb 100644 (file)
@@ -668,6 +668,38 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
        return mm;
 }
 
+static void complete_vfork_done(struct task_struct *tsk)
+{
+       struct completion *vfork;
+
+       task_lock(tsk);
+       vfork = tsk->vfork_done;
+       if (likely(vfork)) {
+               tsk->vfork_done = NULL;
+               complete(vfork);
+       }
+       task_unlock(tsk);
+}
+
+static int wait_for_vfork_done(struct task_struct *child,
+                               struct completion *vfork)
+{
+       int killed;
+
+       freezer_do_not_count();
+       killed = wait_for_completion_killable(vfork);
+       freezer_count();
+
+       if (killed) {
+               task_lock(child);
+               child->vfork_done = NULL;
+               task_unlock(child);
+       }
+
+       put_task_struct(child);
+       return killed;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
@@ -683,8 +715,6 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
  */
 void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 {
-       struct completion *vfork_done = tsk->vfork_done;
-
        /* Get rid of any futexes when releasing the mm */
 #ifdef CONFIG_FUTEX
        if (unlikely(tsk->robust_list)) {
@@ -704,17 +734,15 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
        /* Get rid of any cached register state */
        deactivate_mm(tsk, mm);
 
-       /* notify parent sleeping on vfork() */
-       if (vfork_done) {
-               tsk->vfork_done = NULL;
-               complete(vfork_done);
-       }
+       if (tsk->vfork_done)
+               complete_vfork_done(tsk);
 
        /*
         * If we're exiting normally, clear a user-space tid field if
         * requested.  We leave this alone when dying by signal, to leave
         * the value intact in a core dump, and to save the unnecessary
-        * trouble otherwise.  Userland only wants this done for a sys_exit.
+        * trouble, say, a killed vfork parent shouldn't touch this mm.
+        * Userland only wants this done for a sys_exit.
         */
        if (tsk->clear_child_tid) {
                if (!(tsk->flags & PF_SIGNALED) &&
@@ -1018,7 +1046,6 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
        new_flags &= ~(PF_SUPERPRIV | PF_WQ_WORKER);
        new_flags |= PF_FORKNOEXEC;
-       new_flags |= PF_STARTING;
        p->flags = new_flags;
 }
 
@@ -1548,16 +1575,9 @@ long do_fork(unsigned long clone_flags,
                if (clone_flags & CLONE_VFORK) {
                        p->vfork_done = &vfork;
                        init_completion(&vfork);
+                       get_task_struct(p);
                }
 
-               /*
-                * We set PF_STARTING at creation in case tracing wants to
-                * use this to distinguish a fully live task from one that
-                * hasn't finished SIGSTOP raising yet.  Now we clear it
-                * and set the child going.
-                */
-               p->flags &= ~PF_STARTING;
-
                wake_up_new_task(p);
 
                /* forking complete and child started to run, tell ptracer */
@@ -1565,10 +1585,8 @@ long do_fork(unsigned long clone_flags,
                        ptrace_event(trace, nr);
 
                if (clone_flags & CLONE_VFORK) {
-                       freezer_do_not_count();
-                       wait_for_completion(&vfork);
-                       freezer_count();
-                       ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+                       if (!wait_for_vfork_done(p, &vfork))
+                               ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
                }
        } else {
                nr = PTR_ERR(p);
index 2e48ec0c2e91cf099642f8bf43e90476fb6457a3..c21449f85a2a4061bd9a34fc4844b5a3240850b3 100644 (file)
@@ -119,15 +119,20 @@ static void check_hung_task(struct task_struct *t, unsigned long timeout)
  * For preemptible RCU it is sufficient to call rcu_read_unlock in order
  * to exit the grace period. For classic RCU, a reschedule is required.
  */
-static void rcu_lock_break(struct task_struct *g, struct task_struct *t)
+static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
 {
+       bool can_cont;
+
        get_task_struct(g);
        get_task_struct(t);
        rcu_read_unlock();
        cond_resched();
        rcu_read_lock();
+       can_cont = pid_alive(g) && pid_alive(t);
        put_task_struct(t);
        put_task_struct(g);
+
+       return can_cont;
 }
 
 /*
@@ -154,9 +159,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
                        goto unlock;
                if (!--batch_count) {
                        batch_count = HUNG_TASK_BATCHING;
-                       rcu_lock_break(g, t);
-                       /* Exit if t or g was unhashed during refresh. */
-                       if (t->state == TASK_DEAD || g->state == TASK_DEAD)
+                       if (!rcu_lock_break(g, t))
                                goto unlock;
                }
                /* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
index 32313c08444285e2f4cca1c41e2aa64887ba5a66..0f0d4704ddd8a3865d0926067fbf654d48dabbbc 100644 (file)
@@ -985,6 +985,11 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
                /* add new interrupt at end of irq queue */
                do {
+                       /*
+                        * Or all existing action->thread_mask bits,
+                        * so we can find the next zero bit for this
+                        * new action.
+                        */
                        thread_mask |= old->thread_mask;
                        old_ptr = &old->next;
                        old = *old_ptr;
@@ -993,14 +998,41 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
        }
 
        /*
-        * Setup the thread mask for this irqaction. Unlikely to have
-        * 32 resp 64 irqs sharing one line, but who knows.
+        * Setup the thread mask for this irqaction for ONESHOT. For
+        * !ONESHOT irqs the thread mask is 0 so we can avoid a
+        * conditional in irq_wake_thread().
         */
-       if (new->flags & IRQF_ONESHOT && thread_mask == ~0UL) {
-               ret = -EBUSY;
-               goto out_mask;
+       if (new->flags & IRQF_ONESHOT) {
+               /*
+                * Unlikely to have 32 resp 64 irqs sharing one line,
+                * but who knows.
+                */
+               if (thread_mask == ~0UL) {
+                       ret = -EBUSY;
+                       goto out_mask;
+               }
+               /*
+                * The thread_mask for the action is or'ed to
+                * desc->thread_active to indicate that the
+                * IRQF_ONESHOT thread handler has been woken, but not
+                * yet finished. The bit is cleared when a thread
+                * completes. When all threads of a shared interrupt
+                * line have completed desc->threads_active becomes
+                * zero and the interrupt line is unmasked. See
+                * handle.c:irq_wake_thread() for further information.
+                *
+                * If no thread is woken by primary (hard irq context)
+                * interrupt handlers, then desc->threads_active is
+                * also checked for zero to unmask the irq line in the
+                * affected hard irq flow handlers
+                * (handle_[fasteoi|level]_irq).
+                *
+                * The new action gets the first zero bit of
+                * thread_mask assigned. See the loop above which or's
+                * all existing action->thread_mask bits.
+                */
+               new->thread_mask = 1 << ffz(thread_mask);
        }
-       new->thread_mask = 1 << ffz(thread_mask);
 
        if (!shared) {
                init_waitqueue_head(&desc->wait_for_threads);
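Editor's note: the comment block added to __setup_irq() above describes the bit-allocation scheme in prose. The sketch below shows the same idea in plain userspace C: OR together the thread_mask bits of the existing actions, then hand the new ONESHOT action the first zero bit. ffz_demo() is an open-coded stand-in for the kernel's ffz(), which is not available outside the tree.

#include <stdio.h>

/* Find-first-zero-bit stand-in; fine for a demo where word != ~0UL. */
static unsigned long ffz_demo(unsigned long word)
{
        unsigned long bit = 0;

        while (word & 1UL) {
                word >>= 1;
                bit++;
        }
        return bit;
}

int main(void)
{
        unsigned long thread_mask = 0;
        int i;

        for (i = 0; i < 4; i++) {
                /* Each new shared ONESHOT action gets the first free bit. */
                unsigned long new_mask = 1UL << ffz_demo(thread_mask);

                printf("action %d gets thread_mask 0x%lx\n", i, new_mask);
                thread_mask |= new_mask;  /* what the walk over old actions does */
        }
        return 0;
}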
index 9788c0ec6f4378f19cd80e873d42a0aec7171548..c62b8546cc90e0b39de0e824b0f7ed7179d8ba92 100644 (file)
@@ -1334,8 +1334,10 @@ int __kprobes register_kprobe(struct kprobe *p)
        if (!kernel_text_address((unsigned long) p->addr) ||
            in_kprobes_functions((unsigned long) p->addr) ||
            ftrace_text_reserved(p->addr, p->addr) ||
-           jump_label_text_reserved(p->addr, p->addr))
-               goto fail_with_jump_label;
+           jump_label_text_reserved(p->addr, p->addr)) {
+               ret = -EINVAL;
+               goto cannot_probe;
+       }
 
        /* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
        p->flags &= KPROBE_FLAG_DISABLED;
@@ -1352,7 +1354,7 @@ int __kprobes register_kprobe(struct kprobe *p)
                 * its code to prohibit unexpected unloading.
                 */
                if (unlikely(!try_module_get(probed_mod)))
-                       goto fail_with_jump_label;
+                       goto cannot_probe;
 
                /*
                 * If the module freed .init.text, we couldn't insert
@@ -1361,7 +1363,7 @@ int __kprobes register_kprobe(struct kprobe *p)
                if (within_module_init((unsigned long)p->addr, probed_mod) &&
                    probed_mod->state != MODULE_STATE_COMING) {
                        module_put(probed_mod);
-                       goto fail_with_jump_label;
+                       goto cannot_probe;
                }
                /* ret will be updated by following code */
        }
@@ -1409,7 +1411,7 @@ out:
 
        return ret;
 
-fail_with_jump_label:
+cannot_probe:
        preempt_enable();
        jump_label_unlock();
        return ret;
index 13c0a1143f49438f7cc97718e61ecf5bc5b23747..32690a0b7a1838e1bd9b8bcf8473654955807dae 100644 (file)
@@ -702,6 +702,9 @@ static bool printk_time = 0;
 #endif
 module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
 
+static bool always_kmsg_dump;
+module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
+
 /* Check if we have any console registered that can be called early in boot. */
 static int have_callable_console(void)
 {
@@ -1732,6 +1735,9 @@ void kmsg_dump(enum kmsg_dump_reason reason)
        unsigned long l1, l2;
        unsigned long flags;
 
+       if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
+               return;
+
        /* Theoretically, the log could move on after we do this, but
           there's not a lot we can do about that. The new messages
           will overwrite the start of what we dump. */
index 33a0676ea74409e7ae66888da26c9934a4139d7c..b342f57879e693f54c12a5b15d1e4e7c5bdde7e1 100644 (file)
@@ -6728,7 +6728,7 @@ int __init sched_create_sysfs_power_savings_entries(struct device *dev)
 static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
                             void *hcpu)
 {
-       switch (action) {
+       switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                cpuset_update_active_cpus();
@@ -6741,7 +6741,7 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
                               void *hcpu)
 {
-       switch (action) {
+       switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DOWN_PREPARE:
                cpuset_update_active_cpus();
                return NOTIFY_OK;
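Editor's note: masking off CPU_TASKS_FROZEN lets the _FROZEN notifier variants sent during suspend/resume hit the same cases as their normal counterparts in cpuset_cpu_active()/cpuset_cpu_inactive(). A minimal illustration follows; the numeric values are assumed to mirror include/linux/cpu.h of this era and only serve to show the masking, and the function here is a local mock, not the scheduler code.

#include <stdio.h>

/* Assumed values (illustrative); the kernel defines CPU_ONLINE_FROZEN as
 * CPU_ONLINE | CPU_TASKS_FROZEN. */
#define CPU_ONLINE        0x0002
#define CPU_DOWN_FAILED   0x0006
#define CPU_TASKS_FROZEN  0x0010
#define CPU_ONLINE_FROZEN (CPU_ONLINE | CPU_TASKS_FROZEN)

static const char *cpuset_cpu_active_mock(unsigned long action)
{
        /* Without "& ~CPU_TASKS_FROZEN" the frozen variant falls through
         * to the default case and cpusets are not rebuilt after resume. */
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
        case CPU_DOWN_FAILED:
                return "cpuset_update_active_cpus()";
        default:
                return "NOTIFY_DONE";
        }
}

int main(void)
{
        printf("CPU_ONLINE        -> %s\n", cpuset_cpu_active_mock(CPU_ONLINE));
        printf("CPU_ONLINE_FROZEN -> %s\n", cpuset_cpu_active_mock(CPU_ONLINE_FROZEN));
        return 0;
}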
index 77cb245f8e7bdee634b98f6c275ae221c7c90715..0ab9ae8057f00e8545deba43ff31fab7873a619c 100644 (file)
@@ -818,17 +818,9 @@ static int __init fixup_activate(void *addr, enum debug_obj_state state)
                if (obj->static_init == 1) {
                        debug_object_init(obj, &descr_type_test);
                        debug_object_activate(obj, &descr_type_test);
-                       /*
-                        * Real code should return 0 here ! This is
-                        * not a fixup of some bad behaviour. We
-                        * merily call the debug_init function to keep
-                        * track of the object.
-                        */
-                       return 1;
-               } else {
-                       /* Real code needs to emit a warning here */
+                       return 0;
                }
-               return 0;
+               return 1;
 
        case ODEBUG_STATE_ACTIVE:
                debug_object_deactivate(obj, &descr_type_test);
@@ -967,7 +959,7 @@ static void __init debug_objects_selftest(void)
 
        obj.static_init = 1;
        debug_object_activate(&obj, &descr_type_test);
-       if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, warnings))
+       if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
                goto out;
        debug_object_init(&obj, &descr_type_test);
        if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
index 8e75003d62f632c52b8c0ea56ca62bdc72e560f0..38e612e66da5993f7b1eb92065ebfec45216d83c 100644 (file)
@@ -891,9 +891,15 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
        case 'U':
                return uuid_string(buf, end, ptr, spec, fmt);
        case 'V':
-               return buf + vsnprintf(buf, end > buf ? end - buf : 0,
-                                      ((struct va_format *)ptr)->fmt,
-                                      *(((struct va_format *)ptr)->va));
+               {
+                       va_list va;
+
+                       va_copy(va, *((struct va_format *)ptr)->va);
+                       buf += vsnprintf(buf, end > buf ? end - buf : 0,
+                                        ((struct va_format *)ptr)->fmt, va);
+                       va_end(va);
+                       return buf;
+               }
        case 'K':
                /*
                 * %pK cannot be used in IRQ context because its test
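Editor's note: the %pV change above makes vsnprintf() consume a private copy of the caller's va_list. The usual reason for this pattern (an inference from the hunk, not quoted from a changelog) is that a va_list may only be walked once, so anything that might format the same arguments a second time has to va_copy() first. A freestanding illustration, with a made-up helper print_twice():

#include <stdarg.h>
#include <stdio.h>

/* Print the same argument list twice: the first pass works on a copy so
 * the original va_list is still valid for the second pass. */
static void print_twice(const char *fmt, ...)
{
        va_list ap, copy;

        va_start(ap, fmt);
        va_copy(copy, ap);      /* private copy for the first consumer */
        vprintf(fmt, copy);
        va_end(copy);
        vprintf(fmt, ap);       /* original list untouched, safe to reuse */
        va_end(ap);
}

int main(void)
{
        print_twice("value=%d name=%s\n", 42, "demo");
        return 0;
}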
index 91d3efb25d15472332e1f2d9c382a5732629ce69..8f7fc394f63672954cc4444da0f2158a42470b2c 100644 (file)
@@ -671,6 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
                set_pmd_at(mm, haddr, pmd, entry);
                prepare_pmd_huge_pte(pgtable, mm);
                add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
+               mm->nr_ptes++;
                spin_unlock(&mm->page_table_lock);
        }
 
@@ -789,6 +790,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
        pmd = pmd_mkold(pmd_wrprotect(pmd));
        set_pmd_at(dst_mm, addr, dst_pmd, pmd);
        prepare_pmd_huge_pte(pgtable, dst_mm);
+       dst_mm->nr_ptes++;
 
        ret = 0;
 out_unlock:
@@ -887,7 +889,6 @@ static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
        }
        kfree(pages);
 
-       mm->nr_ptes++;
        smp_wmb(); /* make pte visible before pmd */
        pmd_populate(mm, pmd, pgtable);
        page_remove_rmap(page);
@@ -1047,6 +1048,7 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
                        VM_BUG_ON(page_mapcount(page) < 0);
                        add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
                        VM_BUG_ON(!PageHead(page));
+                       tlb->mm->nr_ptes--;
                        spin_unlock(&tlb->mm->page_table_lock);
                        tlb_remove_page(tlb, page);
                        pte_free(tlb->mm, pgtable);
@@ -1375,7 +1377,6 @@ static int __split_huge_page_map(struct page *page,
                        pte_unmap(pte);
                }
 
-               mm->nr_ptes++;
                smp_wmb(); /* make pte visible before pmd */
                /*
                 * Up to this point the pmd is present and huge and
@@ -1988,7 +1989,6 @@ static void collapse_huge_page(struct mm_struct *mm,
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache(vma, address, _pmd);
        prepare_pmd_huge_pte(pgtable, mm);
-       mm->nr_ptes--;
        spin_unlock(&mm->page_table_lock);
 
 #ifndef CONFIG_NUMA
index 5f34bd8dda34bbc8224303ad080103f224fa5ad6..a876871f6be56d15b06d1da56a9f3dbbd61cda91 100644 (file)
@@ -2277,8 +2277,8 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                        set_page_dirty(page);
                list_add(&page->lru, &page_list);
        }
-       spin_unlock(&mm->page_table_lock);
        flush_tlb_range(vma, start, end);
+       spin_unlock(&mm->page_table_lock);
        mmu_notifier_invalidate_range_end(mm, start, end);
        list_for_each_entry_safe(page, tmp, &page_list, lru) {
                page_remove_rmap(page);
index 1925ffbfb27f00ac3d3d262ce0ea1c8aaa3a117f..310544a379ae9c7b886b3b50815e5f3d5a991ba8 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -28,7 +28,6 @@
 #include <linux/kthread.h>
 #include <linux/wait.h>
 #include <linux/slab.h>
-#include <linux/memcontrol.h>
 #include <linux/rbtree.h>
 #include <linux/memory.h>
 #include <linux/mmu_notifier.h>
@@ -1572,16 +1571,6 @@ struct page *ksm_does_need_to_copy(struct page *page,
 
        new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
        if (new_page) {
-               /*
-                * The memcg-specific accounting when moving
-                * pages around the LRU lists relies on the
-                * page's owner (memcg) to be valid.  Usually,
-                * pages are assigned to a new owner before
-                * being put on the LRU list, but since this
-                * is not the case here, the stale owner from
-                * a previous allocation cycle must be reset.
-                */
-               mem_cgroup_reset_owner(new_page);
                copy_user_highpage(new_page, page, address, vma);
 
                SetPageDirty(new_page);
index 228d6461c12ade8941b37416ca867d3df3277acd..5585dc3d36466189b0d89adf8e86f424372bdd23 100644 (file)
@@ -1042,6 +1042,19 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
 
        pc = lookup_page_cgroup(page);
        memcg = pc->mem_cgroup;
+
+       /*
+        * Surreptitiously switch any uncharged page to root:
+        * an uncharged page off lru does nothing to secure
+        * its former mem_cgroup from sudden removal.
+        *
+        * Our caller holds lru_lock, and PageCgroupUsed is updated
+        * under page_cgroup lock: between them, they make all uses
+        * of pc->mem_cgroup safe.
+        */
+       if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup)
+               pc->mem_cgroup = memcg = root_mem_cgroup;
+
        mz = page_cgroup_zoneinfo(memcg, page);
        /* compound_order() is stabilized through lru_lock */
        MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
@@ -2408,8 +2421,12 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                                       struct page *page,
                                       unsigned int nr_pages,
                                       struct page_cgroup *pc,
-                                      enum charge_type ctype)
+                                      enum charge_type ctype,
+                                      bool lrucare)
 {
+       struct zone *uninitialized_var(zone);
+       bool was_on_lru = false;
+
        lock_page_cgroup(pc);
        if (unlikely(PageCgroupUsed(pc))) {
                unlock_page_cgroup(pc);
@@ -2420,6 +2437,21 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
         * we don't need page_cgroup_lock about tail pages, becase they are not
         * accessed by any other context at this point.
         */
+
+       /*
+        * In some cases, SwapCache and FUSE(splice_buf->radixtree), the page
+        * may already be on some other mem_cgroup's LRU.  Take care of it.
+        */
+       if (lrucare) {
+               zone = page_zone(page);
+               spin_lock_irq(&zone->lru_lock);
+               if (PageLRU(page)) {
+                       ClearPageLRU(page);
+                       del_page_from_lru_list(zone, page, page_lru(page));
+                       was_on_lru = true;
+               }
+       }
+
        pc->mem_cgroup = memcg;
        /*
         * We access a page_cgroup asynchronously without lock_page_cgroup().
@@ -2443,9 +2475,18 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
                break;
        }
 
+       if (lrucare) {
+               if (was_on_lru) {
+                       VM_BUG_ON(PageLRU(page));
+                       SetPageLRU(page);
+                       add_page_to_lru_list(zone, page, page_lru(page));
+               }
+               spin_unlock_irq(&zone->lru_lock);
+       }
+
        mem_cgroup_charge_statistics(memcg, PageCgroupCache(pc), nr_pages);
        unlock_page_cgroup(pc);
-       WARN_ON_ONCE(PageLRU(page));
+
        /*
         * "charge_statistics" updated event counter. Then, check it.
         * Insert ancestor (and ancestor's ancestors), to softlimit RB-tree.
@@ -2643,7 +2684,7 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
        ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
        if (ret == -ENOMEM)
                return ret;
-       __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype);
+       __mem_cgroup_commit_charge(memcg, page, nr_pages, pc, ctype, false);
        return 0;
 }
 
@@ -2663,35 +2704,6 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *ptr,
                                        enum charge_type ctype);
 
-static void
-__mem_cgroup_commit_charge_lrucare(struct page *page, struct mem_cgroup *memcg,
-                                       enum charge_type ctype)
-{
-       struct page_cgroup *pc = lookup_page_cgroup(page);
-       struct zone *zone = page_zone(page);
-       unsigned long flags;
-       bool removed = false;
-
-       /*
-        * In some case, SwapCache, FUSE(splice_buf->radixtree), the page
-        * is already on LRU. It means the page may on some other page_cgroup's
-        * LRU. Take care of it.
-        */
-       spin_lock_irqsave(&zone->lru_lock, flags);
-       if (PageLRU(page)) {
-               del_page_from_lru_list(zone, page, page_lru(page));
-               ClearPageLRU(page);
-               removed = true;
-       }
-       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
-       if (removed) {
-               add_page_to_lru_list(zone, page, page_lru(page));
-               SetPageLRU(page);
-       }
-       spin_unlock_irqrestore(&zone->lru_lock, flags);
-       return;
-}
-
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
                                gfp_t gfp_mask)
 {
@@ -2769,13 +2781,16 @@ static void
 __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
                                        enum charge_type ctype)
 {
+       struct page_cgroup *pc;
+
        if (mem_cgroup_disabled())
                return;
        if (!memcg)
                return;
        cgroup_exclude_rmdir(&memcg->css);
 
-       __mem_cgroup_commit_charge_lrucare(page, memcg, ctype);
+       pc = lookup_page_cgroup(page);
+       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype, true);
        /*
         * Now swap is on-memory. This means this page may be
         * counted both as mem and swap....double count.
@@ -3027,23 +3042,6 @@ void mem_cgroup_uncharge_end(void)
        batch->memcg = NULL;
 }
 
-/*
- * A function for resetting pc->mem_cgroup for newly allocated pages.
- * This function should be called if the newpage will be added to LRU
- * before start accounting.
- */
-void mem_cgroup_reset_owner(struct page *newpage)
-{
-       struct page_cgroup *pc;
-
-       if (mem_cgroup_disabled())
-               return;
-
-       pc = lookup_page_cgroup(newpage);
-       VM_BUG_ON(PageCgroupUsed(pc));
-       pc->mem_cgroup = root_mem_cgroup;
-}
-
 #ifdef CONFIG_SWAP
 /*
  * called after __delete_from_swap_cache() and drop "page" account.
@@ -3248,7 +3246,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        else
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype, false);
        return ret;
 }
 
@@ -3332,7 +3330,7 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
         * the newpage may be on LRU(or pagevec for LRU) already. We lock
         * LRU while we overwrite pc->mem_cgroup.
         */
-       __mem_cgroup_commit_charge_lrucare(newpage, memcg, type);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, type, true);
 }
 
 #ifdef CONFIG_DEBUG_VM
@@ -5077,7 +5075,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
                return NULL;
        if (PageAnon(page)) {
                /* we don't move shared anon */
-               if (!move_anon() || page_mapcount(page) > 2)
+               if (!move_anon() || page_mapcount(page) > 1)
                        return NULL;
        } else if (!move_file())
                /* we ignore mapcount for file pages */
index 06b145fb64ab59a399344bc73ca277be0e39ed7b..47296fee23dbaa67c3408227d7653037a371bcdf 100644 (file)
@@ -640,10 +640,11 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
        unsigned long vmstart;
        unsigned long vmend;
 
-       vma = find_vma_prev(mm, start, &prev);
+       vma = find_vma(mm, start);
        if (!vma || vma->vm_start > start)
                return -EFAULT;
 
+       prev = vma->vm_prev;
        if (start > vma->vm_start)
                prev = vma;
 
index df141f60289eef61c7d278080e8290c3dfc717b9..1503b6b54ecbd1817e6e92968d40c404a6e90f75 100644 (file)
@@ -839,8 +839,6 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
        if (!newpage)
                return -ENOMEM;
 
-       mem_cgroup_reset_owner(newpage);
-
        if (page_count(page) == 1) {
                /* page was freed from under us. So we are done. */
                goto out;
index 4f4f53bdc65de30d11c637b2bc745aa1333a16bd..ef726e8aa8e9ca56c0713abfa4b621f44a210d4c 100644 (file)
@@ -385,10 +385,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
                return -EINVAL;
        if (end == start)
                return 0;
-       vma = find_vma_prev(current->mm, start, &prev);
+       vma = find_vma(current->mm, start);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;
 
+       prev = vma->vm_prev;
        if (start > vma->vm_start)
                prev = vma;
 
index 3f758c7f4c815c2b0edf494526b2823b41fbc142..da15a79b1441b665b0e4b962eea38a92407018fd 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1266,8 +1266,9 @@ munmap_back:
        vma->vm_pgoff = pgoff;
        INIT_LIST_HEAD(&vma->anon_vma_chain);
 
+       error = -EINVAL;        /* when rejecting VM_GROWSDOWN|VM_GROWSUP */
+
        if (file) {
-               error = -EINVAL;
                if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
                        goto free_vma;
                if (vm_flags & VM_DENYWRITE) {
@@ -1293,6 +1294,8 @@ munmap_back:
                pgoff = vma->vm_pgoff;
                vm_flags = vma->vm_flags;
        } else if (vm_flags & VM_SHARED) {
+               if (unlikely(vm_flags & (VM_GROWSDOWN|VM_GROWSUP)))
+                       goto free_vma;
                error = shmem_zero_setup(vma);
                if (error)
                        goto free_vma;
@@ -1605,7 +1608,6 @@ EXPORT_SYMBOL(find_vma);
 
 /*
  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
- * Note: pprev is set to NULL when return value is NULL.
  */
 struct vm_area_struct *
 find_vma_prev(struct mm_struct *mm, unsigned long addr,
@@ -1614,7 +1616,16 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
        struct vm_area_struct *vma;
 
        vma = find_vma(mm, addr);
-       *pprev = vma ? vma->vm_prev : NULL;
+       if (vma) {
+               *pprev = vma->vm_prev;
+       } else {
+               struct rb_node *rb_node = mm->mm_rb.rb_node;
+               *pprev = NULL;
+               while (rb_node) {
+                       *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
+                       rb_node = rb_node->rb_right;
+               }
+       }
        return vma;
 }
 
index 5a688a2756bec54435adbd5f3c13a33fbdd2c11e..f437d054c3bf6884fd5ba6f18338fbf7272c43a4 100644 (file)
@@ -262,10 +262,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
 
        down_write(&current->mm->mmap_sem);
 
-       vma = find_vma_prev(current->mm, start, &prev);
+       vma = find_vma(current->mm, start);
        error = -ENOMEM;
        if (!vma)
                goto out;
+       prev = vma->vm_prev;
        if (unlikely(grows & PROT_GROWSDOWN)) {
                if (vma->vm_start >= end)
                        goto out;
index de1616aa9b1e2b3fac054ce4077073cbb05a3c62..1ccbd714059cdd7ddb9d90cb3ca92438f2cb5d34 100644 (file)
@@ -379,13 +379,15 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
        pgoff_t offset = swp_offset(ent);
        struct swap_cgroup_ctrl *ctrl;
        struct page *mappage;
+       struct swap_cgroup *sc;
 
        ctrl = &swap_cgroup_ctrl[swp_type(ent)];
        if (ctrlp)
                *ctrlp = ctrl;
 
        mappage = ctrl->map[offset / SC_PER_PAGE];
-       return page_address(mappage) + offset % SC_PER_PAGE;
+       sc = page_address(mappage);
+       return sc + offset % SC_PER_PAGE;
 }
 
 /**
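Editor's note: the two-step assignment in lookup_swap_cgroup() above reads as a fix for byte-sized void pointer arithmetic: page_address() returns void *, and under GCC's extension adding an offset to it advances by bytes rather than by sizeof(struct swap_cgroup). The userspace sketch below shows the difference; the struct here is a stand-in, not the real layout.

#include <stdio.h>

struct swap_cgroup {            /* stand-in of the same flavour: 2 bytes */
        unsigned short id;
};

int main(void)
{
        static struct swap_cgroup table[64];
        void *base = table;
        struct swap_cgroup *sc = base;
        unsigned long offset = 3;

        /* void * arithmetic (GNU extension) advances by bytes, which is
         * what the old return statement did ... */
        printf("byte-scaled   : %p\n", base + offset);
        /* ... typed arithmetic advances by whole elements, which is what
         * the lookup actually needs. */
        printf("element-scaled: %p\n", (void *)(sc + offset));
        return 0;
}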
index 12a48a88c0d8cb00dd55b2adcea3e536493dee1a..405d331804c3da95c52347878387a807eb5e6c60 100644 (file)
@@ -184,8 +184,7 @@ static void pcpu_unmap_pages(struct pcpu_chunk *chunk,
                                   page_end - page_start);
        }
 
-       for (i = page_start; i < page_end; i++)
-               __clear_bit(i, populated);
+       bitmap_clear(populated, page_start, page_end - page_start);
 }
 
 /**
index fff1ff7fb9ada36fca1be10d33dec593486f50a6..14380e9fbe3380b927cb58d75c5a950a5ec14f77 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -652,7 +652,7 @@ EXPORT_SYMBOL(__pagevec_release);
 void lru_add_page_tail(struct zone* zone,
                       struct page *page, struct page *page_tail)
 {
-       int active;
+       int uninitialized_var(active);
        enum lru_list lru;
        const int file = 0;
 
@@ -672,7 +672,6 @@ void lru_add_page_tail(struct zone* zone,
                        active = 0;
                        lru = LRU_INACTIVE_ANON;
                }
-               update_page_reclaim_stat(zone, page_tail, file, active);
        } else {
                SetPageUnevictable(page_tail);
                lru = LRU_UNEVICTABLE;
@@ -693,6 +692,9 @@ void lru_add_page_tail(struct zone* zone,
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }
+
+       if (!PageUnevictable(page))
+               update_page_reclaim_stat(zone, page_tail, file, active);
 }
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
@@ -710,8 +712,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
        SetPageLRU(page);
        if (active)
                SetPageActive(page);
-       update_page_reclaim_stat(zone, page, file, active);
        add_page_to_lru_list(zone, page, lru);
+       update_page_reclaim_stat(zone, page, file, active);
 }
 
 /*
index 470038a9187383790751817cda61a07cb50f9512..ea6b32d61873185f59db1eb9ae303488ec6de471 100644 (file)
@@ -300,16 +300,6 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        new_page = alloc_page_vma(gfp_mask, vma, addr);
                        if (!new_page)
                                break;          /* Out of memory */
-                       /*
-                        * The memcg-specific accounting when moving
-                        * pages around the LRU lists relies on the
-                        * page's owner (memcg) to be valid.  Usually,
-                        * pages are assigned to a new owner before
-                        * being put on the LRU list, but since this
-                        * is not the case here, the stale owner from
-                        * a previous allocation cycle must be reset.
-                        */
-                       mem_cgroup_reset_owner(new_page);
                }
 
                /*
index 568d5bf175341b4f84ef284173b5d6da623ceafb..702a1ae9220b79bd9e60cc25732201fe8750dcd2 100644 (file)
@@ -446,8 +446,11 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
        ip6h->nexthdr = IPPROTO_HOPOPTS;
        ip6h->hop_limit = 1;
        ipv6_addr_set(&ip6h->daddr, htonl(0xff020000), 0, 0, htonl(1));
-       ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
-                          &ip6h->saddr);
+       if (ipv6_dev_get_saddr(dev_net(br->dev), br->dev, &ip6h->daddr, 0,
+                              &ip6h->saddr)) {
+               kfree_skb(skb);
+               return NULL;
+       }
        ipv6_eth_mc_map(&ip6h->daddr, eth->h_dest);
 
        hopopt = (u8 *)(ip6h + 1);
index 84122472656c3a70f0c0a93020768e81ecb3972a..dec4f3817133c879b524a65f0919b5dabcff46ea 100644 (file)
@@ -62,6 +62,15 @@ static int brnf_filter_pppoe_tagged __read_mostly = 0;
 #define brnf_filter_pppoe_tagged 0
 #endif
 
+#define IS_IP(skb) \
+       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IP))
+
+#define IS_IPV6(skb) \
+       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_IPV6))
+
+#define IS_ARP(skb) \
+       (!vlan_tx_tag_present(skb) && skb->protocol == htons(ETH_P_ARP))
+
 static inline __be16 vlan_proto(const struct sk_buff *skb)
 {
        if (vlan_tx_tag_present(skb))
@@ -639,8 +648,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
                return NF_DROP;
        br = p->br;
 
-       if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-           IS_PPPOE_IPV6(skb)) {
+       if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb)) {
                if (!brnf_call_ip6tables && !br->nf_call_ip6tables)
                        return NF_ACCEPT;
 
@@ -651,8 +659,7 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
        if (!brnf_call_iptables && !br->nf_call_iptables)
                return NF_ACCEPT;
 
-       if (skb->protocol != htons(ETH_P_IP) && !IS_VLAN_IP(skb) &&
-           !IS_PPPOE_IP(skb))
+       if (!IS_IP(skb) && !IS_VLAN_IP(skb) && !IS_PPPOE_IP(skb))
                return NF_ACCEPT;
 
        nf_bridge_pull_encap_header_rcsum(skb);
@@ -701,7 +708,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
        struct nf_bridge_info *nf_bridge = skb->nf_bridge;
        struct net_device *in;
 
-       if (skb->protocol != htons(ETH_P_ARP) && !IS_VLAN_ARP(skb)) {
+       if (!IS_ARP(skb) && !IS_VLAN_ARP(skb)) {
                in = nf_bridge->physindev;
                if (nf_bridge->mask & BRNF_PKT_TYPE) {
                        skb->pkt_type = PACKET_OTHERHOST;
@@ -718,6 +725,7 @@ static int br_nf_forward_finish(struct sk_buff *skb)
        return 0;
 }
 
+
 /* This is the 'purely bridged' case.  For IP, we pass the packet to
  * netfilter with indev and outdev set to the bridge device,
  * but we are still able to filter on the 'real' indev/outdev
@@ -744,11 +752,9 @@ static unsigned int br_nf_forward_ip(unsigned int hook, struct sk_buff *skb,
        if (!parent)
                return NF_DROP;
 
-       if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
-           IS_PPPOE_IP(skb))
+       if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
                pf = PF_INET;
-       else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-                IS_PPPOE_IPV6(skb))
+       else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
                pf = PF_INET6;
        else
                return NF_ACCEPT;
@@ -795,7 +801,7 @@ static unsigned int br_nf_forward_arp(unsigned int hook, struct sk_buff *skb,
        if (!brnf_call_arptables && !br->nf_call_arptables)
                return NF_ACCEPT;
 
-       if (skb->protocol != htons(ETH_P_ARP)) {
+       if (!IS_ARP(skb)) {
                if (!IS_VLAN_ARP(skb))
                        return NF_ACCEPT;
                nf_bridge_pull_encap_header(skb);
@@ -853,11 +859,9 @@ static unsigned int br_nf_post_routing(unsigned int hook, struct sk_buff *skb,
        if (!realoutdev)
                return NF_DROP;
 
-       if (skb->protocol == htons(ETH_P_IP) || IS_VLAN_IP(skb) ||
-           IS_PPPOE_IP(skb))
+       if (IS_IP(skb) || IS_VLAN_IP(skb) || IS_PPPOE_IP(skb))
                pf = PF_INET;
-       else if (skb->protocol == htons(ETH_P_IPV6) || IS_VLAN_IPV6(skb) ||
-                IS_PPPOE_IPV6(skb))
+       else if (IS_IPV6(skb) || IS_VLAN_IPV6(skb) || IS_PPPOE_IPV6(skb))
                pf = PF_INET6;
        else
                return NF_ACCEPT;
index dd147d78a5889ab6c2139712199c3672f9e087c6..6751ed4e0c074a8bd0f692ccdf5df0d001e0d3dc 100644 (file)
@@ -17,9 +17,9 @@
 #include "br_private_stp.h"
 
 /* since time values in bpdu are in jiffies and then scaled (1/256)
- * before sending, make sure that is at least one.
+ * before sending, make sure that is at least one STP tick.
  */
-#define MESSAGE_AGE_INCR       ((HZ < 256) ? 1 : (HZ/256))
+#define MESSAGE_AGE_INCR       ((HZ / 256) + 1)
 
 static const char *const br_port_state_names[] = {
        [BR_STATE_DISABLED] = "disabled",
@@ -186,7 +186,7 @@ static void br_record_config_information(struct net_bridge_port *p,
        p->designated_cost = bpdu->root_path_cost;
        p->designated_bridge = bpdu->bridge_id;
        p->designated_port = bpdu->port_id;
-       p->designated_age = jiffies + bpdu->message_age;
+       p->designated_age = jiffies - bpdu->message_age;
 
        mod_timer(&p->message_age_timer, jiffies
                  + (p->br->max_age - bpdu->message_age));
index 5864cc49136993d9de12f4c0f9110ab2dbcf1025..5fe2ff3b01efe44d07292514b814b0ba7b01d4f0 100644 (file)
@@ -1335,7 +1335,12 @@ static inline int ebt_make_matchname(const struct ebt_entry_match *m,
     const char *base, char __user *ubase)
 {
        char __user *hlp = ubase + ((char *)m - base);
-       if (copy_to_user(hlp, m->u.match->name, EBT_FUNCTION_MAXNAMELEN))
+       char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+       /* ebtables expects 32 bytes long names but xt_match names are 29 bytes
+          long. Copy 29 bytes and fill remaining bytes with zeroes. */
+       strncpy(name, m->u.match->name, sizeof(name));
+       if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
                return -EFAULT;
        return 0;
 }
@@ -1344,7 +1349,10 @@ static inline int ebt_make_watchername(const struct ebt_entry_watcher *w,
     const char *base, char __user *ubase)
 {
        char __user *hlp = ubase + ((char *)w - base);
-       if (copy_to_user(hlp , w->u.watcher->name, EBT_FUNCTION_MAXNAMELEN))
+       char name[EBT_FUNCTION_MAXNAMELEN] = {};
+
+       strncpy(name, w->u.watcher->name, sizeof(name));
+       if (copy_to_user(hlp , name, EBT_FUNCTION_MAXNAMELEN))
                return -EFAULT;
        return 0;
 }
@@ -1355,6 +1363,7 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
        int ret;
        char __user *hlp;
        const struct ebt_entry_target *t;
+       char name[EBT_FUNCTION_MAXNAMELEN] = {};
 
        if (e->bitmask == 0)
                return 0;
@@ -1368,7 +1377,8 @@ ebt_make_names(struct ebt_entry *e, const char *base, char __user *ubase)
        ret = EBT_WATCHER_ITERATE(e, ebt_make_watchername, base, ubase);
        if (ret != 0)
                return ret;
-       if (copy_to_user(hlp, t->u.target->name, EBT_FUNCTION_MAXNAMELEN))
+       strncpy(name, t->u.target->name, sizeof(name));
+       if (copy_to_user(hlp, name, EBT_FUNCTION_MAXNAMELEN))
                return -EFAULT;
        return 0;
 }
@@ -1893,10 +1903,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
 
        switch (compat_mwt) {
        case EBT_COMPAT_MATCH:
-               match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE,
-                                               name, 0), "ebt_%s", name);
-               if (match == NULL)
-                       return -ENOENT;
+               match = xt_request_find_match(NFPROTO_BRIDGE, name, 0);
                if (IS_ERR(match))
                        return PTR_ERR(match);
 
@@ -1915,10 +1922,7 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt,
                break;
        case EBT_COMPAT_WATCHER: /* fallthrough */
        case EBT_COMPAT_TARGET:
-               wt = try_then_request_module(xt_find_target(NFPROTO_BRIDGE,
-                                               name, 0), "ebt_%s", name);
-               if (wt == NULL)
-                       return -ENOENT;
+               wt = xt_request_find_target(NFPROTO_BRIDGE, name, 0);
                if (IS_ERR(wt))
                        return PTR_ERR(wt);
                off = xt_compat_target_offset(wt);
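Editor's note: the earlier hunks in this ebtables diff stage the shorter xt name in a zeroed 32-byte buffer before copy_to_user(), so the bytes past the end of the source string are zeroes instead of whatever happened to follow it in kernel memory. A userspace-shaped sketch of the same padding idea follows; make_name() is invented here, memcpy() stands in for copy_to_user(), and the size comes from the comment in the hunk.

#include <stdio.h>
#include <string.h>

#define EBT_FUNCTION_MAXNAMELEN 32      /* size userspace ebtables expects */

/* Copy a possibly-shorter name into a fixed 32-byte destination without
 * exposing anything past the source: zero a staging buffer, strncpy into
 * it, then copy the whole fixed-size buffer out. */
static void make_name(char *dst, const char *src)
{
        char name[EBT_FUNCTION_MAXNAMELEN] = {};

        strncpy(name, src, sizeof(name));
        memcpy(dst, name, sizeof(name));        /* copy_to_user() stand-in */
}

int main(void)
{
        char out[EBT_FUNCTION_MAXNAMELEN];

        make_name(out, "ip");
        printf("name=\"%s\", trailing byte is zero: %d\n",
               out, out[sizeof(out) - 1] == 0);
        return 0;
}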
index 606a6e8f3671defcc1eab21a471f0468f54f3359..f965dce6f20f83c404f7133ab6a00a84a938d2b4 100644 (file)
@@ -1060,11 +1060,12 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
        cb->seq = net->dev_base_seq;
 
-       nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
-                   ifla_policy);
+       if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+                       ifla_policy) >= 0) {
 
-       if (tb[IFLA_EXT_MASK])
-               ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+               if (tb[IFLA_EXT_MASK])
+                       ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+       }
 
        for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
                idx = 0;
@@ -1900,10 +1901,11 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
        u32 ext_filter_mask = 0;
        u16 min_ifinfo_dump_size = 0;
 
-       nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, ifla_policy);
-
-       if (tb[IFLA_EXT_MASK])
-               ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+       if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX,
+                       ifla_policy) >= 0) {
+               if (tb[IFLA_EXT_MASK])
+                       ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
+       }
 
        if (!ext_filter_mask)
                return NLMSG_GOODSIZE;
index 53c8ce4046b207cf6dc65735a8a030932095734d..b5e315f13641d683c2ab9031c0d52969c6c0873a 100644 (file)
@@ -1403,8 +1403,16 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
 
        BUG_ON(!pcount);
 
-       /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */
-       if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint))
+       /* Adjust counters and hints for the newly sacked sequence
+        * range but discard the return value since prev is already
+        * marked. We must tag the range first because the seq
+        * advancement below implicitly advances
+        * tcp_highest_sack_seq() when skb is highest_sack.
+        */
+       tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
+                       start_seq, end_seq, dup_sack, pcount);
+
+       if (skb == tp->lost_skb_hint)
                tp->lost_cnt_hint += pcount;
 
        TCP_SKB_CB(prev)->end_seq += shifted;
@@ -1430,12 +1438,6 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb,
                skb_shinfo(skb)->gso_type = 0;
        }
 
-       /* Adjust counters and hints for the newly sacked sequence range but
-        * discard the return value since prev is already marked.
-        */
-       tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked,
-                       start_seq, end_seq, dup_sack, pcount);
-
        /* Difference in this won't matter, both ACKed by the same cumul. ACK */
        TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS);
 
@@ -1583,6 +1585,10 @@ static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb,
                }
        }
 
+       /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */
+       if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una))
+               goto fallback;
+
        if (!skb_shift(prev, skb, len))
                goto fallback;
        if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack))
@@ -2567,6 +2573,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 
                if (cnt > packets) {
                        if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) ||
+                           (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) ||
                            (oldcnt >= packets))
                                break;
 
index c02280a4d126980540daac44a8b541b92c0b16f0..6b8ebc5da0e1344ad58994e327ba42cac44f3ac3 100644 (file)
@@ -434,6 +434,10 @@ static struct inet6_dev * ipv6_add_dev(struct net_device *dev)
        /* Join all-node multicast group */
        ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
 
+       /* Join all-router multicast group if forwarding is set */
+       if (ndev->cnf.forwarding && dev && (dev->flags & IFF_MULTICAST))
+               ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
+
        return ndev;
 }
 
index 01a21c2f6ab37df336f5f69c55f33de4e0a698f3..8e2137bd87e2435d45294ffa192f34688ab61097 100644 (file)
@@ -1332,6 +1332,9 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
                hw_roc = true;
 
        list_for_each_entry(sdata, &local->interfaces, list) {
+               if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
+                   sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
+                       continue;
                if (sdata->old_idle == sdata->vif.bss_conf.idle)
                        continue;
                if (!ieee80211_sdata_running(sdata))
index ad64f4d5271a58121f93cf7f62806c93687d1193..f9b8e819ca63c0bfba25028c7d27b8cf42540157 100644 (file)
@@ -344,7 +344,7 @@ void rate_control_get_rate(struct ieee80211_sub_if_data *sdata,
        for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) {
                info->control.rates[i].idx = -1;
                info->control.rates[i].flags = 0;
-               info->control.rates[i].count = 1;
+               info->control.rates[i].count = 0;
        }
 
        if (sdata->local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL)
index ed86a3be678ea28590d34018b5ec990cbf052e8d..fa4b82c8ae8078b7a2e94f367987d8bc79ef991c 100644 (file)
@@ -635,8 +635,12 @@ static noinline int early_drop(struct net *net, unsigned int hash)
 
        if (del_timer(&ct->timeout)) {
                death_by_timeout((unsigned long)ct);
-               dropped = 1;
-               NF_CT_STAT_INC_ATOMIC(net, early_drop);
+               /* Check if we indeed killed this entry. Reliable event
+                  delivery may have inserted it into the dying list. */
+               if (test_bit(IPS_DYING_BIT, &ct->status)) {
+                       dropped = 1;
+                       NF_CT_STAT_INC_ATOMIC(net, early_drop);
+               }
        }
        nf_ct_put(ct);
        return dropped;
index 30c9d4ca02180e050d3b98c8480b83dfc71047b9..10687692831eb5f5f097568f0d292fa530c796f1 100644 (file)
@@ -1041,16 +1041,13 @@ ctnetlink_parse_nat_setup(struct nf_conn *ct,
        if (!parse_nat_setup) {
 #ifdef CONFIG_MODULES
                rcu_read_unlock();
-               spin_unlock_bh(&nf_conntrack_lock);
                nfnl_unlock();
                if (request_module("nf-nat-ipv4") < 0) {
                        nfnl_lock();
-                       spin_lock_bh(&nf_conntrack_lock);
                        rcu_read_lock();
                        return -EOPNOTSUPP;
                }
                nfnl_lock();
-               spin_lock_bh(&nf_conntrack_lock);
                rcu_read_lock();
                if (nfnetlink_parse_nat_setup_hook)
                        return -EAGAIN;
index 0abfb18b911fb93f022f0e6ac1e7b4dda4fb1e4d..227b6ae99785118066a7184cc4cb66f27d38c841 100644 (file)
@@ -204,6 +204,9 @@ static void perf_record__open(struct perf_record *rec)
 
                if (opts->group && pos != first)
                        group_fd = first->fd;
+fallback_missing_features:
+               if (opts->exclude_guest_missing)
+                       attr->exclude_guest = attr->exclude_host = 0;
 retry_sample_id:
                attr->sample_id_all = opts->sample_id_all_avail ? 1 : 0;
 try_again:
@@ -217,15 +220,23 @@ try_again:
                        } else if (err ==  ENODEV && opts->cpu_list) {
                                die("No such device - did you specify"
                                        " an out-of-range profile CPU?\n");
-                       } else if (err == EINVAL && opts->sample_id_all_avail) {
-                               /*
-                                * Old kernel, no attr->sample_id_type_all field
-                                */
-                               opts->sample_id_all_avail = false;
-                               if (!opts->sample_time && !opts->raw_samples && !time_needed)
-                                       attr->sample_type &= ~PERF_SAMPLE_TIME;
-
-                               goto retry_sample_id;
+                       } else if (err == EINVAL) {
+                               if (!opts->exclude_guest_missing &&
+                                   (attr->exclude_guest || attr->exclude_host)) {
+                                       pr_debug("Old kernel, cannot exclude "
+                                                "guest or host samples.\n");
+                                       opts->exclude_guest_missing = true;
+                                       goto fallback_missing_features;
+                               } else if (opts->sample_id_all_avail) {
+                                       /*
+                                        * Old kernel, no attr->sample_id_type_all field
+                                        */
+                                       opts->sample_id_all_avail = false;
+                                       if (!opts->sample_time && !opts->raw_samples && !time_needed)
+                                               attr->sample_type &= ~PERF_SAMPLE_TIME;
+
+                                       goto retry_sample_id;
+                               }
                        }
 
                        /*
@@ -503,9 +514,9 @@ static int __cmd_record(struct perf_record *rec, int argc, const char **argv)
                        return err;
        }
 
-       if (!!rec->no_buildid
+       if (!rec->no_buildid
            && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
-               pr_err("Couldn't generating buildids. "
+               pr_err("Couldn't generate buildids. "
                       "Use --no-buildid to profile anyway.\n");
                return -1;
        }
index dd162aa24baad2f44c5cbb1b4176fb2b2cc7b26d..ecff31257eb3771cf9db12acc49065e40e2c4d18 100644 (file)
@@ -857,6 +857,9 @@ static void perf_top__start_counters(struct perf_top *top)
                attr->mmap = 1;
                attr->comm = 1;
                attr->inherit = top->inherit;
+fallback_missing_features:
+               if (top->exclude_guest_missing)
+                       attr->exclude_guest = attr->exclude_host = 0;
 retry_sample_id:
                attr->sample_id_all = top->sample_id_all_avail ? 1 : 0;
 try_again:
@@ -868,12 +871,20 @@ try_again:
                        if (err == EPERM || err == EACCES) {
                                ui__error_paranoid();
                                goto out_err;
-                       } else if (err == EINVAL && top->sample_id_all_avail) {
-                               /*
-                                * Old kernel, no attr->sample_id_type_all field
-                                */
-                               top->sample_id_all_avail = false;
-                               goto retry_sample_id;
+                       } else if (err == EINVAL) {
+                               if (!top->exclude_guest_missing &&
+                                   (attr->exclude_guest || attr->exclude_host)) {
+                                       pr_debug("Old kernel, cannot exclude "
+                                                "guest or host samples.\n");
+                                       top->exclude_guest_missing = true;
+                                       goto fallback_missing_features;
+                               } else if (top->sample_id_all_avail) {
+                                       /*
+                                        * Old kernel, no attr->sample_id_type_all field
+                                        */
+                                       top->sample_id_all_avail = false;
+                                       goto retry_sample_id;
+                               }
                        }
                        /*
                         * If it's cycles then fall back to hrtimer
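
The builtin-record.c and builtin-top.c hunks above implement the same staged fallback when an older kernel rejects the event attributes with EINVAL: first give up guest/host exclusion, then give up sample_id_all, retrying after each step. A standalone sketch of that retry shape follows; sys_open_event and struct attr are illustrative stand-ins, not the real sys_perf_event_open interface.

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct attr {
            bool exclude_guest;
            bool sample_id_all;
    };

    /* Pretend "old kernel": rejects any attribute bit it does not know. */
    static int sys_open_event(const struct attr *a)
    {
            if (a->exclude_guest || a->sample_id_all)
                    return -EINVAL;
            return 42;                              /* fake fd */
    }

    static int open_event(struct attr *a)
    {
            bool exclude_guest_missing = false;
            bool sample_id_all_avail = true;

    fallback_missing_features:
            if (exclude_guest_missing)
                    a->exclude_guest = false;
    retry_sample_id:
            a->sample_id_all = sample_id_all_avail;

            int fd = sys_open_event(a);
            if (fd == -EINVAL) {
                    if (!exclude_guest_missing && a->exclude_guest) {
                            exclude_guest_missing = true;   /* drop feature 1 */
                            goto fallback_missing_features;
                    } else if (sample_id_all_avail) {
                            sample_id_all_avail = false;    /* drop feature 2 */
                            goto retry_sample_id;
                    }
            }
            return fd;
    }

    int main(void)
    {
            struct attr a = { .exclude_guest = true, .sample_id_all = true };
            printf("fd = %d\n", open_event(&a));    /* succeeds after two retries */
            return 0;
    }
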
index 64f8bee31ced8d7eb3586788e4a69f872c33e57a..16e7d20eee8321110797f9f77c928c1e15e013ea 100644 (file)
@@ -199,6 +199,7 @@ struct perf_record_opts {
        bool         sample_address;
        bool         sample_time;
        bool         sample_id_all_avail;
+       bool         exclude_guest_missing;
        bool         system_wide;
        bool         period;
        unsigned int freq;
index a248f3c2c60d9bb730f5603e1b87fcb04160cb6a..f2eab81435ae1b4bf3ffaaedb3734e7e953b492b 100644 (file)
@@ -34,6 +34,7 @@ struct perf_top {
        bool               inherit;
        bool               group;
        bool               sample_id_all_avail;
+       bool               exclude_guest_missing;
        bool               dump_symtab;
        const char         *cpu_list;
        struct hist_entry  *sym_filter_entry;
index 813141047fc22fcfd61ae377444f7ff2a97f06eb..fb25d132921882c3b432ece1bc07bd2700800bc2 100644 (file)
@@ -6,7 +6,7 @@
  * XXX We need to find a better place for these things...
  */
 bool perf_host  = true;
-bool perf_guest = true;
+bool perf_guest = false;
 
 void event_attr_init(struct perf_event_attr *attr)
 {