Merge tag 'v3.10.46' into linux-linaro-lsk
author Mark Brown <broonie@linaro.org>
Tue, 1 Jul 2014 10:19:52 +0000 (11:19 +0100)
committer Mark Brown <broonie@linaro.org>
Tue, 1 Jul 2014 10:19:52 +0000 (11:19 +0100)
This is the 3.10.46 stable release

134 files changed:
Makefile
arch/arm/kernel/stacktrace.c
arch/arm/mach-at91/sysirq_mask.c
arch/arm/mach-omap1/board-h2.c
arch/arm/mach-omap1/board-h3.c
arch/arm/mach-omap1/board-innovator.c
arch/arm/mach-omap1/board-osk.c
arch/arm/mm/proc-v7-3level.S
arch/arm64/kernel/ptrace.c
arch/mips/kvm/kvm_mips.c
arch/s390/include/asm/lowcore.h
arch/sparc/net/bpf_jit_comp.c
arch/x86/kernel/entry_32.S
arch/x86/kvm/lapic.c
arch/x86/syscalls/syscall_64.tbl
crypto/crypto_user.c
drivers/acpi/acpica/utstring.c
drivers/acpi/bus.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_uart.h
drivers/char/applicom.c
drivers/connector/cn_proc.c
drivers/extcon/extcon-max8997.c
drivers/hid/hid-core.c
drivers/hv/hv_balloon.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/max1363.c
drivers/iio/magnetometer/ak8975.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/infiniband/ulp/isert/ib_isert.h
drivers/media/pci/ivtv/ivtv-alsa-pcm.c
drivers/media/usb/stk1160/stk1160-core.c
drivers/media/usb/stk1160/stk1160.h
drivers/media/usb/uvc/uvc_video.c
drivers/net/can/sja1000/peak_pci.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/ti/cpsw.c
drivers/net/macvlan.c
drivers/net/team/team.c
drivers/net/usb/qmi_wwan.c
drivers/net/vxlan.c
drivers/rtc/rtc-at91rm9200.c
drivers/scsi/scsi_netlink.c
drivers/staging/iio/light/tsl2x7x_core.c
drivers/staging/tidspbridge/core/dsp-clock.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_spc.c
drivers/target/target_core_transport.c
drivers/usb/class/cdc-acm.c
drivers/usb/class/cdc-acm.h
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/inode.c
drivers/usb/host/pci-quirks.c
drivers/usb/misc/usbtest.c
drivers/usb/phy/phy-isp1301-omap.c
drivers/usb/serial/bus.c
drivers/usb/serial/option.c
drivers/usb/serial/qcserial.c
drivers/usb/serial/sierra.c
drivers/usb/serial/usb_wwan.c
drivers/video/matrox/matroxfb_base.h
fs/aio.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/disk-io.c
fs/btrfs/extent_io.c
fs/btrfs/free-space-cache.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/volumes.c
fs/ext4/mballoc.c
fs/ext4/page-io.c
include/linux/if_team.h
include/linux/irqdesc.h
include/linux/netlink.h
include/linux/ptrace.h
include/linux/sock_diag.h
include/net/inetpeer.h
include/net/sock.h
include/sound/core.h
include/target/target_core_backend.h
include/uapi/sound/compress_offload.h
kernel/audit.c
kernel/fork.c
kernel/irq/manage.c
kernel/irq/spurious.c
kernel/time/tick-sched.c
lib/idr.c
lib/lzo/lzo1x_decompress_safe.c
lib/nlattr.c
mm/memory-failure.c
mm/rmap.c
mm/vmscan.c
net/bluetooth/l2cap_sock.c
net/can/gw.c
net/core/dev.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/dcb/dcbnl.c
net/decnet/dn_dev.c
net/decnet/dn_fib.c
net/decnet/netfilter/dn_rtmsg.c
net/ipv4/datagram.c
net/ipv4/ipip.c
net/ipv4/tcp_input.c
net/ipv6/ip6_tunnel.c
net/ipv6/output_core.c
net/ipv6/sit.c
net/iucv/af_iucv.c
net/mac80211/iface.c
net/netfilter/nfnetlink.c
net/netlink/af_netlink.c
net/netlink/genetlink.c
net/packet/diag.c
net/phonet/pn_netlink.c
net/sched/act_api.c
net/sched/cls_api.c
net/sched/sch_api.c
net/sctp/associola.c
net/tipc/netlink.c
net/xfrm/xfrm_user.c
security/integrity/evm/evm_main.c
security/integrity/ima/ima_crypto.c
sound/core/control.c
sound/core/init.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/max98090.c

index e55476c4aef0cce063030d0f9d99291af63febb4..c226f110181d33c2e0c93b92fb979e1882b3548c 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 44
+SUBLEVEL = 46
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
index af4e8c8a5422c4383396f7f4b3a836a67426b7de..6582c4adc182ceddbfa2e87e73bccf602567fe5f 100644 (file)
@@ -83,13 +83,16 @@ static int save_trace(struct stackframe *frame, void *d)
        return trace->nr_entries >= trace->max_entries;
 }
 
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+/* This must be noinline so that our skip calculation works correctly */
+static noinline void __save_stack_trace(struct task_struct *tsk,
+       struct stack_trace *trace, unsigned int nosched)
 {
        struct stack_trace_data data;
        struct stackframe frame;
 
        data.trace = trace;
        data.skip = trace->skip;
+       data.no_sched_functions = nosched;
 
        if (tsk != current) {
 #ifdef CONFIG_SMP
@@ -102,7 +105,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
                        trace->entries[trace->nr_entries++] = ULONG_MAX;
                return;
 #else
-               data.no_sched_functions = 1;
                frame.fp = thread_saved_fp(tsk);
                frame.sp = thread_saved_sp(tsk);
                frame.lr = 0;           /* recovered from the stack */
@@ -111,11 +113,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
        } else {
                register unsigned long current_sp asm ("sp");
 
-               data.no_sched_functions = 0;
+               /* We don't want this function nor the caller */
+               data.skip += 2;
                frame.fp = (unsigned long)__builtin_frame_address(0);
                frame.sp = current_sp;
                frame.lr = (unsigned long)__builtin_return_address(0);
-               frame.pc = (unsigned long)save_stack_trace_tsk;
+               frame.pc = (unsigned long)__save_stack_trace;
        }
 
        walk_stackframe(&frame, save_trace, &data);
@@ -123,9 +126,14 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
                trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
 
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       __save_stack_trace(tsk, trace, 1);
+}
+
 void save_stack_trace(struct stack_trace *trace)
 {
-       save_stack_trace_tsk(current, trace);
+       __save_stack_trace(current, trace, 0);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 #endif
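
A compressed sketch of the pattern used above, in plain C (names and the
unconditional skip are illustrative; the kernel only applies the extra skip
when unwinding the current task):

	struct trace {
		unsigned long *entries;
		unsigned int max_entries;
		unsigned int nr_entries;
		unsigned int skip;
	};

	/* noinline matters: if the compiler inlined the worker, the fixed
	 * "+2" would no longer match the real number of tracer frames. */
	static __attribute__((noinline)) void __save_trace(struct trace *t, int nosched)
	{
		t->skip += 2;	/* drop __save_trace() and its public wrapper */
		(void)nosched;
		/* ... walk the stack frames, discarding t->skip entries ... */
	}

	void save_trace_nosched(struct trace *t) { __save_trace(t, 1); }
	void save_trace(struct trace *t)         { __save_trace(t, 0); }
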
index 2ba694f9626b69f348b96d71db3450656a859361..f8bc3511a8c8b49c82084a21593e74ffbfa6ccd6 100644 (file)
 
 #include "generic.h"
 
-#define AT91_RTC_IDR   0x24    /* Interrupt Disable Register */
-#define AT91_RTC_IMR   0x28    /* Interrupt Mask Register */
+#define AT91_RTC_IDR           0x24    /* Interrupt Disable Register */
+#define AT91_RTC_IMR           0x28    /* Interrupt Mask Register */
+#define AT91_RTC_IRQ_MASK      0x1f    /* Available IRQs mask */
 
 void __init at91_sysirq_mask_rtc(u32 rtc_base)
 {
        void __iomem *base;
-       u32 mask;
 
        base = ioremap(rtc_base, 64);
        if (!base)
                return;
 
-       mask = readl_relaxed(base + AT91_RTC_IMR);
-       if (mask) {
-               pr_info("AT91: Disabling rtc irq\n");
-               writel_relaxed(mask, base + AT91_RTC_IDR);
-               (void)readl_relaxed(base + AT91_RTC_IMR);       /* flush */
-       }
+       /*
+        * sam9x5 SoCs have the following errata:
+        * "RTC: Interrupt Mask Register cannot be used
+        *  Interrupt Mask Register read always returns 0."
+        *
+        * Hence we're not relying on IMR values to disable
+        * interrupts.
+        */
+       writel_relaxed(AT91_RTC_IRQ_MASK, base + AT91_RTC_IDR);
+       (void)readl_relaxed(base + AT91_RTC_IMR);       /* flush */
 
        iounmap(base);
 }
index 0dac3d239e326345afcc01779d5565dead760085..d712c517223711ceeca0881572e1c92fb793b5f4 100644 (file)
@@ -379,7 +379,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
        /* usb1 has a Mini-AB port and external isp1301 transceiver */
        .otg            = 2,
 
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
        .hmc_mode       = 19,   /* 0:host(off) 1:dev|otg 2:disabled */
        /* .hmc_mode    = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
 #elif  defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
index 816ecd13f81e241c3fca64b4554b3a235b548d69..bfed4f928663a52b3643a7518f35e3c8c0537800 100644 (file)
@@ -366,7 +366,7 @@ static struct omap_usb_config h3_usb_config __initdata = {
        /* usb1 has a Mini-AB port and external isp1301 transceiver */
        .otg        = 2,
 
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
        .hmc_mode       = 19,   /* 0:host(off) 1:dev|otg 2:disabled */
 #elif  defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
        /* NONSTANDARD CABLE NEEDED (B-to-Mini-B) */
index bd5f02e9c3545b1414dc9c3fef1694a46b693cb7..c49ce83cc1ebd066abcc4200f78cd9e008dda61a 100644 (file)
@@ -312,7 +312,7 @@ static struct omap_usb_config h2_usb_config __initdata = {
        /* usb1 has a Mini-AB port and external isp1301 transceiver */
        .otg            = 2,
 
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
        .hmc_mode       = 19,   /* 0:host(off) 1:dev|otg 2:disabled */
        /* .hmc_mode    = 21,*/ /* 0:host(off) 1:dev(loopback) 2:host(loopback) */
 #elif  defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
index a7ce69286688434e0e195e3c8a2e24728ff6dc60..006fbb5f9654b5eb3207b49d0beb1128f3156679 100644 (file)
@@ -280,7 +280,7 @@ static struct omap_usb_config osk_usb_config __initdata = {
         * be used, with a NONSTANDARD gender-bending cable/dongle, as
         * a peripheral.
         */
-#ifdef CONFIG_USB_GADGET_OMAP
+#if IS_ENABLED(CONFIG_USB_OMAP)
        .register_dev   = 1,
        .hmc_mode       = 0,
 #else
index 6ba4bd9118f28163a0d4d0b36686f403f47f8d45..6f3b0476b72919f4ac2786f58d7623c51162e60e 100644 (file)
@@ -56,6 +56,14 @@ ENTRY(cpu_v7_switch_mm)
        mov     pc, lr
 ENDPROC(cpu_v7_switch_mm)
 
+#ifdef __ARMEB__
+#define rl r3
+#define rh r2
+#else
+#define rl r2
+#define rh r3
+#endif
+
 /*
  * cpu_v7_set_pte_ext(ptep, pte)
  *
@@ -65,13 +73,13 @@ ENDPROC(cpu_v7_switch_mm)
  */
 ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
-       tst     r2, #L_PTE_VALID
+       tst     rl, #L_PTE_VALID
        beq     1f
-       tst     r3, #1 << (57 - 32)             @ L_PTE_NONE
-       bicne   r2, #L_PTE_VALID
+       tst     rh, #1 << (57 - 32)             @ L_PTE_NONE
+       bicne   rl, #L_PTE_VALID
        bne     1f
-       tst     r3, #1 << (55 - 32)             @ L_PTE_DIRTY
-       orreq   r2, #L_PTE_RDONLY
+       tst     rh, #1 << (55 - 32)             @ L_PTE_DIRTY
+       orreq   rl, #L_PTE_RDONLY
 1:     strd    r2, r3, [r0]
        ALT_SMP(W(nop))
        ALT_UP (mcr     p15, 0, r0, c7, c10, 1)         @ flush_pte
index 0bf195533088ad0db219799861e8193dd287509d..096a7ad5f004c867a7e50d4cf5fbe333b4bb68c5 100644 (file)
@@ -825,6 +825,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
                                    compat_ulong_t val)
 {
        int ret;
+       mm_segment_t old_fs = get_fs();
 
        if (off & 3 || off >= COMPAT_USER_SZ)
                return -EIO;
@@ -832,10 +833,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
        if (off >= sizeof(compat_elf_gregset_t))
                return 0;
 
+       set_fs(KERNEL_DS);
        ret = copy_regset_from_user(tsk, &user_aarch32_view,
                                    REGSET_COMPAT_GPR, off,
                                    sizeof(compat_ulong_t),
                                    &val);
+       set_fs(old_fs);
+
        return ret;
 }
 
index dd203e59e6fd650767a3ae5286e0599f4dbc15b7..426345ac6f6eb73c69f940b7330644f0baf2f3be 100644 (file)
@@ -299,7 +299,7 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
        if (cpu_has_veic || cpu_has_vint) {
                size = 0x200 + VECTORSPACING * 64;
        } else {
-               size = 0x200;
+               size = 0x4000;
        }
 
        /* Save Linux EBASE */
index bbf8141408cdadf2881011a683937c3636e17375..2bed4f02a55839321a42cf6db9ef89f384d3020e 100644 (file)
@@ -142,9 +142,9 @@ struct _lowcore {
        __u8    pad_0x02fc[0x0300-0x02fc];      /* 0x02fc */
 
        /* Interrupt response block */
-       __u8    irb[64];                        /* 0x0300 */
+       __u8    irb[96];                        /* 0x0300 */
 
-       __u8    pad_0x0340[0x0e00-0x0340];      /* 0x0340 */
+       __u8    pad_0x0360[0x0e00-0x0360];      /* 0x0360 */
 
        /*
         * 0xe00 contains the address of the IPL Parameter Information
@@ -288,12 +288,13 @@ struct _lowcore {
        __u8    pad_0x03a0[0x0400-0x03a0];      /* 0x03a0 */
 
        /* Interrupt response block. */
-       __u8    irb[64];                        /* 0x0400 */
+       __u8    irb[96];                        /* 0x0400 */
+       __u8    pad_0x0460[0x0480-0x0460];      /* 0x0460 */
 
        /* Per cpu primary space access list */
-       __u32   paste[16];                      /* 0x0440 */
+       __u32   paste[16];                      /* 0x0480 */
 
-       __u8    pad_0x0480[0x0e00-0x0480];      /* 0x0480 */
+       __u8    pad_0x04c0[0x0e00-0x04c0];      /* 0x04c0 */
 
        /*
         * 0xe00 contains the address of the IPL Parameter Information
index fd95862c65aa82e0c268b6f3e4f5ba208f300c49..224fc0c71b8a0295eea51b9feda3fa234fbb62f6 100644 (file)
@@ -83,9 +83,9 @@ static void bpf_flush_icache(void *start_, void *end_)
 #define BNE            (F2(0, 2) | CONDNE)
 
 #ifdef CONFIG_SPARC64
-#define BNE_PTR                (F2(0, 1) | CONDNE | (2 << 20))
+#define BE_PTR         (F2(0, 1) | CONDE | (2 << 20))
 #else
-#define BNE_PTR                BNE
+#define BE_PTR         BE
 #endif
 
 #define SETHI(K, REG)  \
@@ -600,7 +600,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                        case BPF_S_ANC_IFINDEX:
                                emit_skb_loadptr(dev, r_A);
                                emit_cmpi(r_A, 0);
-                               emit_branch(BNE_PTR, cleanup_addr + 4);
+                               emit_branch(BE_PTR, cleanup_addr + 4);
                                emit_nop();
                                emit_load32(r_A, struct net_device, ifindex, r_A);
                                break;
@@ -613,7 +613,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                        case BPF_S_ANC_HATYPE:
                                emit_skb_loadptr(dev, r_A);
                                emit_cmpi(r_A, 0);
-                               emit_branch(BNE_PTR, cleanup_addr + 4);
+                               emit_branch(BE_PTR, cleanup_addr + 4);
                                emit_nop();
                                emit_load16(r_A, struct net_device, type, r_A);
                                break;
index 94e52cf064b01242e13dda39861f18e594261fb6..ac632817609747468c5c30da5a18cbbe352ebe59 100644 (file)
@@ -434,9 +434,10 @@ sysenter_past_esp:
        jnz sysenter_audit
 sysenter_do_call:
        cmpl $(NR_syscalls), %eax
-       jae syscall_badsys
+       jae sysenter_badsys
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)
+sysenter_after_call:
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
@@ -554,11 +555,6 @@ ENTRY(iret_exc)
 
        CFI_RESTORE_STATE
 ldt_ss:
-       larl PT_OLDSS(%esp), %eax
-       jnz restore_nocheck
-       testl $0x00400000, %eax         # returning to 32bit stack?
-       jnz restore_nocheck             # allright, normal return
-
 #ifdef CONFIG_PARAVIRT
        /*
         * The kernel can't run on a non-flat stack if paravirt mode
@@ -691,7 +687,12 @@ END(syscall_fault)
 
 syscall_badsys:
        movl $-ENOSYS,PT_EAX(%esp)
-       jmp resume_userspace
+       jmp syscall_exit
+END(syscall_badsys)
+
+sysenter_badsys:
+       movl $-ENOSYS,PT_EAX(%esp)
+       jmp sysenter_after_call
 END(syscall_badsys)
        CFI_ENDPROC
 /*
index 61d9fed5eb31dfebb58e20ee0a9060a2f4e0ba5e..279d093524b415f36196dd69211ebedd3299176e 100644 (file)
@@ -370,6 +370,8 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 
 static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 {
+       /* Note that we never get here with APIC virtualization enabled.  */
+
        if (!__apic_test_and_set_vector(vec, apic->regs + APIC_ISR))
                ++apic->isr_count;
        BUG_ON(apic->isr_count > MAX_APIC_VECTOR);
@@ -381,12 +383,48 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
        apic->highest_isr_cache = vec;
 }
 
+static inline int apic_find_highest_isr(struct kvm_lapic *apic)
+{
+       int result;
+
+       /*
+        * Note that isr_count is always 1, and highest_isr_cache
+        * is always -1, with APIC virtualization enabled.
+        */
+       if (!apic->isr_count)
+               return -1;
+       if (likely(apic->highest_isr_cache != -1))
+               return apic->highest_isr_cache;
+
+       result = find_highest_vector(apic->regs + APIC_ISR);
+       ASSERT(result == -1 || result >= 16);
+
+       return result;
+}
+
 static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 {
-       if (__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
+       struct kvm_vcpu *vcpu;
+       if (!__apic_test_and_clear_vector(vec, apic->regs + APIC_ISR))
+               return;
+
+       vcpu = apic->vcpu;
+
+       /*
+        * We do get here for APIC virtualization enabled if the guest
+        * uses the Hyper-V APIC enlightenment.  In this case we may need
+        * to trigger a new interrupt delivery by writing the SVI field;
+        * on the other hand isr_count and highest_isr_cache are unused
+        * and must be left alone.
+        */
+       if (unlikely(kvm_apic_vid_enabled(vcpu->kvm)))
+               kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
+                                              apic_find_highest_isr(apic));
+       else {
                --apic->isr_count;
-       BUG_ON(apic->isr_count < 0);
-       apic->highest_isr_cache = -1;
+               BUG_ON(apic->isr_count < 0);
+               apic->highest_isr_cache = -1;
+       }
 }
 
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu)
@@ -466,22 +504,6 @@ static void pv_eoi_clr_pending(struct kvm_vcpu *vcpu)
        __clear_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention);
 }
 
-static inline int apic_find_highest_isr(struct kvm_lapic *apic)
-{
-       int result;
-
-       /* Note that isr_count is always 1 with vid enabled */
-       if (!apic->isr_count)
-               return -1;
-       if (likely(apic->highest_isr_cache != -1))
-               return apic->highest_isr_cache;
-
-       result = find_highest_vector(apic->regs + APIC_ISR);
-       ASSERT(result == -1 || result >= 16);
-
-       return result;
-}
-
 void kvm_apic_update_tmr(struct kvm_vcpu *vcpu, u32 *tmr)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
@@ -1619,6 +1641,8 @@ int kvm_get_apic_interrupt(struct kvm_vcpu *vcpu)
        int vector = kvm_apic_has_interrupt(vcpu);
        struct kvm_lapic *apic = vcpu->arch.apic;
 
+       /* Note that we never get here with APIC virtualization enabled.  */
+
        if (vector == -1)
                return -1;
 
index 38ae65dfd14ffe91904b4fc409ece711f84e61e9..63a899304d27cb5e976d74279993416a496d4818 100644 (file)
 203    common  sched_setaffinity       sys_sched_setaffinity
 204    common  sched_getaffinity       sys_sched_getaffinity
 205    64      set_thread_area
-206    common  io_setup                sys_io_setup
+206    64      io_setup                sys_io_setup
 207    common  io_destroy              sys_io_destroy
 208    common  io_getevents            sys_io_getevents
-209    common  io_submit               sys_io_submit
+209    64      io_submit               sys_io_submit
 210    common  io_cancel               sys_io_cancel
 211    64      get_thread_area
 212    common  lookup_dcookie          sys_lookup_dcookie
 540    x32     process_vm_writev       compat_sys_process_vm_writev
 541    x32     setsockopt              compat_sys_setsockopt
 542    x32     getsockopt              compat_sys_getsockopt
+543    x32     io_setup                compat_sys_io_setup
+544    x32     io_submit               compat_sys_io_submit
index 1512e41cd93d74a4e7ab3fde6809e64468f797a8..43665d0d0905ddddf018fe68655c1ff7685b0b9e 100644 (file)
@@ -466,7 +466,7 @@ static int crypto_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        type -= CRYPTO_MSG_BASE;
        link = &crypto_dispatch[type];
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if ((type == (CRYPTO_MSG_GETALG - CRYPTO_MSG_BASE) &&
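
The capable() to netlink_capable() conversions in this release (here and in
several networking files further down) tie the permission check to the
socket that sent the request instead of to whichever process happens to be
running the handler. A minimal sketch of the pattern, with a hypothetical
handler name:

	#include <linux/capability.h>
	#include <linux/errno.h>
	#include <linux/netlink.h>
	#include <linux/skbuff.h>

	static int example_nl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
	{
		/* Require that the opener of the sending netlink socket had
		 * CAP_NET_ADMIN, so an unprivileged user cannot smuggle a
		 * request through a more privileged process. */
		if (!netlink_capable(skb, CAP_NET_ADMIN))
			return -EPERM;

		/* ... act on the privileged request ... */
		return 0;
	}
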
index b3e36a81aa4d1360b9280d2edd6d9d5f9e7330dd..ca6d2acafa66f4be9801c78a872e6626e664a06b 100644 (file)
@@ -349,7 +349,7 @@ void acpi_ut_print_string(char *string, u8 max_length)
        }
 
        acpi_os_printf("\"");
-       for (i = 0; string[i] && (i < max_length); i++) {
+       for (i = 0; (i < max_length) && string[i]; i++) {
 
                /* Escape sequences */
 
index ccba6e46cfb30113ca52c3465f4abc4345e3a78e..b62207a87430d5ff6e5cf60d941d7edb20abf0a7 100644 (file)
@@ -57,6 +57,12 @@ EXPORT_SYMBOL(acpi_root_dir);
 
 
 #ifdef CONFIG_X86
+#ifdef CONFIG_ACPI_CUSTOM_DSDT
+static inline int set_copy_dsdt(const struct dmi_system_id *id)
+{
+       return 0;
+}
+#else
 static int set_copy_dsdt(const struct dmi_system_id *id)
 {
        printk(KERN_NOTICE "%s detected - "
@@ -64,6 +70,7 @@ static int set_copy_dsdt(const struct dmi_system_id *id)
        acpi_gbl_copy_dsdt_locally = 1;
        return 0;
 }
+#endif
 
 static struct dmi_system_id dsdt_dmi_table[] __initdata = {
        /*
index bc68a440d432cd21b2ee10885e7106cfe2852b79..c4d2f0e4868529a870b372290baf6a5315c4266c 100644 (file)
@@ -118,10 +118,6 @@ static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu)
 
 int hci_uart_tx_wakeup(struct hci_uart *hu)
 {
-       struct tty_struct *tty = hu->tty;
-       struct hci_dev *hdev = hu->hdev;
-       struct sk_buff *skb;
-
        if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
                set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
                return 0;
@@ -129,6 +125,22 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
 
        BT_DBG("");
 
+       schedule_work(&hu->write_work);
+
+       return 0;
+}
+
+static void hci_uart_write_work(struct work_struct *work)
+{
+       struct hci_uart *hu = container_of(work, struct hci_uart, write_work);
+       struct tty_struct *tty = hu->tty;
+       struct hci_dev *hdev = hu->hdev;
+       struct sk_buff *skb;
+
+       /* REVISIT: should we cope with bad skbs or ->write() returning
+        * an error value ?
+        */
+
 restart:
        clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
 
@@ -153,7 +165,6 @@ restart:
                goto restart;
 
        clear_bit(HCI_UART_SENDING, &hu->tx_state);
-       return 0;
 }
 
 static void hci_uart_init_work(struct work_struct *work)
@@ -289,6 +300,7 @@ static int hci_uart_tty_open(struct tty_struct *tty)
        tty->receive_room = 65536;
 
        INIT_WORK(&hu->init_ready, hci_uart_init_work);
+       INIT_WORK(&hu->write_work, hci_uart_write_work);
 
        spin_lock_init(&hu->rx_lock);
 
@@ -326,6 +338,8 @@ static void hci_uart_tty_close(struct tty_struct *tty)
        if (hdev)
                hci_uart_close(hdev);
 
+       cancel_work_sync(&hu->write_work);
+
        if (test_and_clear_bit(HCI_UART_PROTO_SET, &hu->flags)) {
                if (hdev) {
                        if (test_bit(HCI_UART_REGISTERED, &hu->flags))
index fffa61ff5cb14983baeba17a4d6234a58de1e806..12df101ca942d1889280660e43ef37cec55c9340 100644 (file)
@@ -68,6 +68,7 @@ struct hci_uart {
        unsigned long           hdev_flags;
 
        struct work_struct      init_ready;
+       struct work_struct      write_work;
 
        struct hci_uart_proto   *proto;
        void                    *priv;
index 974321a2508d861655d1883480a902476ccc7a58..14790304b84b23fc2f8dfe4b23eeb9f659755d92 100644 (file)
@@ -345,7 +345,6 @@ out:
                        free_irq(apbs[i].irq, &dummy);
                iounmap(apbs[i].RamIO);
        }
-       pci_disable_device(dev);
        return ret;
 }
 
index 18c5b9b16645dfa49a218d4b12253240f97310b7..3165811e2407df9bdc8242640dab50558c37db73 100644 (file)
@@ -369,7 +369,7 @@ static void cn_proc_mcast_ctl(struct cn_msg *msg,
                return;
 
        /* Can only change if privileged. */
-       if (!capable(CAP_NET_ADMIN)) {
+       if (!__netlink_ns_capable(nsp, &init_user_ns, CAP_NET_ADMIN)) {
                err = EPERM;
                goto out;
        }
index 67d6738d85a00d514d3d4c6692e969331663abde..09f4a9374cf53d11b0066ef2eefc923a6883df13 100644 (file)
@@ -712,7 +712,7 @@ static int max8997_muic_probe(struct platform_device *pdev)
                goto err_irq;
        }
 
-       if (pdata->muic_pdata) {
+       if (pdata && pdata->muic_pdata) {
                struct max8997_muic_platform_data *muic_pdata
                        = pdata->muic_pdata;
 
index c4e5cdfa5d247efc0f80dfce1e1ce89c9c8da45b..81d0e6e1f754101479d95dc5c756498fe3bbc720 100644 (file)
@@ -796,7 +796,17 @@ struct hid_report *hid_validate_values(struct hid_device *hid,
         * ->numbered being checked, which may not always be the case when
         * drivers go to access report values.
         */
-       report = hid->report_enum[type].report_id_hash[id];
+       if (id == 0) {
+               /*
+                * Validating on id 0 means we should examine the first
+                * report in the list.
+                */
+               report = list_entry(
+                               hid->report_enum[type].report_list.next,
+                               struct hid_report, list);
+       } else {
+               report = hid->report_enum[type].report_id_hash[id];
+       }
        if (!report) {
                hid_err(hid, "missing %s %u\n", hid_report_names[type], id);
                return NULL;
index deb5c25305aff4622ceb8f84a1598902e484a6ce..694173f662d1bec6be3eecf492907bdf216575e3 100644 (file)
@@ -19,6 +19,7 @@
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/kernel.h>
+#include <linux/jiffies.h>
 #include <linux/mman.h>
 #include <linux/delay.h>
 #include <linux/init.h>
@@ -459,6 +460,11 @@ static bool do_hot_add;
  */
 static uint pressure_report_delay = 45;
 
+/*
+ * The last time we posted a pressure report to host.
+ */
+static unsigned long last_post_time;
+
 module_param(hot_add, bool, (S_IRUGO | S_IWUSR));
 MODULE_PARM_DESC(hot_add, "If set attempt memory hot_add");
 
@@ -542,6 +548,7 @@ struct hv_dynmem_device {
 
 static struct hv_dynmem_device dm_device;
 
+static void post_status(struct hv_dynmem_device *dm);
 #ifdef CONFIG_MEMORY_HOTPLUG
 
 static void hv_bring_pgs_online(unsigned long start_pfn, unsigned long size)
@@ -612,7 +619,7 @@ static void hv_mem_hot_add(unsigned long start, unsigned long size,
                 * have not been "onlined" within the allowed time.
                 */
                wait_for_completion_timeout(&dm_device.ol_waitevent, 5*HZ);
-
+               post_status(&dm_device);
        }
 
        return;
@@ -951,11 +958,17 @@ static void post_status(struct hv_dynmem_device *dm)
 {
        struct dm_status status;
        struct sysinfo val;
+       unsigned long now = jiffies;
+       unsigned long last_post = last_post_time;
 
        if (pressure_report_delay > 0) {
                --pressure_report_delay;
                return;
        }
+
+       if (!time_after(now, (last_post_time + HZ)))
+               return;
+
        si_meminfo(&val);
        memset(&status, 0, sizeof(struct dm_status));
        status.hdr.type = DM_STATUS_REPORT;
@@ -983,6 +996,14 @@ static void post_status(struct hv_dynmem_device *dm)
        if (status.hdr.trans_id != atomic_read(&trans_id))
                return;
 
+       /*
+        * If the last post time that we sampled has changed,
+        * we have raced, don't post the status.
+        */
+       if (last_post != last_post_time)
+               return;
+
+       last_post_time = jiffies;
        vmbus_sendpacket(dm->dev->channel, &status,
                                sizeof(struct dm_status),
                                (unsigned long)NULL,
@@ -1117,7 +1138,7 @@ static void balloon_up(struct work_struct *dummy)
 
                        if (ret == -EAGAIN)
                                msleep(20);
-
+                       post_status(&dm_device);
                } while (ret == -EAGAIN);
 
                if (ret) {
@@ -1144,8 +1165,10 @@ static void balloon_down(struct hv_dynmem_device *dm,
        struct dm_unballoon_response resp;
        int i;
 
-       for (i = 0; i < range_count; i++)
+       for (i = 0; i < range_count; i++) {
                free_balloon_pages(dm, &range_array[i]);
+               post_status(&dm_device);
+       }
 
        if (req->more_pages == 1)
                return;
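
The post_status() changes above add a simple jiffies-based rate limit plus a
re-check of the shared timestamp to avoid duplicate reports from racing
callers. A minimal sketch of that idiom (function and variable names are
illustrative):

	#include <linux/jiffies.h>

	static unsigned long last_post_time;

	static void maybe_post(void)
	{
		unsigned long now = jiffies;
		unsigned long last_post = last_post_time;

		if (!time_after(now, last_post + HZ))
			return;		/* posted less than a second ago */

		/* ... gather the data to report ... */

		if (last_post != last_post_time)
			return;		/* someone else posted meanwhile */

		last_post_time = jiffies;
		/* ... send the report ... */
	}
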
index e5b88d5d3b59425cf07e79ba20937322fa8c50fc..14fdaf0f9d23c6efc88e4fb6d0e387717aa2e92f 100644 (file)
@@ -161,12 +161,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
        return idev->num_channels;
 }
 
-static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
+static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
                                             struct at91_adc_trigger *triggers,
                                             const char *trigger_name)
 {
        struct at91_adc_state *st = iio_priv(idev);
-       u8 value = 0;
        int i;
 
        for (i = 0; i < st->trigger_number; i++) {
@@ -179,15 +178,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
                        return -ENOMEM;
 
                if (strcmp(trigger_name, name) == 0) {
-                       value = triggers[i].value;
                        kfree(name);
-                       break;
+                       if (triggers[i].value == 0)
+                               return -EINVAL;
+                       return triggers[i].value;
                }
 
                kfree(name);
        }
 
-       return value;
+       return -EINVAL;
 }
 
 static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
@@ -197,14 +197,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
        struct iio_buffer *buffer = idev->buffer;
        struct at91_adc_reg_desc *reg = st->registers;
        u32 status = at91_adc_readl(st, reg->trigger_register);
-       u8 value;
+       int value;
        u8 bit;
 
        value = at91_adc_get_trigger_value_by_name(idev,
                                                   st->trigger_list,
                                                   idev->trig->name);
-       if (value == 0)
-               return -EINVAL;
+       if (value < 0)
+               return value;
 
        if (state) {
                st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
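
The return-type change above matters because a negative errno pushed through
a u8 is silently truncated to a small positive number, so the old
"value == 0" test could neither see -ENOMEM nor tell a real error apart from
a trigger value of 0. A standalone demonstration of the truncation:

	#include <stdio.h>

	int main(void)
	{
		int err = -22;			/* -EINVAL */
		unsigned char truncated = err;	/* what a u8 return would carry */

		printf("%d becomes %u\n", err, truncated);	/* -22 becomes 234 */
		return 0;
	}
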
index 9e6da72ad82324d1a7b2d11294d9da76616408ba..b2b5dcbf71227be9ab796ae850797de2ca3dde0e 100644 (file)
@@ -1214,8 +1214,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
                .num_modes = ARRAY_SIZE(max1238_mode_list),
                .default_mode = s0to11,
                .info = &max1238_info,
-               .channels = max1238_channels,
-               .num_channels = ARRAY_SIZE(max1238_channels),
+               .channels = max1038_channels,
+               .num_channels = ARRAY_SIZE(max1038_channels),
        },
        [max11605] = {
                .bits = 8,
@@ -1224,8 +1224,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
                .num_modes = ARRAY_SIZE(max1238_mode_list),
                .default_mode = s0to11,
                .info = &max1238_info,
-               .channels = max1238_channels,
-               .num_channels = ARRAY_SIZE(max1238_channels),
+               .channels = max1038_channels,
+               .num_channels = ARRAY_SIZE(max1038_channels),
        },
        [max11606] = {
                .bits = 10,
@@ -1274,8 +1274,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
                .num_modes = ARRAY_SIZE(max1238_mode_list),
                .default_mode = s0to11,
                .info = &max1238_info,
-               .channels = max1238_channels,
-               .num_channels = ARRAY_SIZE(max1238_channels),
+               .channels = max1138_channels,
+               .num_channels = ARRAY_SIZE(max1138_channels),
        },
        [max11611] = {
                .bits = 10,
@@ -1284,8 +1284,8 @@ static const struct max1363_chip_info max1363_chip_info_tbl[] = {
                .num_modes = ARRAY_SIZE(max1238_mode_list),
                .default_mode = s0to11,
                .info = &max1238_info,
-               .channels = max1238_channels,
-               .num_channels = ARRAY_SIZE(max1238_channels),
+               .channels = max1138_channels,
+               .num_channels = ARRAY_SIZE(max1138_channels),
        },
        [max11612] = {
                .bits = 12,
index af6c320a534ee8a0a7a5e03c5ed628928610603b..53f829004a033278ec0101f814df9f0eaf542deb 100644 (file)
@@ -276,8 +276,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
 {
        struct ak8975_data *data = iio_priv(indio_dev);
        struct i2c_client *client = data->client;
-       u16 meas_reg;
-       s16 raw;
        int ret;
 
        mutex_lock(&data->lock);
@@ -322,16 +320,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
                dev_err(&client->dev, "Read axis data fails\n");
                goto exit;
        }
-       meas_reg = ret;
 
        mutex_unlock(&data->lock);
 
-       /* Endian conversion of the measured values. */
-       raw = (s16) (le16_to_cpu(meas_reg));
-
        /* Clamp to valid range. */
-       raw = clamp_t(s16, raw, -4096, 4095);
-       *val = raw;
+       *val = clamp_t(s16, ret, -4096, 4095);
        return IIO_VAL_INT;
 
 exit:
index 14499991802284bd2bf1283c63f5f6c2eec15cb9..0e93152384f022ae59788e042d938a09fd5d178e 100644 (file)
@@ -572,14 +572,12 @@ isert_disconnect_work(struct work_struct *work)
                isert_put_conn(isert_conn);
                return;
        }
-       if (!isert_conn->logout_posted) {
-               pr_debug("Calling rdma_disconnect for !logout_posted from"
-                        " isert_disconnect_work\n");
+
+       if (isert_conn->disconnect) {
+               /* Send DREQ/DREP towards our initiator */
                rdma_disconnect(isert_conn->conn_cm_id);
-               mutex_unlock(&isert_conn->conn_mutex);
-               iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
-               goto wake_up;
        }
+
        mutex_unlock(&isert_conn->conn_mutex);
 
 wake_up:
@@ -588,10 +586,11 @@ wake_up:
 }
 
 static void
-isert_disconnected_handler(struct rdma_cm_id *cma_id)
+isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
 {
        struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context;
 
+       isert_conn->disconnect = disconnect;
        INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
        schedule_work(&isert_conn->conn_logout_work);
 }
@@ -600,29 +599,28 @@ static int
 isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
 {
        int ret = 0;
+       bool disconnect = false;
 
        pr_debug("isert_cma_handler: event %d status %d conn %p id %p\n",
                 event->event, event->status, cma_id->context, cma_id);
 
        switch (event->event) {
        case RDMA_CM_EVENT_CONNECT_REQUEST:
-               pr_debug("RDMA_CM_EVENT_CONNECT_REQUEST: >>>>>>>>>>>>>>>\n");
                ret = isert_connect_request(cma_id, event);
                break;
        case RDMA_CM_EVENT_ESTABLISHED:
-               pr_debug("RDMA_CM_EVENT_ESTABLISHED >>>>>>>>>>>>>>\n");
                isert_connected_handler(cma_id);
                break;
-       case RDMA_CM_EVENT_DISCONNECTED:
-               pr_debug("RDMA_CM_EVENT_DISCONNECTED: >>>>>>>>>>>>>>\n");
-               isert_disconnected_handler(cma_id);
-               break;
-       case RDMA_CM_EVENT_DEVICE_REMOVAL:
-       case RDMA_CM_EVENT_ADDR_CHANGE:
+       case RDMA_CM_EVENT_ADDR_CHANGE:    /* FALLTHRU */
+       case RDMA_CM_EVENT_DISCONNECTED:   /* FALLTHRU */
+       case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
+               disconnect = true;
+       case RDMA_CM_EVENT_TIMEWAIT_EXIT:  /* FALLTHRU */
+               isert_disconnected_handler(cma_id, disconnect);
                break;
        case RDMA_CM_EVENT_CONNECT_ERROR:
        default:
-               pr_err("Unknown RDMA CMA event: %d\n", event->event);
+               pr_err("Unhandled RDMA CMA event: %d\n", event->event);
                break;
        }
 
@@ -1371,11 +1369,8 @@ isert_do_control_comp(struct work_struct *work)
                break;
        case ISTATE_SEND_LOGOUTRSP:
                pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
-               /*
-                * Call atomic_dec(&isert_conn->post_send_buf_count)
-                * from isert_wait_conn()
-                */
-               isert_conn->logout_posted = true;
+
+               atomic_dec(&isert_conn->post_send_buf_count);
                iscsit_logout_post_handler(cmd, cmd->conn);
                break;
        default:
@@ -1483,6 +1478,8 @@ isert_cq_rx_comp_err(struct isert_conn *isert_conn)
        isert_conn->state = ISER_CONN_DOWN;
        mutex_unlock(&isert_conn->conn_mutex);
 
+       iscsit_cause_connection_reinstatement(isert_conn->conn, 0);
+
        complete(&isert_conn->conn_wait_comp_err);
 }
 
@@ -2190,9 +2187,14 @@ accept_wait:
                return -ENODEV;
 
        spin_lock_bh(&np->np_thread_lock);
-       if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
+       if (np->np_thread_state >= ISCSI_NP_THREAD_RESET) {
                spin_unlock_bh(&np->np_thread_lock);
-               pr_debug("ISCSI_NP_THREAD_RESET for isert_accept_np\n");
+               pr_debug("np_thread_state %d for isert_accept_np\n",
+                        np->np_thread_state);
+               /**
+                * No point in stalling here when np_thread
+                * is in state RESET/SHUTDOWN/EXIT - bail
+                **/
                return -ENODEV;
        }
        spin_unlock_bh(&np->np_thread_lock);
@@ -2242,15 +2244,9 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        struct isert_conn *isert_conn = conn->context;
 
        pr_debug("isert_wait_conn: Starting \n");
-       /*
-        * Decrement post_send_buf_count for special case when called
-        * from isert_do_control_comp() -> iscsit_logout_post_handler()
-        */
-       mutex_lock(&isert_conn->conn_mutex);
-       if (isert_conn->logout_posted)
-               atomic_dec(&isert_conn->post_send_buf_count);
 
-       if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
+       mutex_lock(&isert_conn->conn_mutex);
+       if (isert_conn->conn_cm_id) {
                pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
                rdma_disconnect(isert_conn->conn_cm_id);
        }
@@ -2336,6 +2332,7 @@ destroy_rx_wq:
 
 static void __exit isert_exit(void)
 {
+       flush_scheduled_work();
        kmem_cache_destroy(isert_cmd_cache);
        destroy_workqueue(isert_comp_wq);
        destroy_workqueue(isert_rx_wq);
index dfe4a2ebef0db954788d764f9f5d67544d864b13..032f65abee3694ec56122dd0bd9091eb5e84caa6 100644 (file)
@@ -78,7 +78,6 @@ struct isert_device;
 
 struct isert_conn {
        enum iser_conn_state    state;
-       bool                    logout_posted;
        int                     post_recv_buf_count;
        atomic_t                post_send_buf_count;
        u32                     responder_resources;
@@ -106,6 +105,7 @@ struct isert_conn {
        struct completion       conn_wait;
        struct completion       conn_wait_comp_err;
        struct kref             conn_kref;
+       bool                    disconnect;
 };
 
 #define ISERT_MAX_CQ 64
index e1863dbf4edcaca3376321b116d58b2606e2252c..7a9b98bc208bc252ae9d1dc334fabb48bf39c5f5 100644 (file)
@@ -159,6 +159,12 @@ static int snd_ivtv_pcm_capture_open(struct snd_pcm_substream *substream)
 
        /* Instruct the CX2341[56] to start sending packets */
        snd_ivtv_lock(itvsc);
+
+       if (ivtv_init_on_first_open(itv)) {
+               snd_ivtv_unlock(itvsc);
+               return -ENXIO;
+       }
+
        s = &itv->streams[IVTV_ENC_STREAM_TYPE_PCM];
 
        v4l2_fh_init(&item.fh, s->vdev);
index 34a26e0cfe77565f81924b325fb06a9d78750009..03504dcf3c5240cb8d3ee43bff40eac311c1b4cc 100644 (file)
@@ -67,17 +67,25 @@ int stk1160_read_reg(struct stk1160 *dev, u16 reg, u8 *value)
 {
        int ret;
        int pipe = usb_rcvctrlpipe(dev->udev, 0);
+       u8 *buf;
 
        *value = 0;
+
+       buf = kmalloc(sizeof(u8), GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
        ret = usb_control_msg(dev->udev, pipe, 0x00,
                        USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-                       0x00, reg, value, sizeof(u8), HZ);
+                       0x00, reg, buf, sizeof(u8), HZ);
        if (ret < 0) {
                stk1160_err("read failed on reg 0x%x (%d)\n",
                        reg, ret);
+               kfree(buf);
                return ret;
        }
 
+       *value = *buf;
+       kfree(buf);
        return 0;
 }
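
The stk1160 fix above follows the usual rule that buffers handed to
usb_control_msg() may be DMA-mapped and therefore must be separately
heap-allocated: not on the stack and not embedded in a larger, long-lived
structure (the urb_buf[] field removed in the next hunk). A generic sketch
of the bounce-buffer pattern (hypothetical helper, not driver code):

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <linux/usb.h>

	static int usb_read_u8(struct usb_device *udev, u16 reg, u8 *value)
	{
		u8 *buf = kmalloc(1, GFP_KERNEL);
		int ret;

		if (!buf)
			return -ENOMEM;

		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x00,
				      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				      0x00, reg, buf, 1, 1000 /* ms */);
		if (ret >= 0)
			*value = *buf;

		kfree(buf);
		return ret < 0 ? ret : 0;
	}
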
 
index 05b05b160e1e9abdd886bf49702bc35c8b3a52dc..abdea484c9987ad25dcaff6e4582881522e5f68d 100644 (file)
@@ -143,7 +143,6 @@ struct stk1160 {
        int num_alt;
 
        struct stk1160_isoc_ctl isoc_ctl;
-       char urb_buf[255];       /* urb control msg buffer */
 
        /* frame properties */
        int width;                /* current frame width */
index e1c5bf3ea11288eb62bd6e533a2daea9f94b9279..c081812ac5c06dc826cbf638750913e35971c362 100644 (file)
@@ -361,6 +361,14 @@ static int uvc_commit_video(struct uvc_streaming *stream,
  * Clocks and timestamps
  */
 
+static inline void uvc_video_get_ts(struct timespec *ts)
+{
+       if (uvc_clock_param == CLOCK_MONOTONIC)
+               ktime_get_ts(ts);
+       else
+               ktime_get_real_ts(ts);
+}
+
 static void
 uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
                       const __u8 *data, int len)
@@ -420,7 +428,7 @@ uvc_video_clock_decode(struct uvc_streaming *stream, struct uvc_buffer *buf,
        stream->clock.last_sof = dev_sof;
 
        host_sof = usb_get_current_frame_number(stream->dev->udev);
-       ktime_get_ts(&ts);
+       uvc_video_get_ts(&ts);
 
        /* The UVC specification allows device implementations that can't obtain
         * the USB frame number to keep their own frame counters as long as they
@@ -1010,10 +1018,7 @@ static int uvc_video_decode_start(struct uvc_streaming *stream,
                        return -ENODATA;
                }
 
-               if (uvc_clock_param == CLOCK_MONOTONIC)
-                       ktime_get_ts(&ts);
-               else
-                       ktime_get_real_ts(&ts);
+               uvc_video_get_ts(&ts);
 
                buf->buf.v4l2_buf.sequence = stream->sequence;
                buf->buf.v4l2_buf.timestamp.tv_sec = ts.tv_sec;
index 6b6f0ad75090c4ea463ae49b8e2041dbdaca55df..7042f5faddd7b9da464b8051811653b07f5f6d6c 100644 (file)
@@ -551,7 +551,7 @@ static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
        struct sja1000_priv *priv;
        struct peak_pci_chan *chan;
-       struct net_device *dev;
+       struct net_device *dev, *prev_dev;
        void __iomem *cfg_base, *reg_base;
        u16 sub_sys_id, icr;
        int i, err, channels;
@@ -687,11 +687,13 @@ failure_remove_channels:
        writew(0x0, cfg_base + PITA_ICR + 2);
 
        chan = NULL;
-       for (dev = pci_get_drvdata(pdev); dev; dev = chan->prev_dev) {
-               unregister_sja1000dev(dev);
-               free_sja1000dev(dev);
+       for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) {
                priv = netdev_priv(dev);
                chan = priv->priv;
+               prev_dev = chan->prev_dev;
+
+               unregister_sja1000dev(dev);
+               free_sja1000dev(dev);
        }
 
        /* free any PCIeC resources too */
@@ -725,10 +727,12 @@ static void peak_pci_remove(struct pci_dev *pdev)
 
        /* Loop over all registered devices */
        while (1) {
+               struct net_device *prev_dev = chan->prev_dev;
+
                dev_info(&pdev->dev, "removing device %s\n", dev->name);
                unregister_sja1000dev(dev);
                free_sja1000dev(dev);
-               dev = chan->prev_dev;
+               dev = prev_dev;
 
                if (!dev) {
                        /* do that only for first channel */
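
Both peak_pci hunks above fix the same use-after-free shape: the loop link
(chan->prev_dev) lives inside memory that unregister/free tears down, so it
has to be copied out before the device is released. The bug in miniature
(illustrative types, plain C):

	#include <stdlib.h>

	struct node {
		struct node *prev;
		/* ... per-channel state ... */
	};

	static void teardown_all(struct node *dev)
	{
		while (dev) {
			struct node *prev = dev->prev;	/* read the link first */

			free(dev);			/* then release the node */
			dev = prev;
		}
	}
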
index 1b195fc7f4112c390ce28114132b329a9e2526ee..3fb2643d05b4592b4c39f6efd7e545732495f771 100644 (file)
@@ -2129,13 +2129,8 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
        /* Allow large DMA segments, up to the firmware limit of 1 GB */
        dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);
 
-       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
-       if (!priv) {
-               err = -ENOMEM;
-               goto err_release_regions;
-       }
-
-       dev       = &priv->dev;
+       dev       = pci_get_drvdata(pdev);
+       priv      = mlx4_priv(dev);
        dev->pdev = pdev;
        INIT_LIST_HEAD(&priv->ctx_list);
        spin_lock_init(&priv->ctx_lock);
@@ -2300,8 +2295,7 @@ slave_start:
        mlx4_sense_init(dev);
        mlx4_start_sense(dev);
 
-       priv->pci_dev_data = pci_dev_data;
-       pci_set_drvdata(pdev, dev);
+       priv->removed = 0;
 
        return 0;
 
@@ -2367,84 +2361,110 @@ err_disable_pdev:
 
 static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 {
+       struct mlx4_priv *priv;
+       struct mlx4_dev *dev;
+
        printk_once(KERN_INFO "%s", mlx4_version);
 
+       priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       dev       = &priv->dev;
+       pci_set_drvdata(pdev, dev);
+       priv->pci_dev_data = id->driver_data;
+
        return __mlx4_init_one(pdev, id->driver_data);
 }
 
-static void mlx4_remove_one(struct pci_dev *pdev)
+static void __mlx4_remove_one(struct pci_dev *pdev)
 {
        struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
        struct mlx4_priv *priv = mlx4_priv(dev);
+       int               pci_dev_data;
        int p;
 
-       if (dev) {
-               /* in SRIOV it is not allowed to unload the pf's
-                * driver while there are alive vf's */
-               if (mlx4_is_master(dev)) {
-                       if (mlx4_how_many_lives_vf(dev))
-                               printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
-               }
-               mlx4_stop_sense(dev);
-               mlx4_unregister_device(dev);
+       if (priv->removed)
+               return;
 
-               for (p = 1; p <= dev->caps.num_ports; p++) {
-                       mlx4_cleanup_port_info(&priv->port[p]);
-                       mlx4_CLOSE_PORT(dev, p);
-               }
+       pci_dev_data = priv->pci_dev_data;
 
-               if (mlx4_is_master(dev))
-                       mlx4_free_resource_tracker(dev,
-                                                  RES_TR_FREE_SLAVES_ONLY);
-
-               mlx4_cleanup_counters_table(dev);
-               mlx4_cleanup_mcg_table(dev);
-               mlx4_cleanup_qp_table(dev);
-               mlx4_cleanup_srq_table(dev);
-               mlx4_cleanup_cq_table(dev);
-               mlx4_cmd_use_polling(dev);
-               mlx4_cleanup_eq_table(dev);
-               mlx4_cleanup_mr_table(dev);
-               mlx4_cleanup_xrcd_table(dev);
-               mlx4_cleanup_pd_table(dev);
+       /* in SRIOV it is not allowed to unload the pf's
+        * driver while there are alive vf's */
+       if (mlx4_is_master(dev)) {
+               if (mlx4_how_many_lives_vf(dev))
+                       printk(KERN_ERR "Removing PF when there are assigned VF's !!!\n");
+       }
+       mlx4_stop_sense(dev);
+       mlx4_unregister_device(dev);
 
-               if (mlx4_is_master(dev))
-                       mlx4_free_resource_tracker(dev,
-                                                  RES_TR_FREE_STRUCTS_ONLY);
-
-               iounmap(priv->kar);
-               mlx4_uar_free(dev, &priv->driver_uar);
-               mlx4_cleanup_uar_table(dev);
-               if (!mlx4_is_slave(dev))
-                       mlx4_clear_steering(dev);
-               mlx4_free_eq_table(dev);
-               if (mlx4_is_master(dev))
-                       mlx4_multi_func_cleanup(dev);
-               mlx4_close_hca(dev);
-               if (mlx4_is_slave(dev))
-                       mlx4_multi_func_cleanup(dev);
-               mlx4_cmd_cleanup(dev);
-
-               if (dev->flags & MLX4_FLAG_MSI_X)
-                       pci_disable_msix(pdev);
-               if (dev->flags & MLX4_FLAG_SRIOV) {
-                       mlx4_warn(dev, "Disabling SR-IOV\n");
-                       pci_disable_sriov(pdev);
-               }
+       for (p = 1; p <= dev->caps.num_ports; p++) {
+               mlx4_cleanup_port_info(&priv->port[p]);
+               mlx4_CLOSE_PORT(dev, p);
+       }
+
+       if (mlx4_is_master(dev))
+               mlx4_free_resource_tracker(dev,
+                                          RES_TR_FREE_SLAVES_ONLY);
+
+       mlx4_cleanup_counters_table(dev);
+       mlx4_cleanup_qp_table(dev);
+       mlx4_cleanup_srq_table(dev);
+       mlx4_cleanup_cq_table(dev);
+       mlx4_cmd_use_polling(dev);
+       mlx4_cleanup_eq_table(dev);
+       mlx4_cleanup_mcg_table(dev);
+       mlx4_cleanup_mr_table(dev);
+       mlx4_cleanup_xrcd_table(dev);
+       mlx4_cleanup_pd_table(dev);
 
-               if (!mlx4_is_slave(dev))
-                       mlx4_free_ownership(dev);
+       if (mlx4_is_master(dev))
+               mlx4_free_resource_tracker(dev,
+                                          RES_TR_FREE_STRUCTS_ONLY);
 
-               kfree(dev->caps.qp0_tunnel);
-               kfree(dev->caps.qp0_proxy);
-               kfree(dev->caps.qp1_tunnel);
-               kfree(dev->caps.qp1_proxy);
+       iounmap(priv->kar);
+       mlx4_uar_free(dev, &priv->driver_uar);
+       mlx4_cleanup_uar_table(dev);
+       if (!mlx4_is_slave(dev))
+               mlx4_clear_steering(dev);
+       mlx4_free_eq_table(dev);
+       if (mlx4_is_master(dev))
+               mlx4_multi_func_cleanup(dev);
+       mlx4_close_hca(dev);
+       if (mlx4_is_slave(dev))
+               mlx4_multi_func_cleanup(dev);
+       mlx4_cmd_cleanup(dev);
 
-               kfree(priv);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               pci_set_drvdata(pdev, NULL);
+       if (dev->flags & MLX4_FLAG_MSI_X)
+               pci_disable_msix(pdev);
+       if (dev->flags & MLX4_FLAG_SRIOV) {
+               mlx4_warn(dev, "Disabling SR-IOV\n");
+               pci_disable_sriov(pdev);
        }
+
+       if (!mlx4_is_slave(dev))
+               mlx4_free_ownership(dev);
+
+       kfree(dev->caps.qp0_tunnel);
+       kfree(dev->caps.qp0_proxy);
+       kfree(dev->caps.qp1_tunnel);
+       kfree(dev->caps.qp1_proxy);
+
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       memset(priv, 0, sizeof(*priv));
+       priv->pci_dev_data = pci_dev_data;
+       priv->removed = 1;
+}
+
+static void mlx4_remove_one(struct pci_dev *pdev)
+{
+       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       __mlx4_remove_one(pdev);
+       kfree(priv);
+       pci_set_drvdata(pdev, NULL);
 }
 
 int mlx4_restart_one(struct pci_dev *pdev)
@@ -2454,7 +2474,7 @@ int mlx4_restart_one(struct pci_dev *pdev)
        int               pci_dev_data;
 
        pci_dev_data = priv->pci_dev_data;
-       mlx4_remove_one(pdev);
+       __mlx4_remove_one(pdev);
        return __mlx4_init_one(pdev, pci_dev_data);
 }
 
@@ -2509,7 +2529,7 @@ MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
 static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
 {
-       mlx4_remove_one(pdev);
+       __mlx4_remove_one(pdev);
 
        return state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
@@ -2517,7 +2537,11 @@ static pci_ers_result_t mlx4_pci_err_detected(struct pci_dev *pdev,
 
 static pci_ers_result_t mlx4_pci_slot_reset(struct pci_dev *pdev)
 {
-       int ret = __mlx4_init_one(pdev, 0);
+       struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
+       struct mlx4_priv *priv = mlx4_priv(dev);
+       int               ret;
+
+       ret = __mlx4_init_one(pdev, priv->pci_dev_data);
 
        return ret ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
 }
index df15bb6631cc7d6f68b70191e891321f505fe00b..da4f0002fd27069ae8ccb2ba7178bb06b24570cd 100644 (file)
@@ -743,6 +743,7 @@ struct mlx4_priv {
        spinlock_t              ctx_lock;
 
        int                     pci_dev_data;
+       int                     removed;
 
        struct list_head        pgdir_list;
        struct mutex            pgdir_mutex;
index d1a769f35f9d284f852e1001c87434ae9e96180a..b1ab3a4956a5b83e70097242afd5e3d7d7fe4173 100644 (file)
@@ -1547,6 +1547,10 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
                mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
                phyid = be32_to_cpup(parp+1);
                mdio = of_find_device_by_node(mdio_node);
+               if (!mdio) {
+                       pr_err("Missing mdio platform device\n");
+                       return -EINVAL;
+               }
                snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
                         PHY_ID_FMT, mdio->name, phyid);
 
index c12aeaee22fad71bc11058e25f277571ffee4b2a..155ef4bbde91c71cf38a97ee50b893960a61958c 100644 (file)
@@ -961,7 +961,6 @@ static int macvlan_device_event(struct notifier_block *unused,
                list_for_each_entry_safe(vlan, next, &port->vlans, list)
                        vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill);
                unregister_netdevice_many(&list_kill);
-               list_del(&list_kill);
                break;
        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid underlaying device to change its type. */
index fe3fd77821bfe580a1d8c1dc4e1460f3ec4ef9dd..12222290c802422509d6a0de7a84887dd6f44b9f 100644 (file)
@@ -1542,6 +1542,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
         * to traverse list in reverse under rcu_read_lock
         */
        mutex_lock(&team->lock);
+       team->port_mtu_change_allowed = true;
        list_for_each_entry(port, &team->port_list, list) {
                err = dev_set_mtu(port->dev, new_mtu);
                if (err) {
@@ -1550,6 +1551,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
                        goto unwind;
                }
        }
+       team->port_mtu_change_allowed = false;
        mutex_unlock(&team->lock);
 
        dev->mtu = new_mtu;
@@ -1559,6 +1561,7 @@ static int team_change_mtu(struct net_device *dev, int new_mtu)
 unwind:
        list_for_each_entry_continue_reverse(port, &team->port_list, list)
                dev_set_mtu(port->dev, dev->mtu);
+       team->port_mtu_change_allowed = false;
        mutex_unlock(&team->lock);
 
        return err;
@@ -2678,7 +2681,9 @@ static int team_device_event(struct notifier_block *unused,
                break;
        case NETDEV_CHANGEMTU:
                /* Forbid to change mtu of underlaying device */
-               return NOTIFY_BAD;
+               if (!port->team->port_mtu_change_allowed)
+                       return NOTIFY_BAD;
+               break;
        case NETDEV_PRE_TYPE_CHANGE:
                /* Forbid to change type of underlaying device */
                return NOTIFY_BAD;
index 7be4860ccfd7bc0086acd76ec43808de2d03aab3..6fb0082b33080985a613d31c27dc0a1e55fded5b 100644 (file)
@@ -739,7 +739,12 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x2357, 0x9000, 4)},    /* TP-LINK MA260 */
        {QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},    /* Telit LE920 */
        {QMI_FIXED_INTF(0x1bc7, 0x1201, 2)},    /* Telit LE920 */
-       {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc000, 4)},    /* Olivetti Olicard 100 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc001, 4)},    /* Olivetti Olicard 120 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc002, 4)},    /* Olivetti Olicard 140 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc004, 6)},    /* Olivetti Olicard 155 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc005, 6)},    /* Olivetti Olicard 200 */
+       {QMI_FIXED_INTF(0x0b3c, 0xc00a, 6)},    /* Olivetti Olicard 160 */
        {QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)},    /* Olivetti Olicard 500 */
        {QMI_FIXED_INTF(0x1e2d, 0x0060, 4)},    /* Cinterion PLxx */
        {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)},    /* Cinterion PHxx,PXxx */
index 9673edfff4512d5220a1971855aaaf8cdf9c7600..fcbd4eee52cc7e1403e098dfffa5ea6a9cc331d1 100644 (file)
@@ -1314,7 +1314,7 @@ static void vxlan_setup(struct net_device *dev)
 
        eth_hw_addr_random(dev);
        ether_setup(dev);
-       dev->hard_header_len = ETH_HLEN + VXLAN_HEADROOM;
+       dev->needed_headroom = ETH_HLEN + VXLAN_HEADROOM;
 
        dev->netdev_ops = &vxlan_netdev_ops;
        dev->destructor = vxlan_free;
@@ -1454,7 +1454,7 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
                        dev->mtu = lowerdev->mtu - VXLAN_HEADROOM;
 
                /* update header length based on lower device */
-               dev->hard_header_len = lowerdev->hard_header_len +
+               dev->needed_headroom = lowerdev->hard_header_len +
                                       VXLAN_HEADROOM;
        }
 
index 1237c2173c6d593e4d262dbd92e3875607d17ff7..e51cc5fec98a3f255faa2b465882aeee2d9c5dcf 100644 (file)
@@ -49,6 +49,7 @@ struct at91_rtc_config {
 
 static const struct at91_rtc_config *at91_rtc_config;
 static DECLARE_COMPLETION(at91_rtc_updated);
+static DECLARE_COMPLETION(at91_rtc_upd_rdy);
 static unsigned int at91_alarm_year = AT91_RTC_EPOCH;
 static void __iomem *at91_rtc_regs;
 static int irq;
@@ -162,6 +163,8 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
                1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
                tm->tm_hour, tm->tm_min, tm->tm_sec);
 
+       wait_for_completion(&at91_rtc_upd_rdy);
+
        /* Stop Time/Calendar from counting */
        cr = at91_rtc_read(AT91_RTC_CR);
        at91_rtc_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM);
@@ -184,7 +187,9 @@ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm)
 
        /* Restart Time/Calendar */
        cr = at91_rtc_read(AT91_RTC_CR);
+       at91_rtc_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
        at91_rtc_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM));
+       at91_rtc_write_ier(AT91_RTC_SECEV);
 
        return 0;
 }
@@ -291,8 +296,10 @@ static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
        if (rtsr) {             /* this interrupt is shared!  Is it ours? */
                if (rtsr & AT91_RTC_ALARM)
                        events |= (RTC_AF | RTC_IRQF);
-               if (rtsr & AT91_RTC_SECEV)
-                       events |= (RTC_UF | RTC_IRQF);
+               if (rtsr & AT91_RTC_SECEV) {
+                       complete(&at91_rtc_upd_rdy);
+                       at91_rtc_write_idr(AT91_RTC_SECEV);
+               }
                if (rtsr & AT91_RTC_ACKUPD)
                        complete(&at91_rtc_updated);
 
@@ -415,6 +422,11 @@ static int __init at91_rtc_probe(struct platform_device *pdev)
        }
        platform_set_drvdata(pdev, rtc);
 
+       /* enable SECEV interrupt in order to initialize at91_rtc_upd_rdy
+        * completion.
+        */
+       at91_rtc_write_ier(AT91_RTC_SECEV);
+
        dev_info(&pdev->dev, "AT91 Real Time Clock driver.\n");
        return 0;
 
index fe30ea94ffe67ef4e5d355fdc9cdcb71eee9e0d7..109802f776ed71cea6857eda9ae6ccc3e0b41f80 100644 (file)
@@ -77,7 +77,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb)
                        goto next_msg;
                }
 
-               if (!capable(CAP_SYS_ADMIN)) {
+               if (!netlink_capable(skb, CAP_SYS_ADMIN)) {
                        err = -EPERM;
                        goto next_msg;
                }
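
The capable() to netlink_capable() switch above belongs to the netlink permission-check series in this stable release: as far as I can tell, the stricter helper also requires that whoever opened the sending socket held CAP_SYS_ADMIN, so a privileged process tricked into writing on a socket opened by an unprivileged one no longer passes. A plain C illustration of that double check (hypothetical types, not the kernel API):

#include <stdbool.h>
#include <stdio.h>

struct nl_msg {
        bool opener_had_admin;  /* credential recorded when the socket was opened */
};

static int handle_msg(const struct nl_msg *msg, bool sender_is_admin)
{
        /* both the socket opener and the current sender must be privileged */
        if (!msg->opener_had_admin || !sender_is_admin)
                return -1;                      /* -EPERM */
        return 0;
}

int main(void)
{
        struct nl_msg from_unpriv_socket = { .opener_had_admin = false };

        /* even a privileged sender is refused on an unprivileged socket */
        printf("%d\n", handle_msg(&from_unpriv_socket, true));
        return 0;
}
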
index c99f890cc6c65ae9ea13a9aafa8c8258e21e3cb8..64c73adfa3b051eb8e61bef4d59f251c1edf364d 100644 (file)
@@ -672,9 +672,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
        chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
                        chip->tsl2x7x_settings.prox_pulse_count;
        chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
-       chip->tsl2x7x_settings.prox_thres_low;
+                       (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
+       chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
+                       (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
        chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
-                       chip->tsl2x7x_settings.prox_thres_high;
+                       (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
+       chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
+                       (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
 
        /* and make sure we're not already on */
        if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
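
The hunk above fixes the proximity thresholds being programmed as single bytes: each 16-bit threshold is now split across a low/high register pair. A standalone sketch of that byte split (register naming is illustrative only, not the driver's I2C path):

#include <stdint.h>
#include <stdio.h>

static void split_threshold(uint16_t thres, uint8_t *lo, uint8_t *hi)
{
        *lo = thres & 0xFF;             /* bits 7..0  -> MINTHRESHLO-style register */
        *hi = (thres >> 8) & 0xFF;      /* bits 15..8 -> MINTHRESHHI-style register */
}

int main(void)
{
        uint8_t lo, hi;

        split_threshold(0x0234, &lo, &hi);
        printf("lo=0x%02x hi=0x%02x\n", lo, hi);        /* lo=0x34 hi=0x02 */
        return 0;
}
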
index 2f084e181d39de2131360677c65c9deb4ed3143f..a1aca4416ca7fb35361a4f55530597321a76f3b4 100644 (file)
@@ -226,7 +226,7 @@ int dsp_clk_enable(enum dsp_clk_id clk_id)
        case GPT_CLK:
                status = omap_dm_timer_start(timer[clk_id - 1]);
                break;
-#ifdef CONFIG_OMAP_MCBSP
+#ifdef CONFIG_SND_OMAP_SOC_MCBSP
        case MCBSP_CLK:
                omap_mcbsp_request(MCBSP_ID(clk_id));
                omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PAD_SRC);
@@ -302,7 +302,7 @@ int dsp_clk_disable(enum dsp_clk_id clk_id)
        case GPT_CLK:
                status = omap_dm_timer_stop(timer[clk_id - 1]);
                break;
-#ifdef CONFIG_OMAP_MCBSP
+#ifdef CONFIG_SND_OMAP_SOC_MCBSP
        case MCBSP_CLK:
                omap2_mcbsp_set_clks_src(MCBSP_ID(clk_id), MCBSP_CLKS_PRCM_SRC);
                omap_mcbsp_free(MCBSP_ID(clk_id));
index 68dbd88babbd73943048583940ac29c47f6f58fe..72663ba228dc431db865846b1cbd7018c535044b 100644 (file)
@@ -4151,8 +4151,6 @@ int iscsit_close_connection(
        if (conn->conn_transport->iscsit_wait_conn)
                conn->conn_transport->iscsit_wait_conn(conn);
 
-       iscsit_free_queue_reqs_for_conn(conn);
-
        /*
         * During Connection recovery drop unacknowledged out of order
         * commands for this connection, and prepare the other commands
@@ -4169,6 +4167,7 @@ int iscsit_close_connection(
                iscsit_clear_ooo_cmdsns_for_conn(conn);
                iscsit_release_commands_from_conn(conn);
        }
+       iscsit_free_queue_reqs_for_conn(conn);
 
        /*
         * Handle decrementing session or connection usage count if
index 130a1e4f96a1667cd245a452defb21047bec0344..3c9a8dfd1c2e1d96227e3aae2d489d39af3c2985 100644 (file)
@@ -315,6 +315,16 @@ static int chap_server_compute_md5(
                pr_err("Unable to convert incoming challenge\n");
                goto out;
        }
+       /*
+        * During mutual authentication, the CHAP_C generated by the
+        * initiator must not match the original CHAP_C generated by
+        * the target.
+        */
+       if (!memcmp(challenge_binhex, chap->challenge, CHAP_CHALLENGE_LENGTH)) {
+               pr_err("initiator CHAP_C matches target CHAP_C, failing"
+                      " login attempt\n");
+               goto out;
+       }
        /*
         * Generate CHAP_N and CHAP_R for mutual authentication.
         */
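
The memcmp() guard added above closes a reflection attack in mutual CHAP: an initiator that simply echoes the target's own challenge back could otherwise reuse the target's response instead of proving knowledge of the secret. A minimal sketch of the check (assumed challenge length, not the target's CHAP_CHALLENGE_LENGTH):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define CHALLENGE_LEN 16        /* illustrative length only */

static int check_initiator_challenge(const uint8_t *initiator_c,
                                     const uint8_t *target_c)
{
        if (!memcmp(initiator_c, target_c, CHALLENGE_LEN))
                return -1;      /* reflected challenge -> fail the login */
        return 0;
}

int main(void)
{
        uint8_t target_c[CHALLENGE_LEN] = { 0xde, 0xad, 0xbe, 0xef };
        uint8_t reflected[CHALLENGE_LEN];

        memcpy(reflected, target_c, sizeof(reflected));
        printf("%s\n", check_initiator_challenge(reflected, target_c) ?
               "rejected" : "accepted");
        return 0;
}
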
index 0d6c3dd25679b9da390e54706816452c7a053369..e14e105acff8f8bd1d7b5498621ab644a817b2b9 100644 (file)
@@ -597,13 +597,8 @@ static int iscsi_login_non_zero_tsih_s2(
         *
         * In our case, we have already located the struct iscsi_tiqn at this point.
         */
-       memset(buf, 0, 32);
-       sprintf(buf, "TargetPortalGroupTag=%hu", ISCSI_TPG_S(sess)->tpgt);
-       if (iscsi_change_param_value(buf, conn->param_list, 0) < 0) {
-               iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
-                               ISCSI_LOGIN_STATUS_NO_RESOURCES);
+       if (iscsi_change_param_sprintf(conn, "TargetPortalGroupTag=%hu", sess->tpg->tpgt))
                return -1;
-       }
 
        return iscsi_login_disable_FIM_keys(conn->param_list, conn);
 }
index 0921a64b555028997691fb28ad7ab84294b10a0d..5c3b6778c22a3b30ce838a7849844b88c31af16d 100644 (file)
@@ -174,7 +174,7 @@ static int rd_build_device_space(struct rd_dev *rd_dev)
                                                - 1;
 
                for (j = 0; j < sg_per_table; j++) {
-                       pg = alloc_pages(GFP_KERNEL, 0);
+                       pg = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
                        if (!pg) {
                                pr_err("Unable to allocate scatterlist"
                                        " pages for struct rd_dev_sg_table\n");
index bbc5b0ee2bdc5bb1d2c7d45ea370d8834cbfbed2..0ef75fb0ecbae3cafb85180bd86efbe0c0b75c35 100644 (file)
@@ -63,7 +63,7 @@ sbc_emulate_readcapacity(struct se_cmd *cmd)
                transport_kunmap_data_sg(cmd);
        }
 
-       target_complete_cmd(cmd, GOOD);
+       target_complete_cmd_with_length(cmd, GOOD, 8);
        return 0;
 }
 
@@ -101,7 +101,7 @@ sbc_emulate_readcapacity_16(struct se_cmd *cmd)
                transport_kunmap_data_sg(cmd);
        }
 
-       target_complete_cmd(cmd, GOOD);
+       target_complete_cmd_with_length(cmd, GOOD, 32);
        return 0;
 }
 
index 9fabbf7214cd70cc3d4ad3e1bfa74f7ac516cdd0..34254b2ec4668fd0e285f5bb2d5770e4ee274b46 100644 (file)
@@ -628,6 +628,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
        unsigned char buf[SE_INQUIRY_BUF];
        sense_reason_t ret;
        int p;
+       int len = 0;
 
        memset(buf, 0, SE_INQUIRY_BUF);
 
@@ -645,6 +646,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
                }
 
                ret = spc_emulate_inquiry_std(cmd, buf);
+               len = buf[4] + 5;
                goto out;
        }
 
@@ -652,6 +654,7 @@ spc_emulate_inquiry(struct se_cmd *cmd)
                if (cdb[2] == evpd_handlers[p].page) {
                        buf[1] = cdb[2];
                        ret = evpd_handlers[p].emulate(cmd, buf);
+                       len = get_unaligned_be16(&buf[2]) + 4;
                        goto out;
                }
        }
@@ -667,7 +670,7 @@ out:
        }
 
        if (!ret)
-               target_complete_cmd(cmd, GOOD);
+               target_complete_cmd_with_length(cmd, GOOD, len);
        return ret;
 }
 
@@ -985,7 +988,7 @@ set_length:
                transport_kunmap_data_sg(cmd);
        }
 
-       target_complete_cmd(cmd, GOOD);
+       target_complete_cmd_with_length(cmd, GOOD, length);
        return 0;
 }
 
@@ -1162,7 +1165,7 @@ done:
        buf[3] = (lun_count & 0xff);
        transport_kunmap_data_sg(cmd);
 
-       target_complete_cmd(cmd, GOOD);
+       target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
        return 0;
 }
 EXPORT_SYMBOL(spc_emulate_report_luns);
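
The len values computed above follow the SPC response formats: a standard INQUIRY payload is ADDITIONAL LENGTH (byte 4) plus the 5 header bytes, and an EVPD page is its big-endian PAGE LENGTH (bytes 2..3) plus 4 header bytes. A worked example in plain C:

#include <stdint.h>
#include <stdio.h>

static int std_inquiry_len(const uint8_t *buf)
{
        return buf[4] + 5;                      /* additional length + header */
}

static int evpd_page_len(const uint8_t *buf)
{
        return ((buf[2] << 8) | buf[3]) + 4;    /* page length + header */
}

int main(void)
{
        uint8_t std[36] = { [4] = 31 };                 /* typical 36-byte response */
        uint8_t vpd[8]  = { [2] = 0x00, [3] = 0x04 };   /* 4-byte page body */

        printf("std=%d evpd=%d\n", std_inquiry_len(std), evpd_page_len(vpd));
        return 0;
}
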
index 21e315874a5472503dfe1ba010dea1236993b553..6866d86e86630bfa8cd15e6c7269af1370f23073 100644 (file)
@@ -488,7 +488,7 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists)
 
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 
-               complete(&cmd->t_transport_stop_comp);
+               complete_all(&cmd->t_transport_stop_comp);
                return 1;
        }
 
@@ -617,7 +617,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
        if (cmd->transport_state & CMD_T_ABORTED &&
            cmd->transport_state & CMD_T_STOP) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-               complete(&cmd->t_transport_stop_comp);
+               complete_all(&cmd->t_transport_stop_comp);
                return;
        } else if (cmd->transport_state & CMD_T_FAILED) {
                INIT_WORK(&cmd->work, target_complete_failure_work);
@@ -633,6 +633,23 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
 }
 EXPORT_SYMBOL(target_complete_cmd);
 
+void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
+{
+       if (scsi_status == SAM_STAT_GOOD && length < cmd->data_length) {
+               if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
+                       cmd->residual_count += cmd->data_length - length;
+               } else {
+                       cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
+                       cmd->residual_count = cmd->data_length - length;
+               }
+
+               cmd->data_length = length;
+       }
+
+       target_complete_cmd(cmd, scsi_status);
+}
+EXPORT_SYMBOL(target_complete_cmd_with_length);
+
 static void target_add_to_state_list(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
@@ -1688,7 +1705,7 @@ void target_execute_cmd(struct se_cmd *cmd)
                        cmd->se_tfo->get_task_tag(cmd));
 
                spin_unlock_irq(&cmd->t_state_lock);
-               complete(&cmd->t_transport_stop_comp);
+               complete_all(&cmd->t_transport_stop_comp);
                return;
        }
 
@@ -2877,6 +2894,12 @@ static void target_tmr_work(struct work_struct *work)
 int transport_generic_handle_tmr(
        struct se_cmd *cmd)
 {
+       unsigned long flags;
+
+       spin_lock_irqsave(&cmd->t_state_lock, flags);
+       cmd->transport_state |= CMD_T_ACTIVE;
+       spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
        INIT_WORK(&cmd->work, target_tmr_work);
        queue_work(cmd->se_dev->tmr_wq, &cmd->work);
        return 0;
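
target_complete_cmd_with_length(), added above, trims the transfer to what was actually built and accounts for the shortfall as a residual, which is what the _with_length callers in the sbc/spc hunks rely on. A plain C sketch of just that arithmetic (the struct fields mimic, rather than reuse, the se_cmd ones):

#include <stdio.h>

struct cmd {
        int data_length;        /* allocation length from the CDB */
        int residual_count;
        int underflow;
};

static void complete_with_length(struct cmd *c, int payload_len)
{
        if (payload_len < c->data_length) {
                if (c->underflow) {
                        c->residual_count += c->data_length - payload_len;
                } else {
                        c->underflow = 1;
                        c->residual_count = c->data_length - payload_len;
                }
                c->data_length = payload_len;
        }
}

int main(void)
{
        struct cmd c = { .data_length = 256 };

        complete_with_length(&c, 8);    /* e.g. an 8-byte READ CAPACITY(10) payload */
        printf("len=%d residual=%d\n", c.data_length, c.residual_count);
        return 0;
}
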
index 89cce1a32059e639e3284ca1867386f96c36eb58..fbf3f11aed2c6c2914e8c7c0ea9fdf2f2e1465a1 100644 (file)
@@ -122,13 +122,23 @@ static void acm_release_minor(struct acm *acm)
 static int acm_ctrl_msg(struct acm *acm, int request, int value,
                                                        void *buf, int len)
 {
-       int retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
+       int retval;
+
+       retval = usb_autopm_get_interface(acm->control);
+       if (retval)
+               return retval;
+
+       retval = usb_control_msg(acm->dev, usb_sndctrlpipe(acm->dev, 0),
                request, USB_RT_ACM, value,
                acm->control->altsetting[0].desc.bInterfaceNumber,
                buf, len, 5000);
+
        dev_dbg(&acm->control->dev,
                        "%s - rq 0x%02x, val %#x, len %#x, result %d\n",
                        __func__, request, value, len, retval);
+
+       usb_autopm_put_interface(acm->control);
+
        return retval < 0 ? retval : 0;
 }
 
@@ -233,12 +243,9 @@ static int acm_write_start(struct acm *acm, int wbn)
                                                        acm->susp_count);
        usb_autopm_get_interface_async(acm->control);
        if (acm->susp_count) {
-               if (!acm->delayed_wb)
-                       acm->delayed_wb = wb;
-               else
-                       usb_autopm_put_interface_async(acm->control);
+               usb_anchor_urb(wb->urb, &acm->delayed);
                spin_unlock_irqrestore(&acm->write_lock, flags);
-               return 0;       /* A white lie */
+               return 0;
        }
        usb_mark_last_busy(acm->dev);
 
@@ -516,6 +523,7 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
 {
        struct acm *acm = container_of(port, struct acm, port);
        int retval = -ENODEV;
+       int i;
 
        dev_dbg(&acm->control->dev, "%s\n", __func__);
 
@@ -564,6 +572,8 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
        return 0;
 
 error_submit_read_urbs:
+       for (i = 0; i < acm->rx_buflimit; i++)
+               usb_kill_urb(acm->read_urbs[i]);
        acm->ctrlout = 0;
        acm_set_control(acm, acm->ctrlout);
 error_set_control:
@@ -591,21 +601,35 @@ static void acm_port_destruct(struct tty_port *port)
 static void acm_port_shutdown(struct tty_port *port)
 {
        struct acm *acm = container_of(port, struct acm, port);
+       struct urb *urb;
+       struct acm_wb *wb;
        int i;
+       int pm_err;
 
        dev_dbg(&acm->control->dev, "%s\n", __func__);
 
        mutex_lock(&acm->mutex);
        if (!acm->disconnected) {
-               usb_autopm_get_interface(acm->control);
+               pm_err = usb_autopm_get_interface(acm->control);
                acm_set_control(acm, acm->ctrlout = 0);
+
+               for (;;) {
+                       urb = usb_get_from_anchor(&acm->delayed);
+                       if (!urb)
+                               break;
+                       wb = urb->context;
+                       wb->use = 0;
+                       usb_autopm_put_interface_async(acm->control);
+               }
+
                usb_kill_urb(acm->ctrlurb);
                for (i = 0; i < ACM_NW; i++)
                        usb_kill_urb(acm->wb[i].urb);
                for (i = 0; i < acm->rx_buflimit; i++)
                        usb_kill_urb(acm->read_urbs[i]);
                acm->control->needs_remote_wakeup = 0;
-               usb_autopm_put_interface(acm->control);
+               if (!pm_err)
+                       usb_autopm_put_interface(acm->control);
        }
        mutex_unlock(&acm->mutex);
 }
@@ -1190,6 +1214,7 @@ made_compressed_probe:
                acm->bInterval = epread->bInterval;
        tty_port_init(&acm->port);
        acm->port.ops = &acm_port_ops;
+       init_usb_anchor(&acm->delayed);
 
        buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
        if (!buf) {
@@ -1434,18 +1459,15 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
        struct acm *acm = usb_get_intfdata(intf);
        int cnt;
 
+       spin_lock_irq(&acm->read_lock);
+       spin_lock(&acm->write_lock);
        if (PMSG_IS_AUTO(message)) {
-               int b;
-
-               spin_lock_irq(&acm->write_lock);
-               b = acm->transmitting;
-               spin_unlock_irq(&acm->write_lock);
-               if (b)
+               if (acm->transmitting) {
+                       spin_unlock(&acm->write_lock);
+                       spin_unlock_irq(&acm->read_lock);
                        return -EBUSY;
+               }
        }
-
-       spin_lock_irq(&acm->read_lock);
-       spin_lock(&acm->write_lock);
        cnt = acm->susp_count++;
        spin_unlock(&acm->write_lock);
        spin_unlock_irq(&acm->read_lock);
@@ -1453,8 +1475,7 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
        if (cnt)
                return 0;
 
-       if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags))
-               stop_data_traffic(acm);
+       stop_data_traffic(acm);
 
        return 0;
 }
@@ -1462,29 +1483,24 @@ static int acm_suspend(struct usb_interface *intf, pm_message_t message)
 static int acm_resume(struct usb_interface *intf)
 {
        struct acm *acm = usb_get_intfdata(intf);
-       struct acm_wb *wb;
+       struct urb *urb;
        int rv = 0;
-       int cnt;
 
        spin_lock_irq(&acm->read_lock);
-       acm->susp_count -= 1;
-       cnt = acm->susp_count;
-       spin_unlock_irq(&acm->read_lock);
+       spin_lock(&acm->write_lock);
 
-       if (cnt)
-               return 0;
+       if (--acm->susp_count)
+               goto out;
 
        if (test_bit(ASYNCB_INITIALIZED, &acm->port.flags)) {
-               rv = usb_submit_urb(acm->ctrlurb, GFP_NOIO);
-
-               spin_lock_irq(&acm->write_lock);
-               if (acm->delayed_wb) {
-                       wb = acm->delayed_wb;
-                       acm->delayed_wb = NULL;
-                       spin_unlock_irq(&acm->write_lock);
-                       acm_start_wb(acm, wb);
-               } else {
-                       spin_unlock_irq(&acm->write_lock);
+               rv = usb_submit_urb(acm->ctrlurb, GFP_ATOMIC);
+
+               for (;;) {
+                       urb = usb_get_from_anchor(&acm->delayed);
+                       if (!urb)
+                               break;
+
+                       acm_start_wb(acm, urb->context);
                }
 
                /*
@@ -1492,12 +1508,14 @@ static int acm_resume(struct usb_interface *intf)
                 * do the write path at all cost
                 */
                if (rv < 0)
-                       goto err_out;
+                       goto out;
 
-               rv = acm_submit_read_urbs(acm, GFP_NOIO);
+               rv = acm_submit_read_urbs(acm, GFP_ATOMIC);
        }
+out:
+       spin_unlock(&acm->write_lock);
+       spin_unlock_irq(&acm->read_lock);
 
-err_out:
        return rv;
 }
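
Several hunks above replace the single delayed_wb pointer with a usb_anchor: writes issued while the device is suspended are parked and either submitted on resume or discarded (with their PM references dropped) on shutdown. A plain C sketch of that park-and-drain shape (hypothetical list, not the USB anchor API):

#include <stdio.h>
#include <stdlib.h>

struct write_req {
        int id;
        struct write_req *next;
};

static struct write_req *delayed;       /* the "anchor" */

static void queue_delayed(int id)
{
        struct write_req *w = malloc(sizeof(*w));

        if (!w)
                return;
        w->id = id;
        w->next = delayed;
        delayed = w;
}

static void resume_drain(void)
{
        while (delayed) {
                struct write_req *w = delayed;

                delayed = w->next;
                printf("submitting delayed write %d\n", w->id);
                free(w);
        }
}

int main(void)
{
        queue_delayed(1);       /* device suspended: park the writes */
        queue_delayed(2);
        resume_drain();         /* device resumed: submit them in turn */
        return 0;
}
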
 
index 0f76e4af600e406c063083c910fdbc3780bb4edc..1683ac161cf6b2aa7f8d2b0a2b9455a5b2722d75 100644 (file)
@@ -117,7 +117,7 @@ struct acm {
        unsigned int throttled:1;                       /* actually throttled */
        unsigned int throttle_req:1;                    /* throttle requested */
        u8 bInterval;
-       struct acm_wb *delayed_wb;                      /* write queued for a device about to be woken */
+       struct usb_anchor delayed;                      /* writes queued for a device about to be woken */
 };
 
 #define CDC_DATA_INTERFACE_TYPE        0x0a
index 69948ad39837876a9a0fae4e981ec98e65acbe56..d868b62c1a16586355197dc1f9c7a0191e7fc264 100644 (file)
@@ -604,6 +604,10 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
 
        dwc3_remove_requests(dwc, dep);
 
+       /* make sure HW endpoint isn't stalled */
+       if (dep->flags & DWC3_EP_STALL)
+               __dwc3_gadget_ep_set_halt(dep, 0);
+
        reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
        reg &= ~DWC3_DALEPENA_EP(dep->number);
        dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);
index 570c005062ab8a3c75a9da56131ffebfdce118aa..42a30903d4fd9f27e7e734b675de3e477228f04e 100644 (file)
@@ -1509,7 +1509,7 @@ gadgetfs_setup (struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
                }
                break;
 
-#ifndef        CONFIG_USB_GADGET_PXA25X
+#ifndef        CONFIG_USB_PXA25X
        /* PXA automagically handles this request too */
        case USB_REQ_GET_CONFIGURATION:
                if (ctrl->bRequestType != 0x80)
index 4c338ec03a07d1bfa72ceea172f07e19f151f54b..9cfe3af3101ac271093abbb5134afa96371f761c 100644 (file)
@@ -555,6 +555,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
                        DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
                },
        },
+       {
+               /* HASEE E200 */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
+                       DMI_MATCH(DMI_BOARD_NAME, "E210"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
+               },
+       },
        { }
 };
 
@@ -564,9 +572,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
 {
        int try_handoff = 1, tried_handoff = 0;
 
-       /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
-        * the handoff on its unused controller.  Skip it. */
-       if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
+       /*
+        * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+        * the handoff on its unused controller.  Skip it.
+        *
+        * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
+        */
+       if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
+                       pdev->device == 0x27cc)) {
                if (dmi_check_system(ehci_dmi_nohandoff_table))
                        try_handoff = 0;
        }
index 8b4ca1cb450aacce4dffaf67b0ca650b0cfa2544..98438b90838ff1ef22dffb4fb70d6123f9e6ca64 100644 (file)
@@ -7,9 +7,10 @@
 #include <linux/moduleparam.h>
 #include <linux/scatterlist.h>
 #include <linux/mutex.h>
-
+#include <linux/timer.h>
 #include <linux/usb.h>
 
+#define SIMPLE_IO_TIMEOUT      10000   /* in milliseconds */
 
 /*-------------------------------------------------------------------------*/
 
@@ -366,6 +367,7 @@ static int simple_io(
        int                     max = urb->transfer_buffer_length;
        struct completion       completion;
        int                     retval = 0;
+       unsigned long           expire;
 
        urb->context = &completion;
        while (retval == 0 && iterations-- > 0) {
@@ -378,9 +380,15 @@ static int simple_io(
                if (retval != 0)
                        break;
 
-               /* NOTE:  no timeouts; can't be broken out of by interrupt */
-               wait_for_completion(&completion);
-               retval = urb->status;
+               expire = msecs_to_jiffies(SIMPLE_IO_TIMEOUT);
+               if (!wait_for_completion_timeout(&completion, expire)) {
+                       usb_kill_urb(urb);
+                       retval = (urb->status == -ENOENT ?
+                                 -ETIMEDOUT : urb->status);
+               } else {
+                       retval = urb->status;
+               }
+
                urb->dev = udev;
                if (retval == 0 && usb_pipein(urb->pipe))
                        retval = simple_check_buf(tdev, urb);
@@ -476,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
        return sg;
 }
 
+static void sg_timeout(unsigned long _req)
+{
+       struct usb_sg_request   *req = (struct usb_sg_request *) _req;
+
+       req->status = -ETIMEDOUT;
+       usb_sg_cancel(req);
+}
+
 static int perform_sglist(
        struct usbtest_dev      *tdev,
        unsigned                iterations,
@@ -487,6 +503,9 @@ static int perform_sglist(
 {
        struct usb_device       *udev = testdev_to_usbdev(tdev);
        int                     retval = 0;
+       struct timer_list       sg_timer;
+
+       setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
 
        while (retval == 0 && iterations-- > 0) {
                retval = usb_sg_init(req, udev, pipe,
@@ -497,7 +516,10 @@ static int perform_sglist(
 
                if (retval)
                        break;
+               mod_timer(&sg_timer, jiffies +
+                               msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
                usb_sg_wait(req);
+               del_timer_sync(&sg_timer);
                retval = req->status;
 
                /* FIXME check resulting data pattern */
@@ -1149,6 +1171,11 @@ static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
        urb->context = &completion;
        urb->complete = unlink1_callback;
 
+       if (usb_pipeout(urb->pipe)) {
+               simple_fill_buf(urb);
+               urb->transfer_flags |= URB_ZERO_PACKET;
+       }
+
        /* keep the endpoint busy.  there are lots of hc/hcd-internal
         * states, and testing should get to all of them over time.
         *
@@ -1279,6 +1306,11 @@ static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
                                unlink_queued_callback, &ctx);
                ctx.urbs[i]->transfer_dma = buf_dma;
                ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
+
+               if (usb_pipeout(ctx.urbs[i]->pipe)) {
+                       simple_fill_buf(ctx.urbs[i]);
+                       ctx.urbs[i]->transfer_flags |= URB_ZERO_PACKET;
+               }
        }
 
        /* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
index ae481afcb3ece1358ffd8244ad6df9cbccb93081..9201feb97e9e8ecf6d2d70ae87d812627e4527f1 100644 (file)
@@ -1299,7 +1299,7 @@ isp1301_set_host(struct usb_otg *otg, struct usb_bus *host)
                return isp1301_otg_enable(isp);
        return 0;
 
-#elif  !defined(CONFIG_USB_GADGET_OMAP)
+#elif !IS_ENABLED(CONFIG_USB_OMAP)
        // FIXME update its refcount
        otg->host = host;
 
index 3c4db6d196c630257611883e9f8f7ec46f5c20b6..7229b265870aa1de1f48b4ff0e19db07ddb3d963 100644 (file)
@@ -98,13 +98,19 @@ static int usb_serial_device_remove(struct device *dev)
        struct usb_serial_port *port;
        int retval = 0;
        int minor;
+       int autopm_err;
 
        port = to_usb_serial_port(dev);
        if (!port)
                return -ENODEV;
 
-       /* make sure suspend/resume doesn't race against port_remove */
-       usb_autopm_get_interface(port->serial->interface);
+       /*
+        * Make sure suspend/resume doesn't race against port_remove.
+        *
+        * Note that no further runtime PM callbacks will be made if
+        * autopm_get fails.
+        */
+       autopm_err = usb_autopm_get_interface(port->serial->interface);
 
        minor = port->number;
        tty_unregister_device(usb_serial_tty_driver, minor);
@@ -118,7 +124,9 @@ static int usb_serial_device_remove(struct device *dev)
        dev_info(dev, "%s converter now disconnected from ttyUSB%d\n",
                 driver->description, minor);
 
-       usb_autopm_put_interface(port->serial->interface);
+       if (!autopm_err)
+               usb_autopm_put_interface(port->serial->interface);
+
        return retval;
 }
 
index 948a19f0cdf7cf71b9f3fc5f782d07cb0428496f..70ede84f4f6b402f6ee6c5fc7e2eb8b6a6584496 100644 (file)
@@ -1925,6 +1925,7 @@ static int option_send_setup(struct usb_serial_port *port)
        struct option_private *priv = intfdata->private;
        struct usb_wwan_port_private *portdata;
        int val = 0;
+       int res;
 
        portdata = usb_get_serial_port_data(port);
 
@@ -1933,9 +1934,17 @@ static int option_send_setup(struct usb_serial_port *port)
        if (portdata->rts_state)
                val |= 0x02;
 
-       return usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
+       res = usb_autopm_get_interface(serial->interface);
+       if (res)
+               return res;
+
+       res = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0),
                                0x22, 0x21, val, priv->bInterfaceNumber, NULL,
                                0, USB_CTRL_SET_TIMEOUT);
+
+       usb_autopm_put_interface(serial->interface);
+
+       return res;
 }
 
 MODULE_AUTHOR(DRIVER_AUTHOR);
index 6c0a542e8ec1820d60d03f5a7896843d4a6b96f6..43d93dbf7d71bdba86504f2c8cc66bb3f6260141 100644 (file)
@@ -145,12 +145,33 @@ static const struct usb_device_id id_table[] = {
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 0)},       /* Sierra Wireless EM7355 Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 2)},       /* Sierra Wireless EM7355 NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901f, 3)},       /* Sierra Wireless EM7355 Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 0)},       /* Sierra Wireless Modem Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 2)},       /* Sierra Wireless Modem NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9040, 3)},       /* Sierra Wireless Modem Modem */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 0)},       /* Sierra Wireless MC7305/MC7355 Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 2)},       /* Sierra Wireless MC7305/MC7355 NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9041, 3)},       /* Sierra Wireless MC7305/MC7355 Modem */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)},       /* Netgear AirCard 340U Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)},       /* Netgear AirCard 340U NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)},       /* Netgear AirCard 340U Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 0)},       /* Sierra Wireless Modem Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 2)},       /* Sierra Wireless Modem NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9053, 3)},       /* Sierra Wireless Modem Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 0)},       /* Sierra Wireless Modem Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 2)},       /* Sierra Wireless Modem NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9054, 3)},       /* Sierra Wireless Modem Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 0)},       /* Netgear AirCard 341U Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 2)},       /* Netgear AirCard 341U NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9055, 3)},       /* Netgear AirCard 341U Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 0)},       /* Sierra Wireless Modem Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 2)},       /* Sierra Wireless Modem NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9056, 3)},       /* Sierra Wireless Modem Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 0)},       /* Sierra Wireless Modem Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 2)},       /* Sierra Wireless Modem NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9060, 3)},       /* Sierra Wireless Modem Modem */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 0)},       /* Sierra Wireless Modem Device Management */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 2)},       /* Sierra Wireless Modem NMEA */
+       {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9061, 3)},       /* Sierra Wireless Modem Modem */
        {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 0)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Device Management */
        {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 2)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card NMEA */
        {USB_DEVICE_INTERFACE_NUMBER(0x413c, 0x81a2, 3)},       /* Dell Wireless 5806 Gobi(TM) 4G LTE Mobile Broadband Card Modem */
index 2df566c0e9e8ee2361efdeeb0a556dd1014d5045..4e45908541238f992a07a5b646fd168613576aca 100644 (file)
@@ -58,6 +58,7 @@ struct sierra_intf_private {
        spinlock_t susp_lock;
        unsigned int suspended:1;
        int in_flight;
+       unsigned int open_ports;
 };
 
 static int sierra_set_power_state(struct usb_device *udev, __u16 swiState)
@@ -767,6 +768,7 @@ static void sierra_close(struct usb_serial_port *port)
        struct usb_serial *serial = port->serial;
        struct sierra_port_private *portdata;
        struct sierra_intf_private *intfdata = port->serial->private;
+       struct urb *urb;
 
        portdata = usb_get_serial_port_data(port);
 
@@ -775,7 +777,6 @@ static void sierra_close(struct usb_serial_port *port)
 
        mutex_lock(&serial->disc_mutex);
        if (!serial->disconnected) {
-               serial->interface->needs_remote_wakeup = 0;
                /* odd error handling due to pm counters */
                if (!usb_autopm_get_interface(serial->interface))
                        sierra_send_setup(port);
@@ -786,8 +787,22 @@ static void sierra_close(struct usb_serial_port *port)
        mutex_unlock(&serial->disc_mutex);
        spin_lock_irq(&intfdata->susp_lock);
        portdata->opened = 0;
+       if (--intfdata->open_ports == 0)
+               serial->interface->needs_remote_wakeup = 0;
        spin_unlock_irq(&intfdata->susp_lock);
 
+       for (;;) {
+               urb = usb_get_from_anchor(&portdata->delayed);
+               if (!urb)
+                       break;
+               kfree(urb->transfer_buffer);
+               usb_free_urb(urb);
+               usb_autopm_put_interface_async(serial->interface);
+               spin_lock(&portdata->lock);
+               portdata->outstanding_urbs--;
+               spin_unlock(&portdata->lock);
+       }
+
        sierra_stop_rx_urbs(port);
        for (i = 0; i < portdata->num_in_urbs; i++) {
                sierra_release_urb(portdata->in_urbs[i]);
@@ -824,23 +839,29 @@ static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port)
                        usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN);
 
        err = sierra_submit_rx_urbs(port, GFP_KERNEL);
-       if (err) {
-               /* get rid of everything as in close */
-               sierra_close(port);
-               /* restore balance for autopm */
-               if (!serial->disconnected)
-                       usb_autopm_put_interface(serial->interface);
-               return err;
-       }
+       if (err)
+               goto err_submit;
+
        sierra_send_setup(port);
 
-       serial->interface->needs_remote_wakeup = 1;
        spin_lock_irq(&intfdata->susp_lock);
        portdata->opened = 1;
+       if (++intfdata->open_ports == 1)
+               serial->interface->needs_remote_wakeup = 1;
        spin_unlock_irq(&intfdata->susp_lock);
        usb_autopm_put_interface(serial->interface);
 
        return 0;
+
+err_submit:
+       sierra_stop_rx_urbs(port);
+
+       for (i = 0; i < portdata->num_in_urbs; i++) {
+               sierra_release_urb(portdata->in_urbs[i]);
+               portdata->in_urbs[i] = NULL;
+       }
+
+       return err;
 }
 
 
@@ -936,6 +957,7 @@ static int sierra_port_remove(struct usb_serial_port *port)
        struct sierra_port_private *portdata;
 
        portdata = usb_get_serial_port_data(port);
+       usb_set_serial_port_data(port, NULL);
        kfree(portdata);
 
        return 0;
@@ -952,6 +974,8 @@ static void stop_read_write_urbs(struct usb_serial *serial)
        for (i = 0; i < serial->num_ports; ++i) {
                port = serial->port[i];
                portdata = usb_get_serial_port_data(port);
+               if (!portdata)
+                       continue;
                sierra_stop_rx_urbs(port);
                usb_kill_anchored_urbs(&portdata->active);
        }
@@ -994,6 +1018,9 @@ static int sierra_resume(struct usb_serial *serial)
                port = serial->port[i];
                portdata = usb_get_serial_port_data(port);
 
+               if (!portdata)
+                       continue;
+
                while ((urb = usb_get_from_anchor(&portdata->delayed))) {
                        usb_anchor_urb(urb, &portdata->active);
                        intfdata->in_flight++;
@@ -1001,8 +1028,12 @@ static int sierra_resume(struct usb_serial *serial)
                        if (err < 0) {
                                intfdata->in_flight--;
                                usb_unanchor_urb(urb);
-                               usb_scuttle_anchored_urbs(&portdata->delayed);
-                               break;
+                               kfree(urb->transfer_buffer);
+                               usb_free_urb(urb);
+                               spin_lock(&portdata->lock);
+                               portdata->outstanding_urbs--;
+                               spin_unlock(&portdata->lock);
+                               continue;
                        }
                }
 
index 11952b6dc2247154f44003bb40a8261f9df5174e..36f6b6a569077fc98a395651cf13e398c16cddd6 100644 (file)
@@ -228,8 +228,10 @@ int usb_wwan_write(struct tty_struct *tty, struct usb_serial_port *port,
                        usb_pipeendpoint(this_urb->pipe), i);
 
                err = usb_autopm_get_interface_async(port->serial->interface);
-               if (err < 0)
+               if (err < 0) {
+                       clear_bit(i, &portdata->out_busy);
                        break;
+               }
 
                /* send the data */
                memcpy(this_urb->transfer_buffer, buf, todo);
@@ -386,6 +388,14 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
        portdata = usb_get_serial_port_data(port);
        intfdata = serial->private;
 
+       if (port->interrupt_in_urb) {
+               err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
+               if (err) {
+                       dev_dbg(&port->dev, "%s: submit int urb failed: %d\n",
+                               __func__, err);
+               }
+       }
+
        /* Start reading from the IN endpoint */
        for (i = 0; i < N_IN_URB; i++) {
                urb = portdata->in_urbs[i];
@@ -412,12 +422,26 @@ int usb_wwan_open(struct tty_struct *tty, struct usb_serial_port *port)
 }
 EXPORT_SYMBOL(usb_wwan_open);
 
+static void unbusy_queued_urb(struct urb *urb,
+                                       struct usb_wwan_port_private *portdata)
+{
+       int i;
+
+       for (i = 0; i < N_OUT_URB; i++) {
+               if (urb == portdata->out_urbs[i]) {
+                       clear_bit(i, &portdata->out_busy);
+                       break;
+               }
+       }
+}
+
 void usb_wwan_close(struct usb_serial_port *port)
 {
        int i;
        struct usb_serial *serial = port->serial;
        struct usb_wwan_port_private *portdata;
        struct usb_wwan_intf_private *intfdata = port->serial->private;
+       struct urb *urb;
 
        portdata = usb_get_serial_port_data(port);
 
@@ -426,10 +450,19 @@ void usb_wwan_close(struct usb_serial_port *port)
        portdata->opened = 0;
        spin_unlock_irq(&intfdata->susp_lock);
 
+       for (;;) {
+               urb = usb_get_from_anchor(&portdata->delayed);
+               if (!urb)
+                       break;
+               unbusy_queued_urb(urb, portdata);
+               usb_autopm_put_interface_async(serial->interface);
+       }
+
        for (i = 0; i < N_IN_URB; i++)
                usb_kill_urb(portdata->in_urbs[i]);
        for (i = 0; i < N_OUT_URB; i++)
                usb_kill_urb(portdata->out_urbs[i]);
+       usb_kill_urb(port->interrupt_in_urb);
 
        /* balancing - important as an error cannot be handled*/
        usb_autopm_get_interface_no_resume(serial->interface);
@@ -467,7 +500,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
        struct usb_wwan_port_private *portdata;
        struct urb *urb;
        u8 *buffer;
-       int err;
        int i;
 
        if (!port->bulk_in_size || !port->bulk_out_size)
@@ -507,13 +539,6 @@ int usb_wwan_port_probe(struct usb_serial_port *port)
 
        usb_set_serial_port_data(port, portdata);
 
-       if (port->interrupt_in_urb) {
-               err = usb_submit_urb(port->interrupt_in_urb, GFP_KERNEL);
-               if (err)
-                       dev_dbg(&port->dev, "%s: submit irq_in urb failed %d\n",
-                               __func__, err);
-       }
-
        return 0;
 
 bail_out_error2:
@@ -581,44 +606,29 @@ static void stop_read_write_urbs(struct usb_serial *serial)
 int usb_wwan_suspend(struct usb_serial *serial, pm_message_t message)
 {
        struct usb_wwan_intf_private *intfdata = serial->private;
-       int b;
 
+       spin_lock_irq(&intfdata->susp_lock);
        if (PMSG_IS_AUTO(message)) {
-               spin_lock_irq(&intfdata->susp_lock);
-               b = intfdata->in_flight;
-               spin_unlock_irq(&intfdata->susp_lock);
-
-               if (b)
+               if (intfdata->in_flight) {
+                       spin_unlock_irq(&intfdata->susp_lock);
                        return -EBUSY;
+               }
        }
-
-       spin_lock_irq(&intfdata->susp_lock);
        intfdata->suspended = 1;
        spin_unlock_irq(&intfdata->susp_lock);
+
        stop_read_write_urbs(serial);
 
        return 0;
 }
 EXPORT_SYMBOL(usb_wwan_suspend);
 
-static void unbusy_queued_urb(struct urb *urb, struct usb_wwan_port_private *portdata)
-{
-       int i;
-
-       for (i = 0; i < N_OUT_URB; i++) {
-               if (urb == portdata->out_urbs[i]) {
-                       clear_bit(i, &portdata->out_busy);
-                       break;
-               }
-       }
-}
-
-static void play_delayed(struct usb_serial_port *port)
+static int play_delayed(struct usb_serial_port *port)
 {
        struct usb_wwan_intf_private *data;
        struct usb_wwan_port_private *portdata;
        struct urb *urb;
-       int err;
+       int err = 0;
 
        portdata = usb_get_serial_port_data(port);
        data = port->serial->private;
@@ -635,6 +645,8 @@ static void play_delayed(struct usb_serial_port *port)
                        break;
                }
        }
+
+       return err;
 }
 
 int usb_wwan_resume(struct usb_serial *serial)
@@ -644,54 +656,51 @@ int usb_wwan_resume(struct usb_serial *serial)
        struct usb_wwan_intf_private *intfdata = serial->private;
        struct usb_wwan_port_private *portdata;
        struct urb *urb;
-       int err = 0;
-
-       /* get the interrupt URBs resubmitted unconditionally */
-       for (i = 0; i < serial->num_ports; i++) {
-               port = serial->port[i];
-               if (!port->interrupt_in_urb) {
-                       dev_dbg(&port->dev, "%s: No interrupt URB for port\n", __func__);
-                       continue;
-               }
-               err = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
-               dev_dbg(&port->dev, "Submitted interrupt URB for port (result %d)\n", err);
-               if (err < 0) {
-                       dev_err(&port->dev, "%s: Error %d for interrupt URB\n",
-                               __func__, err);
-                       goto err_out;
-               }
-       }
+       int err;
+       int err_count = 0;
 
+       spin_lock_irq(&intfdata->susp_lock);
        for (i = 0; i < serial->num_ports; i++) {
                /* walk all ports */
                port = serial->port[i];
                portdata = usb_get_serial_port_data(port);
 
                /* skip closed ports */
-               spin_lock_irq(&intfdata->susp_lock);
-               if (!portdata || !portdata->opened) {
-                       spin_unlock_irq(&intfdata->susp_lock);
+               if (!portdata || !portdata->opened)
                        continue;
+
+               if (port->interrupt_in_urb) {
+                       err = usb_submit_urb(port->interrupt_in_urb,
+                                       GFP_ATOMIC);
+                       if (err) {
+                               dev_err(&port->dev,
+                                       "%s: submit int urb failed: %d\n",
+                                       __func__, err);
+                               err_count++;
+                       }
                }
 
+               err = play_delayed(port);
+               if (err)
+                       err_count++;
+
                for (j = 0; j < N_IN_URB; j++) {
                        urb = portdata->in_urbs[j];
                        err = usb_submit_urb(urb, GFP_ATOMIC);
                        if (err < 0) {
                                dev_err(&port->dev, "%s: Error %d for bulk URB %d\n",
                                        __func__, err, i);
-                               spin_unlock_irq(&intfdata->susp_lock);
-                               goto err_out;
+                               err_count++;
                        }
                }
-               play_delayed(port);
-               spin_unlock_irq(&intfdata->susp_lock);
        }
-       spin_lock_irq(&intfdata->susp_lock);
        intfdata->suspended = 0;
        spin_unlock_irq(&intfdata->susp_lock);
-err_out:
-       return err;
+
+       if (err_count)
+               return -EIO;
+
+       return 0;
 }
 EXPORT_SYMBOL(usb_wwan_resume);
 #endif
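
The reworked usb_wwan_resume() above no longer bails out on the first failed submission; it resumes every open port best-effort, counts failures, and reports a single -EIO at the end. A plain C sketch of that error-counting shape (submit_urb() here is a stand-in, not the USB API):

#include <stdio.h>

static int submit_urb(int port, int urb)        /* stand-in for usb_submit_urb() */
{
        return (port == 1 && urb == 0) ? -5 : 0;        /* pretend one submission fails */
}

int main(void)
{
        int err_count = 0;

        for (int port = 0; port < 3; port++)
                for (int urb = 0; urb < 4; urb++)
                        if (submit_urb(port, urb) < 0)
                                err_count++;            /* keep going, just count */

        printf("resume %s\n", err_count ? "returns -EIO" : "succeeded");
        return 0;
}
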
index 556d96ce40bf27681db498992fb0bb6f1af34f4a..89a8a89a5eb297273c86b111af4cd8ff9e32615e 100644 (file)
@@ -698,7 +698,7 @@ void matroxfb_unregister_driver(struct matroxfb_driver* drv);
 
 #define mga_fifo(n)    do {} while ((mga_inl(M_FIFOSTATUS) & 0xFF) < (n))
 
-#define WaitTillIdle() do {} while (mga_inl(M_STATUS) & 0x10000)
+#define WaitTillIdle() do { mga_inl(M_STATUS); do {} while (mga_inl(M_STATUS) & 0x10000); } while (0)
 
 /* code speedup */
 #ifdef CONFIG_FB_MATROX_MILLENIUM
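
The WaitTillIdle() change above adds one throwaway read of M_STATUS before the busy-polling loop, presumably so the poll starts from a freshly latched status value rather than a stale one. A simulated-register sketch of the new macro's shape (volatile variable instead of MMIO):

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t m_status;              /* stands in for the MMIO register */

static uint32_t read_status(void)
{
        return m_status;                        /* real code uses mga_inl(M_STATUS) */
}

static void wait_till_idle(void)
{
        read_status();                          /* throwaway read, result ignored */
        while (read_status() & 0x10000)         /* then poll the busy bit */
                ;
}

int main(void)
{
        m_status = 0;                           /* engine already idle in this demo */
        wait_till_idle();
        printf("accelerator idle\n");
        return 0;
}
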
index ebd06fd0de89c8e61d7de1712b8c43450347a5c0..ded94c4fa30d31a6997625d007548d119c2aa75d 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -310,7 +310,6 @@ static void free_ioctx(struct kioctx *ctx)
 
                avail = (head <= ctx->tail ? ctx->tail : ctx->nr_events) - head;
 
-               atomic_sub(avail, &ctx->reqs_active);
                head += avail;
                head %= ctx->nr_events;
        }
@@ -678,6 +677,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 put_rq:
        /* everything turned out well, dispose of the aiocb. */
        aio_put_req(iocb);
+       atomic_dec(&ctx->reqs_active);
 
        /*
         * We have to order our ring_info tail store above and test
@@ -717,6 +717,8 @@ static long aio_read_events_ring(struct kioctx *ctx,
        if (head == ctx->tail)
                goto out;
 
+       head %= ctx->nr_events;
+
        while (ret < nr) {
                long avail;
                struct io_event *ev;
@@ -755,8 +757,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
        flush_dcache_page(ctx->ring_pages[0]);
 
        pr_debug("%li  h%u t%u\n", ret, head, ctx->tail);
-
-       atomic_sub(ret, &ctx->reqs_active);
 out:
        mutex_unlock(&ctx->ring_lock);
 
index 290e347b6db3f925f414fd9be4e6ea394da6f887..d85f90c92bb4dd46d4e6e624b5170e8f8f432147 100644 (file)
@@ -1347,9 +1347,10 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
  * returns <0 on error
  */
 static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
-                               struct btrfs_extent_item *ei, u32 item_size,
-                               struct btrfs_extent_inline_ref **out_eiref,
-                               int *out_type)
+                                  struct btrfs_key *key,
+                                  struct btrfs_extent_item *ei, u32 item_size,
+                                  struct btrfs_extent_inline_ref **out_eiref,
+                                  int *out_type)
 {
        unsigned long end;
        u64 flags;
@@ -1359,19 +1360,26 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
                /* first call */
                flags = btrfs_extent_flags(eb, ei);
                if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
-                       info = (struct btrfs_tree_block_info *)(ei + 1);
-                       *out_eiref =
-                               (struct btrfs_extent_inline_ref *)(info + 1);
+                       if (key->type == BTRFS_METADATA_ITEM_KEY) {
+                               /* a skinny metadata extent */
+                               *out_eiref =
+                                    (struct btrfs_extent_inline_ref *)(ei + 1);
+                       } else {
+                               WARN_ON(key->type != BTRFS_EXTENT_ITEM_KEY);
+                               info = (struct btrfs_tree_block_info *)(ei + 1);
+                               *out_eiref =
+                                  (struct btrfs_extent_inline_ref *)(info + 1);
+                       }
                } else {
                        *out_eiref = (struct btrfs_extent_inline_ref *)(ei + 1);
                }
                *ptr = (unsigned long)*out_eiref;
-               if ((void *)*ptr >= (void *)ei + item_size)
+               if ((unsigned long)(*ptr) >= (unsigned long)ei + item_size)
                        return -ENOENT;
        }
 
        end = (unsigned long)ei + item_size;
-       *out_eiref = (struct btrfs_extent_inline_ref *)*ptr;
+       *out_eiref = (struct btrfs_extent_inline_ref *)(*ptr);
        *out_type = btrfs_extent_inline_ref_type(eb, *out_eiref);
 
        *ptr += btrfs_extent_inline_ref_size(*out_type);
@@ -1390,8 +1398,8 @@ static int __get_extent_inline_ref(unsigned long *ptr, struct extent_buffer *eb,
  * <0 on error.
  */
 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
-                               struct btrfs_extent_item *ei, u32 item_size,
-                               u64 *out_root, u8 *out_level)
+                           struct btrfs_key *key, struct btrfs_extent_item *ei,
+                           u32 item_size, u64 *out_root, u8 *out_level)
 {
        int ret;
        int type;
@@ -1402,8 +1410,8 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
                return 1;
 
        while (1) {
-               ret = __get_extent_inline_ref(ptr, eb, ei, item_size,
-                                               &eiref, &type);
+               ret = __get_extent_inline_ref(ptr, eb, key, ei, item_size,
+                                             &eiref, &type);
                if (ret < 0)
                        return ret;
 
index 0f446d7ca2c0d62b85e560e93574cb44efffc655..526d09e70c93d7ba9bde8a468409358c2e79b577 100644 (file)
@@ -42,8 +42,8 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
                        u64 *flags);
 
 int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
-                               struct btrfs_extent_item *ei, u32 item_size,
-                               u64 *out_root, u8 *out_level);
+                           struct btrfs_key *key, struct btrfs_extent_item *ei,
+                           u32 item_size, u64 *out_root, u8 *out_level);
 
 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
                                u64 extent_item_objectid,
index 4354b9127713b230e5b5f103b25999013bbe3cc9..abecce3993542adf75463b2f681189787b0ea74b 100644 (file)
@@ -3518,6 +3518,11 @@ int close_ctree(struct btrfs_root *root)
 
        btrfs_free_block_groups(fs_info);
 
+       /*
+        * we must make sure there is not any read request to
+        * submit after we stopping all workers.
+        */
+       invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        btrfs_stop_all_workers(fs_info);
 
        del_fs_roots(fs_info);
index e7e7afb4a87268211e8b0ef881a6eeac0068eefd..84ceff6abbc11f66e8e3f96d24740d67cbacd1ed 100644 (file)
@@ -1624,6 +1624,7 @@ again:
                 * shortening the size of the delalloc range we're searching
                 */
                free_extent_state(cached_state);
+               cached_state = NULL;
                if (!loops) {
                        unsigned long offset = (*start) & (PAGE_CACHE_SIZE - 1);
                        max_bytes = PAGE_CACHE_SIZE - offset;
@@ -2356,7 +2357,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
 {
        int uptodate = (err == 0);
        struct extent_io_tree *tree;
-       int ret;
+       int ret = 0;
 
        tree = &BTRFS_I(page->mapping->host)->io_tree;
 
@@ -2370,6 +2371,8 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
        if (!uptodate) {
                ClearPageUptodate(page);
                SetPageError(page);
+               ret = ret < 0 ? ret : -EIO;
+               mapping_set_error(page->mapping, ret);
        }
        return 0;
 }
index e53009657f0e5b91b638f3d31d5e4d2cbf919f62..0cbe95dc81135639db58f2c941760b26cf4f842d 100644 (file)
@@ -835,7 +835,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 
        if (!matched) {
                __btrfs_remove_free_space_cache(ctl);
-               btrfs_err(fs_info, "block group %llu has wrong amount of free space",
+               btrfs_warn(fs_info, "block group %llu has wrong amount of free space",
                        block_group->key.objectid);
                ret = -1;
        }
@@ -847,7 +847,7 @@ out:
                spin_unlock(&block_group->lock);
                ret = 0;
 
-               btrfs_err(fs_info, "failed to load free space cache for block group %llu",
+               btrfs_warn(fs_info, "failed to load free space cache for block group %llu, rebuild it now",
                        block_group->key.objectid);
        }
 
index eb84c2db1acae77010410bd2300b0de173f4628e..e4f69e3b78b94eb7c29ccf58d6fd315e95eacdb6 100644 (file)
@@ -545,8 +545,9 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
 
        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
-                       ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
-                                                       &ref_root, &ref_level);
+                       ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
+                                                     item_size, &ref_root,
+                                                     &ref_level);
                        printk_in_rcu(KERN_WARNING
                                "btrfs: %s at logical %llu on dev %s, "
                                "sector %llu: metadata %s (level %d) in tree "
index 256a9a46d5443f657921aacb62abdd7a9bafac81..414c1b9eb8964cb4cf099819e682d5f625f7cfa3 100644 (file)
@@ -1550,6 +1550,10 @@ static int lookup_dir_item_inode(struct btrfs_root *root,
                goto out;
        }
        btrfs_dir_item_key_to_cpu(path->nodes[0], di, &key);
+       if (key.type == BTRFS_ROOT_ITEM_KEY) {
+               ret = -ENOENT;
+               goto out;
+       }
        *found_inode = key.objectid;
        *found_type = btrfs_dir_type(path->nodes[0], di);
 
index b6c23c4abae273f09575b96f72fe3b98823d1a45..7fc774639a7879251ec32433d69b18d071d5e7fb 100644 (file)
@@ -1384,6 +1384,22 @@ out:
        return ret;
 }
 
+/*
+ * Function to update ctime/mtime for a given device path.
+ * Mainly used by ctime/mtime-based probes such as libblkid.
+ */
+static void update_dev_time(char *path_name)
+{
+       struct file *filp;
+
+       filp = filp_open(path_name, O_RDWR, 0);
+       if (IS_ERR(filp))
+               return;
+       file_update_time(filp);
+       filp_close(filp, NULL);
+       return;
+}
+
 static int btrfs_rm_dev_item(struct btrfs_root *root,
                             struct btrfs_device *device)
 {
@@ -1612,11 +1628,12 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
                struct btrfs_fs_devices *fs_devices;
                fs_devices = root->fs_info->fs_devices;
                while (fs_devices) {
-                       if (fs_devices->seed == cur_devices)
+                       if (fs_devices->seed == cur_devices) {
+                               fs_devices->seed = cur_devices->seed;
                                break;
+                       }
                        fs_devices = fs_devices->seed;
                }
-               fs_devices->seed = cur_devices->seed;
                cur_devices->seed = NULL;
                lock_chunks(root);
                __btrfs_close_devices(cur_devices);
@@ -1642,10 +1659,14 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 
        ret = 0;
 
-       /* Notify udev that device has changed */
-       if (bdev)
+       if (bdev) {
+               /* Notify udev that device has changed */
                btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
 
+               /* Update ctime/mtime for device path for libblkid */
+               update_dev_time(device_path);
+       }
+
 error_brelse:
        brelse(bh);
        if (bdev)
@@ -1817,7 +1838,6 @@ static int btrfs_prepare_sprout(struct btrfs_root *root)
        fs_devices->seeding = 0;
        fs_devices->num_devices = 0;
        fs_devices->open_devices = 0;
-       fs_devices->total_devices = 0;
        fs_devices->seed = seed_devices;
 
        generate_random_uuid(fs_devices->fsid);
@@ -2089,6 +2109,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                ret = btrfs_commit_transaction(trans, root);
        }
 
+       /* Update ctime/mtime for libblkid */
+       update_dev_time(device_path);
        return ret;
 
 error_trans:
index fba960ee26de54ab2be95d22d3a58d5eacec2ec7..16bb6591561b38ce36826ce6bc17b690f7ac4d07 100644 (file)
@@ -3116,7 +3116,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
        }
        BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
                        start > ac->ac_o_ex.fe_logical);
-       BUG_ON(size <= 0 || size > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
+       BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
 
        /* now prepare goal request */
 
index 4acf1f78881b6c4c61aa10377a62ea4368106593..b12a4427aedc41f71bbbf697cbc61e17e30963cf 100644 (file)
@@ -383,6 +383,17 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
        set_page_writeback(page);
        ClearPageError(page);
 
+       /*
+        * Comments copied from block_write_full_page_endio:
+        *
+        * The page straddles i_size.  It must be zeroed out on each and every
+        * writepage invocation because it may be mmapped.  "A file is mapped
+        * in multiples of the page size.  For a file that is not a multiple of
+        * the page size, the remaining memory is zeroed when mapped, and
+        * writes to that region are not written out to the file."
+        */
+       if (len < PAGE_CACHE_SIZE)
+               zero_user_segment(page, len, PAGE_CACHE_SIZE);
        /*
         * In the first loop we prepare and mark buffers to submit. We have to
         * mark all buffers in the page before submitting so that
@@ -394,19 +405,6 @@ int ext4_bio_write_page(struct ext4_io_submit *io,
        do {
                block_start = bh_offset(bh);
                if (block_start >= len) {
-                       /*
-                        * Comments copied from block_write_full_page_endio:
-                        *
-                        * The page straddles i_size.  It must be zeroed out on
-                        * each and every writepage invocation because it may
-                        * be mmapped.  "A file is mapped in multiples of the
-                        * page size.  For a file that is not a multiple of
-                        * the  page size, the remaining memory is zeroed when
-                        * mapped, and writes to that region are not written
-                        * out to the file."
-                        */
-                       zero_user_segment(page, block_start,
-                                         block_start + blocksize);
                        clear_buffer_dirty(bh);
                        set_buffer_uptodate(bh);
                        continue;
index 16fae6436d0ed150fe5947683c647e7ae8b1d9c4..25b8b15197b008635c9eb354ba3f2baf3c367886 100644 (file)
@@ -193,6 +193,7 @@ struct team {
        bool user_carrier_enabled;
        bool queue_override_enabled;
        struct list_head *qom_lists; /* array of queue override mapping lists */
+       bool port_mtu_change_allowed;
        long mode_priv[TEAM_MODE_PRIV_LONGS];
 };
 
index f4101f01286b2c1a3700b2774df8b70158979332..a8cc2fcffcaf40e4ab00786cd61f918c783cffe0 100644 (file)
@@ -27,6 +27,8 @@ struct irq_desc;
  * @irq_count:         stats field to detect stalled irqs
  * @last_unhandled:    aging timer for unhandled count
  * @irqs_unhandled:    stats field for spurious unhandled interrupts
+ * @threads_handled:   stats field for deferred spurious detection of threaded handlers
+ * @threads_handled_last: comparator field for deferred spurious detection of threaded handlers
  * @lock:              locking for SMP
  * @affinity_hint:     hint to user space for preferred irq affinity
  * @affinity_notify:   context for notification of affinity changes
@@ -52,6 +54,8 @@ struct irq_desc {
        unsigned int            irq_count;      /* For detecting broken IRQs */
        unsigned long           last_unhandled; /* Aging timer for unhandled count */
        unsigned int            irqs_unhandled;
+       atomic_t                threads_handled;
+       int                     threads_handled_last;
        raw_spinlock_t          lock;
        struct cpumask          *percpu_enabled;
 #ifdef CONFIG_SMP
index 6358da5eeee8f8d89b619557631613762d09c883..9516dad451091d1dd8862652cbddf620cac991ba 100644 (file)
@@ -16,9 +16,10 @@ static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb)
 }
 
 enum netlink_skb_flags {
-       NETLINK_SKB_MMAPED      = 0x1,          /* Packet data is mmaped */
-       NETLINK_SKB_TX          = 0x2,          /* Packet was sent by userspace */
-       NETLINK_SKB_DELIVERED   = 0x4,          /* Packet was delivered */
+       NETLINK_SKB_MMAPED      = 0x1,  /* Packet data is mmaped */
+       NETLINK_SKB_TX          = 0x2,  /* Packet was sent by userspace */
+       NETLINK_SKB_DELIVERED   = 0x4,  /* Packet was delivered */
+       NETLINK_SKB_DST         = 0x8,  /* Dst set in sendto or sendmsg */
 };
 
 struct netlink_skb_parms {
@@ -144,4 +145,11 @@ static inline int netlink_dump_start(struct sock *ssk, struct sk_buff *skb,
        return __netlink_dump_start(ssk, skb, nlh, control);
 }
 
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+                         struct user_namespace *ns, int cap);
+bool netlink_ns_capable(const struct sk_buff *skb,
+                       struct user_namespace *ns, int cap);
+bool netlink_capable(const struct sk_buff *skb, int cap);
+bool netlink_net_capable(const struct sk_buff *skb, int cap);
+
 #endif /* __LINUX_NETLINK_H */
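
The four declarations above back the capable() -> netlink_capable()/netlink_net_capable() conversions later in this series (audit, rtnetlink, decnet routing, can/gw, nfnetlink and others). A minimal sketch of the intended call pattern follows; foo_doit() and its subsystem are hypothetical, only the helper usage mirrors what the converted handlers in this release do:

/* Hypothetical netlink request handler (sketch, not from this patch set). */
static int foo_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
        /* Check the capabilities of the socket that sent skb rather than
         * those of current: an unprivileged process may be writing into a
         * netlink socket opened by (and inherited from) a privileged one.
         * netlink_capable() is the init-namespace variant; rtnetlink uses
         * the per-net-namespace netlink_net_capable() instead. */
        if (!netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;

        return 0;
}
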
index 89573a33ab3c43ee84cf629d84e5ff3d7f091206..2e99b8e08770c374b25350ec70584f0513254d1d 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/sched.h>               /* For struct task_struct.  */
 #include <linux/err.h>                 /* for IS_ERR_VALUE */
 #include <linux/bug.h>                 /* For BUG_ON.  */
+#include <linux/pid_namespace.h>       /* For task_active_pid_ns.  */
 #include <uapi/linux/ptrace.h>
 
 /*
@@ -128,6 +129,37 @@ static inline void ptrace_event(int event, unsigned long message)
        }
 }
 
+/**
+ * ptrace_event_pid - possibly stop for a ptrace event notification
+ * @event:     %PTRACE_EVENT_* value to report
+ * @pid:       process identifier for %PTRACE_GETEVENTMSG to return
+ *
+ * Check whether @event is enabled and, if so, report @event and @pid
+ * to the ptrace parent.  @pid is reported as the pid_t seen from
+ * the ptrace parent's pid namespace.
+ *
+ * Called without locks.
+ */
+static inline void ptrace_event_pid(int event, struct pid *pid)
+{
+       /*
+        * FIXME: There's a potential race if a ptracer in a different pid
+        * namespace than parent attaches between computing message below and
+        * when we acquire tasklist_lock in ptrace_stop().  If this happens,
+        * the ptracer will get a bogus pid from PTRACE_GETEVENTMSG.
+        */
+       unsigned long message = 0;
+       struct pid_namespace *ns;
+
+       rcu_read_lock();
+       ns = task_active_pid_ns(rcu_dereference(current->parent));
+       if (ns)
+               message = pid_nr_ns(pid, ns);
+       rcu_read_unlock();
+
+       ptrace_event(event, message);
+}
+
 /**
  * ptrace_init_task - initialize ptrace state for a new child
  * @child:             new child task
index 302ab805b0bbba23ed7f4b8892b201e7e28469ce..46cca4c06848346ca84753ac182526a4514ff277 100644 (file)
@@ -23,7 +23,7 @@ int sock_diag_check_cookie(void *sk, __u32 *cookie);
 void sock_diag_save_cookie(void *sk, __u32 *cookie);
 
 int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attr);
-int sock_diag_put_filterinfo(struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype);
 
 #endif
index 53f464d7cddcd6ce18925c521b99bfaefac66f2f..6ca347a0717efedf386b8ccaa318c5e34c7e643b 100644 (file)
@@ -178,16 +178,9 @@ static inline void inet_peer_refcheck(const struct inet_peer *p)
 /* can be called with or without local BH being disabled */
 static inline int inet_getid(struct inet_peer *p, int more)
 {
-       int old, new;
        more++;
        inet_peer_refcheck(p);
-       do {
-               old = atomic_read(&p->ip_id_count);
-               new = old + more;
-               if (!new)
-                       new = 1;
-       } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
-       return new;
+       return atomic_add_return(more, &p->ip_id_count) - more;
 }
 
 #endif /* _NET_INETPEER_H */
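
Worth noting about the hunk above: the old loop returned the post-increment value and special-cased zero, while atomic_add_return() returns the value after the addition, so subtracting `more` hands back the counter value from before the addition with no zero skip. A userspace model of the new arithmetic, with C11 atomics standing in for the kernel's atomic_t (the values are arbitrary), looks like this:

/* Userspace model of the simplified inet_getid() arithmetic. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int ip_id_count;

static int inet_getid_model(int more)
{
        more++;
        /* atomic_fetch_add() returns the old value, which is exactly
         * atomic_add_return(more, ...) - more in the kernel version. */
        return atomic_fetch_add(&ip_id_count, more);
}

int main(void)
{
        printf("%d\n", inet_getid_model(0));    /* 0, counter is now 1 */
        printf("%d\n", inet_getid_model(2));    /* 1, counter is now 4 */
        printf("%d\n", inet_getid_model(0));    /* 4, counter is now 5 */
        return 0;
}
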
index 8f32b779bc83f4a1b1d71adaf6ce3401bf049d7c..72f710d2f75a0ce6d6f0d4c38125a0af92a7cce5 100644 (file)
@@ -2248,6 +2248,11 @@ extern void sock_enable_timestamp(struct sock *sk, int flag);
 extern int sock_get_timestamp(struct sock *, struct timeval __user *);
 extern int sock_get_timestampns(struct sock *, struct timespec __user *);
 
+bool sk_ns_capable(const struct sock *sk,
+                  struct user_namespace *user_ns, int cap);
+bool sk_capable(const struct sock *sk, int cap);
+bool sk_net_capable(const struct sock *sk, int cap);
+
 /*
  *     Enable debug/info messages
  */
index 5bfe5136441c748de80c27992c94d6d60deb96bc..97cd9c3592f7d9623db9100b59ba0de492c5e617 100644 (file)
@@ -120,6 +120,8 @@ struct snd_card {
        int user_ctl_count;             /* count of all user controls */
        struct list_head controls;      /* all controls for this card */
        struct list_head ctl_files;     /* active control files */
+       struct mutex user_ctl_lock;     /* protects user controls against
+                                          concurrent access */
 
        struct snd_info_entry *proc_root;       /* root for soundcard specific files */
        struct snd_info_entry *proc_id; /* the card id */
index ffa2696d64dcfe9794ec87a83be887b0e2aab453..a63529ab9fd7365cbd292a78247286f49ff8174e 100644 (file)
@@ -50,6 +50,7 @@ int   transport_subsystem_register(struct se_subsystem_api *);
 void   transport_subsystem_release(struct se_subsystem_api *);
 
 void   target_complete_cmd(struct se_cmd *, u8);
+void   target_complete_cmd_with_length(struct se_cmd *, u8, int);
 
 sense_reason_t spc_parse_cdb(struct se_cmd *cmd, unsigned int *size);
 sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd);
index 5759810e1c1b9768f67f8e703c625b7ce9d76e94..21eed488783f2d4d475dbb0282fb5f2ddae966d1 100644 (file)
@@ -80,7 +80,7 @@ struct snd_compr_tstamp {
 struct snd_compr_avail {
        __u64 avail;
        struct snd_compr_tstamp tstamp;
-};
+} __attribute__((packed));
 
 enum snd_compr_direction {
        SND_COMPRESS_PLAYBACK = 0,
index 6def25f1b351140fa2258bb164eb284dde01445f..a6c632757e57a3223c8daebf1219d8d94c218120 100644 (file)
@@ -593,13 +593,13 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
        case AUDIT_TTY_SET:
        case AUDIT_TRIM:
        case AUDIT_MAKE_EQUIV:
-               if (!capable(CAP_AUDIT_CONTROL))
+               if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
                        err = -EPERM;
                break;
        case AUDIT_USER:
        case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
        case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
-               if (!capable(CAP_AUDIT_WRITE))
+               if (!netlink_capable(skb, CAP_AUDIT_WRITE))
                        err = -EPERM;
                break;
        default:  /* bad msg */
index ff7be9dac4c15ee2c70e8909ad6ce0bbfd3ea2e5..270c1dab674a59df3ea52c48e05ad9cca5246166 100644 (file)
@@ -1607,10 +1607,12 @@ long do_fork(unsigned long clone_flags,
         */
        if (!IS_ERR(p)) {
                struct completion vfork;
+               struct pid *pid;
 
                trace_sched_process_fork(current, p);
 
-               nr = task_pid_vnr(p);
+               pid = get_task_pid(p, PIDTYPE_PID);
+               nr = pid_vnr(pid);
 
                if (clone_flags & CLONE_PARENT_SETTID)
                        put_user(nr, parent_tidptr);
@@ -1625,12 +1627,14 @@ long do_fork(unsigned long clone_flags,
 
                /* forking complete and child started to run, tell ptracer */
                if (unlikely(trace))
-                       ptrace_event(trace, nr);
+                       ptrace_event_pid(trace, pid);
 
                if (clone_flags & CLONE_VFORK) {
                        if (!wait_for_vfork_done(p, &vfork))
-                               ptrace_event(PTRACE_EVENT_VFORK_DONE, nr);
+                               ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
                }
+
+               put_pid(pid);
        } else {
                nr = PTR_ERR(p);
        }
index 8815abfdf2cb43b4a6f0de6540557cf19948cfc7..a79d267b64ecae0c25b11f99aa3b1c4404412d9a 100644 (file)
@@ -861,8 +861,8 @@ static int irq_thread(void *data)
                irq_thread_check_affinity(desc, action);
 
                action_ret = handler_fn(desc, action);
-               if (!noirqdebug)
-                       note_interrupt(action->irq, desc, action_ret);
+               if (action_ret == IRQ_HANDLED)
+                       atomic_inc(&desc->threads_handled);
 
                wake_threads_waitq(desc);
        }
index 7b5f012bde9d73ff246652fe48d49e5b1aebc12c..febcee3c2aa9270554e9fa83dc02bfbf74a8f67f 100644 (file)
@@ -265,21 +265,119 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
        return action && (action->flags & IRQF_IRQPOLL);
 }
 
+#define SPURIOUS_DEFERRED      0x80000000
+
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
                    irqreturn_t action_ret)
 {
        if (desc->istate & IRQS_POLL_INPROGRESS)
                return;
 
-       /* we get here again via the threaded handler */
-       if (action_ret == IRQ_WAKE_THREAD)
-               return;
-
        if (bad_action_ret(action_ret)) {
                report_bad_irq(irq, desc, action_ret);
                return;
        }
 
+       /*
+        * We cannot call note_interrupt from the threaded handler
+        * because we need to look at the compound of all handlers
+        * (primary and threaded). Aside from that, in the threaded
+        * shared case we have no serialization against an incoming
+        * hardware interrupt while we are dealing with a threaded
+        * result.
+        *
+        * So in case a thread is woken, we just note the fact and
+        * defer the analysis to the next hardware interrupt.
+        *
+        * The threaded handlers store whether they successfully
+        * handled an interrupt and we check whether that number
+        * changed versus the last invocation.
+        *
+        * We could handle all interrupts with the delayed-by-one
+        * mechanism, but for the non-forced threaded case we'd just
+        * add pointless overhead to the straight hardirq interrupts
+        * for the sake of a few lines less code.
+        */
+       if (action_ret & IRQ_WAKE_THREAD) {
+               /*
+                * There is a thread woken. Check whether one of the
+                * shared primary handlers returned IRQ_HANDLED. If
+                * not we defer the spurious detection to the next
+                * interrupt.
+                */
+               if (action_ret == IRQ_WAKE_THREAD) {
+                       int handled;
+                       /*
+                        * We use bit 31 of threads_handled_last to
+                        * denote the deferred spurious detection
+                        * active. No locking necessary as
+                        * threads_handled_last is only accessed here
+                        * and we have the guarantee that hard
+                        * interrupts are not reentrant.
+                        */
+                       if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
+                               desc->threads_handled_last |= SPURIOUS_DEFERRED;
+                               return;
+                       }
+                       /*
+                        * Check whether one of the threaded handlers
+                        * returned IRQ_HANDLED since the last
+                        * interrupt happened.
+                        *
+                        * For simplicity we just set bit 31, as it is
+                        * set in threads_handled_last as well. So we
+                        * avoid extra masking. And we really do not
+                        * care about the high bits of the handled
+                        * count. We just care about the count being
+                        * different than the one we saw before.
+                        */
+                       handled = atomic_read(&desc->threads_handled);
+                       handled |= SPURIOUS_DEFERRED;
+                       if (handled != desc->threads_handled_last) {
+                               action_ret = IRQ_HANDLED;
+                               /*
+                                * Note: We keep the SPURIOUS_DEFERRED
+                                * bit set. We are handling the
+                                * previous invocation right now.
+                                * Keep it for the current one, so the
+                                * next hardware interrupt will
+                                * account for it.
+                                */
+                               desc->threads_handled_last = handled;
+                       } else {
+                               /*
+                                * None of the threaded handlers felt
+                                * responsible for the last interrupt
+                                *
+                                * We keep the SPURIOUS_DEFERRED bit
+                                * set in threads_handled_last as we
+                                * need to account for the current
+                                * interrupt as well.
+                                */
+                               action_ret = IRQ_NONE;
+                       }
+               } else {
+                       /*
+                        * One of the primary handlers returned
+                        * IRQ_HANDLED. So we don't care about the
+                        * threaded handlers on the same line. Clear
+                        * the deferred detection bit.
+                        *
+                        * In theory we could/should check whether the
+                        * deferred bit is set and take the result of
+                        * the previous run into account here as
+                        * well. But it's really not worth the
+                        * trouble. If every other interrupt is
+                        * handled we never trigger the spurious
+                        * detector. And if this is just the one out
+                        * of 100k unhandled ones which is handled
+                                * then we merely delay the spurious detection
+                        * by one hard interrupt. Not a real problem.
+                        */
+                       desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
+               }
+       }
+
        if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
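
The deferral state machine added above is small enough to replay outside the kernel. The sketch below is plain C with no kernel APIs: thread_handled() stands in for the irq thread bumping desc->threads_handled, and note_wake_thread() applies the same bit-31 bookkeeping to decide whether the previous wakeup counted as handled. All names are local to the sketch.

/* Userspace model of the deferred spurious-IRQ accounting. */
#include <stdio.h>

#define SPURIOUS_DEFERRED       0x80000000u

static unsigned int threads_handled;            /* atomic_t in the kernel */
static unsigned int threads_handled_last;

/* The "irq thread" calls this when its handler returns IRQ_HANDLED. */
static void thread_handled(void)
{
        threads_handled++;
}

/* Returns 1 if the previous wakeup is judged handled, 0 if it counts as
 * spurious, -1 if the decision is deferred to the next interrupt. */
static int note_wake_thread(void)
{
        unsigned int handled;

        if (!(threads_handled_last & SPURIOUS_DEFERRED)) {
                threads_handled_last |= SPURIOUS_DEFERRED;
                return -1;      /* first wakeup: just arm the deferral */
        }

        handled = threads_handled | SPURIOUS_DEFERRED;
        if (handled != threads_handled_last) {
                threads_handled_last = handled;
                return 1;       /* the thread made progress since last time */
        }
        return 0;               /* no progress: treated like IRQ_NONE */
}

int main(void)
{
        printf("%d\n", note_wake_thread());     /* -1: deferred        */
        thread_handled();
        printf("%d\n", note_wake_thread());     /* 1: previous handled */
        printf("%d\n", note_wake_thread());     /* 0: spurious         */
        return 0;
}
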
index 4251374578bc96a762d336cfb8ecc08cde699dd1..67f7a2d2efbcad60cf0e698fc33bcb12d1b92c10 100644 (file)
@@ -720,8 +720,10 @@ static bool can_stop_idle_tick(int cpu, struct tick_sched *ts)
                return false;
        }
 
-       if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE))
+       if (unlikely(ts->nohz_mode == NOHZ_MODE_INACTIVE)) {
+               ts->sleep_length = (ktime_t) { .tv64 = NSEC_PER_SEC/HZ };
                return false;
+       }
 
        if (need_resched())
                return false;
index cca4b9302a710c5ef0e1a66355302040ada9d3d7..a3bfde8ad60e24c2e043c358048d18464705f072 100644 (file)
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -250,7 +250,7 @@ static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa,
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
 
                        /* if already at the top layer, we need to grow */
-                       if (id >= 1 << (idp->layers * IDR_BITS)) {
+                       if (id > idr_max(idp->layers)) {
                                *starting_id = id;
                                return -EAGAIN;
                        }
@@ -829,12 +829,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
        if (!p)
                return ERR_PTR(-EINVAL);
 
-       n = (p->layer+1) * IDR_BITS;
-
-       if (id >= (1 << n))
+       if (id > idr_max(p->layer + 1))
                return ERR_PTR(-EINVAL);
 
-       n -= IDR_BITS;
+       n = p->layer * IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
index 569985d522d518a8992929d5924b6a5062ff9e93..8563081e8da38fb81e0335d2589c9fcebcd81266 100644 (file)
 #include <linux/lzo.h>
 #include "lzodefs.h"
 
-#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x)                                  \
+       (((size_t)(ip_end - ip) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x)                                  \
+       (((size_t)(op_end - op) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_IP(t, x))                     \
+                       goto input_overrun;             \
+       } while (0)
+
+#define NEED_OP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_OP(t, x))                     \
+                       goto output_overrun;            \
+       } while (0)
+
+#define TEST_LB(m_pos)                                 \
+       do {                                            \
+               if ((m_pos) < out)                      \
+                       goto lookbehind_overrun;        \
+       } while (0)
 
 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                          unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                                        while (unlikely(*ip == 0)) {
                                                t += 255;
                                                ip++;
-                                               NEED_IP(1);
+                                               NEED_IP(1, 0);
                                        }
                                        t += 15 + *ip++;
                                }
                                t += 3;
 copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-                               if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+                               if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
                                        const unsigned char *ie = ip + t;
                                        unsigned char *oe = op + t;
                                        do {
@@ -81,8 +101,8 @@ copy_literal_run:
                                } else
 #endif
                                {
-                                       NEED_OP(t);
-                                       NEED_IP(t + 3);
+                                       NEED_OP(t, 0);
+                                       NEED_IP(t, 3);
                                        do {
                                                *op++ = *ip++;
                                        } while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
                                m_pos -= t >> 2;
                                m_pos -= *ip++ << 2;
                                TEST_LB(m_pos);
-                               NEED_OP(2);
+                               NEED_OP(2, 0);
                                op[0] = m_pos[0];
                                op[1] = m_pos[1];
                                op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 31 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        m_pos = op - 1;
                        next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 7 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        next = get_unaligned_le16(ip);
                        ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
                if (op - m_pos >= 8) {
                        unsigned char *oe = op + t;
-                       if (likely(HAVE_OP(t + 15))) {
+                       if (likely(HAVE_OP(t, 15))) {
                                do {
                                        COPY8(op, m_pos);
                                        op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
                                        m_pos += 8;
                                } while (op < oe);
                                op = oe;
-                               if (HAVE_IP(6)) {
+                               if (HAVE_IP(6, 0)) {
                                        state = next;
                                        COPY4(op, ip);
                                        op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
                                        continue;
                                }
                        } else {
-                               NEED_OP(t);
+                               NEED_OP(t, 0);
                                do {
                                        *op++ = *m_pos++;
                                } while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
 #endif
                {
                        unsigned char *oe = op + t;
-                       NEED_OP(t);
+                       NEED_OP(t, 0);
                        op[0] = m_pos[0];
                        op[1] = m_pos[1];
                        op += 2;
@@ -194,15 +214,15 @@ match_next:
                state = next;
                t = next;
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-               if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+               if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
                        COPY4(op, ip);
                        op += t;
                        ip += t;
                } else
 #endif
                {
-                       NEED_IP(t + 3);
-                       NEED_OP(t);
+                       NEED_IP(t, 3);
+                       NEED_OP(t, 0);
                        while (t > 0) {
                                *op++ = *ip++;
                                t--;
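
The point of passing the run length and the slack to HAVE_IP()/HAVE_OP() separately is that the macro can now reject a wrapped sum: the old call sites handed in an already-computed `t + 15`, so an absurd `t` from corrupt input could overflow and sail past the bounds check. A standalone demonstration of that failure mode (no LZO involved; the buffer size and the values of t and x are arbitrary):

/* Shows why the new HAVE_IP()/HAVE_OP() macros also check that t + x did
 * not wrap around. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
        unsigned char buf[64];
        const unsigned char *ip = buf;
        const unsigned char *ip_end = buf + sizeof(buf);

        size_t t = (size_t)-8;  /* huge "run length" from corrupt input */
        size_t x = 15;          /* slack needed by the unrolled copy loop */

        /* Old-style test: t + x wraps around to 7, so 64 >= 7 passes. */
        int old_ok = (size_t)(ip_end - ip) >= (size_t)(t + x);

        /* New-style test: additionally require that the sum did not wrap. */
        int new_ok = ((size_t)(ip_end - ip) >= (size_t)(t + x)) &&
                     ((t + x) >= t) && ((t + x) >= x);

        printf("old check: %d, new check: %d\n", old_ok, new_ok);       /* 1 0 */
        return 0;
}
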
index fc6754720ced66f80343a7331044b89f42521a42..10ad042d01be3a43b24500525127c1bbba7a7724 100644 (file)
@@ -201,8 +201,8 @@ int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
        }
 
        if (unlikely(rem > 0))
-               printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
-                      "attributes.\n", rem);
+               pr_warn_ratelimited("netlink: %d bytes leftover after parsing attributes in process `%s'.\n",
+                                   rem, current->comm);
 
        err = 0;
 errout:
index 4f8548abd6ee0c985b676302ed40120344ec53a3..603f1fa1b7a3aaffc6b1f198ef62e8a4f1391e73 100644 (file)
@@ -208,9 +208,9 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
 #endif
        si.si_addr_lsb = compound_trans_order(compound_head(page)) + PAGE_SHIFT;
 
-       if ((flags & MF_ACTION_REQUIRED) && t == current) {
+       if ((flags & MF_ACTION_REQUIRED) && t->mm == current->mm) {
                si.si_code = BUS_MCEERR_AR;
-               ret = force_sig_info(SIGBUS, &si, t);
+               ret = force_sig_info(SIGBUS, &si, current);
        } else {
                /*
                 * Don't use force here, it's convenient if the signal
@@ -382,10 +382,12 @@ static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
        }
 }
 
-static int task_early_kill(struct task_struct *tsk)
+static int task_early_kill(struct task_struct *tsk, int force_early)
 {
        if (!tsk->mm)
                return 0;
+       if (force_early)
+               return 1;
        if (tsk->flags & PF_MCE_PROCESS)
                return !!(tsk->flags & PF_MCE_EARLY);
        return sysctl_memory_failure_early_kill;
@@ -395,7 +397,7 @@ static int task_early_kill(struct task_struct *tsk)
  * Collect processes when the error hit an anonymous page.
  */
 static void collect_procs_anon(struct page *page, struct list_head *to_kill,
-                             struct to_kill **tkc)
+                             struct to_kill **tkc, int force_early)
 {
        struct vm_area_struct *vma;
        struct task_struct *tsk;
@@ -411,7 +413,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
 
-               if (!task_early_kill(tsk))
+               if (!task_early_kill(tsk, force_early))
                        continue;
                anon_vma_interval_tree_foreach(vmac, &av->rb_root,
                                               pgoff, pgoff) {
@@ -430,7 +432,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
  * Collect processes when the error hit a file mapped page.
  */
 static void collect_procs_file(struct page *page, struct list_head *to_kill,
-                             struct to_kill **tkc)
+                             struct to_kill **tkc, int force_early)
 {
        struct vm_area_struct *vma;
        struct task_struct *tsk;
@@ -441,7 +443,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        for_each_process(tsk) {
                pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
 
-               if (!task_early_kill(tsk))
+               if (!task_early_kill(tsk, force_early))
                        continue;
 
                vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff,
@@ -467,7 +469,8 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
  * First preallocate one tokill structure outside the spin locks,
  * so that we can kill at least one process reasonably reliable.
  */
-static void collect_procs(struct page *page, struct list_head *tokill)
+static void collect_procs(struct page *page, struct list_head *tokill,
+                               int force_early)
 {
        struct to_kill *tk;
 
@@ -478,9 +481,9 @@ static void collect_procs(struct page *page, struct list_head *tokill)
        if (!tk)
                return;
        if (PageAnon(page))
-               collect_procs_anon(page, tokill, &tk);
+               collect_procs_anon(page, tokill, &tk, force_early);
        else
-               collect_procs_file(page, tokill, &tk);
+               collect_procs_file(page, tokill, &tk, force_early);
        kfree(tk);
 }
 
@@ -965,7 +968,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
         * there's nothing that can be done.
         */
        if (kill)
-               collect_procs(ppage, &tokill);
+               collect_procs(ppage, &tokill, flags & MF_ACTION_REQUIRED);
 
        ret = try_to_unmap(ppage, ttu);
        if (ret != SWAP_SUCCESS)
index b730a4409be6d1a404ac08e6d4e54d33dfa9a171..705bfc8e6fcdd2059f815edf6bcd892a6cfe0630 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -103,6 +103,7 @@ static inline void anon_vma_free(struct anon_vma *anon_vma)
         * LOCK should suffice since the actual taking of the lock must
         * happen _before_ what follows.
         */
+       might_sleep();
        if (rwsem_is_locked(&anon_vma->root->rwsem)) {
                anon_vma_lock_write(anon_vma);
                anon_vma_unlock_write(anon_vma);
@@ -426,8 +427,9 @@ struct anon_vma *page_get_anon_vma(struct page *page)
         * above cannot corrupt).
         */
        if (!page_mapped(page)) {
+               rcu_read_unlock();
                put_anon_vma(anon_vma);
-               anon_vma = NULL;
+               return NULL;
        }
 out:
        rcu_read_unlock();
@@ -477,9 +479,9 @@ struct anon_vma *page_lock_anon_vma_read(struct page *page)
        }
 
        if (!page_mapped(page)) {
+               rcu_read_unlock();
                put_anon_vma(anon_vma);
-               anon_vma = NULL;
-               goto out;
+               return NULL;
        }
 
        /* we pinned the anon_vma, its safe to sleep */
index 43ddef3cf44fedd96ec16fb288c6ff940b47a93c..4e89500391dcbeb6a2ac13e0979e788e9caf0593 100644 (file)
@@ -2286,10 +2286,17 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
 
        for (i = 0; i <= ZONE_NORMAL; i++) {
                zone = &pgdat->node_zones[i];
+               if (!populated_zone(zone))
+                       continue;
+
                pfmemalloc_reserve += min_wmark_pages(zone);
                free_pages += zone_page_state(zone, NR_FREE_PAGES);
        }
 
+       /* If there are no reserves (unexpected config) then do not throttle */
+       if (!pfmemalloc_reserve)
+               return true;
+
        wmark_ok = free_pages > pfmemalloc_reserve / 2;
 
        /* kswapd must be awake if processes are being throttled */
@@ -2314,9 +2321,9 @@ static bool pfmemalloc_watermark_ok(pg_data_t *pgdat)
 static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
                                        nodemask_t *nodemask)
 {
+       struct zoneref *z;
        struct zone *zone;
-       int high_zoneidx = gfp_zone(gfp_mask);
-       pg_data_t *pgdat;
+       pg_data_t *pgdat = NULL;
 
        /*
         * Kernel threads should not be throttled as they may be indirectly
@@ -2335,10 +2342,34 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
        if (fatal_signal_pending(current))
                goto out;
 
-       /* Check if the pfmemalloc reserves are ok */
-       first_zones_zonelist(zonelist, high_zoneidx, NULL, &zone);
-       pgdat = zone->zone_pgdat;
-       if (pfmemalloc_watermark_ok(pgdat))
+       /*
+        * Check if the pfmemalloc reserves are ok by finding the first node
+        * with a usable ZONE_NORMAL or lower zone. The expectation is that
+        * GFP_KERNEL will be required for allocating network buffers when
+        * swapping over the network so ZONE_HIGHMEM is unusable.
+        *
+        * Throttling is based on the first usable node and throttled processes
+        * wait on a queue until kswapd makes progress and wakes them. There
+        * is an affinity then between processes waking up and where reclaim
+        * progress has been made assuming the process wakes on the same node.
+        * More importantly, processes running on remote nodes will not compete
+        * for remote pfmemalloc reserves and processes on different nodes
+        * should make reasonable progress.
+        */
+       for_each_zone_zonelist_nodemask(zone, z, zonelist,
+                                       gfp_mask, nodemask) {
+               if (zone_idx(zone) > ZONE_NORMAL)
+                       continue;
+
+               /* Throttle based on the first usable node */
+               pgdat = zone->zone_pgdat;
+               if (pfmemalloc_watermark_ok(pgdat))
+                       goto out;
+               break;
+       }
+
+       /* If no zone was usable by the allocation flags then do not throttle */
+       if (!pgdat)
                goto out;
 
        /* Account for the throttling */
@@ -3059,7 +3090,10 @@ static int kswapd(void *p)
                }
        }
 
+       tsk->flags &= ~(PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD);
        current->reclaim_state = NULL;
+       lockdep_clear_current_reclaim_state();
+
        return 0;
 }
 
index 36fed40c162cff5361c9dbdf9cfd5f313e47743a..302d29b3744d4c5250778ac784a432d369dde977 100644 (file)
@@ -949,13 +949,16 @@ static struct l2cap_chan *l2cap_sock_new_connection_cb(struct l2cap_chan *chan)
        /* Check for backlog size */
        if (sk_acceptq_is_full(parent)) {
                BT_DBG("backlog full %d", parent->sk_ack_backlog);
+               release_sock(parent);
                return NULL;
        }
 
        sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP,
                              GFP_ATOMIC);
-       if (!sk)
+       if (!sk) {
+               release_sock(parent);
                return NULL;
+       }
 
        bt_sock_reclassify_lock(sk, BTPROTO_L2CAP);
 
index 3ee690e8c7d32354a525ad398291b7b7c5155215..de25455b4e3e827bfae94b8a835a69c3191fe569 100644 (file)
@@ -784,7 +784,7 @@ static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh)
        struct cgw_job *gwj;
        int err = 0;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (nlmsg_len(nlh) < sizeof(*r))
@@ -876,7 +876,7 @@ static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh)
        struct can_can_gw ccgw;
        int err = 0;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (nlmsg_len(nlh) < sizeof(*r))
index 56383a3e5d715627010a3c13841d240390644527..cca7ae0ba9152cdb1d1104e016cc2aab6db17a54 100644 (file)
@@ -5827,6 +5827,9 @@ EXPORT_SYMBOL(unregister_netdevice_queue);
 /**
  *     unregister_netdevice_many - unregister many devices
  *     @head: list of devices
+ *
+ *  Note: As most callers use a stack-allocated list_head,
+ *  we force a list_del() to make sure the stack won't be corrupted later.
  */
 void unregister_netdevice_many(struct list_head *head)
 {
@@ -5836,6 +5839,7 @@ void unregister_netdevice_many(struct list_head *head)
                rollback_registered_many(head);
                list_for_each_entry(dev, head, unreg_list)
                        net_set_todo(dev);
+               list_del(head);
        }
 }
 EXPORT_SYMBOL(unregister_netdevice_many);
@@ -6252,7 +6256,6 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
                }
        }
        unregister_netdevice_many(&dev_kill_list);
-       list_del(&dev_kill_list);
        rtnl_unlock();
 }
 
index 87ec574ffac8e82c8b482cd9cd138a0362a05c7f..ae43dd807bb2b21a03ced8c874461467837b5303 100644 (file)
@@ -1044,6 +1044,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        struct nlattr *tb[IFLA_MAX+1];
        u32 ext_filter_mask = 0;
        int err;
+       int hdrlen;
 
        s_h = cb->args[0];
        s_idx = cb->args[1];
@@ -1051,8 +1052,17 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
        rcu_read_lock();
        cb->seq = net->dev_base_seq;
 
-       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
-                       ifla_policy) >= 0) {
+       /* A hack to preserve kernel<->userspace interface.
+        * The correct header is ifinfomsg. It is consistent with rtnl_getlink.
+        * However, before Linux v3.9 the code here assumed rtgenmsg and that's
+        * what iproute2 < v3.9.0 used.
+        * We can detect the old iproute2. Even including the IFLA_EXT_MASK
+        * attribute, its netlink message is shorter than struct ifinfomsg.
+        */
+       hdrlen = nlmsg_len(cb->nlh) < sizeof(struct ifinfomsg) ?
+                sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
+
+       if (nlmsg_parse(cb->nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
 
                if (tb[IFLA_EXT_MASK])
                        ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
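
The heuristic in the hunk above hinges on struct rtgenmsg being far smaller than struct ifinfomsg, so any request whose payload is shorter than ifinfomsg must have been built against the old layout. A quick userspace check of the two sizes the comment relies on (built against the usual UAPI headers; it prints 1 and 16 on common configurations):

/* Prints the header sizes that rtnl_dump_ifinfo()/rtnl_calcit() choose
 * between when parsing a dump request. */
#include <stdio.h>
#include <linux/rtnetlink.h>

int main(void)
{
        printf("sizeof(struct rtgenmsg)  = %zu\n", sizeof(struct rtgenmsg));
        printf("sizeof(struct ifinfomsg) = %zu\n", sizeof(struct ifinfomsg));
        return 0;
}
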
@@ -1294,7 +1304,8 @@ static int do_set_master(struct net_device *dev, int ifindex)
        return 0;
 }
 
-static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
+static int do_setlink(const struct sk_buff *skb,
+                     struct net_device *dev, struct ifinfomsg *ifm,
                      struct nlattr **tb, char *ifname, int modified)
 {
        const struct net_device_ops *ops = dev->netdev_ops;
@@ -1306,7 +1317,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
                        err = PTR_ERR(net);
                        goto errout;
                }
-               if (!ns_capable(net->user_ns, CAP_NET_ADMIN)) {
+               if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
                        err = -EPERM;
                        goto errout;
                }
@@ -1560,7 +1571,7 @@ static int rtnl_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (err < 0)
                goto errout;
 
-       err = do_setlink(dev, ifm, tb, ifname, 0);
+       err = do_setlink(skb, dev, ifm, tb, ifname, 0);
 errout:
        return err;
 }
@@ -1600,7 +1611,6 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
 
        ops->dellink(dev, &list_kill);
        unregister_netdevice_many(&list_kill);
-       list_del(&list_kill);
        return 0;
 }
 
@@ -1678,7 +1688,8 @@ err:
 }
 EXPORT_SYMBOL(rtnl_create_link);
 
-static int rtnl_group_changelink(struct net *net, int group,
+static int rtnl_group_changelink(const struct sk_buff *skb,
+               struct net *net, int group,
                struct ifinfomsg *ifm,
                struct nlattr **tb)
 {
@@ -1687,7 +1698,7 @@ static int rtnl_group_changelink(struct net *net, int group,
 
        for_each_netdev(net, dev) {
                if (dev->group == group) {
-                       err = do_setlink(dev, ifm, tb, NULL, 0);
+                       err = do_setlink(skb, dev, ifm, tb, NULL, 0);
                        if (err < 0)
                                return err;
                }
@@ -1789,12 +1800,12 @@ replay:
                                modified = 1;
                        }
 
-                       return do_setlink(dev, ifm, tb, ifname, modified);
+                       return do_setlink(skb, dev, ifm, tb, ifname, modified);
                }
 
                if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
                        if (ifm->ifi_index == 0 && tb[IFLA_GROUP])
-                               return rtnl_group_changelink(net,
+                               return rtnl_group_changelink(skb, net,
                                                nla_get_u32(tb[IFLA_GROUP]),
                                                ifm, tb);
                        return -ENODEV;
@@ -1906,9 +1917,13 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *tb[IFLA_MAX+1];
        u32 ext_filter_mask = 0;
        u16 min_ifinfo_dump_size = 0;
+       int hdrlen;
+
+       /* Same kernel<->userspace interface hack as in rtnl_dump_ifinfo. */
+       hdrlen = nlmsg_len(nlh) < sizeof(struct ifinfomsg) ?
+                sizeof(struct rtgenmsg) : sizeof(struct ifinfomsg);
 
-       if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
-                       ifla_policy) >= 0) {
+       if (nlmsg_parse(nlh, hdrlen, tb, IFLA_MAX, ifla_policy) >= 0) {
                if (tb[IFLA_EXT_MASK])
                        ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]);
        }
@@ -2179,7 +2194,7 @@ static int rtnl_fdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err = -EINVAL;
        __u8 *addr;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
@@ -2635,7 +2650,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        sz_idx = type>>2;
        kind = type&3;
 
-       if (kind != 2 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
+       if (kind != 2 && !netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
index 66f722b5052a3a6647872730d6b34bc4be5f9dab..9f84a5f7404d4b2ae38f78a94152ef72010c50ef 100644 (file)
@@ -2844,6 +2844,8 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
                skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
 
                while (pos < offset + len && i < nfrags) {
+                       if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
+                               goto err;
                        *frag = skb_shinfo(skb)->frags[i];
                        __skb_frag_ref(frag);
                        size = skb_frag_size(frag);
index d743099250f40fa5e62b3a72cfc578dcc31d51a1..af65d17517b8e563d9ac462e7c3ab6c72a464953 100644 (file)
 static DEFINE_MUTEX(proto_list_mutex);
 static LIST_HEAD(proto_list);
 
+/**
+ * sk_ns_capable - General socket capability test
+ * @sk: Socket to use a capability on or through
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap in the
+ * user namespace @user_ns when the socket was created and that the current
+ * process has it as well.
+ */
+bool sk_ns_capable(const struct sock *sk,
+                  struct user_namespace *user_ns, int cap)
+{
+       return file_ns_capable(sk->sk_socket->file, user_ns, cap) &&
+               ns_capable(user_ns, cap);
+}
+EXPORT_SYMBOL(sk_ns_capable);
+
+/**
+ * sk_capable - Socket global capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The global capability to use
+ *
+ * Test to see if the opener of the socket had the global capability @cap
+ * when the socket was created and that the current process has it as well.
+ */
+bool sk_capable(const struct sock *sk, int cap)
+{
+       return sk_ns_capable(sk, &init_user_ns, cap);
+}
+EXPORT_SYMBOL(sk_capable);
+
+/**
+ * sk_net_capable - Network namespace socket capability test
+ * @sk: Socket to use a capability on or through
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket had the capability @cap over the
+ * network namespace the socket is a member of when the socket was created
+ * and that the current process has it as well.
+ */
+bool sk_net_capable(const struct sock *sk, int cap)
+{
+       return sk_ns_capable(sk, sock_net(sk)->user_ns, cap);
+}
+EXPORT_SYMBOL(sk_net_capable);
+
+
 #ifdef CONFIG_MEMCG_KMEM
 int mem_cgroup_sockets_init(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
 {
index 6a7fae228634a50c7b1d997fa72d6174d6592fdd..c38e7a2b5a8ee094fe8629e02c983161de0a2cd1 100644 (file)
@@ -49,7 +49,7 @@ int sock_diag_put_meminfo(struct sock *sk, struct sk_buff *skb, int attrtype)
 }
 EXPORT_SYMBOL_GPL(sock_diag_put_meminfo);
 
-int sock_diag_put_filterinfo(struct sock *sk,
+int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
                             struct sk_buff *skb, int attrtype)
 {
        struct nlattr *attr;
@@ -57,7 +57,7 @@ int sock_diag_put_filterinfo(struct sock *sk,
        unsigned int len;
        int err = 0;
 
-       if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN)) {
+       if (!may_report_filterinfo) {
                nla_reserve(skb, attrtype, 0);
                return 0;
        }
index 40d5829ed36aaa6945c9c4c2056325b68ab64c03..1074ffb6d533993dd0332c04558192ed9d8373a8 100644 (file)
@@ -1670,7 +1670,7 @@ static int dcb_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlmsghdr *reply_nlh = NULL;
        const struct reply_func *fn;
 
-       if ((nlh->nlmsg_type == RTM_SETDCB) && !capable(CAP_NET_ADMIN))
+       if ((nlh->nlmsg_type == RTM_SETDCB) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        ret = nlmsg_parse(nlh, sizeof(*dcb), tb, DCB_ATTR_MAX,
index 7d9197063ebb98beee6c778b25c5e675ecedfb4b..b5e52100a89a1ab3ea24b5e697339e7db478218c 100644 (file)
@@ -573,7 +573,7 @@ static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct dn_ifaddr __rcu **ifap;
        int err = -EINVAL;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
@@ -617,7 +617,7 @@ static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct dn_ifaddr *ifa;
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
index 57dc159245ecfff38e318626cf0ea1ffa9db1cae..d332aefb0846f86a11d924e3e1e7ad23e279dda2 100644 (file)
@@ -505,7 +505,7 @@ static int dn_fib_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *attrs[RTA_MAX+1];
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
@@ -530,7 +530,7 @@ static int dn_fib_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct nlattr *attrs[RTA_MAX+1];
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (!net_eq(net, &init_net))
index 2a7efe388344fdd37b10487c50ba3d712eedbf8a..f3dc69a41d63446d53bb24925c9dad8bdbef5e47 100644 (file)
@@ -107,7 +107,7 @@ static inline void dnrmg_receive_user_skb(struct sk_buff *skb)
        if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
                return;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                RCV_SKB_FAIL(-EPERM);
 
        /* Eventually we might send routing messages too */
index 19e36376d2a083b5c8b307208ebd17ade086dcaa..5f3dc1df04bf28a8bc7b993cff545f8beff08192 100644 (file)
@@ -86,18 +86,26 @@ out:
 }
 EXPORT_SYMBOL(ip4_datagram_connect);
 
+/* Because the UDP xmit path can manipulate sk_dst_cache without holding
+ * the socket lock, we need to use sk_dst_set() here,
+ * even if we own the socket lock.
+ */
 void ip4_datagram_release_cb(struct sock *sk)
 {
        const struct inet_sock *inet = inet_sk(sk);
        const struct ip_options_rcu *inet_opt;
        __be32 daddr = inet->inet_daddr;
+       struct dst_entry *dst;
        struct flowi4 fl4;
        struct rtable *rt;
 
-       if (! __sk_dst_get(sk) || __sk_dst_check(sk, 0))
-               return;
-
        rcu_read_lock();
+
+       dst = __sk_dst_get(sk);
+       if (!dst || !dst->obsolete || dst->ops->check(dst, 0)) {
+               rcu_read_unlock();
+               return;
+       }
        inet_opt = rcu_dereference(inet->inet_opt);
        if (inet_opt && inet_opt->opt.srr)
                daddr = inet_opt->opt.faddr;
@@ -105,8 +113,10 @@ void ip4_datagram_release_cb(struct sock *sk)
                                   inet->inet_saddr, inet->inet_dport,
                                   inet->inet_sport, sk->sk_protocol,
                                   RT_CONN_FLAGS(sk), sk->sk_bound_dev_if);
-       if (!IS_ERR(rt))
-               __sk_dst_set(sk, &rt->dst);
+
+       dst = !IS_ERR(rt) ? &rt->dst : NULL;
+       sk_dst_set(sk, dst);
+
        rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(ip4_datagram_release_cb);
index f5cc7b3315113a5bef605141afe88e4cad2be1a9..897b784e9c0582bfaaf7d69d830fddaf1c795492 100644 (file)
@@ -149,13 +149,13 @@ static int ipip_err(struct sk_buff *skb, u32 info)
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->dev->ifindex, 0, IPPROTO_IPIP, 0);
+                                t->parms.link, 0, IPPROTO_IPIP, 0);
                err = 0;
                goto out;
        }
 
        if (type == ICMP_REDIRECT) {
-               ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+               ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
                              IPPROTO_IPIP, 0);
                err = 0;
                goto out;
@@ -483,4 +483,5 @@ static void __exit ipip_fini(void)
 module_init(ipip_init);
 module_exit(ipip_fini);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ipip");
 MODULE_ALIAS_NETDEV("tunl0");
index e15d330919af97e51c2b3fdb8bbadad9bda6a6d5..ba7d2b7ad9f9910434cb2e6200c875ef95117e66 100644 (file)
@@ -2720,13 +2720,12 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
        bool recovered = !before(tp->snd_una, tp->high_seq);
 
        if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
-               if (flag & FLAG_ORIG_SACK_ACKED) {
-                       /* Step 3.b. A timeout is spurious if not all data are
-                        * lost, i.e., never-retransmitted data are (s)acked.
-                        */
-                       tcp_try_undo_loss(sk, true);
+               /* Step 3.b. A timeout is spurious if not all data are
+                * lost, i.e., never-retransmitted data are (s)acked.
+                */
+               if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
                        return;
-               }
+
                if (after(tp->snd_nxt, tp->high_seq) &&
                    (flag & FLAG_DATA_SACKED || is_dupack)) {
                        tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
index 73d7f68da5574f2000acb89f72037e4860afbc82..a0ecdf596f2fbf0ccec406ea092ee045458df5e5 100644 (file)
@@ -61,6 +61,7 @@
 MODULE_AUTHOR("Ville Nuorvala");
 MODULE_DESCRIPTION("IPv6 tunneling device");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("ip6tnl");
 MODULE_ALIAS_NETDEV("ip6tnl0");
 
 #ifdef IP6_TNL_DEBUG
index c2e73e647e440745acd32dcaa1564031f4323e38..3d2c81a66d6a10e150ff0c2b0190d1f08b073a6d 100644 (file)
@@ -9,7 +9,7 @@
 void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
 {
        static atomic_t ipv6_fragmentation_id;
-       int old, new;
+       int ident;
 
 #if IS_ENABLED(CONFIG_IPV6)
        if (rt && !(rt->dst.flags & DST_NOPEER)) {
@@ -25,13 +25,8 @@ void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
                }
        }
 #endif
-       do {
-               old = atomic_read(&ipv6_fragmentation_id);
-               new = old + 1;
-               if (!new)
-                       new = 1;
-       } while (atomic_cmpxchg(&ipv6_fragmentation_id, old, new) != old);
-       fhdr->identification = htonl(new);
+       ident = atomic_inc_return(&ipv6_fragmentation_id);
+       fhdr->identification = htonl(ident);
 }
 EXPORT_SYMBOL(ipv6_select_ident);
 
index 620d326e8fdd168dff8e0281827b5bcb5b29d419..540d58921007bdcc31900ac4dc4722e9eb5351d4 100644 (file)
@@ -530,12 +530,12 @@ static int ipip6_err(struct sk_buff *skb, u32 info)
 
        if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED) {
                ipv4_update_pmtu(skb, dev_net(skb->dev), info,
-                                t->dev->ifindex, 0, IPPROTO_IPV6, 0);
+                                t->parms.link, 0, IPPROTO_IPV6, 0);
                err = 0;
                goto out;
        }
        if (type == ICMP_REDIRECT) {
-               ipv4_redirect(skb, dev_net(skb->dev), t->dev->ifindex, 0,
+               ipv4_redirect(skb, dev_net(skb->dev), t->parms.link, 0,
                              IPPROTO_IPV6, 0);
                err = 0;
                goto out;
@@ -1654,4 +1654,5 @@ xfrm_tunnel_failed:
 module_init(sit_init);
 module_exit(sit_cleanup);
 MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("sit");
 MODULE_ALIAS_NETDEV("sit0");
index 276aa86f366b798e3eade374821656425f6a9ba4..215e9b008db647f5c96928f92a4d979eff220295 100644 (file)
@@ -1829,7 +1829,7 @@ static void iucv_callback_txdone(struct iucv_path *path,
                spin_lock_irqsave(&list->lock, flags);
 
                while (list_skb != (struct sk_buff *)list) {
-                       if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {
+                       if (msg->tag == IUCV_SKB_CB(list_skb)->tag) {
                                this = list_skb;
                                break;
                        }
index 514e90f470bf654abf6f6279cf8dbc16284e0ef9..2c64ab27b51542188a2afa16aebe7ed34b721a4e 100644 (file)
@@ -1746,7 +1746,6 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
        }
        mutex_unlock(&local->iflist_mtx);
        unregister_netdevice_many(&unreg_list);
-       list_del(&unreg_list);
 
        list_for_each_entry_safe(sdata, tmp, &wdev_list, list) {
                list_del(&sdata->list);
index 572d87dc116ffa838d2f9f8838129156add7284e..0a03662bfbefbb41bc433999966196f38d7674bd 100644 (file)
@@ -147,7 +147,7 @@ static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        const struct nfnetlink_subsystem *ss;
        int type, err;
 
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+       if (!netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        /* All the messages must at least contain nfgenmsg */
index c9c2a8441d32d70b26979850951e66d0888bc09e..be34adde692fb4918268d23715913c0273157ec9 100644 (file)
@@ -1219,7 +1219,74 @@ retry:
        return err;
 }
 
-static inline int netlink_capable(const struct socket *sock, unsigned int flag)
+/**
+ * __netlink_ns_capable - General netlink message capability test
+ * @nsp: NETLINK_CB of the socket buffer holding a netlink command from userspace.
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket we received the message
+ * from had the capability @cap in the user namespace @user_ns when the
+ * netlink socket was created and the sender of the message has it too.
+ */
+bool __netlink_ns_capable(const struct netlink_skb_parms *nsp,
+                       struct user_namespace *user_ns, int cap)
+{
+       return ((nsp->flags & NETLINK_SKB_DST) ||
+               file_ns_capable(nsp->sk->sk_socket->file, user_ns, cap)) &&
+               ns_capable(user_ns, cap);
+}
+EXPORT_SYMBOL(__netlink_ns_capable);
+
+/**
+ * netlink_ns_capable - General netlink message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @user_ns: The user namespace of the capability to use
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket we received the message
+ * from had the capability @cap in the user namespace @user_ns when the
+ * netlink socket was created and the sender of the message has it too.
+ */
+bool netlink_ns_capable(const struct sk_buff *skb,
+                       struct user_namespace *user_ns, int cap)
+{
+       return __netlink_ns_capable(&NETLINK_CB(skb), user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_ns_capable);
+
+/**
+ * netlink_capable - Netlink global message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket we received the message
+ * from had the capability @cap in all user namespaces when the netlink
+ * socket was created and the sender of the message has it too.
+ */
+bool netlink_capable(const struct sk_buff *skb, int cap)
+{
+       return netlink_ns_capable(skb, &init_user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_capable);
+
+/**
+ * netlink_net_capable - Netlink network namespace message capability test
+ * @skb: socket buffer holding a netlink command from userspace
+ * @cap: The capability to use
+ *
+ * Test to see if the opener of the socket we received the message
+ * from had the capability @cap over the network namespace of the
+ * socket we received the message from when the netlink socket was
+ * created, and the sender of the message has it too.
+ */
+bool netlink_net_capable(const struct sk_buff *skb, int cap)
+{
+       return netlink_ns_capable(skb, sock_net(skb->sk)->user_ns, cap);
+}
+EXPORT_SYMBOL(netlink_net_capable);
+
+static inline int netlink_allowed(const struct socket *sock, unsigned int flag)
 {
        return (nl_table[sock->sk->sk_protocol].flags & flag) ||
                ns_capable(sock_net(sock->sk)->user_ns, CAP_NET_ADMIN);
@@ -1287,7 +1354,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
 
        /* Only superuser is allowed to listen to multicasts */
        if (nladdr->nl_groups) {
-               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
+               if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
@@ -1349,7 +1416,7 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
                return -EINVAL;
 
        /* Only superuser is allowed to send multicasts */
-       if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+       if (nladdr->nl_groups && !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
                return -EPERM;
 
        if (!nlk->portid)
@@ -1921,7 +1988,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
                break;
        case NETLINK_ADD_MEMBERSHIP:
        case NETLINK_DROP_MEMBERSHIP: {
-               if (!netlink_capable(sock, NL_CFG_F_NONROOT_RECV))
+               if (!netlink_allowed(sock, NL_CFG_F_NONROOT_RECV))
                        return -EPERM;
                err = netlink_realloc_groups(sk);
                if (err)
@@ -2053,6 +2120,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        struct sk_buff *skb;
        int err;
        struct scm_cookie scm;
+       u32 netlink_skb_flags = 0;
 
        if (msg->msg_flags&MSG_OOB)
                return -EOPNOTSUPP;
@@ -2072,8 +2140,9 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
                dst_group = ffs(addr->nl_groups);
                err =  -EPERM;
                if ((dst_group || dst_portid) &&
-                   !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+                   !netlink_allowed(sock, NL_CFG_F_NONROOT_SEND))
                        goto out;
+               netlink_skb_flags |= NETLINK_SKB_DST;
        } else {
                dst_portid = nlk->dst_portid;
                dst_group = nlk->dst_group;
@@ -2103,6 +2172,7 @@ static int netlink_sendmsg(struct kiocb *kiocb, struct socket *sock,
        NETLINK_CB(skb).portid  = nlk->portid;
        NETLINK_CB(skb).dst_group = dst_group;
        NETLINK_CB(skb).creds   = siocb->scm->creds;
+       NETLINK_CB(skb).flags   = netlink_skb_flags;
 
        err = -EFAULT;
        if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
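
A minimal, hypothetical usage sketch (the handler name and body are illustrative, not part of this patch): the point of the new helpers is that the capability check is made against the sender of the netlink skb, using credentials captured when the sending socket was opened, rather than against whichever task happens to be current. That is what the capable()/ns_capable() conversions in the files below rely on.

static int example_nl_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	/* Privileged request: require CAP_NET_ADMIN over the network
	 * namespace owning the sending socket, judged by the sender's
	 * credentials rather than current's.
	 */
	if (!netlink_net_capable(skb, CAP_NET_ADMIN))
		return -EPERM;

	/* ... handle the request ... */
	return 0;
}
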
index 393f17eea1a2f148a4c48a423c1611553473019c..ade434b8abd883400855d3ddddf2a76e578fe347 100644 (file)
@@ -592,7 +592,7 @@ static int genl_family_rcv_msg(struct genl_family *family,
                return -EOPNOTSUPP;
 
        if ((ops->flags & GENL_ADMIN_PERM) &&
-           !capable(CAP_NET_ADMIN))
+           !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
index ec8b6e8a80b1dccce33001ada5a62c2e91ebf9ce..674b0a65df6c02eb49d36869c969a1439982ab2e 100644 (file)
@@ -127,6 +127,7 @@ static int pdiag_put_fanout(struct packet_sock *po, struct sk_buff *nlskb)
 
 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                        struct packet_diag_req *req,
+                       bool may_report_filterinfo,
                        struct user_namespace *user_ns,
                        u32 portid, u32 seq, u32 flags, int sk_ino)
 {
@@ -171,7 +172,8 @@ static int sk_diag_fill(struct sock *sk, struct sk_buff *skb,
                goto out_nlmsg_trim;
 
        if ((req->pdiag_show & PACKET_SHOW_FILTER) &&
-           sock_diag_put_filterinfo(sk, skb, PACKET_DIAG_FILTER))
+           sock_diag_put_filterinfo(may_report_filterinfo, sk, skb,
+                                    PACKET_DIAG_FILTER))
                goto out_nlmsg_trim;
 
        return nlmsg_end(skb, nlh);
@@ -187,9 +189,11 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
        struct packet_diag_req *req;
        struct net *net;
        struct sock *sk;
+       bool may_report_filterinfo;
 
        net = sock_net(skb->sk);
        req = nlmsg_data(cb->nlh);
+       may_report_filterinfo = netlink_net_capable(cb->skb, CAP_NET_ADMIN);
 
        mutex_lock(&net->packet.sklist_lock);
        sk_for_each(sk, &net->packet.sklist) {
@@ -199,6 +203,7 @@ static int packet_diag_dump(struct sk_buff *skb, struct netlink_callback *cb)
                        goto next;
 
                if (sk_diag_fill(sk, skb, req,
+                                may_report_filterinfo,
                                 sk_user_ns(NETLINK_CB(cb->skb).sk),
                                 NETLINK_CB(cb->skb).portid,
                                 cb->nlh->nlmsg_seq, NLM_F_MULTI,
index dc15f430080831e74fade00799a661ab10cc6f84..b64151ade6b33a9cbacb0980d3ddbe03d8f7b4c8 100644 (file)
@@ -70,10 +70,10 @@ static int addr_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err;
        u8 pnaddr;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
-       if (!capable(CAP_SYS_ADMIN))
+       if (!netlink_capable(skb, CAP_SYS_ADMIN))
                return -EPERM;
 
        ASSERT_RTNL();
@@ -233,10 +233,10 @@ static int route_doit(struct sk_buff *skb, struct nlmsghdr *nlh)
        int err;
        u8 dst;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
-       if (!capable(CAP_SYS_ADMIN))
+       if (!netlink_capable(skb, CAP_SYS_ADMIN))
                return -EPERM;
 
        ASSERT_RTNL();
index fd7072827a40139c4ffba595aaa261282641e37f..15d46b9166debf2d4b832e4c837e3981b7401606 100644 (file)
@@ -989,7 +989,7 @@ static int tc_ctl_action(struct sk_buff *skb, struct nlmsghdr *n)
        u32 portid = skb ? NETLINK_CB(skb).portid : 0;
        int ret = 0, ovr = 0;
 
-       if ((n->nlmsg_type != RTM_GETACTION) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETACTION) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        ret = nlmsg_parse(n, sizeof(struct tcamsg), tca, TCA_ACT_MAX, NULL);
index 8e118af9097345a500e1006bff6fa5c108435603..2ea40d1877a6cce9230b574463b12c637b8ac86e 100644 (file)
@@ -138,7 +138,7 @@ static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n)
        int err;
        int tp_created = 0;
 
-       if ((n->nlmsg_type != RTM_GETTFILTER) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETTFILTER) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
 replay:
index 51b968d3febb477be1e4183c8a94258c97fdcd18..2d2f07945c85a3324275bfbe61f4faf6ccd242ad 100644 (file)
@@ -1024,7 +1024,7 @@ static int tc_get_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
        struct Qdisc *p = NULL;
        int err;
 
-       if ((n->nlmsg_type != RTM_GETQDISC) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETQDISC) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
@@ -1091,7 +1091,7 @@ static int tc_modify_qdisc(struct sk_buff *skb, struct nlmsghdr *n)
        struct Qdisc *q, *p;
        int err;
 
-       if (!capable(CAP_NET_ADMIN))
+       if (!netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
 replay:
@@ -1431,7 +1431,7 @@ static int tc_ctl_tclass(struct sk_buff *skb, struct nlmsghdr *n)
        u32 qid;
        int err;
 
-       if ((n->nlmsg_type != RTM_GETTCLASS) && !capable(CAP_NET_ADMIN))
+       if ((n->nlmsg_type != RTM_GETTCLASS) && !netlink_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        err = nlmsg_parse(n, sizeof(*tcm), tca, TCA_MAX, NULL);
index 91cfd8f94a19e4c6cecd716fda03027007158816..229b3c3fb6c98d7bc63404bcfa8e4c0dd43f396c 100644 (file)
@@ -387,7 +387,7 @@ void sctp_association_free(struct sctp_association *asoc)
        /* Only real associations count against the endpoint, so
         * don't bother if this is a temporary association.
         */
-       if (!asoc->temp) {
+       if (!list_empty(&asoc->asocs)) {
                list_del(&asoc->asocs);
 
                /* Decrement the backlog value for a TCP-style listening
index 8bcd4985d0fb341f795346f06c55d1f059c4c643..1e6081fb60788f35d5413dd799e107652177ba97 100644 (file)
@@ -47,7 +47,7 @@ static int handle_cmd(struct sk_buff *skb, struct genl_info *info)
        int hdr_space = nlmsg_total_size(GENL_HDRLEN + TIPC_GENL_HDRLEN);
        u16 cmd;
 
-       if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN)))
+       if ((req_userhdr->cmd & 0xC000) && (!netlink_capable(skb, CAP_NET_ADMIN)))
                cmd = TIPC_CMD_NOT_NET_ADMIN;
        else
                cmd = req_userhdr->cmd;
index 3f565e495ac68cea83e1d52cf7db7e820fe777ad..7a70a5a5671aa1c592446e3794ccd5b53e59fc1c 100644 (file)
@@ -2362,7 +2362,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        link = &xfrm_dispatch[type];
 
        /* All operations require privileges, even GET */
-       if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
+       if (!netlink_net_capable(skb, CAP_NET_ADMIN))
                return -EPERM;
 
        if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
index cdbde1762189f56ebdcd133e3cf6fdfba37b3933..b9b2bebeb3505596041d11ac47468f523def36c4 100644 (file)
@@ -275,12 +275,20 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
  * @xattr_value: pointer to the new extended attribute value
  * @xattr_value_len: pointer to the new extended attribute value length
  *
- * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
- * the current value is valid.
+ * Before allowing the 'security.evm' protected xattr to be updated,
+ * verify the existing value is valid.  As only the kernel should have
+ * access to the EVM encrypted key needed to calculate the HMAC, prevent
+ * userspace from writing the HMAC value.  Writing 'security.evm'
+ * requires CAP_SYS_ADMIN privileges.
  */
 int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
                       const void *xattr_value, size_t xattr_value_len)
 {
+       const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+       if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
+           && (xattr_data->type == EVM_XATTR_HMAC))
+               return -EPERM;
        return evm_protect_xattr(dentry, xattr_name, xattr_value,
                                 xattr_value_len);
 }
index a02e0791cf15c7add98bd922ebcc08cd3db0f725..9da974c0f958ea72e5f54f0d169b480c57d76f1c 100644 (file)
 
 static struct crypto_shash *ima_shash_tfm;
 
+/**
+ * ima_kernel_read - read file content
+ *
+ * This function is used instead of kernel_read() to read file content.
+ * It skips the locking checks so that it cannot be blocked.
+ * It skips the security checks because they are irrelevant for IMA.
+ *
+ */
+static int ima_kernel_read(struct file *file, loff_t offset,
+                          char *addr, unsigned long count)
+{
+       mm_segment_t old_fs;
+       char __user *buf = addr;
+       ssize_t ret;
+
+       if (!(file->f_mode & FMODE_READ))
+               return -EBADF;
+       if (!file->f_op->read && !file->f_op->aio_read)
+               return -EINVAL;
+
+       old_fs = get_fs();
+       set_fs(get_ds());
+       if (file->f_op->read)
+               ret = file->f_op->read(file, buf, count, &offset);
+       else
+               ret = do_sync_read(file, buf, count, &offset);
+       set_fs(old_fs);
+       return ret;
+}
+
 int ima_init_crypto(void)
 {
        long rc;
@@ -70,7 +100,7 @@ int ima_calc_file_hash(struct file *file, char *digest)
        while (offset < i_size) {
                int rbuf_len;
 
-               rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
+               rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
                if (rbuf_len < 0) {
                        rc = rbuf_len;
                        break;
index d8aa206e8bdece19a337175ee1e4a9f83b8e8569..98a29b26c5f41d0448177aaab2c0bd561cc70e2b 100644 (file)
@@ -289,6 +289,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
 {
        struct snd_kcontrol *kctl;
 
+       /* Make sure that the ids assigned to the control do not wrap around */
+       if (card->last_numid >= UINT_MAX - count)
+               card->last_numid = 0;
+
        list_for_each_entry(kctl, &card->controls, list) {
                if (kctl->id.numid < card->last_numid + 1 + count &&
                    kctl->id.numid + kctl->count > card->last_numid + 1) {
@@ -331,6 +335,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
 {
        struct snd_ctl_elem_id id;
        unsigned int idx;
+       unsigned int count;
        int err = -EINVAL;
 
        if (! kcontrol)
@@ -338,6 +343,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
        if (snd_BUG_ON(!card || !kcontrol->info))
                goto error;
        id = kcontrol->id;
+       if (id.index > UINT_MAX - kcontrol->count)
+               goto error;
+
        down_write(&card->controls_rwsem);
        if (snd_ctl_find_id(card, &id)) {
                up_write(&card->controls_rwsem);
@@ -359,8 +367,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
        card->controls_count += kcontrol->count;
        kcontrol->id.numid = card->last_numid + 1;
        card->last_numid += kcontrol->count;
+       count = kcontrol->count;
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
                snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
        return 0;
 
@@ -389,6 +398,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
                    bool add_on_replace)
 {
        struct snd_ctl_elem_id id;
+       unsigned int count;
        unsigned int idx;
        struct snd_kcontrol *old;
        int ret;
@@ -424,8 +434,9 @@ add:
        card->controls_count += kcontrol->count;
        kcontrol->id.numid = card->last_numid + 1;
        card->last_numid += kcontrol->count;
+       count = kcontrol->count;
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
                snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
        return 0;
 
@@ -898,9 +909,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
                        result = kctl->put(kctl, control);
                }
                if (result > 0) {
+                       struct snd_ctl_elem_id id = control->id;
                        up_read(&card->controls_rwsem);
-                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
-                                      &control->id);
+                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
                        return 0;
                }
        }
@@ -992,6 +1003,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
 
 struct user_element {
        struct snd_ctl_elem_info info;
+       struct snd_card *card;
        void *elem_data;                /* element data */
        unsigned long elem_data_size;   /* size of element data in bytes */
        void *tlv_data;                 /* TLV data */
@@ -1035,7 +1047,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
 {
        struct user_element *ue = kcontrol->private_data;
 
+       mutex_lock(&ue->card->user_ctl_lock);
        memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
+       mutex_unlock(&ue->card->user_ctl_lock);
        return 0;
 }
 
@@ -1044,10 +1058,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
 {
        int change;
        struct user_element *ue = kcontrol->private_data;
-       
+
+       mutex_lock(&ue->card->user_ctl_lock);
        change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
        if (change)
                memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
+       mutex_unlock(&ue->card->user_ctl_lock);
        return change;
 }
 
@@ -1067,19 +1083,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
                new_data = memdup_user(tlv, size);
                if (IS_ERR(new_data))
                        return PTR_ERR(new_data);
+               mutex_lock(&ue->card->user_ctl_lock);
                change = ue->tlv_data_size != size;
                if (!change)
                        change = memcmp(ue->tlv_data, new_data, size);
                kfree(ue->tlv_data);
                ue->tlv_data = new_data;
                ue->tlv_data_size = size;
+               mutex_unlock(&ue->card->user_ctl_lock);
        } else {
-               if (! ue->tlv_data_size || ! ue->tlv_data)
-                       return -ENXIO;
-               if (size < ue->tlv_data_size)
-                       return -ENOSPC;
+               int ret = 0;
+
+               mutex_lock(&ue->card->user_ctl_lock);
+               if (!ue->tlv_data_size || !ue->tlv_data) {
+                       ret = -ENXIO;
+                       goto err_unlock;
+               }
+               if (size < ue->tlv_data_size) {
+                       ret = -ENOSPC;
+                       goto err_unlock;
+               }
                if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
-                       return -EFAULT;
+                       ret = -EFAULT;
+err_unlock:
+               mutex_unlock(&ue->card->user_ctl_lock);
+               if (ret)
+                       return ret;
        }
        return change;
 }
@@ -1137,8 +1166,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
        struct user_element *ue;
        int idx, err;
 
-       if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
-               return -ENOMEM;
        if (info->count < 1)
                return -EINVAL;
        access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
@@ -1147,21 +1174,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
                                 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
        info->id.numid = 0;
        memset(&kctl, 0, sizeof(kctl));
-       down_write(&card->controls_rwsem);
-       _kctl = snd_ctl_find_id(card, &info->id);
-       err = 0;
-       if (_kctl) {
-               if (replace)
-                       err = snd_ctl_remove(card, _kctl);
-               else
-                       err = -EBUSY;
-       } else {
-               if (replace)
-                       err = -ENOENT;
+
+       if (replace) {
+               err = snd_ctl_remove_user_ctl(file, &info->id);
+               if (err)
+                       return err;
        }
-       up_write(&card->controls_rwsem);
-       if (err < 0)
-               return err;
+
+       if (card->user_ctl_count >= MAX_USER_CONTROLS)
+               return -ENOMEM;
+
        memcpy(&kctl.id, &info->id, sizeof(info->id));
        kctl.count = info->owner ? info->owner : 1;
        access |= SNDRV_CTL_ELEM_ACCESS_USER;
@@ -1211,6 +1233,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
        ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
        if (ue == NULL)
                return -ENOMEM;
+       ue->card = card;
        ue->info = *info;
        ue->info.access = 0;
        ue->elem_data = (char *)ue + sizeof(*ue);
@@ -1322,8 +1345,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
                }
                err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
                if (err > 0) {
+                       struct snd_ctl_elem_id id = kctl->id;
                        up_read(&card->controls_rwsem);
-                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
+                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
                        return 0;
                }
        } else {
index 6ef06400dfc83884f2feaf6ddd26529498bae8c3..27791a58e4485154ac089f0f023c32bec5e8c16a 100644 (file)
@@ -208,6 +208,7 @@ int snd_card_create(int idx, const char *xid,
        INIT_LIST_HEAD(&card->devices);
        init_rwsem(&card->controls_rwsem);
        rwlock_init(&card->ctl_files_rwlock);
+       mutex_init(&card->user_ctl_lock);
        INIT_LIST_HEAD(&card->controls);
        INIT_LIST_HEAD(&card->ctl_files);
        spin_lock_init(&card->files_lock);
index 0923f09df50361c65274fe2195cb5a7992018c70..0b85e857f1c7299e519e8fb27fdfe194b2c83a04 100644 (file)
@@ -3356,6 +3356,7 @@ enum {
        ALC269_FIXUP_STEREO_DMIC,
        ALC269_FIXUP_QUANTA_MUTE,
        ALC269_FIXUP_LIFEBOOK,
+       ALC269_FIXUP_LIFEBOOK_EXTMIC,
        ALC269_FIXUP_AMIC,
        ALC269_FIXUP_DMIC,
        ALC269VB_FIXUP_AMIC,
@@ -3463,6 +3464,13 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC269_FIXUP_QUANTA_MUTE
        },
+       [ALC269_FIXUP_LIFEBOOK_EXTMIC] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x01a1903c }, /* headset mic, with jack detect */
+                       { }
+               },
+       },
        [ALC269_FIXUP_AMIC] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -3713,6 +3721,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
        SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
        SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
+       SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Thinkpad SL410/510", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x215e, "Thinkpad L512", ALC269_FIXUP_SKU_IGNORE),
        SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
@@ -4664,6 +4673,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
        { .id = 0x10ec0670, .name = "ALC670", .patch = patch_alc662 },
        { .id = 0x10ec0671, .name = "ALC671", .patch = patch_alc662 },
        { .id = 0x10ec0680, .name = "ALC680", .patch = patch_alc680 },
+       { .id = 0x10ec0867, .name = "ALC891", .patch = patch_alc882 },
        { .id = 0x10ec0880, .name = "ALC880", .patch = patch_alc880 },
        { .id = 0x10ec0882, .name = "ALC882", .patch = patch_alc882 },
        { .id = 0x10ec0883, .name = "ALC883", .patch = patch_alc882 },
index 4fdcc1cefc25c151884fee09123e5aa69950e41b..9b7746c9546f03180a9e15f988e87071eaa15a4a 100644 (file)
@@ -255,6 +255,7 @@ static struct reg_default max98090_reg[] = {
 static bool max98090_volatile_register(struct device *dev, unsigned int reg)
 {
        switch (reg) {
+       case M98090_REG_SOFTWARE_RESET:
        case M98090_REG_DEVICE_STATUS:
        case M98090_REG_JACK_STATUS:
        case M98090_REG_REVISION_ID:
@@ -2343,6 +2344,8 @@ static int max98090_runtime_resume(struct device *dev)
 
        regcache_cache_only(max98090->regmap, false);
 
+       max98090_reset(max98090);
+
        regcache_sync(max98090->regmap);
 
        return 0;