Merge tag 'v3.10.61' into linux-linaro-lsk
author Mark Brown <broonie@kernel.org>
Fri, 21 Nov 2014 18:53:19 +0000 (18:53 +0000)
committer Mark Brown <broonie@kernel.org>
Fri, 21 Nov 2014 18:53:19 +0000 (18:53 +0000)
This is the 3.10.61 stable release

105 files changed:
Makefile
arch/alpha/mm/fault.c
arch/arc/mm/fault.c
arch/arm/kernel/kprobes-common.c
arch/arm/kernel/kprobes-thumb.c
arch/arm/kernel/kprobes.c
arch/arm/mm/Kconfig
arch/arm/mm/fault.c
arch/arm64/lib/clear_user.S
arch/arm64/mm/fault.c
arch/avr32/mm/fault.c
arch/cris/mm/fault.c
arch/frv/mm/fault.c
arch/hexagon/mm/vm_fault.c
arch/ia64/mm/fault.c
arch/m32r/mm/fault.c
arch/m68k/mm/fault.c
arch/metag/mm/fault.c
arch/microblaze/mm/fault.c
arch/mips/mm/c-r4k.c
arch/mips/mm/fault.c
arch/mn10300/mm/fault.c
arch/openrisc/mm/fault.c
arch/parisc/include/uapi/asm/shmbuf.h
arch/parisc/kernel/syscall_table.S
arch/parisc/mm/fault.c
arch/powerpc/mm/fault.c
arch/s390/mm/fault.c
arch/score/mm/fault.c
arch/sh/mm/fault.c
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/cmpxchg_32.h
arch/sparc/include/asm/vio.h
arch/sparc/kernel/pci_schizo.c
arch/sparc/kernel/smp_64.c
arch/sparc/lib/atomic32.c
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/tile/mm/fault.c
arch/um/kernel/trap.c
arch/unicore32/mm/fault.c
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/ptrace.c
arch/x86/kvm/x86.c
arch/x86/mm/fault.c
arch/xtensa/include/uapi/asm/unistd.h
arch/xtensa/mm/fault.c
drivers/ata/ahci.c
drivers/block/sunvdc.c
drivers/firewire/core-cdev.c
drivers/gpu/drm/radeon/evergreen.c
drivers/input/mouse/alps.c
drivers/md/dm-raid.c
drivers/md/persistent-data/dm-btree-internal.h
drivers/md/persistent-data/dm-btree-spine.c
drivers/md/persistent-data/dm-btree.c
drivers/media/usb/ttusb-dec/ttusbdecfe.c
drivers/misc/mei/bus.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/macvtap.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/platform/x86/dell-wmi.c
drivers/scsi/hpsa.c
drivers/scsi/scsi_error.c
fs/ioprio.c
fs/nfs/delegation.c
fs/nfs/delegation.h
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/nfs4proc.c
include/linux/clocksource.h
include/linux/kgdb.h
include/linux/memcontrol.h
include/linux/mm.h
include/linux/nfs_xdr.h
include/linux/sched.h
include/net/sctp/sctp.h
include/net/sctp/sm.h
include/uapi/linux/netfilter/xt_bpf.h
ipc/ipc_sysctl.c
kernel/audit_tree.c
kernel/events/core.c
mm/memcontrol.c
mm/memory.c
mm/oom_kill.c
net/bridge/br_private.h
net/bridge/br_stp_bpdu.c
net/ceph/crypto.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_tunnel.c
net/mac80211/rx.c
net/netfilter/nf_nat_core.c
net/netfilter/nfnetlink_log.c
net/sctp/associola.c
net/sctp/auth.c
net/sctp/inqueue.c
net/sctp/sm_make_chunk.c
net/sctp/sm_statefuns.c
sound/usb/mixer_quirks.c

index 9d4f30d0d2013fc9f7dd0658265f79907bb590b7..0d5ba80786b81a1996b992980abfd83400fb6847 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
 VERSION = 3
 PATCHLEVEL = 10
-SUBLEVEL = 60
+SUBLEVEL = 61
 EXTRAVERSION =
 NAME = TOSSUG Baby Fish
 
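Note: VERSION, PATCHLEVEL and SUBLEVEL combine into the release string, 3.10.61 here. Out-of-tree code normally keys off this through <linux/version.h> rather than parsing the Makefile; a minimal sketch:

    #include <linux/version.h>

    #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 61)
    #error "this code expects the 3.10.61 fixes to be present"
    #endif
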
index 0c4132dd3507a0b62b3c40c2fc6fd4065a325262..98838a05ba6d89f0459742131010f57c38cbed05 100644 (file)
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -89,8 +89,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
        const struct exception_table_entry *fixup;
        int fault, si_code = SEGV_MAPERR;
        siginfo_t info;
-       unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                             (cause > 0 ? FAULT_FLAG_WRITE : 0));
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        /* As of EV6, a load into $31/$f31 is a prefetch, and never faults
           (or is suppressed by the PALcode).  Support that for older CPUs
@@ -115,7 +114,8 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
        if (address >= TASK_SIZE)
                goto vmalloc_fault;
 #endif
-
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -142,6 +142,7 @@ retry:
        } else {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        }
 
        /* If for any reason at all we couldn't handle the fault,
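
Note: the arch/*/mm/fault.c hunks in this release all backport the same pair of upstream cleanups, so the shape above repeats for each architecture below. FAULT_FLAG_USER is set whenever the fault came from user mode, letting the generic fault path tell user-triggered faults apart (the memcg OOM rework also in this release, see mm/memcontrol.c in the file list, uses it), and FAULT_FLAG_WRITE moves out of the initializer so it is set only after the VMA's VM_WRITE permission has been checked. The common shape, as a sketch (is_write stands in for each arch's own write test):

    unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;

    if (user_mode(regs))
            flags |= FAULT_FLAG_USER;       /* fault on behalf of userspace */
    ...
    if (is_write) {
            if (!(vma->vm_flags & VM_WRITE))
                    goto bad_area;
            flags |= FAULT_FLAG_WRITE;      /* only after access is validated */
    }
    fault = handle_mm_fault(mm, vma, address, flags);

Several of the same hunks also retire the old per-arch OOM handling (the survive:/yield() loops and do_group_exit(SIGKILL)) in favour of pagefault_out_of_memory() for user-mode faults, deferring to the generic OOM killer.
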
index 331a0846628e05ffc1a8b37b0b6540dcd603809c..50533b750a99d88dfd7e0fa1d89deb87717f9095 100644 (file)
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -59,8 +59,7 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
        struct mm_struct *mm = tsk->mm;
        siginfo_t info;
        int fault, ret;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                               (write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        /*
         * We fault-in kernel-space virtual memory on-demand. The
@@ -88,6 +87,8 @@ void do_page_fault(struct pt_regs *regs, int write, unsigned long address,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -115,12 +116,12 @@ good_area:
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
 
-survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
@@ -200,14 +201,12 @@ no_context:
        die("Oops", regs, address, cause_code);
 
 out_of_memory:
-       if (is_global_init(tsk)) {
-               yield();
-               goto survive;
-       }
        up_read(&mm->mmap_sem);
 
-       if (user_mode(regs))
-               do_group_exit(SIGKILL); /* This will never return */
+       if (user_mode(regs)) {
+               pagefault_out_of_memory();
+               return;
+       }
 
        goto no_context;
 
index 18a76282970e6dfaa414b6f9ff266553258bd27d..380c20fb9c85a459ecbb1ee7868df1c7f28a1064 100644 (file)
--- a/arch/arm/kernel/kprobes-common.c
+++ b/arch/arm/kernel/kprobes-common.c
@@ -14,6 +14,7 @@
 #include <linux/kernel.h>
 #include <linux/kprobes.h>
 #include <asm/system_info.h>
+#include <asm/opcodes.h>
 
 #include "kprobes.h"
 
@@ -305,7 +306,8 @@ kprobe_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 
        if (handler) {
                /* We can emulate the instruction in (possibly) modified form */
-               asi->insn[0] = (insn & 0xfff00000) | (rn << 16) | reglist;
+               asi->insn[0] = __opcode_to_mem_arm((insn & 0xfff00000) |
+                                                  (rn << 16) | reglist);
                asi->insn_handler = handler;
                return INSN_GOOD;
        }
@@ -334,13 +336,14 @@ prepare_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
 #ifdef CONFIG_THUMB2_KERNEL
        if (thumb) {
                u16 *thumb_insn = (u16 *)asi->insn;
-               thumb_insn[1] = 0x4770; /* Thumb bx lr */
-               thumb_insn[2] = 0x4770; /* Thumb bx lr */
+               /* Thumb bx lr */
+               thumb_insn[1] = __opcode_to_mem_thumb16(0x4770);
+               thumb_insn[2] = __opcode_to_mem_thumb16(0x4770);
                return insn;
        }
-       asi->insn[1] = 0xe12fff1e; /* ARM bx lr */
+       asi->insn[1] = __opcode_to_mem_arm(0xe12fff1e); /* ARM bx lr */
 #else
-       asi->insn[1] = 0xe1a0f00e; /* mov pc, lr */
+       asi->insn[1] = __opcode_to_mem_arm(0xe1a0f00e); /* mov pc, lr */
 #endif
        /* Make an ARM instruction unconditional */
        if (insn < 0xe0000000)
@@ -360,12 +363,12 @@ set_emulated_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi,
        if (thumb) {
                u16 *ip = (u16 *)asi->insn;
                if (is_wide_instruction(insn))
-                       *ip++ = insn >> 16;
-               *ip++ = insn;
+                       *ip++ = __opcode_to_mem_thumb16(insn >> 16);
+               *ip++ = __opcode_to_mem_thumb16(insn);
                return;
        }
 #endif
-       asi->insn[0] = insn;
+       asi->insn[0] = __opcode_to_mem_arm(insn);
 }
 
 /*
index 6123daf397a7bbb7ffe161075165ddf57f175d10..b82e798983c4fdf280b77b104cbafc68184b5e72 100644 (file)
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -163,9 +163,9 @@ t32_decode_ldmstm(kprobe_opcode_t insn, struct arch_specific_insn *asi)
        enum kprobe_insn ret = kprobe_decode_ldmstm(insn, asi);
 
        /* Fixup modified instruction to have halfwords in correct order...*/
-       insn = asi->insn[0];
-       ((u16 *)asi->insn)[0] = insn >> 16;
-       ((u16 *)asi->insn)[1] = insn & 0xffff;
+       insn = __mem_to_opcode_arm(asi->insn[0]);
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn >> 16);
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0xffff);
 
        return ret;
 }
@@ -1153,7 +1153,7 @@ t16_decode_hiregs(kprobe_opcode_t insn, struct arch_specific_insn *asi)
 {
        insn &= ~0x00ff;
        insn |= 0x001; /* Set Rdn = R1 and Rm = R0 */
-       ((u16 *)asi->insn)[0] = insn;
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(insn);
        asi->insn_handler = t16_emulate_hiregs;
        return INSN_GOOD;
 }
@@ -1182,8 +1182,10 @@ t16_decode_push(kprobe_opcode_t insn, struct arch_specific_insn *asi)
         * and call it with R9=SP and LR in the register list represented
         * by R8.
         */
-       ((u16 *)asi->insn)[0] = 0xe929;         /* 1st half STMDB R9!,{} */
-       ((u16 *)asi->insn)[1] = insn & 0x1ff;   /* 2nd half (register list) */
+       /* 1st half STMDB R9!,{} */
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe929);
+       /* 2nd half (register list) */
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
        asi->insn_handler = t16_emulate_push;
        return INSN_GOOD;
 }
@@ -1232,8 +1234,10 @@ t16_decode_pop(kprobe_opcode_t insn, struct arch_specific_insn *asi)
         * and call it with R9=SP and PC in the register list represented
         * by R8.
         */
-       ((u16 *)asi->insn)[0] = 0xe8b9;         /* 1st half LDMIA R9!,{} */
-       ((u16 *)asi->insn)[1] = insn & 0x1ff;   /* 2nd half (register list) */
+       /* 1st half LDMIA R9!,{} */
+       ((u16 *)asi->insn)[0] = __opcode_to_mem_thumb16(0xe8b9);
+       /* 2nd half (register list) */
+       ((u16 *)asi->insn)[1] = __opcode_to_mem_thumb16(insn & 0x1ff);
        asi->insn_handler = insn & 0x100 ? t16_emulate_pop_pc
                                         : t16_emulate_pop_nopc;
        return INSN_GOOD;
index 170e9f34003f414030c6070e8c913df21c5091cd..1c6ece51781c9d0a84ee25ff5095bc0edb59241a 100644 (file)
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -26,6 +26,7 @@
 #include <linux/stop_machine.h>
 #include <linux/stringify.h>
 #include <asm/traps.h>
+#include <asm/opcodes.h>
 #include <asm/cacheflush.h>
 
 #include "kprobes.h"
@@ -62,10 +63,10 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
 #ifdef CONFIG_THUMB2_KERNEL
        thumb = true;
        addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
-       insn = ((u16 *)addr)[0];
+       insn = __mem_to_opcode_thumb16(((u16 *)addr)[0]);
        if (is_wide_instruction(insn)) {
-               insn <<= 16;
-               insn |= ((u16 *)addr)[1];
+               u16 inst2 = __mem_to_opcode_thumb16(((u16 *)addr)[1]);
+               insn = __opcode_thumb32_compose(insn, inst2);
                decode_insn = thumb32_kprobe_decode_insn;
        } else
                decode_insn = thumb16_kprobe_decode_insn;
@@ -73,7 +74,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
        thumb = false;
        if (addr & 0x3)
                return -EINVAL;
-       insn = *p->addr;
+       insn = __mem_to_opcode_arm(*p->addr);
        decode_insn = arm_kprobe_decode_insn;
 #endif
 
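Note: the __opcode_to_mem_*() and __mem_to_opcode_*() helpers from the newly included <asm/opcodes.h> convert instructions between their canonical value and their in-memory representation. This matters on BE8 big-endian ARM, where instructions are stored little-endian while data accesses are big-endian; without the conversion, kprobes would write byte-swapped instructions into the instruction slot. Simplified from asm/opcodes.h (the real header also covers BE32 and Thumb32):

    #ifdef CONFIG_CPU_ENDIAN_BE8
    #define __opcode_to_mem_arm(x)          swab32(x)  /* byte-swap 32-bit ARM insn */
    #define __opcode_to_mem_thumb16(x)      swab16(x)  /* byte-swap 16-bit Thumb insn */
    #else
    #define __opcode_to_mem_arm(x)          ((u32)(x)) /* identity elsewhere */
    #define __opcode_to_mem_thumb16(x)      ((u16)(x))
    #endif

The __mem_to_opcode_*() forms are the inverse; since a byte swap is its own inverse, they expand the same way.
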
index 36e9f24e03b0213cb1682ab131a0f0b9c2176998..2e719593c5cb7cbf0bb9789e9ef0266495e94db9 100644 (file)
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -778,6 +778,7 @@ config NEED_KUSER_HELPERS
 
 config KUSER_HELPERS
        bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
+       depends on MMU
        default y
        help
          Warning: disabling this option may break user programs.
index e207aa5f846f602ec0fc843df89c8a81a5994e7b..54fcddafec155b9a8bb9950c2a63d2f75c7d0af1 100644 (file)
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -261,9 +261,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
-       int write = fsr & FSR_WRITE;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                               (write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        if (notify_page_fault(regs, fsr))
                return 0;
@@ -282,6 +280,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (fsr & FSR_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -349,6 +352,13 @@ retry:
        if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;
 
+       /*
+        * If we are in kernel mode at this point, we
+        * have no context to handle this fault with.
+        */
+       if (!user_mode(regs))
+               goto no_context;
+
        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
@@ -359,13 +369,6 @@ retry:
                return 0;
        }
 
-       /*
-        * If we are in kernel mode at this point, we
-        * have no context to handle this fault with.
-        */
-       if (!user_mode(regs))
-               goto no_context;
-
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
index 6e0ed93d51fe1850ee27176fc9222dac5416b74d..c17967fdf5f6007330ba65beb7a05bd347600764 100644 (file)
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2   )
        sub     x1, x1, #2
 4:     adds    x1, x1, #1
        b.mi    5f
-       strb    wzr, [x0]
+USER(9f, strb  wzr, [x0]       )
 5:     mov     x0, #0
        ret
 ENDPROC(__clear_user)
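
Note: USER() wraps an instruction that may fault on a user address and registers a fixup, so the fault is handled through the exception table instead of oopsing; the final strb of __clear_user lacked this, making a fault on the last byte fatal. Approximately, from the arm64 <asm/assembler.h> of this era:

    /* emit insn at 9999:, with an __ex_table entry sending faults to label l */
    #define USER(l, x...)                   \
    9999:   x;                              \
            .section __ex_table,"a";        \
            .align  3;                      \
            .quad   9999b,l;                \
            .previous
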
index df4f2fd187c3bed7b54574fa0ac92e86da3727a7..c23751b0612033f56533955788eabefa6c5fd39e 100644 (file)
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -199,13 +199,6 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
        unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
-       if (esr & ESR_LNX_EXEC) {
-               vm_flags = VM_EXEC;
-       } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
-               vm_flags = VM_WRITE;
-               mm_flags |= FAULT_FLAG_WRITE;
-       }
-
        tsk = current;
        mm  = tsk->mm;
 
@@ -220,6 +213,16 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               mm_flags |= FAULT_FLAG_USER;
+
+       if (esr & ESR_LNX_EXEC) {
+               vm_flags = VM_EXEC;
+       } else if ((esr & ESR_WRITE) && !(esr & ESR_CM)) {
+               vm_flags = VM_WRITE;
+               mm_flags |= FAULT_FLAG_WRITE;
+       }
+
        /*
         * As per x86, we may deadlock here. However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -288,6 +291,13 @@ retry:
                              VM_FAULT_BADACCESS))))
                return 0;
 
+       /*
+        * If we are in kernel mode at this point, we have no context to
+        * handle this fault with.
+        */
+       if (!user_mode(regs))
+               goto no_context;
+
        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
@@ -298,13 +308,6 @@ retry:
                return 0;
        }
 
-       /*
-        * If we are in kernel mode at this point, we have no context to
-        * handle this fault with.
-        */
-       if (!user_mode(regs))
-               goto no_context;
-
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to successfully fix up
index b2f2d2d668491905dbbc37c385449852a9e7fdde..0eca93327195077ec16bdfd99efd7294c6ab2de6 100644 (file)
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -86,6 +86,8 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
 
        local_irq_enable();
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
 
@@ -228,9 +230,9 @@ no_context:
         */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       pagefault_out_of_memory();
        if (!user_mode(regs))
                goto no_context;
+       pagefault_out_of_memory();
        return;
 
 do_sigbus:
index 73312ab6c696c160f7fd58df02a50890390ad650..1790f22e71a21a859b2b7b1942cbbc503c2d557e 100644 (file)
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -58,8 +58,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
        struct vm_area_struct * vma;
        siginfo_t info;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                               ((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        D(printk(KERN_DEBUG
                 "Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -117,6 +116,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -155,6 +156,7 @@ retry:
        } else if (writeaccess == 1) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
index 331c1e2cfb6760ee7ed3f38d4e5c92a2526d44bf..9a66372fc7c76019ca874a9c3780c2fc8392266c 100644 (file)
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -34,11 +34,11 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
        struct vm_area_struct *vma;
        struct mm_struct *mm;
        unsigned long _pme, lrai, lrad, fixup;
+       unsigned long flags = 0;
        siginfo_t info;
        pgd_t *pge;
        pud_t *pue;
        pte_t *pte;
-       int write;
        int fault;
 
 #if 0
@@ -81,6 +81,9 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(__frame))
+               flags |= FAULT_FLAG_USER;
+
        down_read(&mm->mmap_sem);
 
        vma = find_vma(mm, ear0);
@@ -129,7 +132,6 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
  */
  good_area:
        info.si_code = SEGV_ACCERR;
-       write = 0;
        switch (esr0 & ESR0_ATXC) {
        default:
                /* handle write to write protected page */
@@ -140,7 +142,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
 #endif
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
-               write = 1;
+               flags |= FAULT_FLAG_WRITE;
                break;
 
                 /* handle read from protected page */
@@ -162,7 +164,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
-       fault = handle_mm_fault(mm, vma, ear0, write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, ear0, flags);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index 1bd276dbec7d3503f92a19fc7cb5a39df1051621..8704c9320032705cf7de10d6a94f3c4d70cf8b12 100644 (file)
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -53,8 +53,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
        int si_code = SEGV_MAPERR;
        int fault;
        const struct exception_table_entry *fixup;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                (cause > 0 ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        /*
         * If we're in an interrupt or have no user context,
@@ -65,6 +64,8 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 
        local_irq_enable();
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -96,6 +97,7 @@ good_area:
        case FLT_STORE:
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
                break;
        }
 
index 6cf0341f978e59ddf235c44e70dcf615799390ed..7225dad87094d81e89459e5a61909fa5b2d10ca0 100644 (file)
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -90,8 +90,6 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
        mask = ((((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
                | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
 
-       flags |= ((mask & VM_WRITE) ? FAULT_FLAG_WRITE : 0);
-
        /* mmap_sem is performance critical.... */
        prefetchw(&mm->mmap_sem);
 
@@ -119,6 +117,10 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
        if (notify_page_fault(regs, TRAP_BRKPT))
                return;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (mask & VM_WRITE)
+               flags |= FAULT_FLAG_WRITE;
 retry:
        down_read(&mm->mmap_sem);
 
index 3cdfa9c1d0915b71969c0943b27da64e64156d53..e9c6a8014bd647eec50a66afb5bc75b076b35e4d 100644 (file)
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        unsigned long page, addr;
-       int write;
+       unsigned long flags = 0;
        int fault;
        siginfo_t info;
 
@@ -117,6 +117,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
+       if (error_code & ACE_USERMODE)
+               flags |= FAULT_FLAG_USER;
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -166,14 +169,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
  */
 good_area:
        info.si_code = SEGV_ACCERR;
-       write = 0;
        switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
                default:        /* 3: write, present */
                        /* fall through */
                case ACE_WRITE: /* write, not present */
                        if (!(vma->vm_flags & VM_WRITE))
                                goto bad_area;
-                       write++;
+                       flags |= FAULT_FLAG_WRITE;
                        break;
                case ACE_PROTECTION:    /* read, present */
                case 0:         /* read, not present */
@@ -194,7 +196,7 @@ good_area:
         */
        addr = (address & PAGE_MASK);
        set_thread_fault_code(error_code);
-       fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
+       fault = handle_mm_fault(mm, vma, addr, flags);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
index a563727806bf922b5b3315559b9b46b5bbb9d7fc..eb1d61f6872549991dae7d5e491a74627d8456d0 100644 (file)
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -88,6 +88,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
 
index 2c75bf7357c58deec87850b5cf293d510ddd9743..332680e5ebf23c7909b796c415c2273efd77ba3c 100644 (file)
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -53,8 +53,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
        struct vm_area_struct *vma, *prev_vma;
        siginfo_t info;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                               (write_access ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        tsk = current;
 
@@ -109,6 +108,8 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
 
@@ -121,6 +122,7 @@ good_area:
        if (write_access) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
                        goto bad_area;
@@ -224,8 +226,10 @@ do_sigbus:
         */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (user_mode(regs))
-               do_group_exit(SIGKILL);
+       if (user_mode(regs)) {
+               pagefault_out_of_memory();
+               return 1;
+       }
 
 no_context:
        /* Are we prepared to handle this kernel fault?  */
index 731f739d17a1be6c485a7759f9479adc592377d1..fa4cf52aa7a6d386711690005a314ece7d67fc53 100644 (file)
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -92,8 +92,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
        int code = SEGV_MAPERR;
        int is_write = error_code & ESR_S;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                        (is_write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        regs->ear = address;
        regs->esr = error_code;
@@ -121,6 +120,9 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
                die("Weird page fault", regs, SIGSEGV);
        }
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -199,6 +201,7 @@ good_area:
        if (unlikely(is_write)) {
                if (unlikely(!(vma->vm_flags & VM_WRITE)))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        /* a read */
        } else {
                /* protection fault */
index 5495101d32c8e5cad3187ff382493c1bc0e5a277..c2ec87e5d1cc6980f6f6e3fe72590b56079234af 100644 (file)
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -608,6 +608,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
                        r4k_blast_scache();
                else
                        blast_scache_range(addr, addr + size);
+               preempt_enable();
                __sync();
                return;
        }
@@ -649,6 +650,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
                         */
                        blast_inv_scache_range(addr, addr + size);
                }
+               preempt_enable();
                __sync();
                return;
        }
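
Note: both functions take preempt_disable() earlier on, outside these hunks, around the cache-op selection; the early-return paths above previously returned with preemption still disabled, which later trips scheduling-while-atomic or preempt-count warnings. The bug pattern in miniature:

    preempt_disable();
    if (cpu_has_inclusive_pcaches) {
            blast_scache_range(addr, addr + size);
            preempt_enable();       /* this was the missing piece */
            __sync();
            return;
    }
    /* ... slower per-cache path ... */
    preempt_enable();
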
index 0fead53d1c26b261affa89d00c1fa06844f9e602..0214a43b9911b00e552d99c3396b07f9f12f6481 100644 (file)
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -41,8 +41,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
        const int field = sizeof(unsigned long) * 2;
        siginfo_t info;
        int fault;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                                (write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
 #if 0
        printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
@@ -92,6 +91,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs, unsigned long writ
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -113,6 +114,7 @@ good_area:
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (cpu_has_rixi) {
                        if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
@@ -240,6 +242,8 @@ out_of_memory:
         * (which will retry the fault, or kill us if we got oom-killed).
         */
        up_read(&mm->mmap_sem);
+       if (!user_mode(regs))
+               goto no_context;
        pagefault_out_of_memory();
        return;
 
index d48a84fd7fae51b298ee1b03956dd823dd82ffdf..3516cbdf1ee93acb82ebef6428f79df9af104514 100644 (file)
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -171,6 +171,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
 
@@ -345,9 +347,10 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
-       if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
-               do_exit(SIGKILL);
+       if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
+               pagefault_out_of_memory();
+               return;
+       }
        goto no_context;
 
 do_sigbus:
index e2bfafce66c53661064e2cf5e4b0d5d036e2c6a7..0703acf7d3276811919fd3d398ada99b1b9c6d50 100644 (file)
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -86,6 +86,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
        if (user_mode(regs)) {
                /* Exception was in userspace: reenable interrupts */
                local_irq_enable();
+               flags |= FAULT_FLAG_USER;
        } else {
                /* If exception was in a syscall, then IRQ's may have
                 * been enabled or disabled.  If they were enabled,
@@ -267,10 +268,10 @@ out_of_memory:
        __asm__ __volatile__("l.nop 1");
 
        up_read(&mm->mmap_sem);
-       printk("VM: killing process %s\n", tsk->comm);
-       if (user_mode(regs))
-               do_exit(SIGKILL);
-       goto no_context;
+       if (!user_mode(regs))
+               goto no_context;
+       pagefault_out_of_memory();
+       return;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
index 0a3eada1863b71e1e859e5bf1536da089a65ffd1..f395cde7b5931a2f6d8d9cb1903c5dfffa319185 100644 (file)
--- a/arch/parisc/include/uapi/asm/shmbuf.h
+++ b/arch/parisc/include/uapi/asm/shmbuf.h
@@ -36,23 +36,16 @@ struct shmid64_ds {
        unsigned int            __unused2;
 };
 
-#ifdef CONFIG_64BIT
-/* The 'unsigned int' (formerly 'unsigned long') data types below will
- * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
- * a wide kernel, but if some of these values are meant to contain pointers
- * they may need to be 'long long' instead. -PB XXX FIXME
- */
-#endif
 struct shminfo64 {
-       unsigned int    shmmax;
-       unsigned int    shmmin;
-       unsigned int    shmmni;
-       unsigned int    shmseg;
-       unsigned int    shmall;
-       unsigned int    __unused1;
-       unsigned int    __unused2;
-       unsigned int    __unused3;
-       unsigned int    __unused4;
+       unsigned long   shmmax;
+       unsigned long   shmmin;
+       unsigned long   shmmni;
+       unsigned long   shmseg;
+       unsigned long   shmall;
+       unsigned long   __unused1;
+       unsigned long   __unused2;
+       unsigned long   __unused3;
+       unsigned long   __unused4;
 };
 
 #endif /* _PARISC_SHMBUF_H */
index 10a0c2aad8cfd1e36dc2f75ffcf7ab841a094b86..b24732d1bdbf7ad5ae28864c4f40b952bac84273 100644 (file)
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
        ENTRY_COMP(msgsnd)
        ENTRY_COMP(msgrcv)
        ENTRY_SAME(msgget)              /* 190 */
-       ENTRY_SAME(msgctl)
-       ENTRY_SAME(shmat)
+       ENTRY_COMP(msgctl)
+       ENTRY_COMP(shmat)
        ENTRY_SAME(shmdt)
        ENTRY_SAME(shmget)
-       ENTRY_SAME(shmctl)              /* 195 */
+       ENTRY_COMP(shmctl)              /* 195 */
        ENTRY_SAME(ni_syscall)          /* streams1 */
        ENTRY_SAME(ni_syscall)          /* streams2 */
        ENTRY_SAME(lstat64)
        ENTRY_SAME(epoll_ctl)           /* 225 */
        ENTRY_SAME(epoll_wait)
        ENTRY_SAME(remap_file_pages)
-       ENTRY_SAME(semtimedop)
+       ENTRY_COMP(semtimedop)
        ENTRY_COMP(mq_open)
        ENTRY_SAME(mq_unlink)           /* 230 */
        ENTRY_COMP(mq_timedsend)
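
Note: on a 64-bit parisc kernel, ENTRY_SAME points 32-bit callers at the native handler, while ENTRY_COMP routes them through the compat_sys_*() wrapper that translates 32-bit structure layouts. With struct shminfo64 widened to unsigned long above, the native handlers no longer match what 32-bit userspace passes, so msgctl, shmat, shmctl and semtimedop must go through their compat wrappers, which work on a 32-bit mirror of the structure, roughly as in ipc/compat.c:

    struct compat_shminfo64 {
            compat_ulong_t  shmmax;         /* 32 bits wide for compat tasks */
            compat_ulong_t  shmmin;
            compat_ulong_t  shmmni;
            compat_ulong_t  shmseg;
            compat_ulong_t  shmall;
            compat_ulong_t  __unused1, __unused2, __unused3, __unused4;
    };
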
index f247a3480e8e0cd7e276f4fd04004cbb608b3e11..d10d27a720c0d1f323c2248f01cc93193e73ca1e 100644 (file)
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -180,6 +180,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (acc_type & VM_WRITE)
+               flags |= FAULT_FLAG_WRITE;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma_prev(mm, address, &prev_vma);
@@ -203,8 +207,7 @@ good_area:
         * fault.
         */
 
-       fault = handle_mm_fault(mm, vma, address,
-                       flags | ((acc_type & VM_WRITE) ? FAULT_FLAG_WRITE : 0));
+       fault = handle_mm_fault(mm, vma, address, flags);
 
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                return;
index 8726779e1409b5da36c1e1ea852276cf47d4252c..d9196c9f93d9dd7c1cb075fa3e0b732cd0ba9993 100644 (file)
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -223,9 +223,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        is_write = error_code & ESR_DST;
 #endif /* CONFIG_4xx || CONFIG_BOOKE */
 
-       if (is_write)
-               flags |= FAULT_FLAG_WRITE;
-
 #ifdef CONFIG_PPC_ICSWX
        /*
         * we need to do this early because this "data storage
@@ -280,6 +277,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+
        /* When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
         * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -408,6 +408,7 @@ good_area:
        } else if (is_write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        /* a read */
        } else {
                /* protection fault */
index 047c3e4c59a2e4ad03ea7812b8a5da8a6ab20f9a..416facec4a332eea5c33b6ffad24eb80643b5e2c 100644 (file)
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -302,6 +302,8 @@ static inline int do_exception(struct pt_regs *regs, int access)
        address = trans_exc_code & __FAIL_ADDR_MASK;
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
        if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
                flags |= FAULT_FLAG_WRITE;
        down_read(&mm->mmap_sem);
index 47b600e4b2c50ae853b72404f3e1b051dce15f53..52238983527d605914853fd5415ea39617944ffe 100644 (file)
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -47,6 +47,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        const int field = sizeof(unsigned long) * 2;
+       unsigned long flags = 0;
        siginfo_t info;
        int fault;
 
@@ -75,6 +76,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
        if (in_atomic() || !mm)
                goto bad_area_nosemaphore;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
        if (!vma)
@@ -95,18 +99,18 @@ good_area:
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)))
                        goto bad_area;
        }
 
-survive:
        /*
        * If for any reason at all we couldn't handle the fault,
        * make sure we exit gracefully rather than endlessly redo
        * the fault.
        */
-       fault = handle_mm_fault(mm, vma, address, write);
+       fault = handle_mm_fault(mm, vma, address, flags);
        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
@@ -167,15 +171,10 @@ no_context:
        */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (is_global_init(tsk)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       printk("VM: killing process %s\n", tsk->comm);
-       if (user_mode(regs))
-               do_group_exit(SIGKILL);
-       goto no_context;
+       if (!user_mode(regs))
+               goto no_context;
+       pagefault_out_of_memory();
+       return;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
index 1f49c28affa90495047c6b82577b2d5221bc089b..541dc610150888e706977c7944c42ab1d61d7437 100644 (file)
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -400,9 +400,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        struct mm_struct *mm;
        struct vm_area_struct * vma;
        int fault;
-       int write = error_code & FAULT_CODE_WRITE;
-       unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                             (write ? FAULT_FLAG_WRITE : 0));
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        tsk = current;
        mm = tsk->mm;
@@ -476,6 +474,11 @@ good_area:
 
        set_thread_fault_code(error_code);
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (error_code & FAULT_CODE_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
index 905832aa9e9ec0c4d6520567e61d26e96fda0d97..a0ed182ae73c5b54087da8b271338a8b13098d85 100644 (file)
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -21,7 +21,7 @@
 
 extern int __atomic_add_return(int, atomic_t *);
 extern int atomic_cmpxchg(atomic_t *, int, int);
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+extern int atomic_xchg(atomic_t *, int);
 extern int __atomic_add_unless(atomic_t *, int, int);
 extern void atomic_set(atomic_t *, int);
 
index 1fae1a02e3c2136324cab7429d5561f8db76ed3c..ae0f9a7a314d12585fc83f1550c8d6401cf72f9b 100644 (file)
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
 #ifndef __ARCH_SPARC_CMPXCHG__
 #define __ARCH_SPARC_CMPXCHG__
 
-static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
-{
-       __asm__ __volatile__("swap [%2], %0"
-                            : "=&r" (val)
-                            : "0" (val), "r" (m)
-                            : "memory");
-       return val;
-}
-
+extern unsigned long __xchg_u32(volatile u32 *m, u32 new);
 extern void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
        switch (size) {
        case 4:
-               return xchg_u32(ptr, x);
+               return __xchg_u32(ptr, x);
        }
        __xchg_called_with_bad_pointer();
        return x;
index 432afa83886137a89bcd6fa66911922addfa50d3..55841c184e6d5d4ae23d15f7f1506b903b86c540 100644 (file)
--- a/arch/sparc/include/asm/vio.h
+++ b/arch/sparc/include/asm/vio.h
@@ -118,12 +118,18 @@ struct vio_disk_attr_info {
        u8                      vdisk_type;
 #define VD_DISK_TYPE_SLICE     0x01 /* Slice in block device   */
 #define VD_DISK_TYPE_DISK      0x02 /* Entire block device     */
-       u16                     resv1;
+       u8                      vdisk_mtype;            /* v1.1 */
+#define VD_MEDIA_TYPE_FIXED    0x01 /* Fixed device */
+#define VD_MEDIA_TYPE_CD       0x02 /* CD Device    */
+#define VD_MEDIA_TYPE_DVD      0x03 /* DVD Device   */
+       u8                      resv1;
        u32                     vdisk_block_size;
        u64                     operations;
-       u64                     vdisk_size;
+       u64                     vdisk_size;             /* v1.1 */
        u64                     max_xfer_size;
-       u64                     resv2[2];
+       u32                     phys_block_size;        /* v1.2 */
+       u32                     resv2;
+       u64                     resv3[1];
 };
 
 struct vio_disk_desc {
@@ -259,7 +265,7 @@ static inline u32 vio_dring_avail(struct vio_dring_state *dr,
                                  unsigned int ring_size)
 {
        return (dr->pending -
-               ((dr->prod - dr->cons) & (ring_size - 1)));
+               ((dr->prod - dr->cons) & (ring_size - 1)) - 1);
 }
 
 #define VIO_MAX_TYPE_LEN       32
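
Note: the extra "- 1" in vio_dring_avail() keeps one descriptor slot in reserve. If every slot were usable, a completely full ring and a completely empty one would both satisfy prod == cons, so the producer could not tell them apart; giving up one slot is the classic resolution. An illustrative reduction (ring_avail() is not a kernel function, and vio uses dr->pending as the capacity, but the invariant is the same):

    /* size is a power of two; prod and cons increment without masking */
    static inline u32 ring_avail(u32 prod, u32 cons, u32 size)
    {
            return size - 1 - ((prod - cons) & (size - 1));
    }
    /* avail == size - 1 means empty; avail == 0 means full */
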
index 8f76f23dac38ec66b0afea55a5311e612f2459f0..f9c6813c132d606c2a2d138342d4ed9941165f47 100644 (file)
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
 {
        unsigned long csr_reg, csr, csr_error_bits;
        irqreturn_t ret = IRQ_NONE;
-       u16 stat;
+       u32 stat;
 
        csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
        csr = upa_readq(csr_reg);
@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
                               pbm->name);
                ret = IRQ_HANDLED;
        }
-       pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat);
+       pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
        if (stat & (PCI_STATUS_PARITY |
                    PCI_STATUS_SIG_TARGET_ABORT |
                    PCI_STATUS_REC_TARGET_ABORT |
@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
                    PCI_STATUS_SIG_SYSTEM_ERROR)) {
                printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
                       pbm->name, stat);
-               pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff);
+               pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
                ret = IRQ_HANDLED;
        }
        return ret;
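
Note: pbm->pci_bus is the root bus here, and its ->self (the upstream bridge device) is NULL on these controllers, so the old pci_read_config_word()/pci_write_config_word() calls dereferenced a NULL pointer when a PCI error interrupt fired. Going through pbm->pci_ops with an explicit devfn of 0 reaches the same PCI_STATUS register without needing a struct pci_dev. stat widens to u32 because the raw accessor always returns through a u32 regardless of access size:

    /* from struct pci_ops */
    int (*read)(struct pci_bus *bus, unsigned int devfn,
                int where, int size, u32 *val);
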
index 8565ecd7d48a1ab233f4558ebacf55da04be6c5b..173964d5e948a19664a56d2f7d4b022bc43e3f10 100644 (file)
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -821,13 +821,17 @@ void arch_send_call_function_single_ipi(int cpu)
 void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
+       irq_enter();
        generic_smp_call_function_interrupt();
+       irq_exit();
 }
 
 void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
 {
        clear_softint(1 << irq);
+       irq_enter();
        generic_smp_call_function_single_interrupt();
+       irq_exit();
 }
 
 static void tsb_sync(void *info)
index 1d32b54089aad3e3d6094f9d3d013b4f3664602b..8f2f94d53434af179ae1be20a1573d3e0df026f4 100644 (file)
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -40,6 +40,19 @@ int __atomic_add_return(int i, atomic_t *v)
 }
 EXPORT_SYMBOL(__atomic_add_return);
 
+int atomic_xchg(atomic_t *v, int new)
+{
+       int ret;
+       unsigned long flags;
+
+       spin_lock_irqsave(ATOMIC_HASH(v), flags);
+       ret = v->counter;
+       v->counter = new;
+       spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
+       return ret;
+}
+EXPORT_SYMBOL(atomic_xchg);
+
 int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        int ret;
@@ -132,3 +145,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
        return (unsigned long)prev;
 }
 EXPORT_SYMBOL(__cmpxchg_u32);
+
+unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
+{
+       unsigned long flags;
+       u32 prev;
+
+       spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
+       prev = *ptr;
+       *ptr = new;
+       spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
+
+       return (unsigned long)prev;
+}
+EXPORT_SYMBOL(__xchg_u32);
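
Note: sparc32 emulates 32-bit atomics with a small hash of spinlocks keyed by address. The new __xchg_u32() and atomic_xchg() take the same hashed lock as __cmpxchg_u32() and the other helpers; the swap-instruction xchg they replace (removed from cmpxchg_32.h above) was not atomic with respect to the lock-based cmpxchg on the same word. Roughly, from the top of this file in an SMP build:

    #define ATOMIC_HASH_SIZE        4
    #define ATOMIC_HASH(a)  (&__atomic_hash[(((unsigned long)a) >> 8) & \
                                            (ATOMIC_HASH_SIZE - 1)])

    spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
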
index e98bfda205a2beb97bcaa49a27d5a9412bf03dc3..59dbd46457250b050ce84a48370a4f96afa3746d 100644 (file)
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -177,8 +177,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
        unsigned long g2;
        int from_user = !(regs->psr & PSR_PS);
        int fault, code;
-       unsigned int flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                             (write ? FAULT_FLAG_WRITE : 0));
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        if (text_fault)
                address = regs->pc;
@@ -235,6 +234,11 @@ good_area:
                        goto bad_area;
        }
 
+       if (from_user)
+               flags |= FAULT_FLAG_USER;
+       if (write)
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
@@ -383,6 +387,7 @@ static void force_user_fault(unsigned long address, int write)
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
+       unsigned int flags = FAULT_FLAG_USER;
        int code;
 
        code = SEGV_MAPERR;
@@ -402,11 +407,12 @@ good_area:
        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
-       switch (handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0)) {
+       switch (handle_mm_fault(mm, vma, address, flags)) {
        case VM_FAULT_SIGBUS:
        case VM_FAULT_OOM:
                goto do_sigbus;
index ea83f82464dab8c8de95b3b9e66e30916a9b820e..3841a081beb3967d9f2f08d0c6d1b4abf5390ff0 100644 (file)
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -323,7 +323,8 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
                        bad_kernel_pc(regs, address);
                        return;
                }
-       }
+       } else
+               flags |= FAULT_FLAG_USER;
 
        /*
         * If we're in an interrupt or have no user
@@ -426,13 +427,14 @@ good_area:
                    vma->vm_file != NULL)
                        set_thread_fault_code(fault_code |
                                              FAULT_CODE_BLKCOMMIT);
+
+               flags |= FAULT_FLAG_WRITE;
        } else {
                /* Allow reads even for write-only mappings */
                if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
                        goto bad_area;
        }
 
-       flags |= ((fault_code & FAULT_CODE_WRITE) ? FAULT_FLAG_WRITE : 0);
        fault = handle_mm_fault(mm, vma, address, flags);
 
        if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
index 3d2b81c163a6aac89e0e32d862bd603021eec917..3ff289f422e635cdf74280f86488733c0f88c55f 100644 (file)
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -280,8 +280,7 @@ static int handle_page_fault(struct pt_regs *regs,
        if (!is_page_fault)
                write = 1;
 
-       flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                (write ? FAULT_FLAG_WRITE : 0));
+       flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
 
@@ -365,6 +364,9 @@ static int handle_page_fault(struct pt_regs *regs,
                goto bad_area_nosemaphore;
        }
 
+       if (!is_kernel_mode)
+               flags |= FAULT_FLAG_USER;
+
        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in the
@@ -425,12 +427,12 @@ good_area:
 #endif
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
+               flags |= FAULT_FLAG_WRITE;
        } else {
                if (!is_page_fault || !(vma->vm_flags & VM_READ))
                        goto bad_area;
        }
 
- survive:
        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
@@ -568,15 +570,10 @@ no_context:
  */
 out_of_memory:
        up_read(&mm->mmap_sem);
-       if (is_global_init(tsk)) {
-               yield();
-               down_read(&mm->mmap_sem);
-               goto survive;
-       }
-       pr_alert("VM: killing process %s\n", tsk->comm);
-       if (!is_kernel_mode)
-               do_group_exit(SIGKILL);
-       goto no_context;
+       if (is_kernel_mode)
+               goto no_context;
+       pagefault_out_of_memory();
+       return 0;
 
 do_sigbus:
        up_read(&mm->mmap_sem);
index 089f3987e273a2c3f9576ef923dbd8ba8648cca5..5c3aef74237ffda72a0b252d4ee4b118d14a969a 100644 (file)
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -30,8 +30,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        pmd_t *pmd;
        pte_t *pte;
        int err = -EFAULT;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                (is_write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        *code_out = SEGV_MAPERR;
 
@@ -42,6 +41,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        if (in_atomic())
                goto out_nosemaphore;
 
+       if (is_user)
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
@@ -58,12 +59,15 @@ retry:
 
 good_area:
        *code_out = SEGV_ACCERR;
-       if (is_write && !(vma->vm_flags & VM_WRITE))
-               goto out;
-
-       /* Don't require VM_READ|VM_EXEC for write faults! */
-       if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC)))
-               goto out;
+       if (is_write) {
+               if (!(vma->vm_flags & VM_WRITE))
+                       goto out;
+               flags |= FAULT_FLAG_WRITE;
+       } else {
+               /* Don't require VM_READ|VM_EXEC for write faults! */
+               if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+                       goto out;
+       }
 
        do {
                int fault;
@@ -124,6 +128,8 @@ out_of_memory:
         * (which will retry the fault, or kill us if we got oom-killed).
         */
        up_read(&mm->mmap_sem);
+       if (!is_user)
+               goto out_nosemaphore;
        pagefault_out_of_memory();
        return 0;
 }
index f9b5c10bccee96e8838484aaf6effc39b3c89bd1..0dc922dba9154d7cfcfe5352ec8ec77169e8082f 100644 (file)
--- a/arch/unicore32/mm/fault.c
+++ b/arch/unicore32/mm/fault.c
@@ -209,8 +209,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        struct task_struct *tsk;
        struct mm_struct *mm;
        int fault, sig, code;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                ((!(fsr ^ 0x12)) ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        tsk = current;
        mm = tsk->mm;
@@ -222,6 +221,11 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
        if (in_atomic() || !mm)
                goto no_context;
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
+       if (!(fsr ^ 0x12))
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * As per x86, we may deadlock here.  However, since the kernel only
         * validly references user space from well defined areas of the code,
@@ -278,6 +282,13 @@ retry:
               (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
                return 0;
 
+       /*
+        * If we are in kernel mode at this point, we
+        * have no context to handle this fault with.
+        */
+       if (!user_mode(regs))
+               goto no_context;
+
        if (fault & VM_FAULT_OOM) {
                /*
                 * We ran out of memory, call the OOM killer, and return to
@@ -288,13 +299,6 @@ retry:
                return 0;
        }
 
-       /*
-        * If we are in kernel mode at this point, we
-        * have no context to handle this fault with.
-        */
-       if (!user_mode(regs))
-               goto no_context;
-
        if (fault & VM_FAULT_SIGBUS) {
                /*
                 * We had some memory, but were unable to
index b45ac6affa9ce47c8eb108f81678ce5c116feeed..6d6bb6f4fd439275a9ea83a7bc5fe776f72b4753 100644 (file)
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2172,6 +2172,9 @@ __init int intel_pmu_init(void)
        case 62: /* IvyBridge EP */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
+               /* dTLB-load-misses on IVB is different than SNB */
+               hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));
 
index 29a8120e6fe88a88012490a51130be8b89ea2193..baa61e7370b751d6a42025db73f6650e06ce4f76 100644 (file)
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1475,15 +1475,6 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
        force_sig_info(SIGTRAP, &info, tsk);
 }
 
-
-#ifdef CONFIG_X86_32
-# define IS_IA32       1
-#elif defined CONFIG_IA32_EMULATION
-# define IS_IA32       is_compat_task()
-#else
-# define IS_IA32       0
-#endif
-
 /*
  * We must return the syscall number to actually look up in the table.
  * This can be -1L to skip running any syscall at all.
@@ -1521,7 +1512,7 @@ long syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);
 
-       if (IS_IA32)
+       if (is_ia32_task())
                audit_syscall_entry(AUDIT_ARCH_I386,
                                    regs->orig_ax,
                                    regs->bx, regs->cx,
index dd894d4859a89280591b72cd0f73ae5eadaf4cb0..ce20cb65de58611708ac032b4730aead27e2b254 100644 (file)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4837,7 +4837,7 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 
        ++vcpu->stat.insn_emulation_fail;
        trace_kvm_emulate_insn_failed(vcpu);
-       if (!is_guest_mode(vcpu)) {
+       if (!is_guest_mode(vcpu) && kvm_x86_ops->get_cpl(vcpu) == 0) {
                vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
                vcpu->run->internal.ndata = 0;
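
Note: with the added get_cpl() check, an emulation failure that happens while the guest is running unprivileged code (CPL above 0) no longer reports KVM_EXIT_INTERNAL_ERROR to the host, so a guest user process can no longer take down the whole VM. Just past this hunk the function queues an undefined-opcode exception instead; the tail is roughly:

    kvm_queue_exception(vcpu, UD_VECTOR);   /* guest sees #UD */

    return r;
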
index c1e9e4cbbd76d4aa1835e1e96715e098a003b6d0..d8b1ff68dbb9366692ba2502286172692fb36ef5 100644 (file)
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -842,23 +842,15 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
        force_sig_info_fault(SIGBUS, code, address, tsk, fault);
 }
 
-static noinline int
+static noinline void
 mm_fault_error(struct pt_regs *regs, unsigned long error_code,
               unsigned long address, unsigned int fault)
 {
-       /*
-        * Pagefault was interrupted by SIGKILL. We have no reason to
-        * continue pagefault.
-        */
-       if (fatal_signal_pending(current)) {
-               if (!(fault & VM_FAULT_RETRY))
-                       up_read(&current->mm->mmap_sem);
-               if (!(error_code & PF_USER))
-                       no_context(regs, error_code, address, 0, 0);
-               return 1;
+       if (fatal_signal_pending(current) && !(error_code & PF_USER)) {
+               up_read(&current->mm->mmap_sem);
+               no_context(regs, error_code, address, 0, 0);
+               return;
        }
-       if (!(fault & VM_FAULT_ERROR))
-               return 0;
 
        if (fault & VM_FAULT_OOM) {
                /* Kernel mode? Handle exceptions or die: */
@@ -866,7 +858,7 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                        up_read(&current->mm->mmap_sem);
                        no_context(regs, error_code, address,
                                   SIGSEGV, SEGV_MAPERR);
-                       return 1;
+                       return;
                }
 
                up_read(&current->mm->mmap_sem);
@@ -884,7 +876,6 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
                else
                        BUG();
        }
-       return 1;
 }
 
 static int spurious_fault_check(unsigned long error_code, pte_t *pte)
@@ -1017,9 +1008,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
        unsigned long address;
        struct mm_struct *mm;
        int fault;
-       int write = error_code & PF_WRITE;
-       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
-                                       (write ? FAULT_FLAG_WRITE : 0);
+       unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
 
        tsk = current;
        mm = tsk->mm;
@@ -1089,6 +1078,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
        if (user_mode_vm(regs)) {
                local_irq_enable();
                error_code |= PF_USER;
+               flags |= FAULT_FLAG_USER;
        } else {
                if (regs->flags & X86_EFLAGS_IF)
                        local_irq_enable();
@@ -1113,6 +1103,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
                return;
        }
 
+       if (error_code & PF_WRITE)
+               flags |= FAULT_FLAG_WRITE;
+
        /*
         * When running in the kernel we expect faults to occur only to
         * addresses in user space.  All other faults represent errors in
@@ -1191,9 +1184,17 @@ good_area:
         */
        fault = handle_mm_fault(mm, vma, address, flags);
 
-       if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
-               if (mm_fault_error(regs, error_code, address, fault))
-                       return;
+       /*
+        * If we need to retry but a fatal signal is pending, handle the
+        * signal first. We do not need to release the mmap_sem because it
+        * would already be released in __lock_page_or_retry in mm/filemap.c.
+        */
+       if (unlikely((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)))
+               return;
+
+       if (unlikely(fault & VM_FAULT_ERROR)) {
+               mm_fault_error(regs, error_code, address, fault);
+               return;
        }
 
        /*
index 51940fec6990f2b9a1c86fa5c43a199c3eade2f0..513effd48060743a3b6228b71b4c6703d601fca3 100644 (file)
@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
 #define __NR_pivot_root                        175
 __SYSCALL(175, sys_pivot_root, 2)
 #define __NR_umount                            176
-__SYSCALL(176, sys_umount, 2)
+__SYSCALL(176, sys_oldumount, 1)
+#define __ARCH_WANT_SYS_OLDUMOUNT
 #define __NR_swapoff                           177
 __SYSCALL(177, sys_swapoff, 1)
 #define __NR_sync                              178
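On xtensa, syscall 176 was declared with one argument but wired to the two-argument sys_umount; the fix points it at the historical one-argument sys_oldumount instead. From userspace the two interfaces look like this (sketch; actually unmounting needs root and an existing mount point):

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/mount.h>

    int main(int argc, char **argv)
    {
        if (argc < 2) {
            fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
            return 1;
        }
        /* umount() is the historical one-argument call (sys_oldumount);
         * umount2() adds a flags word and maps to sys_umount. */
        if (umount2(argv[1], MNT_DETACH) != 0) {
            fprintf(stderr, "umount2(%s): %s\n", argv[1], strerror(errno));
            return 1;
        }
        return 0;
    }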
index 4b7bc8db170ff8ac6befadb8246ac15938b74cae..70fa7bc42b4a0853012af6f6daaeb254f9c5a086 100644 (file)
@@ -72,6 +72,8 @@ void do_page_fault(struct pt_regs *regs)
               address, exccause, regs->pc, is_write? "w":"", is_exec? "x":"");
 #endif
 
+       if (user_mode(regs))
+               flags |= FAULT_FLAG_USER;
 retry:
        down_read(&mm->mmap_sem);
        vma = find_vma(mm, address);
index c3f09505f795ce9be15e420938a09878a561cfc7..64150a9ffff3fb54668ca7f12a00ff12d2a5777f 100644 (file)
@@ -61,6 +61,7 @@ enum board_ids {
        /* board IDs by feature in alphabetical order */
        board_ahci,
        board_ahci_ign_iferr,
+       board_ahci_nomsi,
        board_ahci_noncq,
        board_ahci_nosntf,
        board_ahci_yes_fbs,
@@ -120,6 +121,13 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
+       [board_ahci_nomsi] = {
+               AHCI_HFLAGS     (AHCI_HFLAG_NO_MSI),
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
        [board_ahci_noncq] = {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_NCQ),
                .flags          = AHCI_FLAG_COMMON,
@@ -312,6 +320,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
        { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
+       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
+       { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
+       { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
 
        /* JMicron 360/1/3/5/6, match class to avoid IDE function */
        { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -474,10 +487,10 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
 
        /*
-        * Samsung SSDs found on some macbooks.  NCQ times out.
-        * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+        * Samsung SSDs found on some macbooks.  NCQ times out if MSI is
+        * enabled.  https://bugzilla.kernel.org/show_bug.cgi?id=60731
         */
-       { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+       { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
 
        /* Enmotus */
        { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
index 5814deb6963d52a875708e78a4a3a38eb148145e..0ebadf93b6c5610cb0d7e46edcbdfa040719c199 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/blkdev.h>
 #include <linux/hdreg.h>
 #include <linux/genhd.h>
+#include <linux/cdrom.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -22,8 +23,8 @@
 
 #define DRV_MODULE_NAME                "sunvdc"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "1.0"
-#define DRV_MODULE_RELDATE     "June 25, 2007"
+#define DRV_MODULE_VERSION     "1.1"
+#define DRV_MODULE_RELDATE     "February 13, 2013"
 
 static char version[] =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
@@ -32,7 +33,7 @@ MODULE_DESCRIPTION("Sun LDOM virtual disk client driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
-#define VDC_TX_RING_SIZE       256
+#define VDC_TX_RING_SIZE       512
 
 #define WAITING_FOR_LINK_UP    0x01
 #define WAITING_FOR_TX_SPACE   0x02
@@ -65,11 +66,9 @@ struct vdc_port {
        u64                     operations;
        u32                     vdisk_size;
        u8                      vdisk_type;
+       u8                      vdisk_mtype;
 
        char                    disk_name[32];
-
-       struct vio_disk_geom    geom;
-       struct vio_disk_vtoc    label;
 };
 
 static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -79,9 +78,16 @@ static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
 
 /* Ordered from largest major to lowest */
 static struct vio_version vdc_versions[] = {
+       { .major = 1, .minor = 1 },
        { .major = 1, .minor = 0 },
 };
 
+static inline int vdc_version_supported(struct vdc_port *port,
+                                       u16 major, u16 minor)
+{
+       return port->vio.ver.major == major && port->vio.ver.minor >= minor;
+}
+
 #define VDCBLK_NAME    "vdisk"
 static int vdc_major;
 #define PARTITION_SHIFT        3
@@ -94,18 +100,54 @@ static inline u32 vdc_tx_dring_avail(struct vio_dring_state *dr)
 static int vdc_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
        struct gendisk *disk = bdev->bd_disk;
-       struct vdc_port *port = disk->private_data;
+       sector_t nsect = get_capacity(disk);
+       sector_t cylinders = nsect;
 
-       geo->heads = (u8) port->geom.num_hd;
-       geo->sectors = (u8) port->geom.num_sec;
-       geo->cylinders = port->geom.num_cyl;
+       geo->heads = 0xff;
+       geo->sectors = 0x3f;
+       sector_div(cylinders, geo->heads * geo->sectors);
+       geo->cylinders = cylinders;
+       if ((sector_t)(geo->cylinders + 1) * geo->heads * geo->sectors < nsect)
+               geo->cylinders = 0xffff;
 
        return 0;
 }
 
+/* Add ioctl/CDROM_GET_CAPABILITY to support cdrom_id in udev
+ * when vdisk_mtype is VD_MEDIA_TYPE_CD or VD_MEDIA_TYPE_DVD.
+ * Needed to be able to install inside an ldom from an iso image.
+ */
+static int vdc_ioctl(struct block_device *bdev, fmode_t mode,
+                    unsigned command, unsigned long argument)
+{
+       int i;
+       struct gendisk *disk;
+
+       switch (command) {
+       case CDROMMULTISESSION:
+               pr_debug(PFX "Multisession CDs not supported\n");
+               for (i = 0; i < sizeof(struct cdrom_multisession); i++)
+                       if (put_user(0, (char __user *)(argument + i)))
+                               return -EFAULT;
+               return 0;
+
+       case CDROM_GET_CAPABILITY:
+               disk = bdev->bd_disk;
+
+               if (bdev->bd_disk && (disk->flags & GENHD_FL_CD))
+                       return 0;
+               return -EINVAL;
+
+       default:
+               pr_debug(PFX "ioctl %08x not supported\n", command);
+               return -EINVAL;
+       }
+}
+
 static const struct block_device_operations vdc_fops = {
        .owner          = THIS_MODULE,
        .getgeo         = vdc_getgeo,
+       .ioctl          = vdc_ioctl,
 };
 
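vdc_getgeo() above no longer asks the server for geometry; it synthesizes the conventional 255-head, 63-sector layout from the disk capacity and clamps cylinders at 0xffff when the capacity overflows that scheme. The same arithmetic as a runnable sketch:

    #include <stdio.h>
    #include <stdint.h>

    /* Synthesize CHS geometry from a sector count, as vdc_getgeo() does. */
    static void fake_geometry(uint64_t nsect, unsigned *heads,
                              unsigned *sectors, uint32_t *cylinders)
    {
        uint64_t cyl = nsect;

        *heads = 0xff;                      /* 255 heads */
        *sectors = 0x3f;                    /*  63 sectors per track */
        cyl /= (uint64_t)*heads * *sectors;
        *cylinders = (uint32_t)cyl;
        /* Capacity larger than the synthetic geometry can express? Clamp. */
        if ((cyl + 1) * *heads * *sectors < nsect)
            *cylinders = 0xffff;
    }

    int main(void)
    {
        unsigned h, s;
        uint32_t c;

        fake_geometry(2097152, &h, &s, &c);  /* 1 GiB at 512-byte sectors */
        printf("C/H/S = %u/%u/%u\n", c, h, s);
        return 0;
    }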
 static void vdc_finish(struct vio_driver_state *vio, int err, int waiting_for)
@@ -165,9 +207,9 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
        struct vio_disk_attr_info *pkt = arg;
 
        viodbg(HS, "GOT ATTR stype[0x%x] ops[%llx] disk_size[%llu] disk_type[%x] "
-              "xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
+              "mtype[0x%x] xfer_mode[0x%x] blksz[%u] max_xfer[%llu]\n",
               pkt->tag.stype, pkt->operations,
-              pkt->vdisk_size, pkt->vdisk_type,
+              pkt->vdisk_size, pkt->vdisk_type, pkt->vdisk_mtype,
               pkt->xfer_mode, pkt->vdisk_block_size,
               pkt->max_xfer_size);
 
@@ -192,8 +234,11 @@ static int vdc_handle_attr(struct vio_driver_state *vio, void *arg)
                }
 
                port->operations = pkt->operations;
-               port->vdisk_size = pkt->vdisk_size;
                port->vdisk_type = pkt->vdisk_type;
+               if (vdc_version_supported(port, 1, 1)) {
+                       port->vdisk_size = pkt->vdisk_size;
+                       port->vdisk_mtype = pkt->vdisk_mtype;
+               }
                if (pkt->max_xfer_size < port->max_xfer_size)
                        port->max_xfer_size = pkt->max_xfer_size;
                port->vdisk_block_size = pkt->vdisk_block_size;
@@ -236,7 +281,9 @@ static void vdc_end_one(struct vdc_port *port, struct vio_dring_state *dr,
 
        __blk_end_request(req, (desc->status ? -EIO : 0), desc->size);
 
-       if (blk_queue_stopped(port->disk->queue))
+       /* restart blk queue when ring is half emptied */
+       if (blk_queue_stopped(port->disk->queue) &&
+           vdc_tx_dring_avail(dr) * 100 / VDC_TX_RING_SIZE >= 50)
                blk_start_queue(port->disk->queue);
 }
 
@@ -388,12 +435,6 @@ static int __send_request(struct request *req)
        for (i = 0; i < nsg; i++)
                len += sg[i].length;
 
-       if (unlikely(vdc_tx_dring_avail(dr) < 1)) {
-               blk_stop_queue(port->disk->queue);
-               err = -ENOMEM;
-               goto out;
-       }
-
        desc = vio_dring_cur(dr);
 
        err = ldc_map_sg(port->vio.lp, sg, nsg,
@@ -433,21 +474,32 @@ static int __send_request(struct request *req)
                port->req_id++;
                dr->prod = (dr->prod + 1) & (VDC_TX_RING_SIZE - 1);
        }
-out:
 
        return err;
 }
 
-static void do_vdc_request(struct request_queue *q)
+static void do_vdc_request(struct request_queue *rq)
 {
-       while (1) {
-               struct request *req = blk_fetch_request(q);
+       struct request *req;
 
-               if (!req)
-                       break;
+       while ((req = blk_peek_request(rq)) != NULL) {
+               struct vdc_port *port;
+               struct vio_dring_state *dr;
 
-               if (__send_request(req) < 0)
-                       __blk_end_request_all(req, -EIO);
+               port = req->rq_disk->private_data;
+               dr = &port->vio.drings[VIO_DRIVER_TX_RING];
+               if (unlikely(vdc_tx_dring_avail(dr) < 1))
+                       goto wait;
+
+               blk_start_request(req);
+
+               if (__send_request(req) < 0) {
+                       blk_requeue_request(rq, req);
+wait:
+                       /* Avoid pointless unplugs. */
+                       blk_stop_queue(rq);
+                       break;
+               }
        }
 }
 
@@ -656,25 +708,27 @@ static int probe_disk(struct vdc_port *port)
        if (comp.err)
                return comp.err;
 
-       err = generic_request(port, VD_OP_GET_VTOC,
-                             &port->label, sizeof(port->label));
-       if (err < 0) {
-               printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
-               return err;
-       }
-
-       err = generic_request(port, VD_OP_GET_DISKGEOM,
-                             &port->geom, sizeof(port->geom));
-       if (err < 0) {
-               printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
-                      "error %d\n", err);
-               return err;
+       if (vdc_version_supported(port, 1, 1)) {
+               /* vdisk_size should be set during the handshake; if it
+                * wasn't, the underlying disk is reserved by another system.
+                */
+               if (port->vdisk_size == -1)
+                       return -ENODEV;
+       } else {
+               struct vio_disk_geom geom;
+
+               err = generic_request(port, VD_OP_GET_DISKGEOM,
+                                     &geom, sizeof(geom));
+               if (err < 0) {
+                       printk(KERN_ERR PFX "VD_OP_GET_DISKGEOM returns "
+                              "error %d\n", err);
+                       return err;
+               }
+               port->vdisk_size = ((u64)geom.num_cyl *
+                                   (u64)geom.num_hd *
+                                   (u64)geom.num_sec);
        }
 
-       port->vdisk_size = ((u64)port->geom.num_cyl *
-                           (u64)port->geom.num_hd *
-                           (u64)port->geom.num_sec);
-
        q = blk_init_queue(do_vdc_request, &port->vio.lock);
        if (!q) {
                printk(KERN_ERR PFX "%s: Could not allocate queue.\n",
@@ -691,6 +745,10 @@ static int probe_disk(struct vdc_port *port)
 
        port->disk = g;
 
+       /* Each segment in a request is up to an aligned page in size. */
+       blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+       blk_queue_max_segment_size(q, PAGE_SIZE);
+
        blk_queue_max_segments(q, port->ring_cookies);
        blk_queue_max_hw_sectors(q, port->max_xfer_size);
        g->major = vdc_major;
@@ -704,9 +762,32 @@ static int probe_disk(struct vdc_port *port)
 
        set_capacity(g, port->vdisk_size);
 
-       printk(KERN_INFO PFX "%s: %u sectors (%u MB)\n",
+       if (vdc_version_supported(port, 1, 1)) {
+               switch (port->vdisk_mtype) {
+               case VD_MEDIA_TYPE_CD:
+                       pr_info(PFX "Virtual CDROM %s\n", port->disk_name);
+                       g->flags |= GENHD_FL_CD;
+                       g->flags |= GENHD_FL_REMOVABLE;
+                       set_disk_ro(g, 1);
+                       break;
+
+               case VD_MEDIA_TYPE_DVD:
+                       pr_info(PFX "Virtual DVD %s\n", port->disk_name);
+                       g->flags |= GENHD_FL_CD;
+                       g->flags |= GENHD_FL_REMOVABLE;
+                       set_disk_ro(g, 1);
+                       break;
+
+               case VD_MEDIA_TYPE_FIXED:
+                       pr_info(PFX "Virtual Hard disk %s\n", port->disk_name);
+                       break;
+               }
+       }
+
+       pr_info(PFX "%s: %u sectors (%u MB) protocol %d.%d\n",
               g->disk_name,
-              port->vdisk_size, (port->vdisk_size >> (20 - 9)));
+              port->vdisk_size, (port->vdisk_size >> (20 - 9)),
+              port->vio.ver.major, port->vio.ver.minor);
 
        add_disk(g);
 
@@ -765,6 +846,7 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        else
                snprintf(port->disk_name, sizeof(port->disk_name),
                         VDCBLK_NAME "%c", 'a' + ((int)vdev->dev_no % 26));
+       port->vdisk_size = -1;
 
        err = vio_driver_init(&port->vio, vdev, VDEV_DISK,
                              vdc_versions, ARRAY_SIZE(vdc_versions),
index ac1b43a0428531273c4fdaefd56a0b83f1545ce2..4f73c727a97acd222dbeb1fcab3812092b349b43 100644 (file)
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
            _IOC_SIZE(cmd) > sizeof(buffer))
                return -ENOTTY;
 
-       if (_IOC_DIR(cmd) == _IOC_READ)
-               memset(&buffer, 0, _IOC_SIZE(cmd));
+       memset(&buffer, 0, sizeof(buffer));
 
        if (_IOC_DIR(cmd) & _IOC_WRITE)
                if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
index e62a9ce3e4dc51b4fd840f54b2bfdbdc07c804d2..ead08a49bec0fc16962ba3cc0ece2f593395048f 100644 (file)
@@ -2379,6 +2379,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
                                        WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
                                        tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
                                        WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
+                                       WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
                                }
                        } else {
                                tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
index 7c5d72a6a26a3400fbedfcbe19e37e1abb298f1f..19e070f16e6beb7a220faaa50dbcf2ea478fe571 100644 (file)
@@ -873,7 +873,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
 {
        struct alps_data *priv = psmouse->private;
 
-       if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */
+       /*
+        * Check if we are dealing with a bare PS/2 packet, presumably from
+        * a device connected to the external PS/2 port. Because bare PS/2
+        * protocol does not have enough constant bits to self-synchronize
+        * properly we only do this if the device is fully synchronized.
+        */
+       if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
                if (psmouse->pktcnt == 3) {
                        alps_report_bare_ps2_packet(psmouse, psmouse->packet,
                                                    true);
@@ -1816,6 +1822,9 @@ int alps_init(struct psmouse *psmouse)
        /* We are having trouble resyncing ALPS touchpads so disable it for now */
        psmouse->resync_time = 0;
 
+       /* Allow 2 invalid packets without resetting device */
+       psmouse->resetafter = psmouse->pktsize * 2;
+
        return 0;
 
 init_fail:
index 2dea49c4279e23db60239b7c20b8f08040599ae6..84cddccc024978f50790395c3c04c9ba1d8343f2 100644 (file)
@@ -785,8 +785,7 @@ struct dm_raid_superblock {
        __le32 layout;
        __le32 stripe_sectors;
 
-       __u8 pad[452];          /* Round struct to 512 bytes. */
-                               /* Always set to 0 when writing. */
+       /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
 } __packed;
 
 static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -823,7 +822,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
                    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
                        failed_devices |= (1ULL << i);
 
-       memset(sb, 0, sizeof(*sb));
+       memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
 
        sb->magic = cpu_to_le32(DM_RAID_MAGIC);
        sb->features = cpu_to_le32(0);  /* No features yet */
@@ -858,7 +857,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
        uint64_t events_sb, events_refsb;
 
        rdev->sb_start = 0;
-       rdev->sb_size = sizeof(*sb);
+       rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
+       if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
+               DMERR("superblock size of a logical block is no longer valid");
+               return -EINVAL;
+       }
 
        ret = read_disk_sb(rdev, rdev->sb_size);
        if (ret)
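With rdev->sb_size now the device's logical block size, super_sync() must not memset the whole buffer before filling it; it zeroes only the tail past the structure. The `sb + 1` idiom is plain struct pointer arithmetic, shown standalone here (the struct is a stand-in, not the dm-raid layout):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct sb {                     /* stand-in for dm_raid_superblock */
        unsigned magic;
        unsigned events;
    };

    int main(void)
    {
        size_t sb_size = 512;       /* e.g. the logical block size */
        struct sb *sb = malloc(sb_size);

        if (!sb)
            return 1;
        sb->magic = 0x12345678;
        sb->events = 42;
        /* sb + 1 == (char *)sb + sizeof(*sb): zero the rest of the block
         * without clobbering the fields that were just set. */
        memset(sb + 1, 0, sb_size - sizeof(*sb));
        printf("kept magic %#x, zeroed %zu tail bytes\n",
               sb->magic, sb_size - sizeof(*sb));
        free(sb);
        return 0;
    }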
index 37d367bb9aa8976653d5bbc80b29d0dfbf037372..bf2b80d5c4707a64210b5e57deb785069dc7d921 100644 (file)
@@ -42,6 +42,12 @@ struct btree_node {
 } __packed;
 
 
+/*
+ * Locks a block using the btree node validator.
+ */
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+                struct dm_block **result);
+
 void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
                  struct dm_btree_value_type *vt);
 
index cf9fd676ae444ad29f8fb6f6ef58d85706438092..1b5e13ec7f96a670ed7a9b5b472a5d2ee95a7dff 100644 (file)
@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
 
 /*----------------------------------------------------------------*/
 
-static int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
+int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
                 struct dm_block **result)
 {
        return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
index 35865425e4b4443e925014fd918655ef51c07d57..0a7592e88811e8aa45d9b89225bc98a459e0ee64 100644 (file)
@@ -812,22 +812,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_highest_key);
  * FIXME: We shouldn't use a recursive algorithm when we have limited stack
  * space.  Also this only works for single level trees.
  */
-static int walk_node(struct ro_spine *s, dm_block_t block,
+static int walk_node(struct dm_btree_info *info, dm_block_t block,
                     int (*fn)(void *context, uint64_t *keys, void *leaf),
                     void *context)
 {
        int r;
        unsigned i, nr;
+       struct dm_block *node;
        struct btree_node *n;
        uint64_t keys;
 
-       r = ro_step(s, block);
-       n = ro_node(s);
+       r = bn_read_lock(info, block, &node);
+       if (r)
+               return r;
+
+       n = dm_block_data(node);
 
        nr = le32_to_cpu(n->header.nr_entries);
        for (i = 0; i < nr; i++) {
                if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
-                       r = walk_node(s, value64(n, i), fn, context);
+                       r = walk_node(info, value64(n, i), fn, context);
                        if (r)
                                goto out;
                } else {
@@ -839,7 +843,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
        }
 
 out:
-       ro_pop(s);
+       dm_tm_unlock(info->tm, node);
        return r;
 }
 
@@ -847,15 +851,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
                  int (*fn)(void *context, uint64_t *keys, void *leaf),
                  void *context)
 {
-       int r;
-       struct ro_spine spine;
-
        BUG_ON(info->levels > 1);
-
-       init_ro_spine(&spine, info);
-       r = walk_node(&spine, root, fn, context);
-       exit_ro_spine(&spine);
-
-       return r;
+       return walk_node(info, root, fn, context);
 }
 EXPORT_SYMBOL_GPL(dm_btree_walk);
index 5c45c9d0712ddf949f0ac571b5649111585046ae..9c29552aedec2e7b1a08447619671cd65c170b77 100644 (file)
@@ -156,6 +156,9 @@ static int ttusbdecfe_dvbs_diseqc_send_master_cmd(struct dvb_frontend* fe, struc
                   0x00, 0x00, 0x00, 0x00,
                   0x00, 0x00 };
 
+       if (cmd->msg_len > sizeof(b) - 4)
+               return -EINVAL;
+
        memcpy(&b[4], cmd->msg, cmd->msg_len);
 
        state->config->send_command(fe, 0x72,
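The DiSEqC message is memcpy'd into a 10-byte on-stack command buffer at offset 4, so an unchecked msg_len from userspace could overflow the stack; the new guard rejects anything longer than the remaining six bytes. The shape of the check, standalone:

    #include <stdio.h>
    #include <string.h>

    /* Copy a caller-supplied message into a fixed command buffer at
     * offset 4, mirroring the guard added above. */
    static int send_cmd(const unsigned char *msg, size_t msg_len)
    {
        unsigned char b[10] = { 0 };

        if (msg_len > sizeof(b) - 4)
            return -1;              /* would overflow: reject (-EINVAL) */
        memcpy(&b[4], msg, msg_len);
        return 0;
    }

    int main(void)
    {
        unsigned char ok[6] = { 0xe0, 0x10, 0x38, 0xf0 };
        unsigned char bad[16] = { 0 };

        printf("6-byte msg:  %d\n", send_cmd(ok, sizeof(ok)));
        printf("16-byte msg: %d\n", send_cmd(bad, sizeof(bad)));
        return 0;
    }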
index 99cc0b07a71313e1970aad5efa0521888c3f7141..0513ea0906dd6dbdccee36a64515f4a55e085d65 100644 (file)
@@ -71,7 +71,7 @@ static int mei_cl_device_probe(struct device *dev)
 
        dev_dbg(dev, "Device probe\n");
 
-       strncpy(id.name, dev_name(dev), MEI_CL_NAME_SIZE);
+       strlcpy(id.name, dev_name(dev), sizeof(id.name));
 
        return driver->probe(device, &id);
 }
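strncpy() leaves the destination unterminated when the source is at least as long as the buffer, so an id.name built from a long dev_name() could run past the array; strlcpy() always terminates and truncates. glibc does not provide strlcpy, so this sketch carries a local equivalent:

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcpy: truncate as needed, always NUL-terminate. */
    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;

            memcpy(dst, src, n);
            dst[n] = '\0';
        }
        return len;     /* length the copy would have had, sans truncation */
    }

    int main(void)
    {
        char a[8], b[8];

        memset(a, 'X', sizeof(a));
        strncpy(a, "0123456789", sizeof(a));     /* all 8 bytes used, no NUL */
        my_strlcpy(b, "0123456789", sizeof(b));  /* 7 chars + NUL */

        printf("strncpy terminated? %s\n",
               memchr(a, '\0', sizeof(a)) ? "yes" : "no");
        printf("strlcpy result: \"%s\"\n", b);
        return 0;
    }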
index 4e6877a032a8414bd059e2dcf658616dc2cab0fe..bd8800c85525234d1bbf05adfa7e7d23af297e1e 100644 (file)
@@ -191,6 +191,39 @@ void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
                       MLX4_QP_STATE_RST, NULL, 0, 0, &ring->qp);
 }
 
+static void mlx4_en_stamp_wqe(struct mlx4_en_priv *priv,
+                             struct mlx4_en_tx_ring *ring, int index,
+                             u8 owner)
+{
+       __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
+       struct mlx4_en_tx_desc *tx_desc = ring->buf + index * TXBB_SIZE;
+       struct mlx4_en_tx_info *tx_info = &ring->tx_info[index];
+       void *end = ring->buf + ring->buf_size;
+       __be32 *ptr = (__be32 *)tx_desc;
+       int i;
+
+       /* Optimize the common case when there are no wraparounds */
+       if (likely((void *)tx_desc + tx_info->nr_txbb * TXBB_SIZE <= end)) {
+               /* Stamp the freed descriptor */
+               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+                    i += STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += STAMP_DWORDS;
+               }
+       } else {
+               /* Stamp the freed descriptor */
+               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE;
+                    i += STAMP_STRIDE) {
+                       *ptr = stamp;
+                       ptr += STAMP_DWORDS;
+                       if ((void *)ptr >= end) {
+                               ptr = ring->buf;
+                               stamp ^= cpu_to_be32(0x80000000);
+                       }
+               }
+       }
+}
+
 
 static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                struct mlx4_en_tx_ring *ring,
@@ -205,8 +238,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
        void *end = ring->buf + ring->buf_size;
        int frags = skb_shinfo(skb)->nr_frags;
        int i;
-       __be32 *ptr = (__be32 *)tx_desc;
-       __be32 stamp = cpu_to_be32(STAMP_VAL | (!!owner << STAMP_SHIFT));
        struct skb_shared_hwtstamps hwts;
 
        if (timestamp) {
@@ -232,12 +263,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                        skb_frag_size(frag), PCI_DMA_TODEVICE);
                        }
                }
-               /* Stamp the freed descriptor */
-               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
-                       *ptr = stamp;
-                       ptr += STAMP_DWORDS;
-               }
-
        } else {
                if (!tx_info->inl) {
                        if ((void *) data >= end) {
@@ -263,16 +288,6 @@ static u32 mlx4_en_free_tx_desc(struct mlx4_en_priv *priv,
                                ++data;
                        }
                }
-               /* Stamp the freed descriptor */
-               for (i = 0; i < tx_info->nr_txbb * TXBB_SIZE; i += STAMP_STRIDE) {
-                       *ptr = stamp;
-                       ptr += STAMP_DWORDS;
-                       if ((void *) ptr >= end) {
-                               ptr = ring->buf;
-                               stamp ^= cpu_to_be32(0x80000000);
-                       }
-               }
-
        }
        dev_kfree_skb_any(skb);
        return tx_info->nr_txbb;
@@ -318,8 +333,9 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        struct mlx4_en_tx_ring *ring = &priv->tx_ring[cq->ring];
        struct mlx4_cqe *cqe;
        u16 index;
-       u16 new_index, ring_index;
+       u16 new_index, ring_index, stamp_index;
        u32 txbbs_skipped = 0;
+       u32 txbbs_stamp = 0;
        u32 cons_index = mcq->cons_index;
        int size = cq->size;
        u32 size_mask = ring->size_mask;
@@ -335,6 +351,7 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
        index = cons_index & size_mask;
        cqe = &buf[(index << factor) + factor];
        ring_index = ring->cons & size_mask;
+       stamp_index = ring_index;
 
        /* Process all completed CQEs */
        while (XNOR(cqe->owner_sr_opcode & MLX4_CQE_OWNER_MASK,
@@ -359,6 +376,12 @@ static void mlx4_en_process_tx_cq(struct net_device *dev, struct mlx4_en_cq *cq)
                                        priv, ring, ring_index,
                                        !!((ring->cons + txbbs_skipped) &
                                        ring->size), timestamp);
+
+                       mlx4_en_stamp_wqe(priv, ring, stamp_index,
+                                         !!((ring->cons + txbbs_stamp) &
+                                               ring->size));
+                       stamp_index = ring_index;
+                       txbbs_stamp = txbbs_skipped;
                        packets++;
                        bytes += ring->tx_info[ring_index].nr_bytes;
                } while (ring_index != new_index);
index 398faff8be7a26737b84071961ee05c5768c5da1..ade8bdfc03afa0673ef2052348e33138e3e0115f 100644 (file)
@@ -656,7 +656,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        spin_lock_irqsave(&port->vio.lock, flags);
 
        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
-       if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+       if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
                if (!netif_queue_stopped(dev)) {
                        netif_stop_queue(dev);
 
@@ -704,7 +704,7 @@ static int vnet_start_xmit(struct sk_buff *skb, struct net_device *dev)
        dev->stats.tx_bytes += skb->len;
 
        dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
-       if (unlikely(vnet_tx_dring_avail(dr) < 2)) {
+       if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
                netif_stop_queue(dev);
                if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
                        netif_wake_queue(dev);
index 9e56eb479a4fcb3b1d7d381b255381db0e07e0c4..2d255ba911d51c52a7b1e038d37e3cc28cbdc0b5 100644 (file)
@@ -625,6 +625,8 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
                vnet_hdr->csum_start = skb_checksum_start_offset(skb);
+               if (vlan_tx_tag_present(skb))
+                       vnet_hdr->csum_start += VLAN_HLEN;
                vnet_hdr->csum_offset = skb->csum_offset;
        } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
                vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
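When macvtap reinserts the stripped VLAN tag into the frame it copies to userspace, every offset measured from the start of the frame grows by the 4-byte tag, so csum_start has to be bumped by VLAN_HLEN as well. The offsets, numerically (standard header sizes for an untagged vs. tagged IPv4/TCP frame; nothing here is read from the driver):

    #include <stdio.h>

    #define ETH_HLEN  14    /* dst MAC + src MAC + ethertype */
    #define VLAN_HLEN  4    /* 802.1Q tag, inserted after the MACs */
    #define IP_HLEN   20    /* minimal IPv4 header */

    int main(void)
    {
        int csum_start = ETH_HLEN + IP_HLEN;    /* TCP header offset */

        printf("csum_start, untagged frame: %d\n", csum_start);
        printf("csum_start, tagged frame:   %d\n", csum_start + VLAN_HLEN);
        return 0;
    }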
index 72d2ecce0b8d7614c870cfb41e5d7e4773cd1bd8..d8df1d9b0de3ce3337965fa4626a5f6ed7449b1f 100644 (file)
@@ -489,6 +489,7 @@ enum iwl_trans_state {
  *     Set during transport allocation.
  * @hw_id_str: a string with info about HW ID. Set during transport allocation.
  * @pm_support: set to true in start_hw if link pm is supported
+ * @ltr_enabled: set to true if the LTR is enabled
  * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
  *     The user should use iwl_trans_{alloc,free}_tx_cmd.
  * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -513,6 +514,7 @@ struct iwl_trans {
        u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
 
        bool pm_support;
+       bool ltr_enabled;
 
        /* The following fields are internal only */
        struct kmem_cache *dev_cmd_pool;
index 81fe45f46be7e97f1d346ada8c457d065b9a377e..ac38ecf13c18103de520b66706877881aa62fd63 100644 (file)
 /* Power Management Commands, Responses, Notifications */
 
 /**
- * enum iwl_scan_flags - masks for power table command flags
+ * enum iwl_ltr_config_flags - masks for LTR config command flags
+ * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
+ * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
+ *     memory access
+ * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
+ *     reg change
+ * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
+ *     D0 to D3
+ * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
+ * @LTR_CFG_FLAG_SW_SET_LONG: fixed static long LTR register
+ * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
+ */
+enum iwl_ltr_config_flags {
+       LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
+       LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
+       LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
+       LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
+       LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
+       LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
+       LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
+};
+
+/**
+ * struct iwl_ltr_config_cmd - configures the LTR
+ * @flags: See %enum iwl_ltr_config_flags
+ */
+struct iwl_ltr_config_cmd {
+       __le32 flags;
+       __le32 static_long;
+       __le32 static_short;
+} __packed;
+
+/**
+ * enum iwl_power_flags - masks for power table command flags
  * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
  *             receiver and transmitter. '0' - does not allow.
  * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
index c6384555aab4de0172c7f1a7c5662cce776e76ed..4b6730db42a5e47c5de9d25df3b7ee005e9f7a46 100644 (file)
@@ -138,6 +138,7 @@ enum {
 
        /* Power */
        POWER_TABLE_CMD = 0x77,
+       LTR_CONFIG = 0xee,
 
        /* Scanning */
        SCAN_REQUEST_CMD = 0x80,
index e18c92dd60ecdd768c86e23fe6d0f6505f396018..d250d451fd015fb645ba49cd3d9048300fea6dc4 100644 (file)
@@ -443,6 +443,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
        if (ret)
                goto error;
 
+       if (mvm->trans->ltr_enabled) {
+               struct iwl_ltr_config_cmd cmd = {
+                       .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
+               };
+
+               WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
+                                            sizeof(cmd), &cmd));
+       }
+
        IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
 
        return 0;
index 388c8a91496046e99b9d91d837c195dc2c4f0118..649d301cfa2ac82567b49f68edcf10e1f2428cc7 100644 (file)
@@ -293,6 +293,7 @@ static const char *iwl_mvm_cmd_strings[REPLY_MAX] = {
        CMD(BT_PROFILE_NOTIFICATION),
        CMD(BT_CONFIG),
        CMD(MCAST_FILTER_CMD),
+       CMD(LTR_CONFIG),
 };
 #undef CMD
 
index ff04135d37afa74c5832e0452934bea631ac4304..6a5eb2b29418da1a60b091b162271ddca0fcede3 100644 (file)
@@ -116,11 +116,13 @@ static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
 
 /* PCI registers */
 #define PCI_CFG_RETRY_TIMEOUT  0x041
+#define PCI_EXP_DEVCTL2_LTR_EN 0x0400
 
 static void iwl_pcie_apm_config(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        u16 lctl;
+       u16 cap;
 
        /*
         * HW bug W/A for instability in PCIe bus L0S->L1 transition.
@@ -131,16 +133,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
         *    power savings, even without L1.
         */
        pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
-       if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
-               /* L1-ASPM enabled; disable(!) L0S */
+       if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
                iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-               dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
-       } else {
-               /* L1-ASPM disabled; enable(!) L0S */
+       else
                iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
-               dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
-       }
        trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
+
+       pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
+       trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
+       dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
+                (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
+                trans->ltr_enabled ? "En" : "Dis");
 }
 
 /*
index fa9a2171cc134b733c837f89a4f1b80aeb0989ea..b264d8fe190812285287c57c4a9b3a3b85f6f53c 100644 (file)
@@ -163,18 +163,24 @@ static void dell_wmi_notify(u32 value, void *context)
                const struct key_entry *key;
                int reported_key;
                u16 *buffer_entry = (u16 *)obj->buffer.pointer;
+               int buffer_size = obj->buffer.length/2;
 
-               if (dell_new_hk_type && (buffer_entry[1] != 0x10)) {
+               if (buffer_size >= 2 && dell_new_hk_type && buffer_entry[1] != 0x10) {
                        pr_info("Received unknown WMI event (0x%x)\n",
                                buffer_entry[1]);
                        kfree(obj);
                        return;
                }
 
-               if (dell_new_hk_type || buffer_entry[1] == 0x0)
+               if (buffer_size >= 3 && (dell_new_hk_type || buffer_entry[1] == 0x0))
                        reported_key = (int)buffer_entry[2];
-               else
+               else if (buffer_size >= 2)
                        reported_key = (int)buffer_entry[1] & 0xffff;
+               else {
+                       pr_info("Received unknown WMI event\n");
+                       kfree(obj);
+                       return;
+               }
 
                key = sparse_keymap_entry_from_scancode(dell_wmi_input_dev,
                                                        reported_key);
index 287667c20c6a37c30a4d8ba8ebebeeed8d27e422..62ed744bbe06c47487d5af052cfe9a9e46dbd32a 100644 (file)
@@ -1206,8 +1206,8 @@ static void complete_scsi_command(struct CommandList *cp)
        scsi_set_resid(cmd, ei->ResidualCnt);
 
        if (ei->CommandStatus == 0) {
-               cmd->scsi_done(cmd);
                cmd_free(h, cp);
+               cmd->scsi_done(cmd);
                return;
        }
 
@@ -1380,8 +1380,8 @@ static void complete_scsi_command(struct CommandList *cp)
                dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
                                cp, ei->CommandStatus);
        }
-       cmd->scsi_done(cmd);
        cmd_free(h, cp);
+       cmd->scsi_done(cmd);
 }
 
 static void hpsa_pci_unmap(struct pci_dev *pdev,
index f43de1e56420ac7916ca99c83281b81b417f4fb9..3668b1b23b5a3fd6a27d61fd4637e4a1904ad611 100644 (file)
@@ -1689,8 +1689,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
         * is no point trying to lock the door of an off-line device.
         */
        shost_for_each_device(sdev, shost) {
-               if (scsi_device_online(sdev) && sdev->locked)
+               if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
                        scsi_eh_lock_door(sdev);
+                       sdev->was_reset = 0;
+               }
        }
 
        /*
index e50170ca7c33f446acc16e29a0d0097828919c30..31666c92b46af29919f42ea3e1093caed7127d71 100644 (file)
@@ -157,14 +157,16 @@ out:
 
 int ioprio_best(unsigned short aprio, unsigned short bprio)
 {
-       unsigned short aclass = IOPRIO_PRIO_CLASS(aprio);
-       unsigned short bclass = IOPRIO_PRIO_CLASS(bprio);
+       unsigned short aclass;
+       unsigned short bclass;
 
-       if (aclass == IOPRIO_CLASS_NONE)
-               aclass = IOPRIO_CLASS_BE;
-       if (bclass == IOPRIO_CLASS_NONE)
-               bclass = IOPRIO_CLASS_BE;
+       if (!ioprio_valid(aprio))
+               aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
+       if (!ioprio_valid(bprio))
+               bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
 
+       aclass = IOPRIO_PRIO_CLASS(aprio);
+       bclass = IOPRIO_PRIO_CLASS(bprio);
        if (aclass == bclass)
                return min(aprio, bprio);
        if (aclass > bclass)
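An ioprio value packs a class and per-class data into one word. The fix normalizes an unset priority to a complete best-effort/normal value before comparing, instead of patching only the class and comparing against stale data bits. The packing macros, rebuilt in userspace (constants mirrored from include/linux/ioprio.h):

    #include <stdio.h>

    #define IOPRIO_CLASS_SHIFT      13
    #define IOPRIO_PRIO_CLASS(p)    ((p) >> IOPRIO_CLASS_SHIFT)
    #define IOPRIO_PRIO_DATA(p)     ((p) & ((1 << IOPRIO_CLASS_SHIFT) - 1))
    #define IOPRIO_PRIO_VALUE(c, d) (((c) << IOPRIO_CLASS_SHIFT) | (d))
    #define IOPRIO_NORM             4
    #define ioprio_valid(p)         (IOPRIO_PRIO_CLASS(p) != IOPRIO_CLASS_NONE)

    enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE,
           IOPRIO_CLASS_IDLE };

    static unsigned short ioprio_best(unsigned short a, unsigned short b)
    {
        if (!ioprio_valid(a))
            a = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
        if (!ioprio_valid(b))
            b = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
        if (IOPRIO_PRIO_CLASS(a) == IOPRIO_PRIO_CLASS(b))
            return a < b ? a : b;   /* same class: lower data wins */
        /* Different classes: RT(1) beats BE(2) beats IDLE(3). */
        return IOPRIO_PRIO_CLASS(a) > IOPRIO_PRIO_CLASS(b) ? b : a;
    }

    int main(void)
    {
        unsigned short unset = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
        unsigned short rt0 = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_RT, 0);
        unsigned short best = ioprio_best(unset, rt0);

        printf("best = class %u, data %u\n",
               IOPRIO_PRIO_CLASS(best), IOPRIO_PRIO_DATA(best));
        return 0;
    }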
index 4b49a8c6ccade73d7c28f6c5b30dbb05d208b820..ef0c394b7bf55cc5891e01fc4647acff2db41307 100644 (file)
@@ -108,6 +108,8 @@ again:
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
+               if (!nfs4_valid_open_stateid(state))
+                       continue;
                if (!nfs4_stateid_match(&state->stateid, stateid))
                        continue;
                get_nfs_open_context(ctx);
@@ -175,7 +177,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
 {
        int res = 0;
 
-       res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
+       if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+               res = nfs4_proc_delegreturn(inode,
+                               delegation->cred,
+                               &delegation->stateid,
+                               issync);
        nfs_free_delegation(delegation);
        return res;
 }
@@ -361,11 +367,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
 {
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
-       int err;
+       int err = 0;
 
        if (delegation == NULL)
                return 0;
        do {
+               if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
+                       break;
                err = nfs_delegation_claim_opens(inode, &delegation->stateid);
                if (!issync || err != -EAGAIN)
                        break;
@@ -586,10 +594,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
        rcu_read_unlock();
 }
 
+static void nfs_revoke_delegation(struct inode *inode)
+{
+       struct nfs_delegation *delegation;
+       rcu_read_lock();
+       delegation = rcu_dereference(NFS_I(inode)->delegation);
+       if (delegation != NULL) {
+               set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
+               nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
+       }
+       rcu_read_unlock();
+}
+
 void nfs_remove_bad_delegation(struct inode *inode)
 {
        struct nfs_delegation *delegation;
 
+       nfs_revoke_delegation(inode);
        delegation = nfs_inode_detach_delegation(inode);
        if (delegation) {
                nfs_inode_find_state_and_recover(inode, &delegation->stateid);
index 9a79c7a99d6d6dd64b03f58481dc8d0fb4b32818..e02b090ab9da86f6c5c3d7c8217b07bd72c282cc 100644 (file)
@@ -31,6 +31,7 @@ enum {
        NFS_DELEGATION_RETURN_IF_CLOSED,
        NFS_DELEGATION_REFERENCED,
        NFS_DELEGATION_RETURNING,
+       NFS_DELEGATION_REVOKED,
 };
 
 int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
index 0bd7a55a5f073befd4d0ce97e87cca2dd0d42e37..725e87538c98ad71ae5f28ded813d9e61fb8a6d1 100644 (file)
@@ -180,6 +180,7 @@ static void nfs_direct_req_free(struct kref *kref)
 {
        struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
 
+       nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
        if (dreq->l_ctx != NULL)
                nfs_put_lock_context(dreq->l_ctx);
        if (dreq->ctx != NULL)
index cd4b9073dd2093a9ebd2267293bc733f033244a4..e9be01b2cc5a4ecd0da8db70918ff5e89be1988e 100644 (file)
@@ -519,7 +519,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
 {
        struct inode *inode = dentry->d_inode;
        int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
-       int err;
+       int err = 0;
 
        /* Flush out writes to the server in order to update c/mtime.  */
        if (S_ISREG(inode->i_mode)) {
index 69fc437be661b958e1c93166ca45f2bc46296736..78787948f69d75a7ff97a569fa707771ab027710 100644 (file)
@@ -1416,7 +1416,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
                        nfs_inode_find_state_and_recover(state->inode,
                                        stateid);
                        nfs4_schedule_stateid_recovery(server, state);
-                       return 0;
+                       return -EAGAIN;
                case -NFS4ERR_DELAY:
                case -NFS4ERR_GRACE:
                        set_bit(NFS_DELEGATED_STATE, &state->flags);
@@ -1845,6 +1845,28 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
        return ret;
 }
 
+static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
+{
+       nfs_remove_bad_delegation(state->inode);
+       write_seqlock(&state->seqlock);
+       nfs4_stateid_copy(&state->stateid, &state->open_stateid);
+       write_sequnlock(&state->seqlock);
+       clear_bit(NFS_DELEGATED_STATE, &state->flags);
+}
+
+static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
+{
+       if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
+               nfs_finish_clear_delegation_stateid(state);
+}
+
+static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
+{
+       /* NFSv4.0 doesn't allow for delegation recovery on open expire */
+       nfs40_clear_delegation_stateid(state);
+       return nfs4_open_expired(sp, state);
+}
+
 #if defined(CONFIG_NFS_V4_1)
 static void nfs41_clear_delegation_stateid(struct nfs4_state *state)
 {
@@ -6974,7 +6996,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
 static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
        .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
        .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
-       .recover_open   = nfs4_open_expired,
+       .recover_open   = nfs40_open_expired,
        .recover_lock   = nfs4_lock_expired,
        .establish_clid = nfs4_init_clientid,
        .get_clid_cred  = nfs4_get_setclientid_cred,
index 7279b94c01da3fdb31108e9ca3d3e8fb5202e923..91aa89e1aaa06bbdcfcd3fc33401369f8e00968f 100644 (file)
@@ -285,7 +285,7 @@ extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
 extern void clocksource_suspend(void);
 extern void clocksource_resume(void);
-extern struct clocksource * __init __weak clocksource_default_clock(void);
+extern struct clocksource * __init clocksource_default_clock(void);
 extern void clocksource_mark_unstable(struct clocksource *cs);
 
 extern void
index c6e091bf39a52e73a0964036ef3e1888e7be61d9..bdfc95bddde965d8d9dad1d69e497e1af088cbad 100644 (file)
@@ -283,7 +283,7 @@ struct kgdb_io {
 
 extern struct kgdb_arch                arch_kgdb_ops;
 
-extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
+extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
 
 #ifdef CONFIG_SERIAL_KGDB_NMI
 extern int kgdb_register_nmi_console(void);
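Both of these hunks remove __weak from a declaration. The attribute belongs on the default definition only: a weak declaration in a shared header makes every definition that includes it weak, the arch override included, and the linker then keeps whichever weak definition it happens to see first. A two-file demonstration of the mechanism (generic attribute demo, not kernel code):

    /* default.c: the generic fallback, weak only at its definition. */
    #include <stdio.h>
    __attribute__((weak)) const char *which(void) { return "generic"; }

    /* override.c: the "arch" override plus a caller. */
    #include <stdio.h>
    const char *which(void);                 /* note: no weak here */
    const char *which(void) { return "arch"; }
    int main(void) { puts(which()); return 0; }

    /* $ gcc -o demo default.c override.c && ./demo
     *   arch
     * Had the shared declaration carried __attribute__((weak)), the
     * override definition would itself have become weak, and link
     * order would silently decide which body wins. */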
index d6183f06d8c182951fac67c17a2c05d2ce9f20fa..a3b4812f494f0c74a093fa5693f5e8b07fe6c40f 100644 (file)
@@ -124,6 +124,25 @@ extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
 extern void mem_cgroup_replace_page_cache(struct page *oldpage,
                                        struct page *newpage);
 
+static inline void mem_cgroup_oom_enable(void)
+{
+       WARN_ON(current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 1;
+}
+
+static inline void mem_cgroup_oom_disable(void)
+{
+       WARN_ON(!current->memcg_oom.may_oom);
+       current->memcg_oom.may_oom = 0;
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+       return p->memcg_oom.memcg;
+}
+
+bool mem_cgroup_oom_synchronize(bool wait);
+
 #ifdef CONFIG_MEMCG_SWAP
 extern int do_swap_account;
 #endif
@@ -347,6 +366,24 @@ static inline void mem_cgroup_end_update_page_stat(struct page *page,
 {
 }
 
+static inline void mem_cgroup_oom_enable(void)
+{
+}
+
+static inline void mem_cgroup_oom_disable(void)
+{
+}
+
+static inline bool task_in_memcg_oom(struct task_struct *p)
+{
+       return false;
+}
+
+static inline bool mem_cgroup_oom_synchronize(bool wait)
+{
+       return false;
+}
+
 static inline void mem_cgroup_inc_page_stat(struct page *page,
                                            enum mem_cgroup_page_stat_item idx)
 {
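These helpers open a per-task OOM window: the arch fault handler enables it around handle_mm_fault() for user faults, and once the fault path has unwound and dropped its locks, mem_cgroup_oom_synchronize() finishes (or cancels) the OOM handling. A simplified, stubbed mock of that control flow; the stubs below only stand in for the kernel functions declared above:

    #include <stdbool.h>
    #include <stdio.h>

    static struct { int may_oom; int pending; } current_task;

    static void mem_cgroup_oom_enable(void)  { current_task.may_oom = 1; }
    static void mem_cgroup_oom_disable(void) { current_task.may_oom = 0; }
    static bool task_in_memcg_oom(void)      { return current_task.pending; }

    static bool mem_cgroup_oom_synchronize(bool handle)
    {
        if (!current_task.pending)
            return false;           /* OOM was global, not memcg */
        if (handle)
            printf("kill/wait performed with no fault locks held\n");
        current_task.pending = 0;
        return true;
    }

    /* A charge hitting the memcg limit only records the OOM context. */
    static int fake_handle_mm_fault(void)
    {
        if (current_task.may_oom)
            current_task.pending = 1;   /* remember it, don't block here */
        return -1;                      /* think VM_FAULT_OOM */
    }

    int main(void)
    {
        mem_cgroup_oom_enable();
        int fault = fake_handle_mm_fault();
        mem_cgroup_oom_disable();

        if (fault < 0 && task_in_memcg_oom())
            mem_cgroup_oom_synchronize(true);   /* end of the page fault */
        return 0;
    }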
index fc02c749ae77df7e9b5d7b81209c4707823c9137..c613aae04189b2d8ee35a6f367d0ed85f9c4e725 100644 (file)
@@ -170,6 +170,7 @@ extern pgprot_t protection_map[16];
 #define FAULT_FLAG_RETRY_NOWAIT        0x10    /* Don't drop mmap_sem and wait when retrying */
 #define FAULT_FLAG_KILLABLE    0x20    /* The fault task is in SIGKILL killable region */
 #define FAULT_FLAG_TRIED       0x40    /* second try */
+#define FAULT_FLAG_USER                0x80    /* The fault originated in userspace */
 
 /*
  * vm_fault is filled by the pagefault handler and passed to the vma's
index 104b62f23ee025a51d730228e061ea49ce4513e0..54e351aa4d2e5652c03711ec70e416b71d7ee3a5 100644 (file)
@@ -1184,11 +1184,22 @@ struct nfs41_free_stateid_res {
        unsigned int                    status;
 };
 
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+       kfree(cinfo->buckets);
+}
+
 #else
 
 struct pnfs_ds_commit_info {
 };
 
+static inline void
+nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
+{
+}
+
 #endif /* CONFIG_NFS_V4_1 */
 
 struct nfs_page;
index 1e287bacafc70577ddb702908389862c90337b49..6ca9630fde2d4d5eaac3118fa6544fd0616d44e3 100644 (file)
@@ -1432,6 +1432,12 @@ struct task_struct {
                unsigned long memsw_nr_pages; /* uncharged mem+swap usage */
        } memcg_batch;
        unsigned int memcg_kmem_skip_account;
+       struct memcg_oom_info {
+               struct mem_cgroup *memcg;
+               gfp_t gfp_mask;
+               int order;
+               unsigned int may_oom:1;
+       } memcg_oom;
 #endif
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
        atomic_t ptrace_bp_refcnt;
index cd89510eab2a44d405c2cba2817b3b7ae863306e..845ab6decc457b1e8a18aee128fe68f70322a6ee 100644 (file)
@@ -540,6 +540,11 @@ static inline void sctp_assoc_pending_pmtu(struct sock *sk, struct sctp_associat
        asoc->pmtu_pending = 0;
 }
 
+static inline bool sctp_chunk_pending(const struct sctp_chunk *chunk)
+{
+       return !list_empty(&chunk->list);
+}
+
 /* Walk through a list of TLV parameters.  Don't trust the
  * individual parameter lengths and instead depend on
  * the chunk length to indicate when to stop.  Make sure
index 2a82d1384706c61a6757db53edf8d81a53cbc7de..c4c9458f37cdede038205781eb84eeb4f991a7bb 100644 (file)
@@ -255,9 +255,9 @@ struct sctp_chunk *sctp_make_asconf_update_ip(struct sctp_association *,
                                              int, __be16);
 struct sctp_chunk *sctp_make_asconf_set_prim(struct sctp_association *asoc,
                                             union sctp_addr *addr);
-int sctp_verify_asconf(const struct sctp_association *asoc,
-                      struct sctp_paramhdr *param_hdr, void *chunk_end,
-                      struct sctp_paramhdr **errp);
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+                       struct sctp_chunk *chunk, bool addr_param_needed,
+                       struct sctp_paramhdr **errp);
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                                       struct sctp_chunk *asconf);
 int sctp_process_asconf_ack(struct sctp_association *asoc,
index 5dda450eb55be1ca37d17680984d788f94c82d29..2ec9fbcd06f9a489c0669521bf77728af69e0ed0 100644 (file)
@@ -6,6 +6,8 @@
 
 #define XT_BPF_MAX_NUM_INSTR   64
 
+struct sk_filter;
+
 struct xt_bpf_info {
        __u16 bpf_program_num_elem;
        struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
index b0e99deb6d05330482c8ec98f1a511f07f9fa5f1..a0f0ab2ac2a8b92961c229dde22703dc7107cf3e 100644 (file)
@@ -123,7 +123,6 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
        void __user *buffer, size_t *lenp, loff_t *ppos)
 {
        struct ctl_table ipc_table;
-       size_t lenp_bef = *lenp;
        int oldval;
        int rc;
 
@@ -133,7 +132,7 @@ static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
 
        rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
 
-       if (write && !rc && lenp_bef == *lenp) {
+       if (write && !rc) {
                int newval = *((int *)(ipc_table.data));
                /*
                 * The file "auto_msgmni" has correctly been set.
index 43c307dc9453d5c9166596d2303deaf099cbf5b0..00c4459f76df3e5256bb27eef53f4822153753c2 100644 (file)
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
                chunk->owners[i].index = i;
        }
        fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
+       chunk->mark.mask = FS_IN_IGNORED;
        return chunk;
 }
 
index 0b4733447151cf8fcaa7c1796dfb2554ea0d3b0a..3f63ea6464cae60eb55acb1bbed1cb10af2fdedf 100644 (file)
@@ -39,6 +39,7 @@
 #include <linux/hw_breakpoint.h>
 #include <linux/mm_types.h>
 #include <linux/cgroup.h>
+#include <linux/compat.h>
 
 #include "internal.h"
 
@@ -3490,6 +3491,25 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        return 0;
 }
 
+#ifdef CONFIG_COMPAT
+static long perf_compat_ioctl(struct file *file, unsigned int cmd,
+                               unsigned long arg)
+{
+       switch (_IOC_NR(cmd)) {
+       case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
+               /* Fix up pointer size (usually 4 -> 8 in 32-on-64-bit case) */
+               if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
+                       cmd &= ~IOCSIZE_MASK;
+                       cmd |= sizeof(void *) << IOCSIZE_SHIFT;
+               }
+               break;
+       }
+       return perf_ioctl(file, cmd, arg);
+}
+#else
+# define perf_compat_ioctl NULL
+#endif
+
 int perf_event_task_enable(void)
 {
        struct perf_event *event;
@@ -3961,7 +3981,7 @@ static const struct file_operations perf_fops = {
        .read                   = perf_read,
        .poll                   = perf_poll,
        .unlocked_ioctl         = perf_ioctl,
-       .compat_ioctl           = perf_ioctl,
+       .compat_ioctl           = perf_compat_ioctl,
        .mmap                   = perf_mmap,
        .fasync                 = perf_fasync,
 };
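PERF_EVENT_IOC_SET_FILTER is _IOW('$', 6, char *), so the argument size baked into the ioctl number is the pointer width; a 32-bit task therefore issues a command that differs from the native one only in its size field, and the compat handler rewrites that field before reusing the native path. The number surgery, standalone (standard asm-generic ioctl encoding):

    #include <stdio.h>
    #include <stdint.h>
    #include <linux/ioctl.h>    /* _IOW, _IOC_SIZE, IOCSIZE_MASK/SHIFT */

    int main(void)
    {
        unsigned int cmd64 = _IOW('$', 6, char *);
        /* What a 32-bit task encodes: same type/nr, 4-byte size. */
        unsigned int cmd32 = (cmd64 & ~IOCSIZE_MASK) |
                             (sizeof(uint32_t) << IOCSIZE_SHIFT);

        printf("compat cmd %#x carries size %u\n", cmd32, _IOC_SIZE(cmd32));

        /* The fixup from perf_compat_ioctl(): patch the size back up to
         * the native pointer width so the command matches. */
        if (_IOC_SIZE(cmd32) == sizeof(uint32_t)) {
            cmd32 &= ~IOCSIZE_MASK;
            cmd32 |= sizeof(void *) << IOCSIZE_SHIFT;
        }
        printf("fixed-up cmd %#x %s the native cmd\n", cmd32,
               cmd32 == cmd64 ? "matches" : "differs from");
        return 0;
    }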
index f45e21ab9cea8531acd4bde77e9b4e3b63866f96..eaa3accb01e708a11986135a8df1146f7dbc1b4b 100644 (file)
@@ -302,6 +302,7 @@ struct mem_cgroup {
 
        bool            oom_lock;
        atomic_t        under_oom;
+       atomic_t        oom_wakeups;
 
        atomic_t        refcnt;
 
@@ -2075,15 +2076,18 @@ static int mem_cgroup_soft_reclaim(struct mem_cgroup *root_memcg,
        return total;
 }
 
+static DEFINE_SPINLOCK(memcg_oom_lock);
+
 /*
  * Check OOM-Killer is already running under our hierarchy.
  * If someone is running, return false.
- * Has to be called with memcg_oom_lock
  */
-static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
+static bool mem_cgroup_oom_trylock(struct mem_cgroup *memcg)
 {
        struct mem_cgroup *iter, *failed = NULL;
 
+       spin_lock(&memcg_oom_lock);
+
        for_each_mem_cgroup_tree(iter, memcg) {
                if (iter->oom_lock) {
                        /*
@@ -2097,33 +2101,33 @@ static bool mem_cgroup_oom_lock(struct mem_cgroup *memcg)
                        iter->oom_lock = true;
        }
 
-       if (!failed)
-               return true;
-
-       /*
-        * OK, we failed to lock the whole subtree so we have to clean up
-        * what we set up to the failing subtree
-        */
-       for_each_mem_cgroup_tree(iter, memcg) {
-               if (iter == failed) {
-                       mem_cgroup_iter_break(memcg, iter);
-                       break;
+       if (failed) {
+               /*
+                * OK, we failed to lock the whole subtree so we have
+                * to clean up what we set up to the failing subtree
+                */
+               for_each_mem_cgroup_tree(iter, memcg) {
+                       if (iter == failed) {
+                               mem_cgroup_iter_break(memcg, iter);
+                               break;
+                       }
+                       iter->oom_lock = false;
                }
-               iter->oom_lock = false;
        }
-       return false;
+
+       spin_unlock(&memcg_oom_lock);
+
+       return !failed;
 }
 
-/*
- * Has to be called with memcg_oom_lock
- */
-static int mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
+static void mem_cgroup_oom_unlock(struct mem_cgroup *memcg)
 {
        struct mem_cgroup *iter;
 
+       spin_lock(&memcg_oom_lock);
        for_each_mem_cgroup_tree(iter, memcg)
                iter->oom_lock = false;
-       return 0;
+       spin_unlock(&memcg_oom_lock);
 }
 
 static void mem_cgroup_mark_under_oom(struct mem_cgroup *memcg)
@@ -2147,7 +2151,6 @@ static void mem_cgroup_unmark_under_oom(struct mem_cgroup *memcg)
                atomic_add_unless(&iter->under_oom, -1, 0);
 }
 
-static DEFINE_SPINLOCK(memcg_oom_lock);
 static DECLARE_WAIT_QUEUE_HEAD(memcg_oom_waitq);
 
 struct oom_wait_info {
@@ -2177,6 +2180,7 @@ static int memcg_oom_wake_function(wait_queue_t *wait,
 
 static void memcg_wakeup_oom(struct mem_cgroup *memcg)
 {
+       atomic_inc(&memcg->oom_wakeups);
        /* for filtering, pass "memcg" as argument. */
        __wake_up(&memcg_oom_waitq, TASK_NORMAL, 0, memcg);
 }
@@ -2187,57 +2191,97 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
                memcg_wakeup_oom(memcg);
 }
 
-/*
- * try to call OOM killer. returns false if we should exit memory-reclaim loop.
+static void mem_cgroup_oom(struct mem_cgroup *memcg, gfp_t mask, int order)
+{
+       if (!current->memcg_oom.may_oom)
+               return;
+       /*
+        * We are in the middle of the charge context here, so we
+        * don't want to block when potentially sitting on a callstack
+        * that holds all kinds of filesystem and mm locks.
+        *
+        * Also, the caller may handle a failed allocation gracefully
+        * (like optional page cache readahead) and so an OOM killer
+        * invocation might not even be necessary.
+        *
+        * That's why we don't do anything here except remember the
+        * OOM context and then deal with it at the end of the page
+        * fault when the stack is unwound, the locks are released,
+        * and when we know whether the fault was overall successful.
+        */
+       css_get(&memcg->css);
+       current->memcg_oom.memcg = memcg;
+       current->memcg_oom.gfp_mask = mask;
+       current->memcg_oom.order = order;
+}
+
+/**
+ * mem_cgroup_oom_synchronize - complete memcg OOM handling
+ * @handle: actually kill/wait or just clean up the OOM state
+ *
+ * This has to be called at the end of a page fault if the memcg OOM
+ * handler was enabled.
+ *
+ * Memcg supports userspace OOM handling where failed allocations must
+ * sleep on a waitqueue until the userspace task resolves the
+ * situation.  Sleeping directly in the charge context with all kinds
+ * of locks held is not a good idea, instead we remember an OOM state
+ * in the task and mem_cgroup_oom_synchronize() has to be called at
+ * the end of the page fault to complete the OOM handling.
+ *
+ * Returns %true if an ongoing memcg OOM situation was detected and
+ * completed, %false otherwise.
  */
-static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
-                                 int order)
+bool mem_cgroup_oom_synchronize(bool handle)
 {
+       struct mem_cgroup *memcg = current->memcg_oom.memcg;
        struct oom_wait_info owait;
-       bool locked, need_to_kill;
+       bool locked;
+
+       /* OOM is global, do not handle */
+       if (!memcg)
+               return false;
+
+       if (!handle)
+               goto cleanup;
 
        owait.memcg = memcg;
        owait.wait.flags = 0;
        owait.wait.func = memcg_oom_wake_function;
        owait.wait.private = current;
        INIT_LIST_HEAD(&owait.wait.task_list);
-       need_to_kill = true;
-       mem_cgroup_mark_under_oom(memcg);
 
-       /* At first, try to OOM lock hierarchy under memcg.*/
-       spin_lock(&memcg_oom_lock);
-       locked = mem_cgroup_oom_lock(memcg);
-       /*
-        * Even if signal_pending(), we can't quit charge() loop without
-        * accounting. So, UNINTERRUPTIBLE is appropriate. But SIGKILL
-        * under OOM is always welcomed, use TASK_KILLABLE here.
-        */
        prepare_to_wait(&memcg_oom_waitq, &owait.wait, TASK_KILLABLE);
-       if (!locked || memcg->oom_kill_disable)
-               need_to_kill = false;
+       mem_cgroup_mark_under_oom(memcg);
+
+       locked = mem_cgroup_oom_trylock(memcg);
+
        if (locked)
                mem_cgroup_oom_notify(memcg);
-       spin_unlock(&memcg_oom_lock);
 
-       if (need_to_kill) {
+       if (locked && !memcg->oom_kill_disable) {
+               mem_cgroup_unmark_under_oom(memcg);
                finish_wait(&memcg_oom_waitq, &owait.wait);
-               mem_cgroup_out_of_memory(memcg, mask, order);
+               mem_cgroup_out_of_memory(memcg, current->memcg_oom.gfp_mask,
+                                        current->memcg_oom.order);
        } else {
                schedule();
+               mem_cgroup_unmark_under_oom(memcg);
                finish_wait(&memcg_oom_waitq, &owait.wait);
        }
-       spin_lock(&memcg_oom_lock);
-       if (locked)
-               mem_cgroup_oom_unlock(memcg);
-       memcg_wakeup_oom(memcg);
-       spin_unlock(&memcg_oom_lock);
 
-       mem_cgroup_unmark_under_oom(memcg);
-
-       if (test_thread_flag(TIF_MEMDIE) || fatal_signal_pending(current))
-               return false;
-       /* Give chance to dying process */
-       schedule_timeout_uninterruptible(1);
+       if (locked) {
+               mem_cgroup_oom_unlock(memcg);
+               /*
+                * There is no guarantee that an OOM-lock contender
+                * sees the wakeups triggered by the OOM kill
+                * uncharges.  Wake any sleepers explicitly.
+                */
+               memcg_oom_recover(memcg);
+       }
+cleanup:
+       current->memcg_oom.memcg = NULL;
+       css_put(&memcg->css);
        return true;
 }
 
@@ -2550,12 +2594,11 @@ enum {
        CHARGE_RETRY,           /* need to retry but retry is not bad */
        CHARGE_NOMEM,           /* we can't do more. return -ENOMEM */
        CHARGE_WOULDBLOCK,      /* GFP_WAIT wasn't set and no enough res. */
-       CHARGE_OOM_DIE,         /* the current is killed because of OOM */
 };
 
 static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
                                unsigned int nr_pages, unsigned int min_pages,
-                               bool oom_check)
+                               bool invoke_oom)
 {
        unsigned long csize = nr_pages * PAGE_SIZE;
        struct mem_cgroup *mem_over_limit;
@@ -2612,14 +2655,10 @@ static int mem_cgroup_do_charge(struct mem_cgroup *memcg, gfp_t gfp_mask,
        if (mem_cgroup_wait_acct_move(mem_over_limit))
                return CHARGE_RETRY;
 
-       /* If we don't need to call oom-killer at el, return immediately */
-       if (!oom_check)
-               return CHARGE_NOMEM;
-       /* check OOM */
-       if (!mem_cgroup_handle_oom(mem_over_limit, gfp_mask, get_order(csize)))
-               return CHARGE_OOM_DIE;
+       if (invoke_oom)
+               mem_cgroup_oom(mem_over_limit, gfp_mask, get_order(csize));
 
-       return CHARGE_RETRY;
+       return CHARGE_NOMEM;
 }
 
 /*
@@ -2663,6 +2702,9 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
                     || fatal_signal_pending(current)))
                goto bypass;
 
+       if (unlikely(task_in_memcg_oom(current)))
+               goto bypass;
+
        /*
         * We always charge the cgroup the mm_struct belongs to.
         * The mm_struct's mem_cgroup changes on task migration if the
@@ -2722,7 +2764,7 @@ again:
        }
 
        do {
-               bool oom_check;
+               bool invoke_oom = oom && !nr_oom_retries;
 
                /* If killed, bypass charge */
                if (fatal_signal_pending(current)) {
@@ -2730,14 +2772,8 @@ again:
                        goto bypass;
                }
 
-               oom_check = false;
-               if (oom && !nr_oom_retries) {
-                       oom_check = true;
-                       nr_oom_retries = MEM_CGROUP_RECLAIM_RETRIES;
-               }
-
-               ret = mem_cgroup_do_charge(memcg, gfp_mask, batch, nr_pages,
-                   oom_check);
+               ret = mem_cgroup_do_charge(memcg, gfp_mask, batch,
+                                          nr_pages, invoke_oom);
                switch (ret) {
                case CHARGE_OK:
                        break;
@@ -2750,16 +2786,12 @@ again:
                        css_put(&memcg->css);
                        goto nomem;
                case CHARGE_NOMEM: /* OOM routine works */
-                       if (!oom) {
+                       if (!oom || invoke_oom) {
                                css_put(&memcg->css);
                                goto nomem;
                        }
-                       /* If oom, we never return -ENOMEM */
                        nr_oom_retries--;
                        break;
-               case CHARGE_OOM_DIE: /* Killed by OOM Killer */
-                       css_put(&memcg->css);
-                       goto bypass;
                }
        } while (ret != CHARGE_OK);
 
index 8a6ab7be24b6eae37deabd5c79c9762520213089..b5edd6ee71d831fdd869bff1666839838d574443 100644 (file)
@@ -3756,22 +3756,14 @@ unlock:
 /*
  * By the time we get here, we already hold the mm semaphore
  */
-int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
-               unsigned long address, unsigned int flags)
+static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                            unsigned long address, unsigned int flags)
 {
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
 
-       __set_current_state(TASK_RUNNING);
-
-       count_vm_event(PGFAULT);
-       mem_cgroup_count_vm_event(mm, PGFAULT);
-
-       /* do counter updates before entering really critical section. */
-       check_sync_rss_stat(current);
-
        if (unlikely(is_vm_hugetlb_page(vma)))
                return hugetlb_fault(mm, vma, address, flags);
 
@@ -3852,6 +3844,43 @@ retry:
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
+int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+                   unsigned long address, unsigned int flags)
+{
+       int ret;
+
+       __set_current_state(TASK_RUNNING);
+
+       count_vm_event(PGFAULT);
+       mem_cgroup_count_vm_event(mm, PGFAULT);
+
+       /* do counter updates before entering really critical section. */
+       check_sync_rss_stat(current);
+
+       /*
+        * Enable the memcg OOM handling for faults triggered in user
+        * space.  Kernel faults are handled more gracefully.
+        */
+       if (flags & FAULT_FLAG_USER)
+               mem_cgroup_oom_enable();
+
+       ret = __handle_mm_fault(mm, vma, address, flags);
+
+       if (flags & FAULT_FLAG_USER) {
+               mem_cgroup_oom_disable();
+                /*
+                 * The task may have entered a memcg OOM situation but
+                 * if the allocation error was handled gracefully (no
+                 * VM_FAULT_OOM), there is no need to kill anything.
+                 * Just clean up the OOM state peacefully.
+                 */
+                if (task_in_memcg_oom(current) && !(ret & VM_FAULT_OOM))
+                        mem_cgroup_oom_synchronize(false);
+       }
+
+       return ret;
+}
+
 #ifndef __PAGETABLE_PUD_FOLDED
 /*
  * Allocate page upper directory.
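
The handle_mm_fault() wrapper above arms the memcg OOM machinery only when the caller passes FAULT_FLAG_USER, so the scheme relies on architecture fault handlers flagging user-mode faults. A condensed sketch of the expected caller shape, not any particular arch's fault.c (user_mode(regs) and the flag plumbing differ per architecture):

        unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
        int fault;

        if (user_mode(regs))            /* fault taken from user space? */
                flags |= FAULT_FLAG_USER;
        if (write)
                flags |= FAULT_FLAG_WRITE;

        fault = handle_mm_fault(mm, vma, address, flags);
        /* on VM_FAULT_OOM from a user fault, the error path reaches
         * pagefault_out_of_memory(), which now completes a pending
         * memcg OOM via mem_cgroup_oom_synchronize(true) first */
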
index f104c7e9f61efa4cfd61d4c6af544b1eabf2806b..4d87d7c4ed2e4e0d545f4db22146b6230440064e 100644 (file)
@@ -702,9 +702,12 @@ out:
  */
 void pagefault_out_of_memory(void)
 {
-       struct zonelist *zonelist = node_zonelist(first_online_node,
-                                                 GFP_KERNEL);
+       struct zonelist *zonelist;
 
+       if (mem_cgroup_oom_synchronize(true))
+               return;
+
+       zonelist = node_zonelist(first_online_node, GFP_KERNEL);
        if (try_set_zonelist_oom(zonelist, GFP_KERNEL)) {
                out_of_memory(NULL, 0, 0, NULL, false);
                clear_zonelist_oom(zonelist, GFP_KERNEL);
index e696833a31b532dabf1b7d27e2a46c7590ee0170..11ab6628027ab92dc53826957f0c15c0bde158e5 100644 (file)
@@ -429,6 +429,16 @@ extern netdev_features_t br_features_recompute(struct net_bridge *br,
 extern int br_handle_frame_finish(struct sk_buff *skb);
 extern rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
 
+static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
+{
+       return rcu_dereference(dev->rx_handler) == br_handle_frame;
+}
+
+static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
+{
+       return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
+}
+
 /* br_ioctl.c */
 extern int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 extern int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd, void __user *arg);
index 8660ea3be7054571defa59bcfbfc0cd0dd7ab99e..bdb459d21ad8e2d5191023c5b096de39b6a78c90 100644 (file)
@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
        if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
                goto err;
 
-       p = br_port_get_rcu(dev);
+       p = br_port_get_check_rcu(dev);
        if (!p)
                goto err;
 
index 6e7a236525b6ff92d9cd2e44770cb6f7334b0b02..06f19b9e159a3c30f180cca3e5b405207519f36e 100644 (file)
@@ -89,11 +89,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
 
 static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
 
+/*
+ * Should be used for buffers allocated with ceph_kvmalloc().
+ * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
+ * in-buffer (msg front).
+ *
+ * Dispose of @sgt with teardown_sgtable().
+ *
+ * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
+ * in cases where a single sg is sufficient.  No attempt to reduce the
+ * number of sgs by squeezing physically contiguous pages together is
+ * made though, for simplicity.
+ */
+static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
+                        const void *buf, unsigned int buf_len)
+{
+       struct scatterlist *sg;
+       const bool is_vmalloc = is_vmalloc_addr(buf);
+       unsigned int off = offset_in_page(buf);
+       unsigned int chunk_cnt = 1;
+       unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
+       int i;
+       int ret;
+
+       if (buf_len == 0) {
+               memset(sgt, 0, sizeof(*sgt));
+               return -EINVAL;
+       }
+
+       if (is_vmalloc) {
+               chunk_cnt = chunk_len >> PAGE_SHIFT;
+               chunk_len = PAGE_SIZE;
+       }
+
+       if (chunk_cnt > 1) {
+               ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
+               if (ret)
+                       return ret;
+       } else {
+               WARN_ON(chunk_cnt != 1);
+               sg_init_table(prealloc_sg, 1);
+               sgt->sgl = prealloc_sg;
+               sgt->nents = sgt->orig_nents = 1;
+       }
+
+       for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
+               struct page *page;
+               unsigned int len = min(chunk_len - off, buf_len);
+
+               if (is_vmalloc)
+                       page = vmalloc_to_page(buf);
+               else
+                       page = virt_to_page(buf);
+
+               sg_set_page(sg, page, len, off);
+
+               off = 0;
+               buf += len;
+               buf_len -= len;
+       }
+       WARN_ON(buf_len != 0);
+
+       return 0;
+}
+
+static void teardown_sgtable(struct sg_table *sgt)
+{
+       if (sgt->orig_nents > 1)
+               sg_free_table(sgt);
+}
+
 static int ceph_aes_encrypt(const void *key, int key_len,
                            void *dst, size_t *dst_len,
                            const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[2], sg_out[1];
+       struct scatterlist sg_in[2], prealloc_sg;
+       struct sg_table sg_out;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
        int ret;
@@ -109,16 +180,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
 
        *dst_len = src_len + zero_padding;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        sg_init_table(sg_in, 2);
        sg_set_buf(&sg_in[0], src, src_len);
        sg_set_buf(&sg_in[1], pad, zero_padding);
-       sg_init_table(sg_out, 1);
-       sg_set_buf(sg_out, dst, *dst_len);
+       ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+       if (ret)
+               goto out_tfm;
+
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
+
        /*
        print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
                       key, key_len, 1);
@@ -127,16 +200,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
                                     src_len + zero_padding);
-       crypto_free_blkcipher(tfm);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("ceph_aes_crypt failed %d\n", ret);
+               goto out_sg;
+       }
        /*
        print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_out);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -144,7 +223,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
                             const void *src1, size_t src1_len,
                             const void *src2, size_t src2_len)
 {
-       struct scatterlist sg_in[3], sg_out[1];
+       struct scatterlist sg_in[3], prealloc_sg;
+       struct sg_table sg_out;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
        int ret;
@@ -160,17 +240,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
 
        *dst_len = src1_len + src2_len + zero_padding;
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        sg_init_table(sg_in, 3);
        sg_set_buf(&sg_in[0], src1, src1_len);
        sg_set_buf(&sg_in[1], src2, src2_len);
        sg_set_buf(&sg_in[2], pad, zero_padding);
-       sg_init_table(sg_out, 1);
-       sg_set_buf(sg_out, dst, *dst_len);
+       ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
+       if (ret)
+               goto out_tfm;
+
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
+
        /*
        print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
                       key, key_len, 1);
@@ -181,23 +263,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
        print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
                        pad, zero_padding, 1);
        */
-       ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
+       ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
                                     src1_len + src2_len + zero_padding);
-       crypto_free_blkcipher(tfm);
-       if (ret < 0)
+       if (ret < 0) {
                pr_err("ceph_aes_crypt2 failed %d\n", ret);
+               goto out_sg;
+       }
        /*
        print_hex_dump(KERN_ERR, "enc  out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_out);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_decrypt(const void *key, int key_len,
                            void *dst, size_t *dst_len,
                            const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[1], sg_out[2];
+       struct sg_table sg_in;
+       struct scatterlist sg_out[2], prealloc_sg;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm };
        char pad[16];
@@ -209,16 +298,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       crypto_blkcipher_setkey((void *)tfm, key, key_len);
-       sg_init_table(sg_in, 1);
        sg_init_table(sg_out, 2);
-       sg_set_buf(sg_in, src, src_len);
        sg_set_buf(&sg_out[0], dst, *dst_len);
        sg_set_buf(&sg_out[1], pad, sizeof(pad));
+       ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+       if (ret)
+               goto out_tfm;
 
+       crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
 
        /*
@@ -227,12 +316,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-       crypto_free_blkcipher(tfm);
+       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
-               return ret;
+               goto out_sg;
        }
 
        if (src_len <= *dst_len)
@@ -250,7 +337,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
                       dst, *dst_len, 1);
        */
-       return 0;
+
+out_sg:
+       teardown_sgtable(&sg_in);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -258,7 +350,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
                             void *dst2, size_t *dst2_len,
                             const void *src, size_t src_len)
 {
-       struct scatterlist sg_in[1], sg_out[3];
+       struct sg_table sg_in;
+       struct scatterlist sg_out[3], prealloc_sg;
        struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
        struct blkcipher_desc desc = { .tfm = tfm };
        char pad[16];
@@ -270,17 +363,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);
 
-       sg_init_table(sg_in, 1);
-       sg_set_buf(sg_in, src, src_len);
        sg_init_table(sg_out, 3);
        sg_set_buf(&sg_out[0], dst1, *dst1_len);
        sg_set_buf(&sg_out[1], dst2, *dst2_len);
        sg_set_buf(&sg_out[2], pad, sizeof(pad));
+       ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
+       if (ret)
+               goto out_tfm;
 
        crypto_blkcipher_setkey((void *)tfm, key, key_len);
        iv = crypto_blkcipher_crt(tfm)->iv;
        ivsize = crypto_blkcipher_ivsize(tfm);
-
        memcpy(iv, aes_iv, ivsize);
 
        /*
@@ -289,12 +382,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
        print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
                       src, src_len, 1);
        */
-
-       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
-       crypto_free_blkcipher(tfm);
+       ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
        if (ret < 0) {
                pr_err("ceph_aes_decrypt failed %d\n", ret);
-               return ret;
+               goto out_sg;
        }
 
        if (src_len <= *dst1_len)
@@ -324,7 +415,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
                       dst2, *dst2_len, 1);
        */
 
-       return 0;
+out_sg:
+       teardown_sgtable(&sg_in);
+out_tfm:
+       crypto_free_blkcipher(tfm);
+       return ret;
 }
 
 
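
The sizing logic in setup_sgtable() rewards a worked example: a vmalloc'ed buffer is only virtually contiguous, so it needs one scatterlist entry per page, and the page count must include the buffer's offset into its first page. A standalone rendition of that arithmetic (PAGE_SIZE assumed to be 4096; the address is illustrative):

#include <stdio.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1u << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long buf = 0xffffc90000001f00ul; /* illustrative vmalloc address */
        unsigned int buf_len = 8192;

        unsigned int off = buf & (PAGE_SIZE - 1);       /* offset_in_page() */
        unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
        unsigned int chunk_cnt = chunk_len >> PAGE_SHIFT;

        /* 0x1f00 + 8192 bytes touch 4 pages, so 4 sg entries are
         * needed even though buf_len is only 2 pages worth of data */
        printf("off=%#x, pages spanned=%u\n", off, chunk_cnt);
        return 0;
}
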
index 250a73e77f57b2d8b85f0e4419479a83e2c34f6c..6c20f4731f1a0322e9dd1416e670b2d3e1bcd515 100644 (file)
@@ -962,8 +962,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
        else
                dev->flags &= ~IFF_POINTOPOINT;
 
-       dev->iflink = p->link;
-
        /* Precalculate GRE options length */
        if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
                if (t->parms.o_flags&GRE_CSUM)
@@ -1267,6 +1265,8 @@ static int ip6gre_tunnel_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       dev->iflink = tunnel->parms.link;
+
        return 0;
 }
 
@@ -1282,7 +1282,6 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
        dev_hold(dev);
 }
 
-
 static struct inet6_protocol ip6gre_protocol __read_mostly = {
        .handler     = ip6gre_rcv,
        .err_handler = ip6gre_err,
@@ -1458,6 +1457,8 @@ static int ip6gre_tap_init(struct net_device *dev)
        if (!dev->tstats)
                return -ENOMEM;
 
+       dev->iflink = tunnel->parms.link;
+
        return 0;
 }
 
index a0ecdf596f2fbf0ccec406ea092ee045458df5e5..14f46af17704f813a5c86b36e8c6521e23698b81 100644 (file)
@@ -265,9 +265,6 @@ static int ip6_tnl_create2(struct net_device *dev)
        int err;
 
        t = netdev_priv(dev);
-       err = ip6_tnl_dev_init(dev);
-       if (err < 0)
-               goto out;
 
        err = register_netdevice(dev);
        if (err < 0)
@@ -1433,6 +1430,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
 
 
 static const struct net_device_ops ip6_tnl_netdev_ops = {
+       .ndo_init       = ip6_tnl_dev_init,
        .ndo_uninit     = ip6_tnl_dev_uninit,
        .ndo_start_xmit = ip6_tnl_xmit,
        .ndo_do_ioctl   = ip6_tnl_ioctl,
@@ -1514,16 +1512,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
        struct ip6_tnl *t = netdev_priv(dev);
        struct net *net = dev_net(dev);
        struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
-       int err = ip6_tnl_dev_init_gen(dev);
-
-       if (err)
-               return err;
 
        t->parms.proto = IPPROTO_IPV6;
        dev_hold(dev);
 
-       ip6_tnl_link_config(t);
-
        rcu_assign_pointer(ip6n->tnls_wc[0], t);
        return 0;
 }
index fae73b0ef14b9b40be4a7de5b44e8ccabb0a7e96..85bc6d498b46f59b76658881cf8e55aac2df6e34 100644 (file)
@@ -1585,11 +1585,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
        sc = le16_to_cpu(hdr->seq_ctrl);
        frag = sc & IEEE80211_SCTL_FRAG;
 
-       if (likely((!ieee80211_has_morefrags(fc) && frag == 0) ||
-                  is_multicast_ether_addr(hdr->addr1))) {
-               /* not fragmented */
+       if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
+               goto out;
+
+       if (is_multicast_ether_addr(hdr->addr1)) {
+               rx->local->dot11MulticastReceivedFrameCount++;
                goto out;
        }
+
        I802_DEBUG_INC(rx->local->rx_handlers_fragments);
 
        if (skb_linearize(rx->skb))
@@ -1682,10 +1685,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
  out:
        if (rx->sta)
                rx->sta->rx_packets++;
-       if (is_multicast_ether_addr(hdr->addr1))
-               rx->local->dot11MulticastReceivedFrameCount++;
-       else
-               ieee80211_led_rx(rx->local);
+       ieee80211_led_rx(rx->local);
        return RX_CONTINUE;
 }
 
index 038eee5c8f8548787bff468c40256d52bb6655fd..2bb801e3ee8c488243e7d50cf47057c94fc394d3 100644 (file)
@@ -487,6 +487,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
        return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+{
+       struct nf_conn_nat *nat = nfct_nat(ct);
+
+       if (nf_nat_proto_remove(ct, data))
+               return 1;
+
+       if (!nat || !nat->ct)
+               return 0;
+
+       /* This netns is being destroyed, and conntrack has nat null binding.
+        * Remove it from bysource hash, as the table will be freed soon.
+        *
+        * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
+        * will delete entry from already-freed table.
+        */
+       if (!del_timer(&ct->timeout))
+               return 1;
+
+       spin_lock_bh(&nf_nat_lock);
+       hlist_del_rcu(&nat->bysource);
+       ct->status &= ~IPS_NAT_DONE_MASK;
+       nat->ct = NULL;
+       spin_unlock_bh(&nf_nat_lock);
+
+       add_timer(&ct->timeout);
+
+       /* don't delete conntrack.  Although that would make things a lot
+        * simpler, we'd end up flushing all conntracks on nat rmmod.
+        */
+       return 0;
+}
+
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 {
        struct nf_nat_proto_clean clean = {
@@ -749,7 +782,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
        struct nf_nat_proto_clean clean = {};
 
-       nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);
+       nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
index 962e9792e3179997db98a448a76fc909432d841f..216261dd32aeca3f9ec9b7027484e7f2bcabeba1 100644 (file)
@@ -45,7 +45,8 @@
 #define NFULNL_NLBUFSIZ_DEFAULT        NLMSG_GOODSIZE
 #define NFULNL_TIMEOUT_DEFAULT         100     /* every second */
 #define NFULNL_QTHRESH_DEFAULT         100     /* 100 packets */
-#define NFULNL_COPY_RANGE_MAX  0xFFFF  /* max packet size is limited by 16-bit struct nfattr nfa_len field */
+/* max packet size is limited by 16-bit struct nfattr nfa_len field */
+#define NFULNL_COPY_RANGE_MAX  (0xFFFF - NLA_HDRLEN)
 
 #define PRINTR(x, args...)     do { if (net_ratelimit()) \
                                     printk(x, ## args); } while (0);
@@ -255,6 +256,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
 
        case NFULNL_COPY_PACKET:
                inst->copy_mode = mode;
+               if (range == 0)
+                       range = NFULNL_COPY_RANGE_MAX;
                inst->copy_range = min_t(unsigned int,
                                         range, NFULNL_COPY_RANGE_MAX);
                break;
@@ -345,26 +348,25 @@ nfulnl_alloc_skb(u32 peer_portid, unsigned int inst_size, unsigned int pkt_size)
        return skb;
 }
 
-static int
+static void
 __nfulnl_send(struct nfulnl_instance *inst)
 {
-       int status = -1;
-
        if (inst->qlen > 1) {
                struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
                                                 NLMSG_DONE,
                                                 sizeof(struct nfgenmsg),
                                                 0);
-               if (!nlh)
+               if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n",
+                             inst->skb->len, skb_tailroom(inst->skb))) {
+                       kfree_skb(inst->skb);
                        goto out;
+               }
        }
-       status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
-                                  MSG_DONTWAIT);
-
+       nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
+                         MSG_DONTWAIT);
+out:
        inst->qlen = 0;
        inst->skb = NULL;
-out:
-       return status;
 }
 
 static void
@@ -647,7 +649,8 @@ nfulnl_log_packet(struct net *net,
                + nla_total_size(sizeof(u_int32_t))     /* gid */
                + nla_total_size(plen)                  /* prefix */
                + nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
-               + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp));
+               + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
+               + nla_total_size(sizeof(struct nfgenmsg));      /* NLMSG_DONE */
 
        if (in && skb_mac_header_was_set(skb)) {
                size +=   nla_total_size(skb->dev->hard_header_len)
@@ -676,8 +679,7 @@ nfulnl_log_packet(struct net *net,
                break;
 
        case NFULNL_COPY_PACKET:
-               if (inst->copy_range == 0
-                   || inst->copy_range > skb->len)
+               if (inst->copy_range > skb->len)
                        data_len = skb->len;
                else
                        data_len = inst->copy_range;
@@ -690,8 +692,7 @@ nfulnl_log_packet(struct net *net,
                goto unlock_and_release;
        }
 
-       if (inst->skb &&
-           size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
+       if (inst->skb && size > skb_tailroom(inst->skb)) {
                /* either the queue len is too high or we don't have
                 * enough room in the skb left. flush to userspace. */
                __nfulnl_flush(inst);
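
The new NFULNL_COPY_RANGE_MAX reflects that a netlink attribute's 16-bit nla_len covers the attribute header as well as the payload, and the per-packet size estimate now reserves room for the trailing NLMSG_DONE header. A worked check of the attribute budget, with the kernel's nla_total_size() reproduced locally (it lives in kernel-internal headers, not uapi):

#include <stdio.h>
#include <linux/netlink.h>      /* NLA_ALIGN, NLA_HDRLEN, struct nlattr */

/* kernel's nla_total_size(), reproduced here for the arithmetic */
static int nla_total_size(int payload)
{
        return NLA_ALIGN(NLA_HDRLEN + payload);
}

int main(void)
{
        /* nla_len = NLA_HDRLEN + payload must fit in 16 bits, so the
         * largest safe payload is 0xffff - NLA_HDRLEN */
        int max_payload = 0xffff - NLA_HDRLEN;

        printf("max payload = %d\n", max_payload);                  /* 65531 */
        printf("wire size   = %d\n", nla_total_size(max_payload));  /* 65536 */
        /* a 0xffff cap on the payload would have overflowed nla_len
         * by NLA_HDRLEN bytes once the header was accounted for */
        return 0;
}
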
index 62e86d98bc36c46b7c3c6b14dc5c6957d4d8d275..ca4a1a1b8e693b0e743e64a5d3410d133ff7ae17 100644 (file)
@@ -1659,6 +1659,8 @@ struct sctp_chunk *sctp_assoc_lookup_asconf_ack(
         * ack chunk whose serial number matches that of the request.
         */
        list_for_each_entry(ack, &asoc->asconf_ack_list, transmitted_list) {
+               if (sctp_chunk_pending(ack))
+                       continue;
                if (ack->subh.addip_hdr->serial == serial) {
                        sctp_chunk_hold(ack);
                        return ack;
index 7a19117254db8e26fb2d7d5e1f21ac4c58a81266..bc2fae7e67be6d0625058a80190ba1b737fffe97 100644 (file)
@@ -874,8 +874,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
                list_add(&cur_key->key_list, sh_keys);
 
        cur_key->key = key;
-       sctp_auth_key_hold(key);
-
        return 0;
 nomem:
        if (!replace)
index 3221d073448ce0356ac24078a3f69c6501e91371..49c58eadbfa2af0d05de9b024bbab711e845639c 100644 (file)
@@ -147,18 +147,9 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
                } else {
                        /* Nothing to do. Next chunk in the packet, please. */
                        ch = (sctp_chunkhdr_t *) chunk->chunk_end;
-
                        /* Force chunk->skb->data to chunk->chunk_end.  */
-                       skb_pull(chunk->skb,
-                                chunk->chunk_end - chunk->skb->data);
-
-                       /* Verify that we have at least chunk headers
-                        * worth of buffer left.
-                        */
-                       if (skb_headlen(chunk->skb) < sizeof(sctp_chunkhdr_t)) {
-                               sctp_chunk_free(chunk);
-                               chunk = queue->in_progress = NULL;
-                       }
+                       skb_pull(chunk->skb, chunk->chunk_end - chunk->skb->data);
+                       /* We are guaranteed to pull an SCTP header. */
                }
        }
 
@@ -194,24 +185,14 @@ struct sctp_chunk *sctp_inq_pop(struct sctp_inq *queue)
        skb_pull(chunk->skb, sizeof(sctp_chunkhdr_t));
        chunk->subh.v = NULL; /* Subheader is no longer valid.  */
 
-       if (chunk->chunk_end < skb_tail_pointer(chunk->skb)) {
+       if (chunk->chunk_end + sizeof(sctp_chunkhdr_t) <
+           skb_tail_pointer(chunk->skb)) {
                /* This is not a singleton */
                chunk->singleton = 0;
        } else if (chunk->chunk_end > skb_tail_pointer(chunk->skb)) {
-               /* RFC 2960, Section 6.10  Bundling
-                *
-                * Partial chunks MUST NOT be placed in an SCTP packet.
-                * If the receiver detects a partial chunk, it MUST drop
-                * the chunk.
-                *
-                * Since the end of the chunk is past the end of our buffer
-                * (which contains the whole packet, we can freely discard
-                * the whole packet.
-                */
-               sctp_chunk_free(chunk);
-               chunk = queue->in_progress = NULL;
-
-               return NULL;
+               /* Discard inside state machine. */
+               chunk->pdiscard = 1;
+               chunk->chunk_end = skb_tail_pointer(chunk->skb);
        } else {
                /* We are at the end of the packet, so mark the chunk
                 * in case we need to send a SACK.
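
The tightened singleton test is easiest to see with numbers: a bundled chunk can only follow the current one if at least a whole 4-byte chunk header fits past chunk_end. A standalone check with illustrative offsets:

#include <stdio.h>

#define SCTP_CHUNK_HDR_LEN 4    /* type(1) + flags(1) + length(2) */

int main(void)
{
        unsigned long chunk_end = 100;  /* end of the current chunk */
        unsigned long skb_tail  = 103;  /* end of the packet data */

        /* old test: any bytes past chunk_end implied a bundled chunk */
        int old_bundled = chunk_end < skb_tail;
        /* new test: a bundled chunk needs room for a whole header */
        int new_bundled = chunk_end + SCTP_CHUNK_HDR_LEN < skb_tail;

        /* 3 trailing bytes cannot hold a 4-byte chunk header */
        printf("old=%s new=%s\n", old_bundled ? "bundled" : "singleton",
               new_bundled ? "bundled" : "singleton");
        return 0;
}
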
index 87e244be899a78dd076cf1b4e7b1301c748df8d9..29fc16f3633f211bd4ec658f2dd7db4333a3c211 100644 (file)
@@ -2596,6 +2596,9 @@ do_addr_param:
                addr_param = param.v + sizeof(sctp_addip_param_t);
 
                af = sctp_get_af_specific(param_type2af(param.p->type));
+               if (af == NULL)
+                       break;
+
                af->from_addr_param(&addr, addr_param,
                                    htons(asoc->peer.port), 0);
 
@@ -3094,50 +3097,63 @@ static __be16 sctp_process_asconf_param(struct sctp_association *asoc,
        return SCTP_ERROR_NO_ERROR;
 }
 
-/* Verify the ASCONF packet before we process it.  */
-int sctp_verify_asconf(const struct sctp_association *asoc,
-                      struct sctp_paramhdr *param_hdr, void *chunk_end,
-                      struct sctp_paramhdr **errp) {
-       sctp_addip_param_t *asconf_param;
+/* Verify the ASCONF packet before we process it. */
+bool sctp_verify_asconf(const struct sctp_association *asoc,
+                       struct sctp_chunk *chunk, bool addr_param_needed,
+                       struct sctp_paramhdr **errp)
+{
+       sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) chunk->chunk_hdr;
        union sctp_params param;
-       int length, plen;
+       bool addr_param_seen = false;
 
-       param.v = (sctp_paramhdr_t *) param_hdr;
-       while (param.v <= chunk_end - sizeof(sctp_paramhdr_t)) {
-               length = ntohs(param.p->length);
-               *errp = param.p;
-
-               if (param.v > chunk_end - length ||
-                   length < sizeof(sctp_paramhdr_t))
-                       return 0;
+       sctp_walk_params(param, addip, addip_hdr.params) {
+               size_t length = ntohs(param.p->length);
 
+               *errp = param.p;
                switch (param.p->type) {
+               case SCTP_PARAM_ERR_CAUSE:
+                       break;
+               case SCTP_PARAM_IPV4_ADDRESS:
+                       if (length != sizeof(sctp_ipv4addr_param_t))
+                               return false;
+                       addr_param_seen = true;
+                       break;
+               case SCTP_PARAM_IPV6_ADDRESS:
+                       if (length != sizeof(sctp_ipv6addr_param_t))
+                               return false;
+                       addr_param_seen = true;
+                       break;
                case SCTP_PARAM_ADD_IP:
                case SCTP_PARAM_DEL_IP:
                case SCTP_PARAM_SET_PRIMARY:
-                       asconf_param = (sctp_addip_param_t *)param.v;
-                       plen = ntohs(asconf_param->param_hdr.length);
-                       if (plen < sizeof(sctp_addip_param_t) +
-                           sizeof(sctp_paramhdr_t))
-                               return 0;
+                       /* In ASCONF chunks, these need to be first. */
+                       if (addr_param_needed && !addr_param_seen)
+                               return false;
+                       length = ntohs(param.addip->param_hdr.length);
+                       if (length < sizeof(sctp_addip_param_t) +
+                                    sizeof(sctp_paramhdr_t))
+                               return false;
                        break;
                case SCTP_PARAM_SUCCESS_REPORT:
                case SCTP_PARAM_ADAPTATION_LAYER_IND:
                        if (length != sizeof(sctp_addip_param_t))
-                               return 0;
-
+                               return false;
                        break;
                default:
-                       break;
+                       /* This is unknown to us, reject! */
+                       return false;
                }
-
-               param.v += WORD_ROUND(length);
        }
 
-       if (param.v != chunk_end)
-               return 0;
+       /* Remaining sanity checks. */
+       if (addr_param_needed && !addr_param_seen)
+               return false;
+       if (!addr_param_needed && addr_param_seen)
+               return false;
+       if (param.v != chunk->chunk_end)
+               return false;
 
-       return 1;
+       return true;
 }
 
 /* Process an incoming ASCONF chunk with the next expected serial no. and
@@ -3146,16 +3162,17 @@ int sctp_verify_asconf(const struct sctp_association *asoc,
 struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                                       struct sctp_chunk *asconf)
 {
+       sctp_addip_chunk_t *addip = (sctp_addip_chunk_t *) asconf->chunk_hdr;
+       bool all_param_pass = true;
+       union sctp_params param;
        sctp_addiphdr_t         *hdr;
        union sctp_addr_param   *addr_param;
        sctp_addip_param_t      *asconf_param;
        struct sctp_chunk       *asconf_ack;
-
        __be16  err_code;
        int     length = 0;
        int     chunk_len;
        __u32   serial;
-       int     all_param_pass = 1;
 
        chunk_len = ntohs(asconf->chunk_hdr->length) - sizeof(sctp_chunkhdr_t);
        hdr = (sctp_addiphdr_t *)asconf->skb->data;
@@ -3183,9 +3200,14 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                goto done;
 
        /* Process the TLVs contained within the ASCONF chunk. */
-       while (chunk_len > 0) {
+       sctp_walk_params(param, addip, addip_hdr.params) {
+               /* Skip preceding address parameters. */
+               if (param.p->type == SCTP_PARAM_IPV4_ADDRESS ||
+                   param.p->type == SCTP_PARAM_IPV6_ADDRESS)
+                       continue;
+
                err_code = sctp_process_asconf_param(asoc, asconf,
-                                                    asconf_param);
+                                                    param.addip);
                /* ADDIP 4.1 A7)
                 * If an error response is received for a TLV parameter,
                 * all TLVs with no response before the failed TLV are
@@ -3193,28 +3215,20 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
                 * the failed response are considered unsuccessful unless
                 * a specific success indication is present for the parameter.
                 */
-               if (SCTP_ERROR_NO_ERROR != err_code)
-                       all_param_pass = 0;
-
+               if (err_code != SCTP_ERROR_NO_ERROR)
+                       all_param_pass = false;
                if (!all_param_pass)
-                       sctp_add_asconf_response(asconf_ack,
-                                                asconf_param->crr_id, err_code,
-                                                asconf_param);
+                       sctp_add_asconf_response(asconf_ack, param.addip->crr_id,
+                                                err_code, param.addip);
 
                /* ADDIP 4.3 D11) When an endpoint receiving an ASCONF to add
                 * an IP address sends an 'Out of Resource' in its response, it
                 * MUST also fail any subsequent add or delete requests bundled
                 * in the ASCONF.
                 */
-               if (SCTP_ERROR_RSRC_LOW == err_code)
+               if (err_code == SCTP_ERROR_RSRC_LOW)
                        goto done;
-
-               /* Move to the next ASCONF param. */
-               length = ntohs(asconf_param->param_hdr.length);
-               asconf_param = (void *)asconf_param + length;
-               chunk_len -= length;
        }
-
 done:
        asoc->peer.addip_serial++;
 
index edc204b05c825b1b8a97f0d37c049a1b5d75c008..c52763a2629719d38afa624f85cd83756d51969f 100644 (file)
@@ -177,6 +177,9 @@ sctp_chunk_length_valid(struct sctp_chunk *chunk,
 {
        __u16 chunk_length = ntohs(chunk->chunk_hdr->length);
 
+       /* Previously already marked? */
+       if (unlikely(chunk->pdiscard))
+               return 0;
        if (unlikely(chunk_length < required_length))
                return 0;
 
@@ -3593,9 +3596,7 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
        struct sctp_chunk       *asconf_ack = NULL;
        struct sctp_paramhdr    *err_param = NULL;
        sctp_addiphdr_t         *hdr;
-       union sctp_addr_param   *addr_param;
        __u32                   serial;
-       int                     length;
 
        if (!sctp_vtag_verify(chunk, asoc)) {
                sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_BAD_TAG,
@@ -3620,17 +3621,8 @@ sctp_disposition_t sctp_sf_do_asconf(struct net *net,
        hdr = (sctp_addiphdr_t *)chunk->skb->data;
        serial = ntohl(hdr->serial);
 
-       addr_param = (union sctp_addr_param *)hdr->params;
-       length = ntohs(addr_param->p.length);
-       if (length < sizeof(sctp_paramhdr_t))
-               return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
-                          (void *)addr_param, commands);
-
        /* Verify the ASCONF chunk before processing it. */
-       if (!sctp_verify_asconf(asoc,
-                           (sctp_paramhdr_t *)((void *)addr_param + length),
-                           (void *)chunk->chunk_end,
-                           &err_param))
+       if (!sctp_verify_asconf(asoc, chunk, true, &err_param))
                return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                                                  (void *)err_param, commands);
 
@@ -3748,10 +3740,7 @@ sctp_disposition_t sctp_sf_do_asconf_ack(struct net *net,
        rcvd_serial = ntohl(addip_hdr->serial);
 
        /* Verify the ASCONF-ACK chunk before processing it. */
-       if (!sctp_verify_asconf(asoc,
-           (sctp_paramhdr_t *)addip_hdr->params,
-           (void *)asconf_ack->chunk_end,
-           &err_param))
+       if (!sctp_verify_asconf(asoc, asconf_ack, false, &err_param))
                return sctp_sf_violation_paramlen(net, ep, asoc, type, arg,
                           (void *)err_param, commands);
 
index ebe91440a068a8f500901652892d5a3575b68e63..c89a5bf5c00e6b3dccf2741ef7b20b4253ac91b1 100644 (file)
@@ -799,6 +799,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
        return changed;
 }
 
+static void kctl_private_value_free(struct snd_kcontrol *kctl)
+{
+       kfree((void *)kctl->private_value);
+}
+
 static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
        int validx, int bUnitID)
 {
@@ -833,6 +838,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
                return -ENOMEM;
        }
 
+       kctl->private_free = kctl_private_value_free;
        err = snd_ctl_add(mixer->chip->card, kctl);
        if (err < 0)
                return err;