Merge branch 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Mar 2012 01:21:35 +0000 (18:21 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 30 Mar 2012 01:21:35 +0000 (18:21 -0700)
Pull x86 cleanups from Peter Anvin:
 "The biggest textual change is the cleanup to use symbolic constants
  for x86 trap values.

  The only *functional* change and the reason for the x86/x32 dependency
  is the move of is_ia32_task() into <asm/thread_info.h> so that it can
  be used in other code that needs to understand if a system call comes
  from the compat entry point (and therefore uses i386 system call
  numbers) or not.  One intended user for that is the BPF system call
  filter.  Moving it out of <asm/compat.h> means we can define it
  unconditionally, returning always true on i386."
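
For reference, the moved helper is tiny. A sketch of its post-move definition in
<asm/thread_info.h>, reconstructed from memory, so the exact config guards may
differ slightly:

    static inline bool is_ia32_task(void)
    {
    #ifdef CONFIG_X86_32
            /* Native 32-bit kernel: every task uses i386 syscall numbers. */
            return true;
    #endif
    #ifdef CONFIG_IA32_EMULATION
            /* 64-bit kernel: 32-bit compat tasks are flagged with TS_COMPAT. */
            if (current_thread_info()->status & TS_COMPAT)
                    return true;
    #endif
            return false;
    }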

* 'x86-cleanups-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86: Move is_ia32_task to asm/thread_info.h from asm/compat.h
  x86: Rename trap_no to trap_nr in thread_struct
  x86: Use enum instead of literals for trap values
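
The symbolic names above and in the hunks below come from the trap-number enum in
<asm/traps.h>. A rough sketch of that enum, with the numeric values implied by the
literal-to-name substitutions in this diff and comments of my own:

    enum {
            X86_TRAP_DE = 0,        /*  0: divide error */
            X86_TRAP_DB,            /*  1: debug */
            X86_TRAP_NMI,           /*  2: non-maskable interrupt */
            X86_TRAP_BP,            /*  3: breakpoint (int3) */
            X86_TRAP_OF,            /*  4: overflow */
            X86_TRAP_BR,            /*  5: bound range exceeded */
            X86_TRAP_UD,            /*  6: invalid opcode */
            X86_TRAP_NM,            /*  7: device not available */
            X86_TRAP_DF,            /*  8: double fault */
            X86_TRAP_OLD_MF,        /*  9: coprocessor segment overrun */
            X86_TRAP_TS,            /* 10: invalid TSS */
            X86_TRAP_NP,            /* 11: segment not present */
            X86_TRAP_SS,            /* 12: stack-segment fault */
            X86_TRAP_GP,            /* 13: general protection fault */
            X86_TRAP_PF,            /* 14: page fault */
            X86_TRAP_SPURIOUS,      /* 15: spurious interrupt */
            X86_TRAP_MF,            /* 16: x87 floating-point error */
            X86_TRAP_AC,            /* 17: alignment check */
            X86_TRAP_MC,            /* 18: machine check */
            X86_TRAP_XF,            /* 19: SIMD floating-point exception */
            X86_TRAP_IRET = 32,     /* 32: IRET exception */
    };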

arch/x86/ia32/ia32_signal.c
arch/x86/include/asm/processor.h
arch/x86/kernel/dumpstack.c
arch/x86/kernel/irqinit.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/signal.c
arch/x86/kernel/traps.c
arch/x86/kernel/vm86_32.c
arch/x86/kernel/vsyscall_64.c

diff --combined arch/x86/ia32/ia32_signal.c
index 8ff8e7ddfc55ec65542783979cdb2bcd0bc64c8b,45b4fdd4e1da1638b530b699efe95c1d23b3d76b..a69245ba27e328363de1e389dd5ba50ed2eb979c
@@@ -22,7 -22,6 +22,7 @@@
  #include <asm/ucontext.h>
  #include <asm/uaccess.h>
  #include <asm/i387.h>
 +#include <asm/fpu-internal.h>
  #include <asm/ptrace.h>
  #include <asm/ia32_unistd.h>
  #include <asm/user32.h>
@@@ -38,7 -37,7 +38,7 @@@
  int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from)
  {
        int err = 0;
 -      bool ia32 = !is_ia32_task();
 +      bool ia32 = is_ia32_task();
  
        if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
                return -EFAULT;
@@@ -346,7 -345,7 +346,7 @@@ static int ia32_setup_sigcontext(struc
                put_user_ex(regs->dx, &sc->dx);
                put_user_ex(regs->cx, &sc->cx);
                put_user_ex(regs->ax, &sc->ax);
-               put_user_ex(current->thread.trap_no, &sc->trapno);
+               put_user_ex(current->thread.trap_nr, &sc->trapno);
                put_user_ex(current->thread.error_code, &sc->err);
                put_user_ex(regs->ip, &sc->ip);
                put_user_ex(regs->cs, (unsigned int __user *)&sc->cs);
diff --combined arch/x86/include/asm/processor.h
index f302ef6bb200fe01635acd700f174364daa263d5,f6d0d2eb0832dd41686067d8d1259e642351fcd2..7284c9a6a0b531c29132a475b0429046787de742
@@@ -14,13 -14,13 +14,13 @@@ struct mm_struct
  #include <asm/sigcontext.h>
  #include <asm/current.h>
  #include <asm/cpufeature.h>
 -#include <asm/system.h>
  #include <asm/page.h>
  #include <asm/pgtable_types.h>
  #include <asm/percpu.h>
  #include <asm/msr.h>
  #include <asm/desc_defs.h>
  #include <asm/nops.h>
 +#include <asm/special_insns.h>
  
  #include <linux/personality.h>
  #include <linux/cpumask.h>
  #include <linux/math64.h>
  #include <linux/init.h>
  #include <linux/err.h>
 +#include <linux/irqflags.h>
 +
 +/*
 + * We handle most unaligned accesses in hardware.  On the other hand
 + * unaligned DMA can be quite expensive on some Nehalem processors.
 + *
 + * Based on this we disable the IP header alignment in network drivers.
 + */
 +#define NET_IP_ALIGN  0
  
  #define HBP_NUM 4
  /*
@@@ -171,7 -162,6 +171,7 @@@ extern void early_cpu_init(void)
  extern void identify_boot_cpu(void);
  extern void identify_secondary_cpu(struct cpuinfo_x86 *);
  extern void print_cpu_info(struct cpuinfo_x86 *);
 +void print_cpu_msr(struct cpuinfo_x86 *);
  extern void init_scattered_cpuid_features(struct cpuinfo_x86 *c);
  extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
  extern unsigned short num_cache_leaves;
@@@ -463,7 -453,7 +463,7 @@@ struct thread_struct 
        unsigned long           ptrace_dr7;
        /* Fault info: */
        unsigned long           cr2;
-       unsigned long           trap_no;
+       unsigned long           trap_nr;
        unsigned long           error_code;
        /* floating point and extended processor state */
        struct fpu              fpu;
        unsigned                io_bitmap_max;
  };
  
 -static inline unsigned long native_get_debugreg(int regno)
 -{
 -      unsigned long val = 0;  /* Damn you, gcc! */
 -
 -      switch (regno) {
 -      case 0:
 -              asm("mov %%db0, %0" :"=r" (val));
 -              break;
 -      case 1:
 -              asm("mov %%db1, %0" :"=r" (val));
 -              break;
 -      case 2:
 -              asm("mov %%db2, %0" :"=r" (val));
 -              break;
 -      case 3:
 -              asm("mov %%db3, %0" :"=r" (val));
 -              break;
 -      case 6:
 -              asm("mov %%db6, %0" :"=r" (val));
 -              break;
 -      case 7:
 -              asm("mov %%db7, %0" :"=r" (val));
 -              break;
 -      default:
 -              BUG();
 -      }
 -      return val;
 -}
 -
 -static inline void native_set_debugreg(int regno, unsigned long value)
 -{
 -      switch (regno) {
 -      case 0:
 -              asm("mov %0, %%db0"     ::"r" (value));
 -              break;
 -      case 1:
 -              asm("mov %0, %%db1"     ::"r" (value));
 -              break;
 -      case 2:
 -              asm("mov %0, %%db2"     ::"r" (value));
 -              break;
 -      case 3:
 -              asm("mov %0, %%db3"     ::"r" (value));
 -              break;
 -      case 6:
 -              asm("mov %0, %%db6"     ::"r" (value));
 -              break;
 -      case 7:
 -              asm("mov %0, %%db7"     ::"r" (value));
 -              break;
 -      default:
 -              BUG();
 -      }
 -}
 -
  /*
   * Set IOPL bits in EFLAGS from given mask
   */
@@@ -529,6 -574,14 +529,6 @@@ static inline void native_swapgs(void
  #define __cpuid                       native_cpuid
  #define paravirt_enabled()    0
  
 -/*
 - * These special macros can be used to get or set a debugging register
 - */
 -#define get_debugreg(var, register)                           \
 -      (var) = native_get_debugreg(register)
 -#define set_debugreg(value, register)                         \
 -      native_set_debugreg(register, value)
 -
  static inline void load_sp0(struct tss_struct *tss,
                            struct thread_struct *thread)
  {
@@@ -974,24 -1027,4 +974,24 @@@ extern bool cpu_has_amd_erratum(const i
  #define cpu_has_amd_erratum(x)        (false)
  #endif /* CONFIG_CPU_SUP_AMD */
  
 +#ifdef CONFIG_X86_32
 +/*
 + * disable hlt during certain critical i/o operations
 + */
 +#define HAVE_DISABLE_HLT
 +#endif
 +
 +void disable_hlt(void);
 +void enable_hlt(void);
 +
 +void cpu_idle_wait(void);
 +
 +extern unsigned long arch_align_stack(unsigned long sp);
 +extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
 +
 +void default_idle(void);
 +bool set_pm_idle_to_default(void);
 +
 +void stop_this_cpu(void *dummy);
 +
  #endif /* _ASM_X86_PROCESSOR_H */
diff --combined arch/x86/kernel/dumpstack.c
index 90bf130f09bc16742294154b14f30310d6a11fcd,28f98706b08b5d4671611791ccb5c39041a8583e..1b81839b6c8890f261e486429db51f37a9c68042
@@@ -37,16 -37,13 +37,16 @@@ print_ftrace_graph_addr(unsigned long a
                        const struct stacktrace_ops *ops,
                        struct thread_info *tinfo, int *graph)
  {
 -      struct task_struct *task = tinfo->task;
 +      struct task_struct *task;
        unsigned long ret_addr;
 -      int index = task->curr_ret_stack;
 +      int index;
  
        if (addr != (unsigned long)return_to_handler)
                return;
  
 +      task = tinfo->task;
 +      index = task->curr_ret_stack;
 +
        if (!task->ret_stack || index < *graph)
                return;
  
@@@ -268,7 -265,7 +268,7 @@@ int __kprobes __die(const char *str, st
  #endif
        printk("\n");
        if (notify_die(DIE_OOPS, str, regs, err,
-                       current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
+                       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
                return 1;
  
        show_registers(regs);
diff --combined arch/x86/kernel/irqinit.c
index 6d5fc8cfd5d6a1b90b4dbd5e63dd68c7a5c4b564,7b77062dea1123bf0bbbaed7589ea61b6ad4472d..252981afd6c4063cabaf3ebb0145af1f620f8209
@@@ -16,6 -16,7 +16,6 @@@
  #include <linux/delay.h>
  
  #include <linux/atomic.h>
 -#include <asm/system.h>
  #include <asm/timer.h>
  #include <asm/hw_irq.h>
  #include <asm/pgtable.h>
@@@ -60,7 -61,7 +60,7 @@@ static irqreturn_t math_error_irq(int c
        outb(0, 0xF0);
        if (ignore_fpu_irq || !boot_cpu_data.hard_math)
                return IRQ_NONE;
-       math_error(get_irq_regs(), 0, 16);
+       math_error(get_irq_regs(), 0, X86_TRAP_MF);
        return IRQ_HANDLED;
  }
  
@@@ -305,10 -306,10 +305,10 @@@ void __init native_init_IRQ(void
         * us. (some of these will be overridden and become
         * 'special' SMP interrupts)
         */
 -      for (i = FIRST_EXTERNAL_VECTOR; i < NR_VECTORS; i++) {
 +      i = FIRST_EXTERNAL_VECTOR;
 +      for_each_clear_bit_from(i, used_vectors, NR_VECTORS) {
                /* IA32_SYSCALL_VECTOR could be used in trap_init already. */
 -              if (!test_bit(i, used_vectors))
 -                      set_intr_gate(i, interrupt[i-FIRST_EXTERNAL_VECTOR]);
 +              set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]);
        }
  
        if (!acpi_ioapic && !of_ioapic)
diff --combined arch/x86/kernel/ptrace.c
index 284c35ae60e43a0380af94195612b58799280bd8,6fb330adc7c79d733dcf74f7b65e1689f6037623..685845cf16e0963efd746e1d1ec3b49b69278cb6
  
  #include <asm/uaccess.h>
  #include <asm/pgtable.h>
 -#include <asm/system.h>
  #include <asm/processor.h>
  #include <asm/i387.h>
 +#include <asm/fpu-internal.h>
  #include <asm/debugreg.h>
  #include <asm/ldt.h>
  #include <asm/desc.h>
  #include <asm/prctl.h>
  #include <asm/proto.h>
  #include <asm/hw_breakpoint.h>
+ #include <asm/traps.h>
  
  #include "tls.h"
  
@@@ -1425,7 -1426,7 +1426,7 @@@ static void fill_sigtrap_info(struct ta
                                int error_code, int si_code,
                                struct siginfo *info)
  {
-       tsk->thread.trap_no = 1;
+       tsk->thread.trap_nr = X86_TRAP_DB;
        tsk->thread.error_code = error_code;
  
        memset(info, 0, sizeof(*info));
diff --combined arch/x86/kernel/signal.c
index 5134e17855f05ffabe448cdecd663875c0183b06,9c73acc1c86012c6694d931cc98bc7c340010d86..115eac431483cbc6acc3a01d54f26abcd69d0227
@@@ -22,7 -22,6 +22,7 @@@
  #include <asm/processor.h>
  #include <asm/ucontext.h>
  #include <asm/i387.h>
 +#include <asm/fpu-internal.h>
  #include <asm/vdso.h>
  #include <asm/mce.h>
  #include <asm/sighandling.h>
@@@ -151,7 -150,7 +151,7 @@@ int setup_sigcontext(struct sigcontext 
                put_user_ex(regs->r15, &sc->r15);
  #endif /* CONFIG_X86_64 */
  
-               put_user_ex(current->thread.trap_no, &sc->trapno);
+               put_user_ex(current->thread.trap_nr, &sc->trapno);
                put_user_ex(current->thread.error_code, &sc->err);
                put_user_ex(regs->ip, &sc->ip);
  #ifdef CONFIG_X86_32
diff --combined arch/x86/kernel/traps.c
index 860f126ca23343f2c3a2052a2768f20d11385b20,c6d17ad59b8aa1b16e3aedd1d87273e160395d94..ff9281f1602913a56f8b017d05b3712f37daa27b
  #include <asm/processor.h>
  #include <asm/debugreg.h>
  #include <linux/atomic.h>
 -#include <asm/system.h>
  #include <asm/traps.h>
  #include <asm/desc.h>
  #include <asm/i387.h>
 +#include <asm/fpu-internal.h>
  #include <asm/mce.h>
  
  #include <asm/mach_traps.h>
@@@ -119,7 -119,7 +119,7 @@@ do_trap(int trapnr, int signr, char *st
                 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
                 * On nmi (interrupt 2), do_trap should not be called.
                 */
-               if (trapnr < 6)
+               if (trapnr < X86_TRAP_UD)
                        goto vm86_trap;
                goto trap_signal;
        }
  trap_signal:
  #endif
        /*
-        * We want error_code and trap_no set for userspace faults and
+        * We want error_code and trap_nr set for userspace faults and
         * kernelspace faults which result in die(), but not
         * kernelspace faults which are fixed up.  die() gives the
         * process no chance to handle the signal and notice the
         * delivered, faults.  See also do_general_protection below.
         */
        tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = trapnr;
+       tsk->thread.trap_nr = trapnr;
  
  #ifdef CONFIG_X86_64
        if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
  kernel_trap:
        if (!fixup_exception(regs)) {
                tsk->thread.error_code = error_code;
-               tsk->thread.trap_no = trapnr;
+               tsk->thread.trap_nr = trapnr;
                die(str, regs, error_code);
        }
        return;
@@@ -203,27 -203,31 +203,31 @@@ dotraplinkage void do_##name(struct pt_
        do_trap(trapnr, signr, str, regs, error_code, &info);           \
  }
  
- DO_ERROR_INFO(0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->ip)
- DO_ERROR(4, SIGSEGV, "overflow", overflow)
- DO_ERROR(5, SIGSEGV, "bounds", bounds)
- DO_ERROR_INFO(6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->ip)
- DO_ERROR(9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
- DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
- DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
+ DO_ERROR_INFO(X86_TRAP_DE, SIGFPE, "divide error", divide_error, FPE_INTDIV,
+               regs->ip)
+ DO_ERROR(X86_TRAP_OF, SIGSEGV, "overflow", overflow)
+ DO_ERROR(X86_TRAP_BR, SIGSEGV, "bounds", bounds)
+ DO_ERROR_INFO(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN,
+               regs->ip)
+ DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",
+               coprocessor_segment_overrun)
+ DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
+ DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
  #ifdef CONFIG_X86_32
- DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
+ DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
  #endif
- DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
+ DO_ERROR_INFO(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check,
+               BUS_ADRALN, 0)
  
  #ifdef CONFIG_X86_64
  /* Runs on IST stack */
  dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
  {
        if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
-                       12, SIGBUS) == NOTIFY_STOP)
+                       X86_TRAP_SS, SIGBUS) == NOTIFY_STOP)
                return;
        preempt_conditional_sti(regs);
-       do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
+       do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
        preempt_conditional_cli(regs);
  }
  
@@@ -233,10 -237,10 +237,10 @@@ dotraplinkage void do_double_fault(stru
        struct task_struct *tsk = current;
  
        /* Return not checked because a double fault cannot be ignored */
-       notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
+       notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
  
        tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = 8;
+       tsk->thread.trap_nr = X86_TRAP_DF;
  
        /*
         * This is always a kernel trap and never fixable (and thus must
@@@ -264,7 -268,7 +268,7 @@@ do_general_protection(struct pt_regs *r
                goto gp_in_kernel;
  
        tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = 13;
+       tsk->thread.trap_nr = X86_TRAP_GP;
  
        if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
                        printk_ratelimit()) {
@@@ -291,9 -295,9 +295,9 @@@ gp_in_kernel
                return;
  
        tsk->thread.error_code = error_code;
-       tsk->thread.trap_no = 13;
-       if (notify_die(DIE_GPF, "general protection fault", regs,
-                               error_code, 13, SIGSEGV) == NOTIFY_STOP)
+       tsk->thread.trap_nr = X86_TRAP_GP;
+       if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
+                       X86_TRAP_GP, SIGSEGV) == NOTIFY_STOP)
                return;
        die("general protection fault", regs, error_code);
  }
  dotraplinkage void __kprobes do_int3(struct pt_regs *regs, long error_code)
  {
  #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
-       if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-                       == NOTIFY_STOP)
+       if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+                               SIGTRAP) == NOTIFY_STOP)
                return;
  #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
  
-       if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
-                       == NOTIFY_STOP)
+       if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
+                       SIGTRAP) == NOTIFY_STOP)
                return;
  
        /*
         */
        debug_stack_usage_inc();
        preempt_conditional_sti(regs);
-       do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
+       do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
        preempt_conditional_cli(regs);
        debug_stack_usage_dec();
  }
@@@ -422,8 -426,8 +426,8 @@@ dotraplinkage void __kprobes do_debug(s
        preempt_conditional_sti(regs);
  
        if (regs->flags & X86_VM_MASK) {
-               handle_vm86_trap((struct kernel_vm86_regs *) regs,
-                               error_code, 1);
+               handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
+                                       X86_TRAP_DB);
                preempt_conditional_cli(regs);
                debug_stack_usage_dec();
                return;
@@@ -460,7 -464,8 +464,8 @@@ void math_error(struct pt_regs *regs, i
        struct task_struct *task = current;
        siginfo_t info;
        unsigned short err;
-       char *str = (trapnr == 16) ? "fpu exception" : "simd exception";
+       char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
+                                               "simd exception";
  
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
                return;
        {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
-                       task->thread.trap_no = trapnr;
+                       task->thread.trap_nr = trapnr;
                        die(str, regs, error_code);
                }
                return;
         * Save the info for the exception handler and clear the error.
         */
        save_init_fpu(task);
-       task->thread.trap_no = trapnr;
+       task->thread.trap_nr = trapnr;
        task->thread.error_code = error_code;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *)regs->ip;
-       if (trapnr == 16) {
+       if (trapnr == X86_TRAP_MF) {
                unsigned short cwd, swd;
                /*
                 * (~cwd & swd) will mask out exceptions that are not set to unmasked
                info.si_code = FPE_FLTRES;
        } else {
                /*
-                * If we're using IRQ 13, or supposedly even some trap 16
-                * implementations, it's possible we get a spurious trap...
+                * If we're using IRQ 13, or supposedly even some trap
+                * X86_TRAP_MF implementations, it's possible
+                * we get a spurious trap, which is not an error.
                 */
-               return;         /* Spurious trap, no error */
+               return;
        }
        force_sig_info(SIGFPE, &info, task);
  }
@@@ -543,13 -549,13 +549,13 @@@ dotraplinkage void do_coprocessor_error
        ignore_fpu_irq = 1;
  #endif
  
-       math_error(regs, error_code, 16);
+       math_error(regs, error_code, X86_TRAP_MF);
  }
  
  dotraplinkage void
  do_simd_coprocessor_error(struct pt_regs *regs, long error_code)
  {
-       math_error(regs, error_code, 19);
+       math_error(regs, error_code, X86_TRAP_XF);
  }
  
  dotraplinkage void
@@@ -643,20 -649,21 +649,21 @@@ dotraplinkage void do_iret_error(struc
        info.si_errno = 0;
        info.si_code = ILL_BADSTK;
        info.si_addr = NULL;
-       if (notify_die(DIE_TRAP, "iret exception",
-                       regs, error_code, 32, SIGILL) == NOTIFY_STOP)
+       if (notify_die(DIE_TRAP, "iret exception", regs, error_code,
+                       X86_TRAP_IRET, SIGILL) == NOTIFY_STOP)
                return;
-       do_trap(32, SIGILL, "iret exception", regs, error_code, &info);
+       do_trap(X86_TRAP_IRET, SIGILL, "iret exception", regs, error_code,
+               &info);
  }
  #endif
  
  /* Set of traps needed for early debugging. */
  void __init early_trap_init(void)
  {
-       set_intr_gate_ist(1, &debug, DEBUG_STACK);
+       set_intr_gate_ist(X86_TRAP_DB, &debug, DEBUG_STACK);
        /* int3 can be called from all */
-       set_system_intr_gate_ist(3, &int3, DEBUG_STACK);
-       set_intr_gate(14, &page_fault);
+       set_system_intr_gate_ist(X86_TRAP_BP, &int3, DEBUG_STACK);
+       set_intr_gate(X86_TRAP_PF, &page_fault);
        load_idt(&idt_descr);
  }
  
@@@ -672,30 -679,30 +679,30 @@@ void __init trap_init(void
        early_iounmap(p, 4);
  #endif
  
-       set_intr_gate(0, &divide_error);
-       set_intr_gate_ist(2, &nmi, NMI_STACK);
+       set_intr_gate(X86_TRAP_DE, &divide_error);
+       set_intr_gate_ist(X86_TRAP_NMI, &nmi, NMI_STACK);
        /* int4 can be called from all */
-       set_system_intr_gate(4, &overflow);
-       set_intr_gate(5, &bounds);
-       set_intr_gate(6, &invalid_op);
-       set_intr_gate(7, &device_not_available);
+       set_system_intr_gate(X86_TRAP_OF, &overflow);
+       set_intr_gate(X86_TRAP_BR, &bounds);
+       set_intr_gate(X86_TRAP_UD, &invalid_op);
+       set_intr_gate(X86_TRAP_NM, &device_not_available);
  #ifdef CONFIG_X86_32
-       set_task_gate(8, GDT_ENTRY_DOUBLEFAULT_TSS);
+       set_task_gate(X86_TRAP_DF, GDT_ENTRY_DOUBLEFAULT_TSS);
  #else
-       set_intr_gate_ist(8, &double_fault, DOUBLEFAULT_STACK);
+       set_intr_gate_ist(X86_TRAP_DF, &double_fault, DOUBLEFAULT_STACK);
  #endif
-       set_intr_gate(9, &coprocessor_segment_overrun);
-       set_intr_gate(10, &invalid_TSS);
-       set_intr_gate(11, &segment_not_present);
-       set_intr_gate_ist(12, &stack_segment, STACKFAULT_STACK);
-       set_intr_gate(13, &general_protection);
-       set_intr_gate(15, &spurious_interrupt_bug);
-       set_intr_gate(16, &coprocessor_error);
-       set_intr_gate(17, &alignment_check);
+       set_intr_gate(X86_TRAP_OLD_MF, &coprocessor_segment_overrun);
+       set_intr_gate(X86_TRAP_TS, &invalid_TSS);
+       set_intr_gate(X86_TRAP_NP, &segment_not_present);
+       set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK);
+       set_intr_gate(X86_TRAP_GP, &general_protection);
+       set_intr_gate(X86_TRAP_SPURIOUS, &spurious_interrupt_bug);
+       set_intr_gate(X86_TRAP_MF, &coprocessor_error);
+       set_intr_gate(X86_TRAP_AC, &alignment_check);
  #ifdef CONFIG_X86_MCE
-       set_intr_gate_ist(18, &machine_check, MCE_STACK);
+       set_intr_gate_ist(X86_TRAP_MC, &machine_check, MCE_STACK);
  #endif
-       set_intr_gate(19, &simd_coprocessor_error);
+       set_intr_gate(X86_TRAP_XF, &simd_coprocessor_error);
  
        /* Reserve all the builtin and the syscall vector: */
        for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
  
  #ifdef CONFIG_X86_64
        memcpy(&nmi_idt_table, &idt_table, IDT_ENTRIES * 16);
-       set_nmi_gate(1, &debug);
-       set_nmi_gate(3, &int3);
+       set_nmi_gate(X86_TRAP_DB, &debug);
+       set_nmi_gate(X86_TRAP_BP, &int3);
  #endif
  }
diff --combined arch/x86/kernel/vm86_32.c
index 328cb37bb827915ccc3e87cc6518acd7ccf25686,a1315ab2d6b949f1287e3f132d0c7f705b0a9792..255f58ae71e8991838080eac595786a60f372a62
@@@ -172,7 -172,6 +172,7 @@@ static void mark_screen_rdonly(struct m
        spinlock_t *ptl;
        int i;
  
 +      down_write(&mm->mmap_sem);
        pgd = pgd_offset(mm, 0xA0000);
        if (pgd_none_or_clear_bad(pgd))
                goto out;
        }
        pte_unmap_unlock(pte, ptl);
  out:
 +      up_write(&mm->mmap_sem);
        flush_tlb();
  }
  
@@@ -569,7 -567,7 +569,7 @@@ int handle_vm86_trap(struct kernel_vm86
        }
        if (trapno != 1)
                return 1; /* we let this be handled by the calling routine */
-       current->thread.trap_no = trapno;
+       current->thread.trap_nr = trapno;
        current->thread.error_code = error_code;
        force_sig(SIGTRAP, current);
        return 0;
diff --combined arch/x86/kernel/vsyscall_64.c
index d5c69860b524bc85f1e5eea1685466e42c6c2945,327509b95e0e456c4cfd14798c432bb6b3ae1306..f386dc49f988d753c5b9f8167c80313d5be72178
  #include "vsyscall_trace.h"
  
  DEFINE_VVAR(int, vgetcpu_mode);
 -DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 -{
 -      .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
 -};
 +DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
  
  static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
  
@@@ -77,15 -80,20 +77,15 @@@ early_param("vsyscall", vsyscall_setup)
  
  void update_vsyscall_tz(void)
  {
 -      unsigned long flags;
 -
 -      write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
 -      /* sys_tz has changed */
        vsyscall_gtod_data.sys_tz = sys_tz;
 -      write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
  }
  
  void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
                        struct clocksource *clock, u32 mult)
  {
 -      unsigned long flags;
 +      struct timespec monotonic;
  
 -      write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
 +      write_seqcount_begin(&vsyscall_gtod_data.seq);
  
        /* copy vsyscall data */
        vsyscall_gtod_data.clock.vclock_mode    = clock->archdata.vclock_mode;
        vsyscall_gtod_data.clock.mask           = clock->mask;
        vsyscall_gtod_data.clock.mult           = mult;
        vsyscall_gtod_data.clock.shift          = clock->shift;
 +
        vsyscall_gtod_data.wall_time_sec        = wall_time->tv_sec;
        vsyscall_gtod_data.wall_time_nsec       = wall_time->tv_nsec;
 -      vsyscall_gtod_data.wall_to_monotonic    = *wtm;
 +
 +      monotonic = timespec_add(*wall_time, *wtm);
 +      vsyscall_gtod_data.monotonic_time_sec   = monotonic.tv_sec;
 +      vsyscall_gtod_data.monotonic_time_nsec  = monotonic.tv_nsec;
 +
        vsyscall_gtod_data.wall_time_coarse     = __current_kernel_time();
 +      vsyscall_gtod_data.monotonic_time_coarse =
 +              timespec_add(vsyscall_gtod_data.wall_time_coarse, *wtm);
  
 -      write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
 +      write_seqcount_end(&vsyscall_gtod_data.seq);
  }
  
  static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
@@@ -152,7 -153,7 +152,7 @@@ static bool write_ok_or_segv(unsigned l
  
                thread->error_code      = 6;  /* user fault, no page, write */
                thread->cr2             = ptr;
-               thread->trap_no         = 14;
+               thread->trap_nr         = X86_TRAP_PF;
  
                memset(&info, 0, sizeof(info));
                info.si_signo           = SIGSEGV;