Merge branch 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Mar 2012 16:41:22 +0000 (09:41 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Thu, 22 Mar 2012 16:41:22 +0000 (09:41 -0700)
Pull x86/fpu changes from Ingo Molnar.

* 'x86-fpu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  i387: Split up <asm/i387.h> into exported and internal interfaces
  i387: Uninline the generic FP helpers that we expose to kernel modules

arch/x86/kernel/cpu/common.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kvm/x86.c

index b3e0081f9a2b1697faf058c8da77b997deeedfbe,89620b1725d407a12f279c7955f224bdec20f5e3..ade9c794ed9860004f148379996b1fcf07032054
@@@ -28,6 -28,7 +28,7 @@@
  #include <asm/apic.h>
  #include <asm/desc.h>
  #include <asm/i387.h>
+ #include <asm/fpu-internal.h>
  #include <asm/mtrr.h>
  #include <linux/numa.h>
  #include <asm/asm.h>
@@@ -933,7 -934,7 +934,7 @@@ static const struct msr_range msr_range
        { 0xc0011000, 0xc001103b},
  };
  
 -static void __cpuinit print_cpu_msr(void)
 +static void __cpuinit __print_cpu_msr(void)
  {
        unsigned index_min, index_max;
        unsigned index;
@@@ -997,13 -998,13 +998,13 @@@ void __cpuinit print_cpu_info(struct cp
        else
                printk(KERN_CONT "\n");
  
 -#ifdef CONFIG_SMP
 +      __print_cpu_msr();
 +}
 +
 +void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
 +{
        if (c->cpu_index < show_msr)
 -              print_cpu_msr();
 -#else
 -      if (show_msr)
 -              print_cpu_msr();
 -#endif
 +              __print_cpu_msr();
  }
  
  static __init int setup_disablecpuid(char *arg)
@@@ -1045,7 -1046,6 +1046,6 @@@ DEFINE_PER_CPU(char *, irq_stack_ptr) 
  DEFINE_PER_CPU(unsigned int, irq_count) = -1;
  
  DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
- EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
  
  /*
   * Special IST stacks which the CPU switches to when it calls
@@@ -1115,7 -1115,6 +1115,6 @@@ void debug_stack_reset(void
  DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
  EXPORT_PER_CPU_SYMBOL(current_task);
  DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
- EXPORT_PER_CPU_SYMBOL(fpu_owner_task);
  
  #ifdef CONFIG_CC_STACKPROTECTOR
  DEFINE_PER_CPU_ALIGNED(struct stack_canary, stack_canary);
index 44eefde92109ee70f7d70e884c321fa31b624b0c,c38d84e01022d31b36b44e0fdae4c4536f19e3d5..14baf78d5a1fd86b969941550f83024ed7c77246
@@@ -21,6 -21,7 +21,7 @@@
  #include <asm/idle.h>
  #include <asm/uaccess.h>
  #include <asm/i387.h>
+ #include <asm/fpu-internal.h>
  #include <asm/debugreg.h>
  
  struct kmem_cache *task_xstate_cachep;
@@@ -377,8 -378,8 +378,8 @@@ static inline int hlt_use_halt(void
  void default_idle(void)
  {
        if (hlt_use_halt()) {
 -              trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 -              trace_cpu_idle(1, smp_processor_id());
 +              trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
 +              trace_cpu_idle_rcuidle(1, smp_processor_id());
                current_thread_info()->status &= ~TS_POLLING;
                /*
                 * TS_POLLING-cleared state must be visible before we
                else
                        local_irq_enable();
                current_thread_info()->status |= TS_POLLING;
 -              trace_power_end(smp_processor_id());
 -              trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 +              trace_power_end_rcuidle(smp_processor_id());
 +              trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else {
                local_irq_enable();
                /* loop is done by the caller */
@@@ -450,8 -451,8 +451,8 @@@ EXPORT_SYMBOL_GPL(cpu_idle_wait)
  static void mwait_idle(void)
  {
        if (!need_resched()) {
 -              trace_power_start(POWER_CSTATE, 1, smp_processor_id());
 -              trace_cpu_idle(1, smp_processor_id());
 +              trace_power_start_rcuidle(POWER_CSTATE, 1, smp_processor_id());
 +              trace_cpu_idle_rcuidle(1, smp_processor_id());
                if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
                        clflush((void *)&current_thread_info()->flags);
  
                        __sti_mwait(0, 0);
                else
                        local_irq_enable();
 -              trace_power_end(smp_processor_id());
 -              trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 +              trace_power_end_rcuidle(smp_processor_id());
 +              trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
        } else
                local_irq_enable();
  }
   */
  static void poll_idle(void)
  {
 -      trace_power_start(POWER_CSTATE, 0, smp_processor_id());
 -      trace_cpu_idle(0, smp_processor_id());
 +      trace_power_start_rcuidle(POWER_CSTATE, 0, smp_processor_id());
 +      trace_cpu_idle_rcuidle(0, smp_processor_id());
        local_irq_enable();
        while (!need_resched())
                cpu_relax();
 -      trace_power_end(smp_processor_id());
 -      trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
 +      trace_power_end_rcuidle(smp_processor_id());
 +      trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
  }
  
  /*
index 49888fefe79434de793d8986cb2b98a5fe9c9d8f,ee32dee7a0a312e6f5a8e87a5340fddae9151451..9d7d4842bfafee620bbead3aac128594bbe7f912
@@@ -45,6 -45,7 +45,7 @@@
  #include <asm/ldt.h>
  #include <asm/processor.h>
  #include <asm/i387.h>
+ #include <asm/fpu-internal.h>
  #include <asm/desc.h>
  #ifdef CONFIG_MATH_EMULATION
  #include <asm/math_emu.h>
@@@ -119,7 -120,9 +120,7 @@@ void cpu_idle(void
                }
                rcu_idle_exit();
                tick_nohz_idle_exit();
 -              preempt_enable_no_resched();
 -              schedule();
 -              preempt_disable();
 +              schedule_preempt_disabled();
        }
  }
  
index 442e7bfe10ae5ef2879374498e57026011e5a98f,5bad3c71e48f3275e1bcbcd8df2ec52fab7f324d..292da13fc5aa85867522a5004475cfa9c594e56a
@@@ -43,6 -43,7 +43,7 @@@
  #include <asm/system.h>
  #include <asm/processor.h>
  #include <asm/i387.h>
+ #include <asm/fpu-internal.h>
  #include <asm/mmu_context.h>
  #include <asm/prctl.h>
  #include <asm/desc.h>
@@@ -156,7 -157,9 +157,7 @@@ void cpu_idle(void
                }
  
                tick_nohz_idle_exit();
 -              preempt_enable_no_resched();
 -              schedule();
 -              preempt_disable();
 +              schedule_preempt_disabled();
        }
  }
  
@@@ -340,7 -343,6 +341,7 @@@ start_thread_common(struct pt_regs *reg
        loadsegment(es, _ds);
        loadsegment(ds, _ds);
        load_gs_index(0);
 +      current->thread.usersp  = new_sp;
        regs->ip                = new_ip;
        regs->sp                = new_sp;
        percpu_write(old_rsp, new_sp);
diff --combined arch/x86/kvm/x86.c
index bb4fd2636bc20d219113b3f8b02d6bc05159f87e,b937b6179d80f3bbeb3497d8e22e0def8bcce2fe..54696b5f8443509eb9ed94ab77c9617b46f4407b
@@@ -57,6 -57,7 +57,7 @@@
  #include <asm/mtrr.h>
  #include <asm/mce.h>
  #include <asm/i387.h>
+ #include <asm/fpu-internal.h> /* Ugh! */
  #include <asm/xcr.h>
  #include <asm/pvclock.h>
  #include <asm/div64.h>
@@@ -1162,12 -1163,12 +1163,12 @@@ static int kvm_guest_time_update(struc
         */
        vcpu->hv_clock.version += 2;
  
 -      shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
 +      shared_kaddr = kmap_atomic(vcpu->time_page);
  
        memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
               sizeof(vcpu->hv_clock));
  
 -      kunmap_atomic(shared_kaddr, KM_USER0);
 +      kunmap_atomic(shared_kaddr);
  
        mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
        return 0;
@@@ -3848,7 -3849,7 +3849,7 @@@ static int emulator_cmpxchg_emulated(st
                goto emul_write;
        }
  
 -      kaddr = kmap_atomic(page, KM_USER0);
 +      kaddr = kmap_atomic(page);
        kaddr += offset_in_page(gpa);
        switch (bytes) {
        case 1:
        default:
                BUG();
        }
 -      kunmap_atomic(kaddr, KM_USER0);
 +      kunmap_atomic(kaddr);
        kvm_release_page_dirty(page);
  
        if (!exchanged)