x86/fpu: Use 'struct fpu' in switch_fpu_prepare()
author Ingo Molnar <mingo@kernel.org>
Thu, 23 Apr 2015 15:39:04 +0000 (17:39 +0200)
committer Ingo Molnar <mingo@kernel.org>
Tue, 19 May 2015 13:47:27 +0000 (15:47 +0200)
Migrate switch_fpu_prepare() to pure 'struct fpu' usage: pass the old and new FPU contexts in directly, instead of having the function dig them out of task_struct pointers itself. Adjust the __switch_to() call sites in process_32.c and process_64.c accordingly.
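
For illustration, a minimal user-space sketch of the interface shape after this change. The struct layout and the helper body below are simplified stand-ins, not the actual kernel code; only the prototype shape and the &task->thread.fpu call-site form are taken from the patch that follows:

#include <stdio.h>

struct fpu {
	int counter;
	int last_cpu;
};

struct thread_struct {
	struct fpu fpu;
};

struct task_struct {
	struct thread_struct thread;
};

/* New-style helper: operates purely on 'struct fpu'. */
static void switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
{
	old_fpu->last_cpu = cpu;
	new_fpu->counter++;
}

int main(void)
{
	struct task_struct prev = { { { 0, -1 } } };
	struct task_struct next = { { { 0, -1 } } };

	/* Call sites pass &task->thread.fpu, mirroring process_32.c/process_64.c: */
	switch_fpu_prepare(&prev.thread.fpu, &next.thread.fpu, 1);

	printf("prev.last_cpu=%d, next.counter=%d\n",
	       prev.thread.fpu.last_cpu, next.thread.fpu.counter);
	return 0;
}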

Reviewed-by: Borislav Petkov <bp@alien8.de>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/x86/include/asm/fpu-internal.h
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c

diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
index 579f7d0a399ded66ad202af93f65209307a5826b..60d2c6f376f3c5c02ac228d984a68c9adaa51142 100644
--- a/arch/x86/include/asm/fpu-internal.h
+++ b/arch/x86/include/asm/fpu-internal.h
@@ -402,10 +402,9 @@ static inline void fpu_reset_state(struct fpu *fpu)
  */
 typedef struct { int preload; } fpu_switch_t;
 
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
+static inline fpu_switch_t
+switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
 {
-       struct fpu *old_fpu = &old->thread.fpu;
-       struct fpu *new_fpu = &new->thread.fpu;
        fpu_switch_t fpu;
 
        /*
@@ -413,33 +412,33 @@ static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct ta
         * or if the past 5 consecutive context-switches used math.
         */
        fpu.preload = new_fpu->fpstate_active &&
-                     (use_eager_fpu() || new->thread.fpu.counter > 5);
+                     (use_eager_fpu() || new_fpu->counter > 5);
 
        if (old_fpu->has_fpu) {
-               if (!fpu_save_init(&old->thread.fpu))
-                       old->thread.fpu.last_cpu = -1;
+               if (!fpu_save_init(old_fpu))
+                       old_fpu->last_cpu = -1;
                else
-                       old->thread.fpu.last_cpu = cpu;
+                       old_fpu->last_cpu = cpu;
 
                /* But leave fpu_fpregs_owner_ctx! */
-               old->thread.fpu.has_fpu = 0;
+               old_fpu->has_fpu = 0;
 
                /* Don't change CR0.TS if we just switch! */
                if (fpu.preload) {
-                       new->thread.fpu.counter++;
+                       new_fpu->counter++;
                        __thread_set_has_fpu(new_fpu);
-                       prefetch(new->thread.fpu.state);
+                       prefetch(new_fpu->state);
                } else if (!use_eager_fpu())
                        stts();
        } else {
-               old->thread.fpu.counter = 0;
-               old->thread.fpu.last_cpu = -1;
+               old_fpu->counter = 0;
+               old_fpu->last_cpu = -1;
                if (fpu.preload) {
-                       new->thread.fpu.counter++;
+                       new_fpu->counter++;
                        if (fpu_want_lazy_restore(new_fpu, cpu))
                                fpu.preload = 0;
                        else
-                               prefetch(new->thread.fpu.state);
+                               prefetch(new_fpu->state);
                        __thread_fpu_begin(new_fpu);
                }
        }
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 1a0edce626b2dde1eff9d2aec4e2aaef13fe313a..5b0ed71dde605bd2a176fa916e9cb53e99108fae 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -248,7 +248,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-       fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+       fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
 
        /*
         * Save away %gs. No need to save %fs, as it was saved on the
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 99cc4b8589ad1e81cda19f24b5d78c39e78e2763..fefe65efd9d6d2ac076622df6a01911febc70361 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -278,7 +278,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        unsigned fsindex, gsindex;
        fpu_switch_t fpu;
 
-       fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+       fpu = switch_fpu_prepare(&prev_p->thread.fpu, &next_p->thread.fpu, cpu);
 
        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().