/*
 *  Copyright (C) 1994 Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *  General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */
#include <linux/module.h>
#include <linux/regset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/sigcontext.h>
#include <asm/processor.h>
#include <asm/math_emu.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/ptrace.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/user.h>
static DEFINE_PER_CPU(bool, in_kernel_fpu);

void kernel_fpu_disable(void)
{
	WARN_ON(this_cpu_read(in_kernel_fpu));
	this_cpu_write(in_kernel_fpu, true);
}

void kernel_fpu_enable(void)
{
	this_cpu_write(in_kernel_fpu, false);
}
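/*
 * Note: kernel_fpu_disable()/kernel_fpu_enable() only set and clear the
 * per-cpu in_kernel_fpu flag checked by interrupted_kernel_fpu_idle()
 * below, so they give a caller a way to make irq_fpu_usable() report
 * false while per-cpu FPU state is being manipulated.
 */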
/*
 * Were we in an interrupt that interrupted kernel mode?
 *
 * Without eager FPU switching we can do a kernel_fpu_begin/end() pair
 * *ONLY* if that pair does nothing at all: the thread must not have fpu
 * (so that we don't try to save the FPU state), and TS must be set (so
 * that the clts/stts pair does nothing that is visible in the interrupted
 * kernel thread).
 *
 * In the eagerfpu case we simply return true: the thread very likely has
 * the FPU, but we are not going to set/clear TS anyway.
 */
static inline bool interrupted_kernel_fpu_idle(void)
{
	if (this_cpu_read(in_kernel_fpu))
		return false;

	if (use_eager_fpu())
		return true;

	return !__thread_has_fpu(current) &&
		(read_cr0() & X86_CR0_TS);
}
/*
 * Were we in user mode (or vm86 mode) when we were
 * interrupted?
 *
 * Doing kernel_fpu_begin/end() is ok if we are running
 * in an interrupt context from user mode - we'll just
 * save the FPU state as required.
 */
static inline bool interrupted_user_mode(void)
{
	struct pt_regs *regs = get_irq_regs();
	return regs && user_mode(regs);
}
/*
 * Can we use the FPU in kernel mode with the
 * whole "kernel_fpu_begin/end()" sequence?
 *
 * It's always ok in process context (ie "not interrupt")
 * but it is sometimes ok even from an irq.
 */
bool irq_fpu_usable(void)
{
	return !in_interrupt() ||
		interrupted_user_mode() ||
		interrupted_kernel_fpu_idle();
}
EXPORT_SYMBOL(irq_fpu_usable);
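/*
 * A minimal usage sketch for callers elsewhere in the kernel (assuming the
 * kernel_fpu_begin()/kernel_fpu_end() wrappers from <asm/i387.h>, which
 * bracket the __kernel_fpu_begin()/__kernel_fpu_end() helpers below with
 * preempt_disable()/preempt_enable()):
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... SSE/AVX code ...
 *		kernel_fpu_end();
 *	} else {
 *		... integer-only fallback ...
 *	}
 */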
void __kernel_fpu_begin(void)
{
	struct task_struct *me = current;

	this_cpu_write(in_kernel_fpu, true);

	if (__thread_has_fpu(me)) {
		__save_init_fpu(me);
	} else {
		this_cpu_write(fpu_owner_task, NULL);
		if (!use_eager_fpu())
			clts();
	}
}
EXPORT_SYMBOL(__kernel_fpu_begin);

void __kernel_fpu_end(void)
{
	struct task_struct *me = current;

	if (__thread_has_fpu(me)) {
		if (WARN_ON(restore_fpu_checking(me)))
			fpu_reset_state(me);
	} else if (!use_eager_fpu()) {
		stts();
	}

	this_cpu_write(in_kernel_fpu, false);
}
EXPORT_SYMBOL(__kernel_fpu_end);
void unlazy_fpu(struct task_struct *tsk)
{
	preempt_disable();
	if (__thread_has_fpu(tsk)) {
		if (use_eager_fpu()) {
			__save_fpu(tsk);
		} else {
			__save_init_fpu(tsk);
			__thread_fpu_end(tsk);
		}
	}
	preempt_enable();
}
EXPORT_SYMBOL(unlazy_fpu);
unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
unsigned int xstate_size;
EXPORT_SYMBOL_GPL(xstate_size);
static struct i387_fxsave_struct fx_scratch;

static void mxcsr_feature_mask_init(void)
{
	unsigned long mask = 0;

	if (cpu_has_fxsr) {
		memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
		asm volatile("fxsave %0" : "+m" (fx_scratch));
		mask = fx_scratch.mxcsr_mask;
		if (mask == 0)
			mask = 0x0000ffbf;
	}
	mxcsr_feature_mask &= mask;
}
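/*
 * Example of the fallback above: if the fxsave image reports an all-zero
 * mxcsr_mask, the architectural default 0x0000ffbf is assumed, which
 * differs from 0xffff only in bit 6 (DAZ), i.e. Denormals-Are-Zero is
 * treated as unsupported on such CPUs.
 */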
static void init_thread_xstate(void)
{
	/*
	 * Note that xstate_size might be overwritten later during
	 * xsave_init().
	 */

	if (!cpu_has_fpu) {
		/*
		 * Disable xsave as we do not support it if i387
		 * emulation is enabled.
		 */
		setup_clear_cpu_cap(X86_FEATURE_XSAVE);
		setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
		xstate_size = sizeof(struct i387_soft_struct);
		return;
	}

	if (cpu_has_fxsr)
		xstate_size = sizeof(struct i387_fxsave_struct);
	else
		xstate_size = sizeof(struct i387_fsave_struct);
}
/*
 * Called at bootup to set up the initial FPU state that is later cloned
 * into all processes.
 */
void fpu_init(void)
{
	unsigned long cr0;
	unsigned long cr4_mask = 0;

#ifndef CONFIG_MATH_EMULATION
	if (!cpu_has_fpu) {
		pr_emerg("No FPU found and no math emulation present\n");
		pr_emerg("Giving up\n");
		for (;;)
			asm volatile("hlt");
	}
#endif
	if (cpu_has_fxsr)
		cr4_mask |= X86_CR4_OSFXSR;
	if (cpu_has_xmm)
		cr4_mask |= X86_CR4_OSXMMEXCPT;
	if (cr4_mask)
		cr4_set_bits(cr4_mask);

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
	if (!cpu_has_fpu)
		cr0 |= X86_CR0_EM;
	write_cr0(cr0);

	/*
	 * init_thread_xstate is only called once to avoid overriding
	 * xstate_size during boot time or during CPU hotplug.
	 */
	if (xstate_size == 0)
		init_thread_xstate();

	mxcsr_feature_mask_init();
	xsave_init();
	eager_fpu_init();
}
void fpu_finit(struct fpu *fpu)
{
	if (!cpu_has_fpu) {
		finit_soft_fpu(&fpu->state->soft);
		return;
	}

	memset(fpu->state, 0, xstate_size);

	if (cpu_has_fxsr) {
		fx_finit(&fpu->state->fxsave);
	} else {
		struct i387_fsave_struct *fp = &fpu->state->fsave;
		fp->cwd = 0xffff037fu;
		fp->swd = 0xffff0000u;
		fp->twd = 0xffffffffu;
		fp->fos = 0xffff0000u;
	}
}
EXPORT_SYMBOL_GPL(fpu_finit);
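/*
 * The legacy fsave defaults above match what an fninit would produce:
 * cwd 0x037f masks all x87 exceptions and selects round-to-nearest at
 * extended precision, swd 0 clears the status word, and twd 0xffff tags
 * every register as empty.
 */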
/*
 * The _current_ task is using the FPU for the first time
 * so initialize it and set the mxcsr to its default
 * value at reset if we support XMM instructions and then
 * remember the current task has used the FPU.
 */
int init_fpu(struct task_struct *tsk)
{
	int ret;

	if (tsk_used_math(tsk)) {
		if (cpu_has_fpu && tsk == current)
			unlazy_fpu(tsk);
		task_disable_lazy_fpu_restore(tsk);
		return 0;
	}

	/*
	 * Memory allocation at the first usage of the FPU and other state.
	 */
	ret = fpu_alloc(&tsk->thread.fpu);
	if (ret)
		return ret;

	fpu_finit(&tsk->thread.fpu);

	set_stopped_child_used_math(tsk);
	return 0;
}
EXPORT_SYMBOL_GPL(init_fpu);
/*
 * The xstateregs_active() routine is the same as the fpregs_active() routine,
 * as the "regset->n" for the xstate regset will be updated based on the feature
 * capabilities supported by the xsave.
 */
int fpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return tsk_used_math(target) ? regset->n : 0;
}

int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
{
	return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
}
int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpu.state->fxsave, 0, -1);
}
int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		const void *kbuf, const void __user *ubuf)
{
	int ret;

	if (!cpu_has_fxsr)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.fpu.state->fxsave, 0, -1);

	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;

	/*
	 * update the header bits in the xsave header, indicating the
	 * presence of FP and SSE state.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;

	return ret;
}
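/*
 * Rationale for the mxcsr masking above: FXRSTOR/XRSTOR raise #GP if any
 * reserved MXCSR bit is set, so letting ptrace write arbitrary bits here
 * could crash the traced task on its next FPU restore.
 */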
int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
		unsigned int pos, unsigned int count,
		void *kbuf, void __user *ubuf)
{
	struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	/*
	 * Copy the 48 bytes defined by the software first into the xstate
	 * memory layout in the thread struct, so that we can copy the entire
	 * xstateregs to the user using one user_regset_copyout().
	 */
	memcpy(&xsave->i387.sw_reserved,
		xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
	/*
	 * Copy the xstate memory layout.
	 */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	return ret;
}
int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	struct xsave_struct *xsave = &target->thread.fpu.state->xsave;
	int ret;

	if (!cpu_has_xsave)
		return -ENODEV;

	ret = init_fpu(target);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
	/*
	 * mxcsr reserved bits must be masked to zero for security reasons.
	 */
	xsave->i387.mxcsr &= mxcsr_feature_mask;
	xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
	/*
	 * These bits must be zero.
	 */
	memset(&xsave->xsave_hdr.reserved, 0, 48);
	return ret;
}
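/*
 * Similarly for the header fixups above: XRSTOR takes #GP if xstate_bv
 * sets a feature bit that is not enabled in XCR0, or if the reserved
 * bytes of the xsave header are non-zero, hence the pcntxt_mask filter
 * and the memset of the 48 reserved header bytes.
 */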
#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION

/*
 * FPU tag word conversions.
 */

static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
{
	unsigned int tmp; /* to avoid 16 bit prefixes in the code */

	/* Transform each pair of bits into 01 (valid) or 00 (empty) */
	tmp = ~twd;
	tmp = (tmp | (tmp >> 1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
	/* and move the valid bits to the lower byte. */
	tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
	tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
	tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */

	return tmp;
}
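/*
 * Worked example (for illustration): the i387 tag word uses two bits per
 * register (11 = empty, anything else = in use), the fxsr tag word one bit
 * per register (1 = in use).  For twd = 0xfffc (st0 in use, st1..st7
 * empty), ~twd = 0x0003 collapses through the shift/mask steps above to
 * 0x0001: only bit 0, for st0, survives.
 */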
#define FPREG_ADDR(f, n)	((void *)&(f)->st_space + (n) * 16)
#define FP_EXP_TAG_VALID	0
#define FP_EXP_TAG_ZERO		1
#define FP_EXP_TAG_SPECIAL	2
#define FP_EXP_TAG_EMPTY	3
static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
{
	struct _fpxreg *st;
	u32 tos = (fxsave->swd >> 11) & 7;
	u32 twd = (unsigned long) fxsave->twd;
	u32 tag;
	u32 ret = 0xffff0000u;
	int i;

	for (i = 0; i < 8; i++, twd >>= 1) {
		if (twd & 0x1) {
			st = FPREG_ADDR(fxsave, (i - tos) & 7);

			switch (st->exponent & 0x7fff) {
			case 0x7fff:
				tag = FP_EXP_TAG_SPECIAL;
				break;
			case 0x0000:
				if (!st->significand[0] &&
				    !st->significand[1] &&
				    !st->significand[2] &&
				    !st->significand[3])
					tag = FP_EXP_TAG_ZERO;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			default:
				if (st->significand[3] & 0x8000)
					tag = FP_EXP_TAG_VALID;
				else
					tag = FP_EXP_TAG_SPECIAL;
				break;
			}
		} else {
			tag = FP_EXP_TAG_EMPTY;
		}
		ret |= tag << (2 * i);
	}
	return ret;
}
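/*
 * The reverse direction needs the loop above because fxsave stores only the
 * compressed one-bit-per-register tag word; the original four tag values
 * (valid/zero/special/empty) have to be reconstructed by inspecting each
 * register's exponent and significand.
 */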
/*
 * FXSR floating point environment conversions.
 */

void
convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	env->cwd = fxsave->cwd | 0xffff0000u;
	env->swd = fxsave->swd | 0xffff0000u;
	env->twd = twd_fxsr_to_i387(fxsave);

#ifdef CONFIG_X86_64
	env->fip = fxsave->rip;
	env->foo = fxsave->rdp;
	/*
	 * should be actually ds/cs at fpu exception time, but
	 * that information is not available in 64bit mode.
	 */
	env->fcs = task_pt_regs(tsk)->cs;
	if (tsk == current) {
		savesegment(ds, env->fos);
	} else {
		env->fos = tsk->thread.ds;
	}
	env->fos |= 0xffff0000;
#else
	env->fip = fxsave->fip;
	env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
	env->foo = fxsave->foo;
	env->fos = fxsave->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(to[0]));
}
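/*
 * Note on the copy loop above: the legacy user_i387_ia32_struct packs the
 * eight registers as 10-byte st(n) entries, while the fxsave image keeps
 * them in 16-byte slots, so each register is copied individually using
 * sizeof(to[0]) (10 bytes) rather than with one bulk memcpy.
 */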
void convert_to_fxsr(struct task_struct *tsk,
		     const struct user_i387_ia32_struct *env)
{
	struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
	struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
	struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
	int i;

	fxsave->cwd = env->cwd;
	fxsave->swd = env->swd;
	fxsave->twd = twd_i387_to_fxsr(env->twd);
	fxsave->fop = (u16) ((u32) env->fcs >> 16);
#ifdef CONFIG_X86_64
	fxsave->rip = env->fip;
	fxsave->rdp = env->foo;
	/* cs and ds ignored */
#else
	fxsave->fip = env->fip;
	fxsave->fcs = (env->fcs & 0xffff);
	fxsave->foo = env->foo;
	fxsave->fos = env->fos;
#endif

	for (i = 0; i < 8; ++i)
		memcpy(&to[i], &from[i], sizeof(from[0]));
}
int fpregs_get(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       void *kbuf, void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					   &target->thread.fpu.state->fsave, 0,
					   -1);

	sanitize_i387_state(target);

	if (kbuf && pos == 0 && count == sizeof(env)) {
		convert_from_fxsr(kbuf, target);
		return 0;
	}

	convert_from_fxsr(&env, target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
}
int fpregs_set(struct task_struct *target, const struct user_regset *regset,
	       unsigned int pos, unsigned int count,
	       const void *kbuf, const void __user *ubuf)
{
	struct user_i387_ia32_struct env;
	int ret;

	ret = init_fpu(target);
	if (ret)
		return ret;

	sanitize_i387_state(target);

	if (!static_cpu_has(X86_FEATURE_FPU))
		return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);

	if (!cpu_has_fxsr)
		return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					  &target->thread.fpu.state->fsave, 0,
					  -1);

	if (pos > 0 || count < sizeof(env))
		convert_from_fxsr(&env, target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
	if (!ret)
		convert_to_fxsr(target, &env);

	/*
	 * update the header bit in the xsave header, indicating the
	 * presence of FP.
	 */
	if (cpu_has_xsave)
		target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;

	return ret;
}
/*
 * FPU state for core dumps.
 * This is only used for a.out dumps now.
 * It is declared generically using elf_fpregset_t (which is
 * struct user_i387_struct) but is in fact only used for 32-bit
 * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
 */
int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
{
	struct task_struct *tsk = current;
	int fpvalid;

	fpvalid = !!used_math();
	if (fpvalid)
		fpvalid = !fpregs_get(tsk, NULL,
				      0, sizeof(struct user_i387_ia32_struct),
				      fpu, NULL);

	return fpvalid;
}
EXPORT_SYMBOL(dump_fpu);

#endif	/* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
static int __init no_387(char *s)
{
	setup_clear_cpu_cap(X86_FEATURE_FPU);
	return 1;
}

__setup("no387", no_387);
void fpu_detect(struct cpuinfo_x86 *c)
{
	unsigned long cr0;
	u16 fsw, fcw;

	fsw = fcw = 0xffff;

	cr0 = read_cr0();
	cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
	write_cr0(cr0);

	asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
		     : "+m" (fsw), "+m" (fcw));

	if (fsw == 0 && (fcw & 0x103f) == 0x003f)
		set_cpu_cap(c, X86_FEATURE_FPU);
	else
		clear_cpu_cap(c, X86_FEATURE_FPU);

	/* The final cr0 value is set in fpu_init() */
}
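/*
 * Detection logic recap: fsw and fcw are pre-loaded with 0xffff, and the
 * probe assumes that without an FPU the fnstsw/fnstcw stores leave that
 * pre-load in place.  A real FPU, after fninit, writes status 0x0000 and
 * control 0x037f, which is exactly what the
 * "fsw == 0 && (fcw & 0x103f) == 0x003f" test accepts.
 */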