/*
 * Copyright (C) 1994 Linus Torvalds
 *
 * Pentium III FXSR, SSE support
 * General FPU state handling cleanups
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 * x86-64 work by Andi Kleen 2002
 */

#ifndef _ASM_X86_I387_H
#define _ASM_X86_I387_H

#ifndef __ASSEMBLY__

#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/regset.h>
#include <linux/hardirq.h>
#include <linux/slab.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/uaccess.h>
#include <asm/xsave.h>

extern unsigned int sig_xstate_size;
extern void fpu_init(void);
extern void mxcsr_feature_mask_init(void);
extern int init_fpu(struct task_struct *child);
extern asmlinkage void math_state_restore(void);
extern void __math_state_restore(void);
extern void init_thread_xstate(void);
extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);

extern user_regset_active_fn fpregs_active, xfpregs_active;
extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
			  xstateregs_get;
extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
			  xstateregs_set;

/*
 * xstateregs_active == fpregs_active. Please refer to the comment
 * at the definition of fpregs_active.
 */
#define xstateregs_active	fpregs_active

extern struct _fpx_sw_bytes fx_sw_reserved;
#ifdef CONFIG_IA32_EMULATION
extern unsigned int sig_xstate_ia32_size;
extern struct _fpx_sw_bytes fx_sw_reserved_ia32;
extern int save_i387_xstate_ia32(void __user *buf);
extern int restore_i387_xstate_ia32(void __user *buf);
#endif

#define X87_FSW_ES (1 << 7)	/* Exception Summary */

static inline bool use_xsave(void)
{
        u8 has_xsave;

        alternative_io("mov $0, %0", "mov $1, %0",
                       X86_FEATURE_XSAVE, "=qm" (has_xsave));

        return has_xsave;
}

#ifdef CONFIG_X86_64

/* Ignore delayed exceptions from user space */
static inline void tolerant_fwait(void)
{
        asm volatile("1: fwait\n"
                     "2:\n"
                     _ASM_EXTABLE(1b, 2b));
}

static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        int err;

        asm volatile("1: rex64/fxrstor (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err)
#if 0 /* See comment in fpu_fxsave() below. */
                     : [fx] "r" (fx), "m" (*fx), "0" (0));
#else
                     : [fx] "cdaSDb" (fx), "m" (*fx), "0" (0));
#endif
        return err;
}

/* AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
   is pending. Clear the x87 state here by setting it to fixed
   values. The kernel data segment can sometimes be 0 and sometimes
   the new user value. Both should be ok.
   Use the PDA as a safe address because it should already be in L1. */
static inline void fpu_clear(struct fpu *fpu)
{
        struct xsave_struct *xstate = &fpu->state->xsave;
        struct i387_fxsave_struct *fx = &fpu->state->fxsave;

        /*
         * xsave header may indicate the init state of the FP.
         */
        if (use_xsave() &&
            !(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
                return;

        if (unlikely(fx->swd & X87_FSW_ES))
                asm volatile("fnclex");
        alternative_input(ASM_NOP8 ASM_NOP2,
                          " emms\n"             /* clear stack tags */
                          " fildl %%gs:0",      /* load to clear state */
                          X86_FEATURE_FXSAVE_LEAK);
}

static inline void clear_fpu_state(struct task_struct *tsk)
{
        fpu_clear(&tsk->thread.fpu);
}

static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
{
        int err;

        asm volatile("1: rex64/fxsave (%[fx])\n\t"
                     "2:\n"
                     ".section .fixup,\"ax\"\n"
                     "3: movl $-1,%[err]\n"
                     "   jmp 2b\n"
                     ".previous\n"
                     _ASM_EXTABLE(1b, 3b)
                     : [err] "=r" (err), "=m" (*fx)
#if 0 /* See comment in fpu_fxsave() below. */
                     : [fx] "r" (fx), "0" (0));
#else
                     : [fx] "cdaSDb" (fx), "0" (0));
#endif
        if (unlikely(err) &&
            __clear_user(fx, sizeof(struct i387_fxsave_struct)))
                err = -EFAULT;
        /* No need to clear here because the caller clears USED_MATH */
        return err;
}

static inline void fpu_fxsave(struct fpu *fpu)
{
        /* Using "rex64; fxsave %0" is broken because, if the memory operand
           uses any extended registers for addressing, a second REX prefix
           will be generated (to the assembler, rex64 followed by semicolon
           is a separate instruction), and hence the 64-bitness is lost. */
#if 0
        /* Using "fxsaveq %0" would be the ideal choice, but is only supported
           starting with gas 2.16. */
        __asm__ __volatile__("fxsaveq %0"
                             : "=m" (fpu->state->fxsave));
#elif 0
        /* Using, as a workaround, the properly prefixed form below isn't
           accepted by any binutils version so far released, complaining that
           the same type of prefix is used twice if an extended register is
           needed for addressing (fix submitted to mainline 2005-11-21). */
        __asm__ __volatile__("rex64/fxsave %0"
                             : "=m" (fpu->state->fxsave));
#else
        /* This, however, we can work around by forcing the compiler to select
           an addressing mode that doesn't require extended registers. */
        __asm__ __volatile__("rex64/fxsave (%1)"
                             : "=m" (fpu->state->fxsave)
                             : "cdaSDb" (&fpu->state->fxsave));
#endif
}

static inline void fpu_save_init(struct fpu *fpu)
{
        if (use_xsave())
                fpu_xsave(fpu);
        else
                fpu_fxsave(fpu);

        fpu_clear(fpu);
}

static inline void __save_init_fpu(struct task_struct *tsk)
{
        fpu_save_init(&tsk->thread.fpu);
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#else  /* CONFIG_X86_32 */

#ifdef CONFIG_MATH_EMULATION
extern void finit_soft_fpu(struct i387_soft_struct *soft);
#else
static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
#endif

static inline void tolerant_fwait(void)
{
        asm volatile("fnclex ; fwait");
}

/* perform fxrstor iff the processor has extended states, otherwise frstor */
static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
{
        /*
         * The "nop" is needed to make the instructions the same
         * length.
         */
        alternative_input(
                "nop ; frstor %1",
                "fxrstor %1",
                X86_FEATURE_FXSR,
                "m" (*fx));

        return 0;
}

/* We need a safe address that is cheap to find and that is already
   in L1 during context switch. The best choices are unfortunately
   different for UP and SMP */
#ifdef CONFIG_SMP
#define safe_address (__per_cpu_offset[0])
#else
#define safe_address (kstat_cpu(0).cpustat.user)
#endif

/*
 * These must be called with preempt disabled
 */
static inline void fpu_save_init(struct fpu *fpu)
{
        if (use_xsave()) {
                struct xsave_struct *xstate = &fpu->state->xsave;
                struct i387_fxsave_struct *fx = &fpu->state->fxsave;

                fpu_xsave(fpu);

                /*
                 * xsave header may indicate the init state of the FP.
                 */
                if (!(xstate->xsave_hdr.xstate_bv & XSTATE_FP))
                        goto end;

                if (unlikely(fx->swd & X87_FSW_ES))
                        asm volatile("fnclex");

                /*
                 * we can do a simple return here or be paranoid :)
                 */
                goto clear_state;
        }

        /* Use more nops than strictly needed in case the compiler
           varies code */
        alternative_input(
                "fnsave %[fx] ;fwait;" GENERIC_NOP8 GENERIC_NOP4,
                "fxsave %[fx]\n"
                "bt $7,%[fsw] ; jnc 1f ; fnclex\n1:",
                X86_FEATURE_FXSR,
                [fx] "m" (fpu->state->fxsave),
                [fsw] "m" (fpu->state->fxsave.swd) : "memory");
clear_state:
        /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception
           is pending. Clear the x87 state here by setting it to fixed
           values. safe_address is a random variable that should be in L1 */
        alternative_input(
                GENERIC_NOP8 GENERIC_NOP2,
                "emms\n\t"              /* clear stack tags */
                "fildl %[addr]",        /* set F?P to defined value */
                X86_FEATURE_FXSAVE_LEAK,
                [addr] "m" (safe_address));
end:
        ;
}

static inline void __save_init_fpu(struct task_struct *tsk)
{
        fpu_save_init(&tsk->thread.fpu);
        task_thread_info(tsk)->status &= ~TS_USEDFPU;
}

#endif	/* CONFIG_X86_64 */

static inline int fpu_fxrstor_checking(struct fpu *fpu)
{
        return fxrstor_checking(&fpu->state->fxsave);
}

static inline int fpu_restore_checking(struct fpu *fpu)
{
        if (use_xsave())
                return fpu_xrstor_checking(fpu);
        else
                return fpu_fxrstor_checking(fpu);
}

static inline int restore_fpu_checking(struct task_struct *tsk)
{
        return fpu_restore_checking(&tsk->thread.fpu);
}

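/*
 * Hypothetical usage sketch (not part of the original header): the
 * device-not-available fault path (math_state_restore()) would bring the
 * saved state back with something like
 *
 *	if (unlikely(restore_fpu_checking(tsk))) {
 *		stts();
 *		force_sig(SIGSEGV, tsk);
 *	}
 *
 * where a non-zero return means the CPU rejected the saved image.
 */
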
/*
 * Signal frame handlers...
 */
extern int save_i387_xstate(void __user *buf);
extern int restore_i387_xstate(void __user *buf);

static inline void __unlazy_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                __save_init_fpu(tsk);
                stts();
        } else
                tsk->fpu_counter = 0;
}

static inline void __clear_fpu(struct task_struct *tsk)
{
        if (task_thread_info(tsk)->status & TS_USEDFPU) {
                tolerant_fwait();
                task_thread_info(tsk)->status &= ~TS_USEDFPU;
                stts();
        }
}

static inline void kernel_fpu_begin(void)
{
        struct thread_info *me = current_thread_info();

        preempt_disable();
        if (me->status & TS_USEDFPU)
                __save_init_fpu(me->task);
        else
                clts();
}

static inline void kernel_fpu_end(void)
{
        stts();
        preempt_enable();
}

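/*
 * Hypothetical usage sketch (not part of the original header): kernel code
 * that wants to execute FPU/SSE instructions brackets them like this:
 *
 *	if (irq_fpu_usable()) {
 *		kernel_fpu_begin();
 *		... FPU/SSE instructions ...
 *		kernel_fpu_end();
 *	}
 *
 * kernel_fpu_begin() disables preemption, so the region in between must
 * not sleep.
 */
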
static inline bool irq_fpu_usable(void)
{
        struct pt_regs *regs;

        return !in_interrupt() || !(regs = get_irq_regs()) || \
                user_mode(regs) || (read_cr0() & X86_CR0_TS);
}

/*
 * Some instructions like VIA's padlock instructions generate a spurious
 * DNA fault but don't modify SSE registers. And these instructions get
 * used from interrupt context as well. To prevent these kernel instructions
 * in interrupt context from interacting wrongly with other user/kernel fpu
 * usage, they should only be used in the context of irq_ts_save/restore().
 */
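/*
 * Hypothetical usage sketch (not part of the original header): a caller
 * issuing such an instruction would wrap it as
 *
 *	int ts = irq_ts_save();
 *	... padlock or other DNA-faulting instruction ...
 *	irq_ts_restore(ts);
 *
 * so that a temporarily cleared CR0.TS is restored afterwards.
 */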
static inline int irq_ts_save(void)
{
        /*
         * If in process context and not atomic, we can take a spurious DNA fault.
         * Otherwise, doing clts() in process context requires disabling preemption
         * or some heavy lifting like kernel_fpu_begin()
         */
        if (!in_atomic())
                return 0;

        if (read_cr0() & X86_CR0_TS) {
                clts();
                return 1;
        }

        return 0;
}

static inline void irq_ts_restore(int TS_state)
{
        if (TS_state)
                stts();
}

#ifdef CONFIG_X86_64

static inline void save_init_fpu(struct task_struct *tsk)
{
        __save_init_fpu(tsk);
        stts();
}

#define unlazy_fpu	__unlazy_fpu
#define clear_fpu	__clear_fpu

#else  /* CONFIG_X86_32 */

/*
 * These disable preemption on their own and are safe
 */
static inline void save_init_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __save_init_fpu(tsk);
        stts();
        preempt_enable();
}

static inline void unlazy_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __unlazy_fpu(tsk);
        preempt_enable();
}

static inline void clear_fpu(struct task_struct *tsk)
{
        preempt_disable();
        __clear_fpu(tsk);
        preempt_enable();
}

#endif	/* CONFIG_X86_64 */

/*
 * i387 state interaction
 */
static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.cwd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
        }
}

static inline unsigned short get_fpu_swd(struct task_struct *tsk)
{
        if (cpu_has_fxsr) {
                return tsk->thread.fpu.state->fxsave.swd;
        } else {
                return (unsigned short)tsk->thread.fpu.state->fsave.swd;
        }
}

static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
{
        if (cpu_has_xmm) {
                return tsk->thread.fpu.state->fxsave.mxcsr;
        } else {
                return MXCSR_DEFAULT;
        }
}

static bool fpu_allocated(struct fpu *fpu)
{
        return fpu->state != NULL;
}

static inline int fpu_alloc(struct fpu *fpu)
{
        if (fpu_allocated(fpu))
                return 0;
        fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
        if (!fpu->state)
                return -ENOMEM;
        WARN_ON((unsigned long)fpu->state & 15);
        return 0;
}

static inline void fpu_free(struct fpu *fpu)
{
        if (fpu->state) {
                kmem_cache_free(task_xstate_cachep, fpu->state);
                fpu->state = NULL;
        }
}

static inline void fpu_copy(struct fpu *dst, struct fpu *src)
{
        memcpy(dst->state, src->state, xstate_size);
}

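/*
 * Hypothetical usage sketch (not part of the original header): duplicating
 * a task's FPU state, e.g. at fork time, could look roughly like
 *
 *	int err = fpu_alloc(&dst->thread.fpu);
 *	if (!err)
 *		fpu_copy(&dst->thread.fpu, &src->thread.fpu);
 *
 * where dst and src are struct task_struct pointers; fpu_alloc() returns 0
 * on success or -ENOMEM, and fpu_copy() assumes both buffers are allocated.
 */
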
#endif /* __ASSEMBLY__ */

#define PSHUFB_XMM5_XMM0 .byte 0x66, 0x0f, 0x38, 0x00, 0xc5
#define PSHUFB_XMM5_XMM6 .byte 0x66, 0x0f, 0x38, 0x00, 0xf5

#endif /* _ASM_X86_I387_H */