X-Git-Url: https://bbs.cooldavid.org/git/?a=blobdiff_plain;f=arch%2Fsh%2Fkernel%2Fprocess_32.c;h=aff5fe02e393c417a3c8646447bf0d7fd8ef504e;hb=a0458b07c17a10ea316e6ae65ab15b78bf5f44ee;hp=0673c4746be3996f58a33de506ca6e664284a044;hpb=a8a8a669ea13d792296737505adc43ccacf3a648;p=net-next-2.6.git

diff --git a/arch/sh/kernel/process_32.c b/arch/sh/kernel/process_32.c
index 0673c4746be..aff5fe02e39 100644
--- a/arch/sh/kernel/process_32.c
+++ b/arch/sh/kernel/process_32.c
@@ -288,8 +288,14 @@ static void ubc_set_tracing(int asid, unsigned long pc)
 __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev, struct task_struct *next)
 {
+	struct thread_struct *next_t = &next->thread;
+
 #if defined(CONFIG_SH_FPU)
 	unlazy_fpu(prev, task_pt_regs(prev));
+
+	/* we're going to use this soon, after a few expensive things */
+	if (next->fpu_counter > 5)
+		prefetch(&next_t->fpu.hard);
 #endif
 
 #ifdef CONFIG_MMU
@@ -321,6 +327,16 @@ __switch_to(struct task_struct *prev, struct task_struct *next)
 #endif
 	}
 
+#if defined(CONFIG_SH_FPU)
+	/* If the task has used fpu the last 5 timeslices, just do a full
+	 * restore of the math state immediately to avoid the trap; the
+	 * chances of needing FPU soon are obviously high now
+	 */
+	if (next->fpu_counter > 5) {
+		fpu_state_restore(task_pt_regs(next));
+	}
+#endif
+
 	return prev;
 }
 
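
Note (not part of the patch): the hunks above implement an FPU-restore heuristic. A task whose fpu_counter has passed 5, meaning it has used the FPU on recent consecutive timeslices, gets its FPU state prefetched and then restored eagerly in __switch_to(), instead of lazily through the FPU-disabled trap it would otherwise take on its first FPU instruction. The stand-alone C sketch below models only that heuristic and is an assumption-labelled illustration, not kernel code: struct task, use_fpu(), the fpu_loaded flag and the simulated switch_to()/fpu_state_restore() are invented here, while in the kernel the counter is tsk->fpu_counter and is bumped by the real fpu_state_restore() path.

/*
 * sleazy_fpu_sketch.c (hypothetical filename) - user-space model of the
 * "restore FPU eagerly after 5 FPU-using timeslices" heuristic.
 */
#include <stdio.h>

struct task {
	const char *name;
	unsigned char fpu_counter;	/* consecutive timeslices using the FPU */
	int fpu_loaded;			/* is FPU state currently live? */
	int fpu_used_this_slice;
};

/* Loads the task's FPU state; reached lazily (trap) or eagerly (switch). */
static void fpu_state_restore(struct task *t, const char *how)
{
	if (t->fpu_counter < 255)	/* the kernel field is a u8 */
		t->fpu_counter++;
	t->fpu_loaded = 1;
	printf("%-12s: %s restore (counter=%u)\n",
	       t->name, how, (unsigned)t->fpu_counter);
}

static void use_fpu(struct task *t)
{
	t->fpu_used_this_slice = 1;
	if (!t->fpu_loaded)		/* first FPU insn traps on the lazy path */
		fpu_state_restore(t, "lazy/trap");
}

static void switch_to(struct task *prev, struct task *next)
{
	/* a task that skipped the FPU this slice loses its streak */
	if (!prev->fpu_used_this_slice)
		prev->fpu_counter = 0;
	prev->fpu_used_this_slice = 0;
	prev->fpu_loaded = 0;		/* FPU state treated as unloaded after a switch */

	/*
	 * The heuristic from the hunk: a task that used the FPU for the
	 * last 5+ timeslices will very likely use it again, so restore
	 * now and spare it the trap.
	 */
	if (next->fpu_counter > 5)
		fpu_state_restore(next, "eager");
}

int main(void)
{
	struct task a = { "fpu-heavy", 0, 0, 0 };
	struct task b = { "integer-only", 0, 0, 0 };

	for (int slice = 0; slice < 8; slice++) {
		switch_to(&b, &a);	/* schedule the FPU-heavy task */
		use_fpu(&a);		/* it touches the FPU every slice */
		switch_to(&a, &b);	/* schedule the integer-only task */
	}
	return 0;
}

Compiled and run (e.g. cc sleazy_fpu_sketch.c && ./a.out), the sketch prints lazy/trap restores for the first few timeslices and switches to eager restores once the counter exceeds 5, which is the behaviour the patch aims for on SH.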