]>
Commit | Line | Data |
---|---|---|
cbf6b1ba PM |
1 | #include <linux/mm.h> |
2 | #include <linux/kernel.h> | |
5a0e3ad6 | 3 | #include <linux/slab.h> |
cbf6b1ba PM |
4 | #include <linux/sched.h> |
5 | ||
0ea820cf PM |
/*
 * Slab cache backing per-task extended (FPU) state.  Created by
 * arch_task_cache_init(); stays NULL when xstate_size is 0 (no FPU,
 * no soft-FP emulation).  Do not initialise globals to NULL/0 —
 * they live in BSS and are zeroed anyway (kernel coding style).
 */
struct kmem_cache *task_xstate_cachep;

/* Size in bytes of one task's xstate area; set by init_thread_xstate(). */
unsigned int xstate_size;
8 | ||
9 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | |
10 | { | |
11 | *dst = *src; | |
12 | ||
13 | if (src->thread.xstate) { | |
14 | dst->thread.xstate = kmem_cache_alloc(task_xstate_cachep, | |
15 | GFP_KERNEL); | |
16 | if (!dst->thread.xstate) | |
17 | return -ENOMEM; | |
18 | memcpy(dst->thread.xstate, src->thread.xstate, xstate_size); | |
19 | } | |
20 | ||
21 | return 0; | |
22 | } | |
23 | ||
24 | void free_thread_xstate(struct task_struct *tsk) | |
25 | { | |
26 | if (tsk->thread.xstate) { | |
27 | kmem_cache_free(task_xstate_cachep, tsk->thread.xstate); | |
28 | tsk->thread.xstate = NULL; | |
29 | } | |
30 | } | |
31 | ||
cbf6b1ba PM |
#if THREAD_SHIFT < PAGE_SHIFT
/* thread_info is smaller than a page here, so carve it from its own slab. */
static struct kmem_cache *thread_info_cache;

35 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | |
36 | { | |
37 | struct thread_info *ti; | |
38 | ||
39 | ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | |
40 | if (unlikely(ti == NULL)) | |
41 | return NULL; | |
42 | #ifdef CONFIG_DEBUG_STACK_USAGE | |
43 | memset(ti, 0, THREAD_SIZE); | |
44 | #endif | |
45 | return ti; | |
46 | } | |
47 | ||
/*
 * Free a slab-allocated thread_info.  The task's extended (FPU) state
 * is released first, while ti->task is still valid to dereference.
 */
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	kmem_cache_free(thread_info_cache, ti);
}
53 | ||
54 | void thread_info_cache_init(void) | |
55 | { | |
56 | thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE, | |
a3705799 | 57 | THREAD_SIZE, SLAB_PANIC, NULL); |
cbf6b1ba PM |
58 | } |
59 | #else | |
60 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | |
61 | { | |
62 | #ifdef CONFIG_DEBUG_STACK_USAGE | |
63 | gfp_t mask = GFP_KERNEL | __GFP_ZERO; | |
64 | #else | |
65 | gfp_t mask = GFP_KERNEL; | |
66 | #endif | |
67 | return (struct thread_info *)__get_free_pages(mask, THREAD_SIZE_ORDER); | |
68 | } | |
69 | ||
/*
 * Free a page-allocated thread_info.  The task's extended (FPU) state
 * is released first, while ti->task is still valid to dereference.
 */
void free_thread_info(struct thread_info *ti)
{
	free_thread_xstate(ti->task);
	free_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
75 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | |
0ea820cf PM |
76 | |
77 | void arch_task_cache_init(void) | |
78 | { | |
79 | if (!xstate_size) | |
80 | return; | |
81 | ||
82 | task_xstate_cachep = kmem_cache_create("task_xstate", xstate_size, | |
83 | __alignof__(union thread_xstate), | |
84 | SLAB_PANIC | SLAB_NOTRACK, NULL); | |
85 | } | |
86 | ||
/*
 * HAVE_SOFTFP is non-zero when FPU emulation is built in, so tasks on
 * FPU-less CPUs still carry (soft) floating-point state.
 */
#ifdef CONFIG_SH_FPU_EMU
# define HAVE_SOFTFP 1
#else
# define HAVE_SOFTFP 0
#endif
92 | ||
93 | void init_thread_xstate(void) | |
94 | { | |
95 | if (boot_cpu_data.flags & CPU_HAS_FPU) | |
96 | xstate_size = sizeof(struct sh_fpu_hard_struct); | |
97 | else if (HAVE_SOFTFP) | |
98 | xstate_size = sizeof(struct sh_fpu_soft_struct); | |
99 | else | |
100 | xstate_size = 0; | |
101 | } |