#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 * cpuset interface
 *
 * Copyright (C) 2003 BULL SA
 * Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/cgroup.h>
#include <linux/mm.h>

#ifdef CONFIG_CPUSETS

extern int number_of_cpusets;	/* How many cpusets are defined in system? */

extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern int cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask);
extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_softwall(node, gfp_mask);
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return number_of_cpusets <= 1 ||
		__cpuset_node_allowed_hardwall(node, gfp_mask);
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
}

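/*
 * Illustrative usage (a sketch, not part of the original header): an
 * allocator path typically skips zones that the current task's cpuset
 * does not permit, along the lines of
 *
 *	if (!cpuset_zone_allowed_softwall(zone, gfp_mask))
 *		continue;
 *
 * Roughly, the softwall check may also accept nodes in the nearest
 * hardwalled ancestor cpuset for !__GFP_HARDWALL requests and may sleep;
 * the hardwall check is stricter and never sleeps.
 */
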
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

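/*
 * Note (illustrative, not from the original header): the macro above is
 * the fast path of the per-cpuset memory_pressure meter. It only calls
 * __cpuset_memory_pressure_bump() when the meter has been enabled via
 * cpuset_memory_pressure_enabled, so a caller about to enter synchronous
 * (direct) page reclaim can simply do
 *
 *	cpuset_memory_pressure_bump();
 *
 * at negligible cost when the feature is off.
 */
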
extern const struct file_operations proc_cpuset_operations;
struct seq_file;
extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return current->flags & PF_SPREAD_PAGE;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return current->flags & PF_SPREAD_SLAB;
}

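/*
 * Illustrative sketch (not part of the original header): page cache and
 * slab allocation paths consult the spread flags to round-robin their
 * allocations over the cpuset's memory nodes, roughly
 *
 *	if (cpuset_do_page_mem_spread()) {
 *		int nid = cpuset_mem_spread_node();
 *		page = alloc_pages_exact_node(nid, gfp, 0);
 *	}
 *
 * where alloc_pages_exact_node() stands in for any node-targeted
 * allocation.
 */
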
extern int current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_task_mems_allowed(struct task_struct *p);

/*
 * Reading current->mems_allowed and mempolicy in the fast path must be
 * protected by get_mems_allowed().
 */
static inline void get_mems_allowed(void)
{
	current->mems_allowed_change_disable++;

	/*
	 * Ensure that the reads of mems_allowed and mempolicy happen after
	 * the update of ->mems_allowed_change_disable.
	 *
	 * The write-side task sees that ->mems_allowed_change_disable is
	 * non-zero, knows the read-side task is reading mems_allowed or
	 * mempolicy, and so clears the old bits lazily.
	 */
	smp_mb();
}

static inline void put_mems_allowed(void)
{
	/*
	 * Ensure that the reads of mems_allowed and mempolicy complete
	 * before mems_allowed_change_disable is decremented.
	 *
	 * The write-side task then knows that the read-side task is still
	 * reading mems_allowed or mempolicy and does not clear the old bits
	 * in the nodemask.
	 */
	smp_mb();
	--ACCESS_ONCE(current->mems_allowed_change_disable);
}

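/*
 * Illustrative usage of the read-side bracket above (a sketch, not part of
 * the original header): code that walks current->mems_allowed or the
 * task's mempolicy wraps the fast path like
 *
 *	get_mems_allowed();
 *	nid = next_node(nid, current->mems_allowed);
 *	... allocate from nid ...
 *	put_mems_allowed();
 *
 * so that a concurrent cpuset update defers clearing the old nodemask bits
 * until the reader is done.
 */
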
static inline void set_mems_allowed(nodemask_t nodemask)
{
	task_lock(current);
	current->mems_allowed = nodemask;
	task_unlock(current);
}
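
/*
 * A hedged example (not from the original header) of the write side: a
 * kernel thread that should be allowed to allocate on any node with
 * memory could do
 *
 *	set_mems_allowed(node_states[N_HIGH_MEMORY]);
 *
 * task_lock() above guards the update of current->mems_allowed against
 * readers that also take task_lock().
 */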

#else /* !CONFIG_CPUSETS */

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
{
	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
	return cpumask_any(cpu_active_mask);
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_HIGH_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
{
	return 1;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline int current_cpuset_is_being_rebound(void)
{
	return 0;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_task_mems_allowed(struct task_struct *p)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline void get_mems_allowed(void)
{
}

static inline void put_mems_allowed(void)
{
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */