#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/errno.h>

/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */

/*
 * Both the MPOL_* mempolicy mode and the MPOL_F_* optional mode flags are
 * passed by the user to either set_mempolicy() or mbind() in a single
 * 'int' argument.  The MPOL_MODE_FLAGS macro determines the legal set of
 * optional mode flags.
 */

/* Policies */
enum {
	MPOL_DEFAULT,
	MPOL_PREFERRED,
	MPOL_BIND,
	MPOL_INTERLEAVE,
	MPOL_MAX,	/* always last member of enum */
};

enum mpol_rebind_step {
	MPOL_REBIND_ONCE,	/* do rebind work at once (not in two steps) */
	MPOL_REBIND_STEP1,	/* first step (set all the newly allowed nodes) */
	MPOL_REBIND_STEP2,	/* second step (clear all the disallowed nodes) */
	MPOL_REBIND_NSTEP,
};

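/*
 * Illustrative sketch (hypothetical, compiled out): a caller that cannot
 * tolerate a transiently empty nodemask would drive the two-step rebind
 * itself, first setting the newly allowed nodes, then clearing the
 * disallowed ones.  'tsk' and 'newmems' are assumed to come from the
 * caller's context (e.g. a cpuset update); mpol_rebind_task() is the
 * kernel-internal hook declared further down in this header.
 */
#if 0
static void example_two_step_rebind(struct task_struct *tsk,
				    const nodemask_t *newmems)
{
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1); /* set new nodes */
	mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2); /* clear old nodes */
}
#endif
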
/* Flags for set_mempolicy */
#define MPOL_F_STATIC_NODES	(1 << 15)
#define MPOL_F_RELATIVE_NODES	(1 << 14)

/*
 * MPOL_MODE_FLAGS is the union of all possible optional mode flags passed
 * to either set_mempolicy() or mbind().
 */
#define MPOL_MODE_FLAGS	(MPOL_F_STATIC_NODES | MPOL_F_RELATIVE_NODES)
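
/*
 * Illustrative sketch (hypothetical userspace code, compiled out): the
 * mode and the optional mode flags travel in the same 'int', so a caller
 * simply ORs them together.  Assumes the set_mempolicy(2) wrapper from
 * libnuma's <numaif.h>.
 */
#if 0
#include <numaif.h>

static long interleave_static(const unsigned long *nodemask,
			      unsigned long maxnode)
{
	/* MPOL_F_STATIC_NODES: don't remap the mask on cpuset changes */
	return set_mempolicy(MPOL_INTERLEAVE | MPOL_F_STATIC_NODES,
			     nodemask, maxnode);
}
#endif
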
/* Flags for get_mempolicy */
#define MPOL_F_NODE	(1<<0)	/* return next IL mode instead of node mask */
#define MPOL_F_ADDR	(1<<1)	/* look up vma using address */
#define MPOL_F_MEMS_ALLOWED (1<<2)	/* return allowed memories */

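/*
 * Illustrative sketch (hypothetical userspace code, compiled out):
 * querying the policy of the VMA containing 'addr' with MPOL_F_ADDR,
 * via the get_mempolicy(2) wrapper from libnuma's <numaif.h>.
 */
#if 0
#include <numaif.h>

static int policy_mode_of(void *addr)
{
	int mode;

	if (get_mempolicy(&mode, NULL, 0, addr, MPOL_F_ADDR) < 0)
		return -1;
	return mode;	/* one of the MPOL_* modes above */
}
#endif
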
/* Flags for mbind */
#define MPOL_MF_STRICT	(1<<0)	/* Verify existing pages in the mapping */
#define MPOL_MF_MOVE	(1<<1)	/* Move pages owned by this process to conform to mapping */
#define MPOL_MF_MOVE_ALL (1<<2)	/* Move every page to conform to mapping */
#define MPOL_MF_INTERNAL (1<<3)	/* Internal flags start here */

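/*
 * Illustrative sketch (hypothetical userspace code, compiled out):
 * binding a range and asking the kernel to migrate the pages this
 * process owns within it, failing hard on unmovable pages.  Assumes the
 * mbind(2) wrapper from libnuma's <numaif.h>.
 */
#if 0
#include <numaif.h>

static long bind_and_move(void *start, unsigned long len,
			  const unsigned long *nodemask,
			  unsigned long maxnode)
{
	return mbind(start, len, MPOL_BIND, nodemask, maxnode,
		     MPOL_MF_MOVE | MPOL_MF_STRICT);
}
#endif
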
/*
 * Internal flags that share the struct mempolicy flags word with
 * "mode flags".  These flags are allocated from bit 0 up, as they
 * are never OR'ed into the mode in mempolicy API arguments.
 */
#define MPOL_F_SHARED	(1 << 0)	/* identify shared policies */
#define MPOL_F_LOCAL	(1 << 1)	/* preferred local allocation */
#define MPOL_F_REBINDING (1 << 2)	/* identify policies in rebinding */

#ifdef __KERNEL__

#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used.  Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state.  All vma manipulation is somewhat protected by a down_read
 * on mmap_sem.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted.  A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage.  The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
	atomic_t refcnt;
	unsigned short mode;	/* See MPOL_* above */
	unsigned short flags;	/* See set_mempolicy() MPOL_F_* above */
	union {
		short		preferred_node;	/* preferred */
		nodemask_t	nodes;		/* interleave/bind */
		/* undefined for default */
	} v;
	union {
		nodemask_t cpuset_mems_allowed;	/* relative to these nodes */
		nodemask_t user_nodemask;	/* nodemask passed by user */
	} w;
};

/*
 * Support for managing mempolicy data objects (clone, copy, destroy).
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
	if (pol)
		__mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
	return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
	if (mpol_needs_cond_ref(pol))
		__mpol_put(pol);
}

extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
					  struct mempolicy *frompol);
static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol,
					       struct mempolicy *frompol)
{
	if (!frompol)
		return frompol;
	return __mpol_cond_copy(tompol, frompol);
}

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
	if (pol)
		pol = __mpol_dup(pol);
	return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)
#define vma_set_policy(vma, pol) ((vma)->vm_policy = (pol))

static inline void mpol_get(struct mempolicy *pol)
{
	if (pol)
		atomic_inc(&pol->refcnt);
}

extern int __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	if (a == b)
		return 1;
	return __mpol_equal(a, b);
}

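/*
 * Illustrative sketch (hypothetical, compiled out): the reference-count
 * discipline described above.  mpol_dup() hands back a copy whose
 * refcnt is 1 and owned by the caller; mpol_get()/mpol_put() take and
 * drop references on an existing policy.
 */
#if 0
static struct mempolicy *example_copy(struct mempolicy *src)
{
	struct mempolicy *copy = mpol_dup(src);

	if (IS_ERR(copy))
		return NULL;	/* __mpol_dup() may return an ERR_PTR */
	return copy;		/* caller releases with mpol_put(copy) */
}
#endif
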
/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas.  The vmas
 * carry the policy.  As a special twist the pseudo mm is indexed in
 * pages, not bytes, so that we can work with shared memory segments
 * bigger than unsigned long.
 */

struct sp_node {
	struct rb_node nd;
	unsigned long start, end;
	struct mempolicy *policy;
};

struct shared_policy {
	struct rb_root root;
	spinlock_t lock;
};

void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
				struct vm_area_struct *vma,
				struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
					    unsigned long idx);

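/*
 * Illustrative sketch (hypothetical helper, compiled out): because the
 * shared-policy pseudo mm is indexed in pages, lookups take a page
 * offset into the shared object rather than a byte address, e.g. for an
 * address within a mapping of that object:
 */
#if 0
static struct mempolicy *example_lookup(struct shared_policy *sp,
					struct vm_area_struct *vma,
					unsigned long addr)
{
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	return mpol_shared_policy_lookup(sp, idx);
}
#endif
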
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
				enum mpol_rebind_step step);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern void mpol_fix_fork_child_flag(struct task_struct *p);

extern struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask);
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_nodemask_intersects(struct task_struct *tsk,
				const nodemask_t *mask);
extern unsigned slab_node(struct mempolicy *policy);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
	if (k > policy_zone && k != ZONE_MOVABLE)
		policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm,
	const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol, int no_context);

extern int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
			int no_context);
#endif

/* Check if a vma is migratable */
static inline int vma_migratable(struct vm_area_struct *vma)
{
	if (vma->vm_flags & (VM_IO|VM_HUGETLB|VM_PFNMAP|VM_RESERVED))
		return 0;
	/*
	 * Migration allocates pages in the highest zone.  If we cannot
	 * do so then migration (at least from node to node) is not
	 * possible.
	 */
	if (vma->vm_file &&
		gfp_zone(mapping_gfp_mask(vma->vm_file->f_mapping))
								< policy_zone)
		return 0;
	return 1;
}

#else

struct mempolicy {};

static inline int mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
	return 1;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to,
						struct mempolicy *from)
{
	return from;
}

static inline void mpol_get(struct mempolicy *pol)
{
}

static inline struct mempolicy *mpol_dup(struct mempolicy *old)
{
	return NULL;
}

struct shared_policy {};

static inline int mpol_set_shared_policy(struct shared_policy *info,
					struct vm_area_struct *vma,
					struct mempolicy *new)
{
	return -EINVAL;
}

static inline void mpol_shared_policy_init(struct shared_policy *sp,
						struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
	return NULL;
}

#define vma_policy(vma) NULL
#define vma_set_policy(vma, pol) do {} while(0)

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
					const nodemask_t *new,
					enum mpol_rebind_step step)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline void mpol_fix_fork_child_flag(struct task_struct *p)
{
}

static inline struct zonelist *huge_zonelist(struct vm_area_struct *vma,
				unsigned long addr, gfp_t gfp_flags,
				struct mempolicy **mpol, nodemask_t **nodemask)
{
	*mpol = NULL;
	*nodemask = NULL;
	return node_zonelist(0, gfp_flags);
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
	return false;
}

static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
			const nodemask_t *mask)
{
	return false;
}

static inline int do_migrate_pages(struct mm_struct *mm,
			const nodemask_t *from_nodes,
			const nodemask_t *to_nodes, int flags)
{
	return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol,
				int no_context)
{
	return 1;	/* error */
}

static inline int mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol,
				int no_context)
{
	return 0;
}
#endif

#endif /* CONFIG_NUMA */
#endif /* __KERNEL__ */

#endif