#ifndef __LINUX_GFP_H
#define __LINUX_GFP_H

#include <linux/mmzone.h>
#include <linux/stddef.h>
#include <linux/linkage.h>
#include <linux/config.h>

struct vm_area_struct;

/*
 * GFP bitmasks.
 */
/* Zone modifiers in GFP_ZONEMASK (see linux/mmzone.h - low two bits) */
#define __GFP_DMA	0x01u
#define __GFP_HIGHMEM	0x02u

/*
 * Action modifiers - these do not change the zoning.
 *
 * __GFP_REPEAT: Try hard to allocate the memory, but the allocation attempt
 * _might_ fail.  This depends upon the particular VM implementation.
 *
 * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
 * cannot handle allocation failures.
 *
 * __GFP_NORETRY: The VM implementation must not retry indefinitely.
 */
#define __GFP_WAIT	0x10u	/* Can wait and reschedule? */
#define __GFP_HIGH	0x20u	/* Should access emergency pools? */
#define __GFP_IO	0x40u	/* Can start physical IO? */
#define __GFP_FS	0x80u	/* Can call down to low-level FS? */
#define __GFP_COLD	0x100u	/* Cache-cold page required */
#define __GFP_NOWARN	0x200u	/* Suppress page allocation failure warning */
#define __GFP_REPEAT	0x400u	/* Retry the allocation.  Might fail */
#define __GFP_NOFAIL	0x800u	/* Retry forever.  Cannot fail */
#define __GFP_NORETRY	0x1000u	/* Do not retry.  Might fail */
#define __GFP_NO_GROW	0x2000u	/* Slab internal usage */
#define __GFP_COMP	0x4000u	/* Add compound page metadata */
#define __GFP_ZERO	0x8000u	/* Return zeroed page on success */
#define __GFP_NOMEMALLOC 0x10000u /* Don't use emergency reserves */
#define __GFP_NORECLAIM	0x20000u /* No zone reclaim during allocation */
#define __GFP_HARDWALL	0x40000u /* Enforce hardwall cpuset memory allocs */
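
/*
 * Usage sketch (illustrative only, not part of this header): picking
 * between the retry modifiers above.  The error handling shown is a
 * hypothetical caller's, and process context is assumed.
 *
 *	// Best-effort: fail fast and keep quiet about it.
 *	struct page *page = alloc_page(GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 *	if (!page)
 *		return -ENOMEM;		// the caller can cope with failure
 *
 *	// Must not fail: the allocator retries forever (use sparingly).
 *	struct page *vital = alloc_page(GFP_KERNEL | __GFP_NOFAIL);
 */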

#define __GFP_BITS_SHIFT 20	/* Room for 20 __GFP_FOO bits */
#define __GFP_BITS_MASK ((1 << __GFP_BITS_SHIFT) - 1)

/* If you forget to add a bitmask here, the kernel will crash, period. */
#define GFP_LEVEL_MASK (__GFP_WAIT|__GFP_HIGH|__GFP_IO|__GFP_FS| \
			__GFP_COLD|__GFP_NOWARN|__GFP_REPEAT| \
			__GFP_NOFAIL|__GFP_NORETRY|__GFP_NO_GROW|__GFP_COMP| \
			__GFP_NOMEMALLOC|__GFP_NORECLAIM|__GFP_HARDWALL)

#define GFP_ATOMIC	(__GFP_HIGH)
#define GFP_NOIO	(__GFP_WAIT)
#define GFP_NOFS	(__GFP_WAIT | __GFP_IO)
#define GFP_KERNEL	(__GFP_WAIT | __GFP_IO | __GFP_FS)
#define GFP_USER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
#define GFP_HIGHUSER	(__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
			 __GFP_HIGHMEM)
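
/*
 * Sketch of how the composite flags map to calling context (illustrative,
 * not part of this header; 'lock' is a hypothetical spinlock):
 *
 *	struct page *page;
 *
 *	page = alloc_page(GFP_KERNEL);	// process context, may sleep
 *
 *	spin_lock(&lock);
 *	page = alloc_page(GFP_ATOMIC);	// may not sleep: no __GFP_WAIT,
 *	spin_unlock(&lock);		// may dip into emergency pools
 */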

/* This flag indicates that the buffer will be suitable for DMA.  It is
   ignored on some platforms and used as appropriate on others. */

#define GFP_DMA		__GFP_DMA

/*
 * There is only one page allocator, but two main namespaces into it.
 * The alloc_page*() variants return 'struct page *' and as such can
 * allocate highmem pages; the *get*page*() variants return virtual
 * kernel addresses to the allocated page(s).
 */
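
/*
 * Sketch contrasting the two namespaces (illustrative, not part of this
 * header).  A highmem page must be temporarily mapped with kmap() before
 * the kernel can touch its contents; a __get_free_page() allocation is
 * already mapped, so __GFP_HIGHMEM must not be passed to it.
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER);	// may be in highmem
 *	void *va = kmap(page);				// map before use
 *	memset(va, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 *	unsigned long addr = __get_free_page(GFP_KERNEL);
 *	memset((void *)addr, 0, PAGE_SIZE);		// directly usable
 */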

/*
 * We get the zone list from the current node and the gfp_mask.
 * This zone list contains a maximum of MAXNODES*MAX_NR_ZONES zones.
 *
 * For the normal case of non-DISCONTIGMEM systems the NODE_DATA() gets
 * optimized to &contig_page_data at compile-time.
 */

#ifndef HAVE_ARCH_FREE_PAGE
static inline void arch_free_page(struct page *page, int order) { }
#endif

extern struct page *
FASTCALL(__alloc_pages(gfp_t, unsigned int, struct zonelist *));

static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
						unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return __alloc_pages(gfp_mask, order,
		NODE_DATA(nid)->node_zonelists + (gfp_mask & GFP_ZONEMASK));
}
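
/*
 * Usage sketch (illustrative; 'nid' would come from the caller): allocate
 * four contiguous pages (order 2) on a given node and release them again.
 *
 *	struct page *page = alloc_pages_node(nid, GFP_KERNEL, 2);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	__free_pages(page, 2);		// free with the matching order
 */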

#ifdef CONFIG_NUMA
extern struct page *alloc_pages_current(gfp_t gfp_mask, unsigned order);

static inline struct page *
alloc_pages(gfp_t gfp_mask, unsigned int order)
{
	if (unlikely(order >= MAX_ORDER))
		return NULL;

	return alloc_pages_current(gfp_mask, order);
}
extern struct page *alloc_page_vma(gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr);
#else
#define alloc_pages(gfp_mask, order) \
		alloc_pages_node(numa_node_id(), gfp_mask, order)
#define alloc_page_vma(gfp_mask, vma, addr) alloc_pages(gfp_mask, 0)
#endif
#define alloc_page(gfp_mask) alloc_pages(gfp_mask, 0)
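
/*
 * Usage sketch (illustrative; 'vma' and 'addr' come from a hypothetical
 * fault handler): alloc_page_vma() honours the memory policy of the VMA
 * on NUMA kernels and degenerates to alloc_page() otherwise, as the
 * #else branch above shows.
 *
 *	struct page *page = alloc_page_vma(GFP_HIGHUSER, vma, addr);
 *	if (!page)
 *		return -ENOMEM;
 */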

extern unsigned long FASTCALL(__get_free_pages(gfp_t gfp_mask, unsigned int order));
extern unsigned long FASTCALL(get_zeroed_page(gfp_t gfp_mask));

#define __get_free_page(gfp_mask) \
		__get_free_pages((gfp_mask), 0)

#define __get_dma_pages(gfp_mask, order) \
		__get_free_pages((gfp_mask) | GFP_DMA, (order))
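
/*
 * Usage sketch (illustrative): get_zeroed_page() returns a kernel virtual
 * address of a page already cleared to zero, saving an explicit memset();
 * 'struct foo' is a hypothetical type that fits within one page.
 *
 *	unsigned long addr = get_zeroed_page(GFP_KERNEL);
 *	if (!addr)
 *		return -ENOMEM;
 *	struct foo *f = (struct foo *)addr;	// fields start out zeroed
 *	...
 *	free_page(addr);
 */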

extern void FASTCALL(__free_pages(struct page *page, unsigned int order));
extern void FASTCALL(free_pages(unsigned long addr, unsigned int order));
extern void FASTCALL(free_hot_page(struct page *page));
extern void FASTCALL(free_cold_page(struct page *page));

#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
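
/*
 * Sketch of the pairing rules (illustrative): struct page allocations go
 * back through __free_pages(), virtual-address allocations go back through
 * free_pages(), and the order must match the allocation in both cases.
 *
 *	struct page *page = alloc_pages(GFP_KERNEL, 3);
 *	...
 *	__free_pages(page, 3);
 *
 *	unsigned long addr = __get_free_pages(GFP_KERNEL, 1);
 *	...
 *	free_pages(addr, 1);
 */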

void page_alloc_init(void);
#ifdef CONFIG_NUMA
void drain_remote_pages(void);
#else
static inline void drain_remote_pages(void) { }
#endif

#endif /* __LINUX_GFP_H */