#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * when kmalloc is called with a size that can be established at
 * compile time.
 */
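
/*
 * Illustrative example (assumes the default kmalloc_sizes.h table): a
 * call such as
 *
 *	buf = kmalloc(100, GFP_KERNEL);
 *
 * has a compile-time constant size, so the inline kmalloc() below
 * resolves it at build time to the smallest general cache of at least
 * 100 bytes (the 128-byte cache) and allocates from it directly, with
 * no runtime size lookup.
 */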

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
#include <linux/kmemtrace.h>

#ifndef ARCH_KMALLOC_MINALIGN
/*
 * Enforce a minimum alignment for the kmalloc caches.
 * Usually, the kmalloc caches are cache_line_size() aligned, except when
 * DEBUG and FORCED_DEBUG are enabled, in which case they are
 * BYTES_PER_WORD aligned.
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * ARCH_KMALLOC_MINALIGN allows that.
 * Note that increasing this value may disable some debug features.
 */
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif
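
/*
 * Hypothetical example (for illustration; not part of this header): an
 * architecture that DMAs into kmalloc'd buffers could raise the minimum
 * in its asm/cache.h, before this header is included:
 *
 *	#define ARCH_KMALLOC_MINALIGN	L1_CACHE_BYTES
 *
 * so that every kmalloc allocation starts on its own cache line.
 */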

#ifndef ARCH_SLAB_MINALIGN
/*
 * Enforce a minimum alignment for all caches.
 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
 * If possible, do not define this for CONFIG_DEBUG_SLAB builds, as it
 * disables some debug features.
 */
#define ARCH_SLAB_MINALIGN 0
#endif
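
/*
 * Hypothetical example (for illustration; not part of this header): an
 * arch that faults on sub-word loads might define
 *
 *	#define ARCH_SLAB_MINALIGN	__alignof__(unsigned long)
 *
 * to force word alignment on every cache's objects.
 */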

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) per-cpu data, touched during every alloc/free */
	struct array_cache *array[NR_CPUS];
/* 2) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 3) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 4) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 5) cache creation/removal */
	const char *name;
	struct list_head next;

/* 6) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields; the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

	/*
	 * We put nodelists[] at the end of kmem_cache, because we want to
	 * size this array to nr_node_ids slots instead of MAX_NUMNODES
	 * (see kmem_cache_init()).
	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of nodes.
	 */
	struct kmem_list3 *nodelists[MAX_NUMNODES];
	/*
	 * Do not add fields after nodelists[]
	 */
};
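
/*
 * Sketch of what reciprocal_buffer_size is for (see obj_to_index() in
 * mm/slab.c): converting an object's offset inside a slab to its index
 * would need a division by buffer_size on every lookup, so a reciprocal
 * is precomputed once and the hot path does
 *
 *	index = reciprocal_divide(offset, cachep->reciprocal_buffer_size);
 *
 * instead of offset / cachep->buffer_size.
 */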
/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];

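/*
 * For reference, a sketch (not a verbatim copy of mm/slab.c) of how the
 * table is built from the same kmalloc_sizes.h used below:
 *
 *	struct cache_sizes malloc_sizes[] = {
 *	#define CACHE(x) { .cs_size = (x) },
 *	#include <linux/kmalloc_sizes.h>
 *		CACHE(ULONG_MAX)
 *	#undef CACHE
 *	};
 *
 * The sizes are ascending, so the index i computed in kmalloc() and
 * kmalloc_node() selects the smallest general cache that fits.
 */
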
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

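/*
 * The _notrace variants below are there so that the inline kmalloc()
 * can emit a single kmalloc trace event at its call site rather than a
 * nested kmem_cache_alloc event; without CONFIG_TRACING they collapse
 * into the plain allocator calls and slab_buffer_size() reports 0.
 */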
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
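
/*
 * Illustrative usage (hypothetical callers, not part of this header):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 * has a constant size, so the branch above is folded at compile time
 * into a direct kmem_cache_alloc_notrace() from the matching general
 * cache, while
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *
 * with a runtime-variable len goes through __kmalloc(). Both are freed
 * with kfree().
 */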
#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
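
/*
 * Illustrative usage (hypothetical caller, not part of this header):
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, numa_node_id());
 *
 * allocates from the current NUMA node's lists, keeping the object
 * local to the CPU that will touch it.
 */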

#endif /* CONFIG_NUMA */

#endif /* _LINUX_SLAB_DEF_H */