include/linux/slub_def.h (net-next-2.6.git, blame at commit "slab,slub: don't enable interrupts during early boot")

#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/types.h>
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
#include <linux/kmemtrace.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	NR_SLUB_STAT_ITEMS };
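
/*
 * Note (not part of the upstream header): with CONFIG_SLUB_STATS enabled,
 * mm/slub.c of this era exports each counter above per cpu through sysfs
 * as a lowercase file, e.g. /sys/kernel/slab/<cache>/alloc_fastpath.
 */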

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to first free per cpu object */
	struct page *page;	/* The slab from which we are allocating */
	int node;		/* The node of the page (or -1 for debug) */
	unsigned int offset;	/* Freepointer offset (in word units) */
	unsigned int objsize;	/* Size of an object (from kmem_cache) */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

struct kmem_cache_node {
	spinlock_t list_lock;	/* Protect partial list and nr_partial */
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
};

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};

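/*
 * Illustrative sketch (not part of this header): mm/slub.c from this
 * period packs the page order into the high bits of x and the object
 * count into the low bits, roughly as below. The OO_SHIFT value of 16 is
 * taken from contemporaneous slub.c and is an internal detail, not ABI.
 */
#if 0	/* example only */
#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)

static inline struct kmem_cache_order_objects oo_make(int order,
						unsigned long size)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + (PAGE_SIZE << order) / size
	};
	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
#endif
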
/*
 * Slab cache management.
 */
struct kmem_cache {
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	int size;		/* The size of an object including meta data */
	int objsize;		/* The size of an object without meta data */
	int offset;		/* Free pointer offset. */
	struct kmem_cache_order_objects oo;

	/*
	 * Avoid an extra cache line for UP, SMP and for the node local to
	 * struct kmem_cache.
	 */
	struct kmem_cache_node local_node;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	unsigned long min_partial;
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
	struct kobject kobj;	/* For sysfs */
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
	struct kmem_cache_node *node[MAX_NUMNODES];
#endif
#ifdef CONFIG_SMP
	struct kmem_cache_cpu *cpu_slab[NR_CPUS];
#else
	struct kmem_cache_cpu cpu_slab;
#endif
};

/*
 * Kmalloc subsystem.
 */
#if defined(ARCH_KMALLOC_MINALIGN) && ARCH_KMALLOC_MINALIGN > 8
#define KMALLOC_MIN_SIZE ARCH_KMALLOC_MINALIGN
#else
#define KMALLOC_MIN_SIZE 8
#endif

#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)

/*
 * Maximum kmalloc object size handled by SLUB. Larger object allocations
 * are passed through to the page allocator. The page allocator "fastpath"
 * is relatively slow, so this value must be high enough that performance
 * critical objects are still allocated through the SLUB fastpath.
 *
 * This should be dropped to PAGE_SIZE / 2 once the page allocator
 * "fastpath" becomes competitive with the slab allocator fastpaths.
 */
#define SLUB_MAX_SIZE (2 * PAGE_SIZE)

#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)

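/*
 * Worked example: with 4 KiB pages, SLUB_MAX_SIZE is 8192 bytes and
 * SLUB_PAGE_SHIFT is 14, so kmalloc_caches[] below has entries for
 * indices 0..13, and kmalloc_index(8192) == 13 lands on the last one.
 */
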
/*
 * We keep the general caches in an array of slab caches that are used for
 * 2^x bytes of allocations.
 */
extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];

/*
 * Sorry that the following has to be that ugly but some versions of GCC
 * have trouble with constant propagation and loops.
 */
static __always_inline int kmalloc_index(size_t size)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

#if KMALLOC_MIN_SIZE <= 64
	if (size > 64 && size <= 96)
		return 1;
	if (size > 128 && size <= 192)
		return 2;
#endif
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
/*
 * The following is only needed to support architectures with a larger page
 * size than 4k.
 */
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	return -1;

/*
 * What we really wanted to do and cannot do because of compiler issues is:
 * int i;
 * for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
 *	if (size <= (1 << i))
 *		return i;
 */
}
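
/*
 * Example mappings (with KMALLOC_MIN_SIZE == 8): kmalloc_index(8) == 3,
 * kmalloc_index(100) == 7 (the 128-byte cache), and kmalloc_index(192) == 2
 * (the special 192-byte cache that avoids rounding up to 256).
 */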

/*
 * Find the slab cache for a given combination of allocation flags and size.
 *
 * This ought to end up with a global pointer to the right cache
 * in kmalloc_caches.
 */
static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
{
	int index = kmalloc_index(size);

	if (index == 0)
		return NULL;

	return &kmalloc_caches[index];
}

#ifdef CONFIG_ZONE_DMA
#define SLUB_DMA __GFP_DMA
#else
/* Disable DMA functionality */
#define SLUB_DMA (__force gfp_t)0
#endif

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
#else
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags)
{
	return kmem_cache_alloc(s, gfpflags);
}
#endif

static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	void *ret = (void *) __get_free_pages(flags | __GFP_COMP, order);

	trace_kmalloc(_THIS_IP_, ret, size, PAGE_SIZE << order, flags);

	return ret;
}
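
/*
 * Note: get_order() rounds up to the next power-of-two number of pages, so
 * e.g. a 10000-byte request on a 4 KiB page system becomes an order-2
 * (16 KiB) allocation. __GFP_COMP makes the result a compound page so that
 * kfree() can later recover the allocation order from the page itself.
 */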

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	void *ret;

	if (__builtin_constant_p(size)) {
		if (size > SLUB_MAX_SIZE)
			return kmalloc_large(size, flags);

		if (!(flags & SLUB_DMA)) {
			struct kmem_cache *s = kmalloc_slab(size);

			if (!s)
				return ZERO_SIZE_PTR;

			ret = kmem_cache_alloc_notrace(s, flags);

			trace_kmalloc(_THIS_IP_, ret, size, s->size, flags);

			return ret;
		}
	}
	return __kmalloc(size, flags);
}
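
/*
 * Usage sketch: for a compile-time constant size the checks above are
 * folded away, so a call such as
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 * compiles down to kmem_cache_alloc_notrace() on the matching
 * kmalloc_caches[] entry, with no runtime size lookup (struct foo is a
 * stand-in for any caller-defined type).
 */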

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node);
void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_KMEMTRACE
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *s,
					   gfp_t gfpflags,
					   int node);
#else
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *s,
			      gfp_t gfpflags,
			      int node)
{
	return kmem_cache_alloc_node(s, gfpflags, node);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	void *ret;

	if (__builtin_constant_p(size) &&
		size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
		struct kmem_cache *s = kmalloc_slab(size);

		if (!s)
			return ZERO_SIZE_PTR;

		ret = kmem_cache_alloc_node_notrace(s, flags, node);

		trace_kmalloc_node(_THIS_IP_, ret,
				   size, s->size, flags, node);

		return ret;
	}
	return __kmalloc_node(size, flags, node);
}
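
/*
 * Usage sketch: node-aware callers pass an explicit NUMA node, e.g.
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 *
 * which prefers memory local to that node (data and cpu are stand-ins
 * for caller state).
 */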
#endif

void __init kmem_cache_init_late(void);

#endif /* _LINUX_SLUB_DEF_H */