1/*
2 * linux/mm/slab.c
3 * Written by Mark Hemment, 1996/97.
4 * (markhe@nextd.demon.co.uk)
5 *
6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7 *
8 * Major cleanup, different bufctl logic, per-cpu arrays
9 * (c) 2000 Manfred Spraul
10 *
11 * Cleanup, make the head arrays unconditional, preparation for NUMA
12 * (c) 2002 Manfred Spraul
13 *
14 * An implementation of the Slab Allocator as described in outline in;
15 * UNIX Internals: The New Frontiers by Uresh Vahalia
16 * Pub: Prentice Hall ISBN 0-13-101908-2
17 * or with a little more detail in;
18 * The Slab Allocator: An Object-Caching Kernel Memory Allocator
19 * Jeff Bonwick (Sun Microsystems).
20 * Presented at: USENIX Summer 1994 Technical Conference
21 *
22 * The memory is organized in caches, one cache for each object type.
23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24 * Each cache consists of many slabs (they are small (usually one
25 * page long) and always contiguous), and each slab contains multiple
26 * initialized objects.
27 *
28 * This means that your constructor is used only for newly allocated
29 * slabs and you must pass objects with the same initializations to
30 * kmem_cache_free.
31 *
32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
34 * normal). If you need a special memory type, then you must create a new
34 * cache for that memory type.
35 *
36 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37 * full slabs with 0 free objects
38 * partial slabs
39 * empty slabs with no allocated objects
40 *
41 * If partial slabs exist, then new allocations come from these slabs,
42 * otherwise from empty slabs or new slabs are allocated.
43 *
44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46 *
47 * Each cache has a short per-cpu head array; most allocs
48 * and frees go into that array, and if that array overflows, then 1/2
49 * of the entries in the array are given back into the global cache.
50 * The head array is strictly LIFO and should improve the cache hit rates.
51 * On SMP, it additionally reduces the spinlock operations.
52 *
a737b3e2 53 * The c_cpuarray may not be read with local interrupts enabled -
54 * it's changed with a smp_call_function().
55 *
56 * SMP synchronization:
57 * constructors and destructors are called without any locking.
343e0d7a 58 * Several members in struct kmem_cache and struct slab never change; they
59 * are accessed without any locking.
60 * The per-cpu arrays are never accessed from the wrong cpu, no locking,
61 * and local interrupts are disabled so slab code is preempt-safe.
62 * The non-constant members are protected with a per-cache irq spinlock.
63 *
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65 * in 2000 - many ideas in the current implementation are derived from
66 * his patch.
67 *
68 * Further notes from the original documentation:
69 *
70 * 11 April '97. Started multi-threading - markhe
fc0abb14 71 * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72 * The mutex is only needed when accessing/extending the cache-chain, which
73 * can never happen inside an interrupt (kmem_cache_create(),
74 * kmem_cache_shrink() and kmem_cache_reap()).
75 *
76 * At present, each engine can be growing a cache. This should be blocked.
77 *
78 * 15 March 2005. NUMA slab allocator.
79 * Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com>
81 * Alok N Kataria <alokk@calsoftinc.com>
82 * Christoph Lameter <christoph@lameter.com>
83 *
84 * Modified the slab allocator to be node aware on NUMA systems.
85 * Each node has its own list of partial, free and full slabs.
86 * All object allocations for a node occur from node specific slab lists.
87 */
88
89#include <linux/config.h>
90#include <linux/slab.h>
91#include <linux/mm.h>
92#include <linux/swap.h>
93#include <linux/cache.h>
94#include <linux/interrupt.h>
95#include <linux/init.h>
96#include <linux/compiler.h>
101a5001 97#include <linux/cpuset.h>
98#include <linux/seq_file.h>
99#include <linux/notifier.h>
100#include <linux/kallsyms.h>
101#include <linux/cpu.h>
102#include <linux/sysctl.h>
103#include <linux/module.h>
104#include <linux/rcupdate.h>
543537bd 105#include <linux/string.h>
e498be7d 106#include <linux/nodemask.h>
dc85da15 107#include <linux/mempolicy.h>
fc0abb14 108#include <linux/mutex.h>
109
110#include <asm/uaccess.h>
111#include <asm/cacheflush.h>
112#include <asm/tlbflush.h>
113#include <asm/page.h>
114
115/*
116 * DEBUG - 1 for kmem_cache_create() to honour: SLAB_DEBUG_INITIAL,
117 * SLAB_RED_ZONE & SLAB_POISON.
118 * 0 for faster, smaller code (especially in the critical paths).
119 *
120 * STATS - 1 to collect stats for /proc/slabinfo.
121 * 0 for faster, smaller code (especially in the critical paths).
122 *
123 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
124 */
125
126#ifdef CONFIG_DEBUG_SLAB
127#define DEBUG 1
128#define STATS 1
129#define FORCED_DEBUG 1
130#else
131#define DEBUG 0
132#define STATS 0
133#define FORCED_DEBUG 0
134#endif
135
136/* Shouldn't this be in a header file somewhere? */
137#define BYTES_PER_WORD sizeof(void *)
138
139#ifndef cache_line_size
140#define cache_line_size() L1_CACHE_BYTES
141#endif
142
143#ifndef ARCH_KMALLOC_MINALIGN
144/*
145 * Enforce a minimum alignment for the kmalloc caches.
146 * Usually, the kmalloc caches are cache_line_size() aligned, except when
147 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
148 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
149 * alignment larger than BYTES_PER_WORD. ARCH_KMALLOC_MINALIGN allows that.
150 * Note that this flag disables some debug features.
151 */
152#define ARCH_KMALLOC_MINALIGN 0
153#endif
154
155#ifndef ARCH_SLAB_MINALIGN
156/*
157 * Enforce a minimum alignment for all caches.
158 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
159 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
160 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
161 * some debug features.
162 */
163#define ARCH_SLAB_MINALIGN 0
164#endif
165
166#ifndef ARCH_KMALLOC_FLAGS
167#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
168#endif
169
170/* Legal flag mask for kmem_cache_create(). */
171#if DEBUG
172# define CREATE_MASK (SLAB_DEBUG_INITIAL | SLAB_RED_ZONE | \
173 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
ac2b898c 174 SLAB_CACHE_DMA | \
175 SLAB_MUST_HWCACHE_ALIGN | SLAB_STORE_USER | \
176 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
101a5001 177 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
1da177e4 178#else
ac2b898c 179# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
180 SLAB_CACHE_DMA | SLAB_MUST_HWCACHE_ALIGN | \
181 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
101a5001 182 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD)
183#endif
184
185/*
186 * kmem_bufctl_t:
187 *
188 * Bufctls are used for linking objs within a slab via
189 * linked offsets.
190 *
191 * This implementation relies on "struct page" for locating the cache &
192 * slab an object belongs to.
193 * This allows the bufctl structure to be small (one int), but limits
194 * the number of objects a slab (not a cache) can contain when off-slab
195 * bufctls are used. The limit is the size of the largest general cache
196 * that does not use off-slab slabs.
197 * For 32bit archs with 4 kB pages, this is 56.
198 * This is not serious, as it is only for large objects, when it is unwise
199 * to have too many per slab.
200 * Note: This limit can be raised by introducing a general cache whose size
201 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
202 */
203
fa5b08d5 204typedef unsigned int kmem_bufctl_t;
205#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
206#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
207#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
208#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
209
210/* Max number of objs-per-slab for caches which use off-slab slabs.
211 * Needed to avoid a possible looping condition in cache_grow().
212 */
213static unsigned long offslab_limit;
214
215/*
216 * struct slab
217 *
218 * Manages the objs in a slab. Placed either at the beginning of mem allocated
219 * for a slab, or allocated from a general cache.
220 * Slabs are chained into three lists: fully used, partial, fully free slabs.
221 */
222struct slab {
223 struct list_head list;
224 unsigned long colouroff;
225 void *s_mem; /* including colour offset */
226 unsigned int inuse; /* num of objs active in slab */
227 kmem_bufctl_t free;
228 unsigned short nodeid;
229};
230
231/*
232 * struct slab_rcu
233 *
234 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
235 * arrange for kmem_freepages to be called via RCU. This is useful if
236 * we need to approach a kernel structure obliquely, from its address
237 * obtained without the usual locking. We can lock the structure to
238 * stabilize it and check it's still at the given address, only if we
239 * can be sure that the memory has not been meanwhile reused for some
240 * other kind of object (which our subsystem's lock might corrupt).
241 *
242 * rcu_read_lock before reading the address, then rcu_read_unlock after
243 * taking the spinlock within the structure expected at that address.
244 *
245 * We assume struct slab_rcu can overlay struct slab when destroying.
246 */
247struct slab_rcu {
b28a02de 248 struct rcu_head head;
343e0d7a 249 struct kmem_cache *cachep;
b28a02de 250 void *addr;
251};
252
253/*
254 * struct array_cache
255 *
256 * Purpose:
257 * - LIFO ordering, to hand out cache-warm objects from _alloc
258 * - reduce the number of linked list operations
259 * - reduce spinlock operations
260 *
261 * The limit is stored in the per-cpu structure to reduce the data cache
262 * footprint.
263 *
264 */
265struct array_cache {
266 unsigned int avail;
267 unsigned int limit;
268 unsigned int batchcount;
269 unsigned int touched;
e498be7d 270 spinlock_t lock;
271 void *entry[0]; /*
272 * Must have this definition in here for the proper
273 * alignment of array_cache. Also simplifies accessing
274 * the entries.
275 * [0] is for gcc 2.95. It should really be [].
276 */
277};
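/*
 * Illustrative sketch: entry[] is used as a LIFO stack by the fast paths
 * elsewhere in this file, roughly
 *
 *	objp = ac->entry[--ac->avail];	(allocation pops the warmest object)
 *	ac->entry[ac->avail++] = objp;	(free pushes it back)
 *
 * and when avail reaches limit a batch of entries is flushed back to the
 * slab lists.
 */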
278
279/*
280 * bootstrap: The caches do not work without cpuarrays anymore, but the
281 * cpuarrays are allocated from the generic caches...
282 */
283#define BOOT_CPUCACHE_ENTRIES 1
284struct arraycache_init {
285 struct array_cache cache;
b28a02de 286 void *entries[BOOT_CPUCACHE_ENTRIES];
287};
288
289/*
e498be7d 290 * The slab lists for all objects.
291 */
292struct kmem_list3 {
293 struct list_head slabs_partial; /* partial list first, better asm code */
294 struct list_head slabs_full;
295 struct list_head slabs_free;
296 unsigned long free_objects;
b28a02de 297 unsigned int free_limit;
2e1217cf 298 unsigned int colour_next; /* Per-node cache coloring */
299 spinlock_t list_lock;
300 struct array_cache *shared; /* shared per node */
301 struct array_cache **alien; /* on other nodes */
302 unsigned long next_reap; /* updated without locking */
303 int free_touched; /* updated without locking */
304};
305
306/*
307 * Need this for bootstrapping a per node allocator.
308 */
309#define NUM_INIT_LISTS (2 * MAX_NUMNODES + 1)
310struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
311#define CACHE_CACHE 0
312#define SIZE_AC 1
313#define SIZE_L3 (1 + MAX_NUMNODES)
314
315/*
316 * This function must be completely optimized away if a constant is passed to
317 * it. Mostly the same as what is in linux/slab.h except it returns an index.
e498be7d 318 */
7243cc05 319static __always_inline int index_of(const size_t size)
e498be7d 320{
321 extern void __bad_size(void);
322
323 if (__builtin_constant_p(size)) {
324 int i = 0;
325
326#define CACHE(x) \
327 if (size <=x) \
328 return i; \
329 else \
330 i++;
331#include "linux/kmalloc_sizes.h"
332#undef CACHE
5ec8a847 333 __bad_size();
7243cc05 334 } else
5ec8a847 335 __bad_size();
336 return 0;
337}
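/*
 * Illustrative example: with a size table beginning 32, 64, 96, 128, ...,
 * index_of(100) returns 3, the index of the 128 byte general cache (the
 * first CACHE(x) entry with x >= 100).  The real table comes from
 * linux/kmalloc_sizes.h and varies with the configuration.
 */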
338
339#define INDEX_AC index_of(sizeof(struct arraycache_init))
340#define INDEX_L3 index_of(sizeof(struct kmem_list3))
1da177e4 341
5295a74c 342static void kmem_list3_init(struct kmem_list3 *parent)
343{
344 INIT_LIST_HEAD(&parent->slabs_full);
345 INIT_LIST_HEAD(&parent->slabs_partial);
346 INIT_LIST_HEAD(&parent->slabs_free);
347 parent->shared = NULL;
348 parent->alien = NULL;
2e1217cf 349 parent->colour_next = 0;
350 spin_lock_init(&parent->list_lock);
351 parent->free_objects = 0;
352 parent->free_touched = 0;
353}
354
355#define MAKE_LIST(cachep, listp, slab, nodeid) \
356 do { \
357 INIT_LIST_HEAD(listp); \
358 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
359 } while (0)
360
361#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
362 do { \
363 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
364 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
365 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
366 } while (0)
367
368/*
343e0d7a 369 * struct kmem_cache
370 *
371 * manages a cache.
372 */
b28a02de 373
2109a2d1 374struct kmem_cache {
1da177e4 375/* 1) per-cpu data, touched during every alloc/free */
b28a02de 376 struct array_cache *array[NR_CPUS];
b5d8ca7c 377/* 2) Cache tunables. Protected by cache_chain_mutex */
378 unsigned int batchcount;
379 unsigned int limit;
380 unsigned int shared;
b5d8ca7c 381
3dafccf2 382 unsigned int buffer_size;
b5d8ca7c 383/* 3) touched by every alloc & free from the backend */
b28a02de 384 struct kmem_list3 *nodelists[MAX_NUMNODES];
b5d8ca7c 385
386 unsigned int flags; /* constant flags */
387 unsigned int num; /* # of objs per slab */
1da177e4 388
b5d8ca7c 389/* 4) cache_grow/shrink */
1da177e4 390 /* order of pgs per slab (2^n) */
b28a02de 391 unsigned int gfporder;
392
393 /* force GFP flags, e.g. GFP_DMA */
b28a02de 394 gfp_t gfpflags;
1da177e4 395
a737b3e2 396 size_t colour; /* cache colouring range */
b28a02de 397 unsigned int colour_off; /* colour offset */
343e0d7a 398 struct kmem_cache *slabp_cache;
b28a02de 399 unsigned int slab_size;
a737b3e2 400 unsigned int dflags; /* dynamic flags */
401
402 /* constructor func */
343e0d7a 403 void (*ctor) (void *, struct kmem_cache *, unsigned long);
404
405 /* de-constructor func */
343e0d7a 406 void (*dtor) (void *, struct kmem_cache *, unsigned long);
1da177e4 407
b5d8ca7c 408/* 5) cache creation/removal */
409 const char *name;
410 struct list_head next;
1da177e4 411
b5d8ca7c 412/* 6) statistics */
1da177e4 413#if STATS
414 unsigned long num_active;
415 unsigned long num_allocations;
416 unsigned long high_mark;
417 unsigned long grown;
418 unsigned long reaped;
419 unsigned long errors;
420 unsigned long max_freeable;
421 unsigned long node_allocs;
422 unsigned long node_frees;
423 atomic_t allochit;
424 atomic_t allocmiss;
425 atomic_t freehit;
426 atomic_t freemiss;
427#endif
428#if DEBUG
429 /*
430 * If debugging is enabled, then the allocator can add additional
431 * fields and/or padding to every object. buffer_size contains the total
432 * object size including these internal fields, the following two
433 * variables contain the offset to the user object and its size.
434 */
435 int obj_offset;
436 int obj_size;
437#endif
438};
439
440#define CFLGS_OFF_SLAB (0x80000000UL)
441#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
442
443#define BATCHREFILL_LIMIT 16
444/*
445 * Optimization question: fewer reaps means less probability for unnecessary
446 * cpucache drain/refill cycles.
1da177e4 447 *
dc6f3f27 448 * OTOH the cpuarrays can contain lots of objects,
449 * which could lock up otherwise freeable slabs.
450 */
451#define REAPTIMEOUT_CPUC (2*HZ)
452#define REAPTIMEOUT_LIST3 (4*HZ)
453
454#if STATS
455#define STATS_INC_ACTIVE(x) ((x)->num_active++)
456#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
457#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
458#define STATS_INC_GROWN(x) ((x)->grown++)
459#define STATS_INC_REAPED(x) ((x)->reaped++)
460#define STATS_SET_HIGH(x) \
461 do { \
462 if ((x)->num_active > (x)->high_mark) \
463 (x)->high_mark = (x)->num_active; \
464 } while (0)
465#define STATS_INC_ERR(x) ((x)->errors++)
466#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
e498be7d 467#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
468#define STATS_SET_FREEABLE(x, i) \
469 do { \
470 if ((x)->max_freeable < i) \
471 (x)->max_freeable = i; \
472 } while (0)
473#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
474#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
475#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
476#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
477#else
478#define STATS_INC_ACTIVE(x) do { } while (0)
479#define STATS_DEC_ACTIVE(x) do { } while (0)
480#define STATS_INC_ALLOCED(x) do { } while (0)
481#define STATS_INC_GROWN(x) do { } while (0)
482#define STATS_INC_REAPED(x) do { } while (0)
483#define STATS_SET_HIGH(x) do { } while (0)
484#define STATS_INC_ERR(x) do { } while (0)
485#define STATS_INC_NODEALLOCS(x) do { } while (0)
e498be7d 486#define STATS_INC_NODEFREES(x) do { } while (0)
a737b3e2 487#define STATS_SET_FREEABLE(x, i) do { } while (0)
488#define STATS_INC_ALLOCHIT(x) do { } while (0)
489#define STATS_INC_ALLOCMISS(x) do { } while (0)
490#define STATS_INC_FREEHIT(x) do { } while (0)
491#define STATS_INC_FREEMISS(x) do { } while (0)
492#endif
493
494#if DEBUG
495/*
496 * Magic nums for obj red zoning.
497 * Placed in the first word before and the first word after an obj.
498 */
499#define RED_INACTIVE 0x5A2CF071UL /* when obj is inactive */
500#define RED_ACTIVE 0x170FC2A5UL /* when obj is active */
501
502/* ...and for poisoning */
503#define POISON_INUSE 0x5a /* for use-uninitialised poisoning */
504#define POISON_FREE 0x6b /* for use-after-free poisoning */
505#define POISON_END 0xa5 /* end-byte of poisoning */
506
507/*
508 * memory layout of objects:
1da177e4 509 * 0 : objp
3dafccf2 510 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
511 * the end of an object is aligned with the end of the real
512 * allocation. Catches writes behind the end of the allocation.
3dafccf2 513 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
1da177e4 514 * redzone word.
515 * cachep->obj_offset: The real object.
516 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
517 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
518 * [BYTES_PER_WORD long]
1da177e4 519 */
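/*
 * Concrete illustration, assuming BYTES_PER_WORD == 4 and a cache created
 * with SLAB_RED_ZONE | SLAB_STORE_USER: dbg_redzone1() below returns
 * objp + obj_offset - 4, dbg_redzone2() returns objp + buffer_size - 8,
 * and dbg_userword() returns objp + buffer_size - 4.
 */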
343e0d7a 520static int obj_offset(struct kmem_cache *cachep)
1da177e4 521{
3dafccf2 522 return cachep->obj_offset;
523}
524
343e0d7a 525static int obj_size(struct kmem_cache *cachep)
1da177e4 526{
3dafccf2 527 return cachep->obj_size;
528}
529
343e0d7a 530static unsigned long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
531{
532 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
3dafccf2 533 return (unsigned long*) (objp+obj_offset(cachep)-BYTES_PER_WORD);
534}
535
343e0d7a 536static unsigned long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
537{
538 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
539 if (cachep->flags & SLAB_STORE_USER)
3dafccf2 540 return (unsigned long *)(objp + cachep->buffer_size -
b28a02de 541 2 * BYTES_PER_WORD);
3dafccf2 542 return (unsigned long *)(objp + cachep->buffer_size - BYTES_PER_WORD);
543}
544
343e0d7a 545static void **dbg_userword(struct kmem_cache *cachep, void *objp)
546{
547 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
3dafccf2 548 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
549}
550
551#else
552
553#define obj_offset(x) 0
554#define obj_size(cachep) (cachep->buffer_size)
555#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long *)NULL;})
556#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long *)NULL;})
557#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
558
559#endif
560
561/*
562 * Maximum size of an obj (in 2^order pages) and absolute limit for the gfp
563 * order.
564 */
565#if defined(CONFIG_LARGE_ALLOCS)
566#define MAX_OBJ_ORDER 13 /* up to 32Mb */
567#define MAX_GFP_ORDER 13 /* up to 32Mb */
568#elif defined(CONFIG_MMU)
569#define MAX_OBJ_ORDER 5 /* 32 pages */
570#define MAX_GFP_ORDER 5 /* 32 pages */
571#else
572#define MAX_OBJ_ORDER 8 /* up to 1Mb */
573#define MAX_GFP_ORDER 8 /* up to 1Mb */
574#endif
575
576/*
577 * Do not go above this order unless 0 objects fit into the slab.
578 */
579#define BREAK_GFP_ORDER_HI 1
580#define BREAK_GFP_ORDER_LO 0
581static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
582
583/*
584 * Functions for storing/retrieving the cachep and or slab from the page
585 * allocator. These are used to find the slab an obj belongs to. With kfree(),
586 * these are used to find the cache which an obj belongs to.
1da177e4 587 */
588static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
589{
590 page->lru.next = (struct list_head *)cache;
591}
592
593static inline struct kmem_cache *page_get_cache(struct page *page)
594{
595 if (unlikely(PageCompound(page)))
596 page = (struct page *)page_private(page);
597 return (struct kmem_cache *)page->lru.next;
598}
599
600static inline void page_set_slab(struct page *page, struct slab *slab)
601{
602 page->lru.prev = (struct list_head *)slab;
603}
604
605static inline struct slab *page_get_slab(struct page *page)
606{
607 if (unlikely(PageCompound(page)))
608 page = (struct page *)page_private(page);
609 return (struct slab *)page->lru.prev;
610}
1da177e4 611
612static inline struct kmem_cache *virt_to_cache(const void *obj)
613{
614 struct page *page = virt_to_page(obj);
615 return page_get_cache(page);
616}
617
618static inline struct slab *virt_to_slab(const void *obj)
619{
620 struct page *page = virt_to_page(obj);
621 return page_get_slab(page);
622}
623
624static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
625 unsigned int idx)
626{
627 return slab->s_mem + cache->buffer_size * idx;
628}
629
630static inline unsigned int obj_to_index(struct kmem_cache *cache,
631 struct slab *slab, void *obj)
632{
633 return (unsigned)(obj - slab->s_mem) / cache->buffer_size;
634}
635
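/*
 * Note: index_to_obj() and obj_to_index() are inverses.  Objects sit
 * contiguously from s_mem with a stride of buffer_size, so for any valid
 * index i, obj_to_index(cache, slab, index_to_obj(cache, slab, i)) == i.
 */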
636/*
637 * These are the default caches for kmalloc. Custom caches can have other sizes.
638 */
639struct cache_sizes malloc_sizes[] = {
640#define CACHE(x) { .cs_size = (x) },
641#include <linux/kmalloc_sizes.h>
642 CACHE(ULONG_MAX)
643#undef CACHE
644};
645EXPORT_SYMBOL(malloc_sizes);
646
647/* Must match cache_sizes above. Out of line to keep cache footprint low. */
648struct cache_names {
649 char *name;
650 char *name_dma;
651};
652
653static struct cache_names __initdata cache_names[] = {
654#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
655#include <linux/kmalloc_sizes.h>
b28a02de 656 {NULL,}
657#undef CACHE
658};
659
660static struct arraycache_init initarray_cache __initdata =
b28a02de 661 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
1da177e4 662static struct arraycache_init initarray_generic =
b28a02de 663 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
664
665/* internal cache of cache description objs */
343e0d7a 666static struct kmem_cache cache_cache = {
667 .batchcount = 1,
668 .limit = BOOT_CPUCACHE_ENTRIES,
669 .shared = 1,
343e0d7a 670 .buffer_size = sizeof(struct kmem_cache),
b28a02de 671 .name = "kmem_cache",
1da177e4 672#if DEBUG
343e0d7a 673 .obj_size = sizeof(struct kmem_cache),
674#endif
675};
676
677/* Guard access to the cache-chain. */
fc0abb14 678static DEFINE_MUTEX(cache_chain_mutex);
679static struct list_head cache_chain;
680
681/*
682 * vm_enough_memory() looks at this to determine how many slab-allocated pages
683 * are possibly freeable under pressure
684 *
685 * SLAB_RECLAIM_ACCOUNT turns this on per-slab
686 */
687atomic_t slab_reclaim_pages;
688
689/*
690 * chicken and egg problem: delay the per-cpu array allocation
691 * until the general caches are up.
692 */
693static enum {
694 NONE,
695 PARTIAL_AC,
696 PARTIAL_L3,
697 FULL
698} g_cpucache_up;
699
700static DEFINE_PER_CPU(struct work_struct, reap_work);
701
702static void free_block(struct kmem_cache *cachep, void **objpp, int len,
703 int node);
343e0d7a 704static void enable_cpucache(struct kmem_cache *cachep);
b28a02de 705static void cache_reap(void *unused);
343e0d7a 706static int __node_shrink(struct kmem_cache *cachep, int node);
1da177e4 707
343e0d7a 708static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
709{
710 return cachep->array[smp_processor_id()];
711}
712
713static inline struct kmem_cache *__find_general_cachep(size_t size,
714 gfp_t gfpflags)
715{
716 struct cache_sizes *csizep = malloc_sizes;
717
718#if DEBUG
719 /* This happens if someone tries to call
720 * kmem_cache_create(), or __kmalloc(), before
721 * the generic caches are initialized.
722 */
c7e43c78 723 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
724#endif
725 while (size > csizep->cs_size)
726 csizep++;
727
728 /*
0abf40c1 729 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
730 * has cs_{dma,}cachep==NULL. Thus no special case
731 * for large kmalloc calls required.
732 */
733 if (unlikely(gfpflags & GFP_DMA))
734 return csizep->cs_dmacachep;
735 return csizep->cs_cachep;
736}
737
343e0d7a 738struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
739{
740 return __find_general_cachep(size, gfpflags);
741}
742EXPORT_SYMBOL(kmem_find_general_cachep);
743
fbaccacf 744static size_t slab_mgmt_size(size_t nr_objs, size_t align)
1da177e4 745{
746 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
747}
1da177e4 748
749/*
750 * Calculate the number of objects and left-over bytes for a given buffer size.
751 */
752static void cache_estimate(unsigned long gfporder, size_t buffer_size,
753 size_t align, int flags, size_t *left_over,
754 unsigned int *num)
755{
756 int nr_objs;
757 size_t mgmt_size;
758 size_t slab_size = PAGE_SIZE << gfporder;
1da177e4 759
760 /*
761 * The slab management structure can be either off the slab or
762 * on it. For the latter case, the memory allocated for a
763 * slab is used for:
764 *
765 * - The struct slab
766 * - One kmem_bufctl_t for each object
767 * - Padding to respect alignment of @align
768 * - @buffer_size bytes for each object
769 *
770 * If the slab management structure is off the slab, then the
771 * alignment will already be calculated into the size. Because
772 * the slabs are all pages aligned, the objects will be at the
773 * correct alignment when allocated.
774 */
775 if (flags & CFLGS_OFF_SLAB) {
776 mgmt_size = 0;
777 nr_objs = slab_size / buffer_size;
778
779 if (nr_objs > SLAB_LIMIT)
780 nr_objs = SLAB_LIMIT;
781 } else {
782 /*
783 * Ignore padding for the initial guess. The padding
784 * is at most @align-1 bytes, and @buffer_size is at
785 * least @align. In the worst case, this result will
786 * be one greater than the number of objects that fit
787 * into the memory allocation when taking the padding
788 * into account.
789 */
790 nr_objs = (slab_size - sizeof(struct slab)) /
791 (buffer_size + sizeof(kmem_bufctl_t));
792
793 /*
794 * This calculated number will be either the right
795 * amount, or one greater than what we want.
796 */
797 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
798 > slab_size)
799 nr_objs--;
800
801 if (nr_objs > SLAB_LIMIT)
802 nr_objs = SLAB_LIMIT;
803
804 mgmt_size = slab_mgmt_size(nr_objs, align);
805 }
806 *num = nr_objs;
807 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
808}
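/*
 * Worked example (illustrative; assumes 4 kB pages, 32-bit pointers and
 * on-slab management): for gfporder = 0, buffer_size = 256 and align = 32,
 * with sizeof(struct slab) == 28 and sizeof(kmem_bufctl_t) == 4, the first
 * guess is (4096 - 28) / (256 + 4) = 15 objects.  slab_mgmt_size(15, 32)
 * is ALIGN(28 + 15 * 4, 32) = 96, and 96 + 15 * 256 = 3936 <= 4096, so the
 * guess stands: *num = 15 and *left_over = 4096 - 3840 - 96 = 160.
 */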
809
810#define slab_error(cachep, msg) __slab_error(__FUNCTION__, cachep, msg)
811
812static void __slab_error(const char *function, struct kmem_cache *cachep,
813 char *msg)
814{
815 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
b28a02de 816 function, cachep->name, msg);
817 dump_stack();
818}
819
820#ifdef CONFIG_NUMA
821/*
822 * Special reaping functions for NUMA systems called from cache_reap().
823 * These take care of doing round robin flushing of alien caches (containing
824 * objects freed on different nodes from which they were allocated) and the
825 * flushing of remote pcps by calling drain_node_pages.
826 */
827static DEFINE_PER_CPU(unsigned long, reap_node);
828
829static void init_reap_node(int cpu)
830{
831 int node;
832
833 node = next_node(cpu_to_node(cpu), node_online_map);
834 if (node == MAX_NUMNODES)
442295c9 835 node = first_node(node_online_map);
836
837 __get_cpu_var(reap_node) = node;
838}
839
840static void next_reap_node(void)
841{
842 int node = __get_cpu_var(reap_node);
843
844 /*
845 * Also drain per cpu pages on remote zones
846 */
847 if (node != numa_node_id())
848 drain_node_pages(node);
849
850 node = next_node(node, node_online_map);
851 if (unlikely(node >= MAX_NUMNODES))
852 node = first_node(node_online_map);
853 __get_cpu_var(reap_node) = node;
854}
855
856#else
857#define init_reap_node(cpu) do { } while (0)
858#define next_reap_node(void) do { } while (0)
859#endif
860
861/*
862 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
863 * via the workqueue/eventd.
864 * Add the CPU number into the expiration time to minimize the possibility of
865 * the CPUs getting into lockstep and contending for the global cache chain
866 * lock.
867 */
868static void __devinit start_cpu_timer(int cpu)
869{
870 struct work_struct *reap_work = &per_cpu(reap_work, cpu);
871
872 /*
873 * When this gets called from do_initcalls via cpucache_init(),
874 * init_workqueues() has already run, so keventd will be setup
875 * at that time.
876 */
877 if (keventd_up() && reap_work->func == NULL) {
8fce4d8e 878 init_reap_node(cpu);
879 INIT_WORK(reap_work, cache_reap, NULL);
880 schedule_delayed_work_on(cpu, reap_work, HZ + 3 * cpu);
881 }
882}
883
e498be7d 884static struct array_cache *alloc_arraycache(int node, int entries,
b28a02de 885 int batchcount)
1da177e4 886{
b28a02de 887 int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
888 struct array_cache *nc = NULL;
889
e498be7d 890 nc = kmalloc_node(memsize, GFP_KERNEL, node);
891 if (nc) {
892 nc->avail = 0;
893 nc->limit = entries;
894 nc->batchcount = batchcount;
895 nc->touched = 0;
e498be7d 896 spin_lock_init(&nc->lock);
897 }
898 return nc;
899}
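/*
 * Note: the single kmalloc_node() above holds both the struct array_cache
 * header and the `entries` object pointers; entry[] indexes straight into
 * the trailing pointer area via the zero-length array member.  E.g. for
 * entries = 120 on a 64-bit node the allocation is
 * sizeof(struct array_cache) + 120 * 8 bytes.
 */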
900
901/*
902 * Transfer objects in one arraycache to another.
903 * Locking must be handled by the caller.
904 *
905 * Return the number of entries transferred.
906 */
907static int transfer_objects(struct array_cache *to,
908 struct array_cache *from, unsigned int max)
909{
910 /* Figure out how many entries to transfer */
911 int nr = min(min(from->avail, max), to->limit - to->avail);
912
913 if (!nr)
914 return 0;
915
916 memcpy(to->entry + to->avail, from->entry + from->avail -nr,
917 sizeof(void *) *nr);
918
919 from->avail -= nr;
920 to->avail += nr;
921 to->touched = 1;
922 return nr;
923}
924
e498be7d 925#ifdef CONFIG_NUMA
343e0d7a 926static void *__cache_alloc_node(struct kmem_cache *, gfp_t, int);
c61afb18 927static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
dc85da15 928
5295a74c 929static struct array_cache **alloc_alien_cache(int node, int limit)
e498be7d
CL
930{
931 struct array_cache **ac_ptr;
b28a02de 932 int memsize = sizeof(void *) * MAX_NUMNODES;
933 int i;
934
935 if (limit > 1)
936 limit = 12;
937 ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
938 if (ac_ptr) {
939 for_each_node(i) {
940 if (i == node || !node_online(i)) {
941 ac_ptr[i] = NULL;
942 continue;
943 }
944 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
945 if (!ac_ptr[i]) {
b28a02de 946 for (i--; i >= 0; i--)
947 kfree(ac_ptr[i]);
948 kfree(ac_ptr);
949 return NULL;
950 }
951 }
952 }
953 return ac_ptr;
954}
955
5295a74c 956static void free_alien_cache(struct array_cache **ac_ptr)
957{
958 int i;
959
960 if (!ac_ptr)
961 return;
e498be7d 962 for_each_node(i)
b28a02de 963 kfree(ac_ptr[i]);
964 kfree(ac_ptr);
965}
966
343e0d7a 967static void __drain_alien_cache(struct kmem_cache *cachep,
5295a74c 968 struct array_cache *ac, int node)
969{
970 struct kmem_list3 *rl3 = cachep->nodelists[node];
971
972 if (ac->avail) {
973 spin_lock(&rl3->list_lock);
974 /*
975 * Stuff objects into the remote nodes shared array first.
976 * That way we could avoid the overhead of putting the objects
977 * into the free lists and getting them back later.
978 */
979 transfer_objects(rl3->shared, ac, ac->limit);
980
ff69416e 981 free_block(cachep, ac->entry, ac->avail, node);
982 ac->avail = 0;
983 spin_unlock(&rl3->list_lock);
984 }
985}
986
987/*
988 * Called from cache_reap() to regularly drain alien caches round robin.
989 */
990static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
991{
992 int node = __get_cpu_var(reap_node);
993
994 if (l3->alien) {
995 struct array_cache *ac = l3->alien[node];
996
997 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
998 __drain_alien_cache(cachep, ac, node);
999 spin_unlock_irq(&ac->lock);
1000 }
1001 }
1002}
1003
1004static void drain_alien_cache(struct kmem_cache *cachep,
1005 struct array_cache **alien)
e498be7d 1006{
b28a02de 1007 int i = 0;
1008 struct array_cache *ac;
1009 unsigned long flags;
1010
1011 for_each_online_node(i) {
4484ebf1 1012 ac = alien[i];
1013 if (ac) {
1014 spin_lock_irqsave(&ac->lock, flags);
1015 __drain_alien_cache(cachep, ac, i);
1016 spin_unlock_irqrestore(&ac->lock, flags);
1017 }
1018 }
1019}
1020#else
7a21ef6f 1021
4484ebf1 1022#define drain_alien_cache(cachep, alien) do { } while (0)
8fce4d8e 1023#define reap_alien(cachep, l3) do { } while (0)
4484ebf1 1024
1025static inline struct array_cache **alloc_alien_cache(int node, int limit)
1026{
1027 return (struct array_cache **) 0x01020304ul;
1028}
1029
1030static inline void free_alien_cache(struct array_cache **ac_ptr)
1031{
1032}
7a21ef6f 1033
1034#endif
1035
1da177e4 1036static int __devinit cpuup_callback(struct notifier_block *nfb,
b28a02de 1037 unsigned long action, void *hcpu)
1038{
1039 long cpu = (long)hcpu;
343e0d7a 1040 struct kmem_cache *cachep;
1041 struct kmem_list3 *l3 = NULL;
1042 int node = cpu_to_node(cpu);
1043 int memsize = sizeof(struct kmem_list3);
1044
1045 switch (action) {
1046 case CPU_UP_PREPARE:
fc0abb14 1047 mutex_lock(&cache_chain_mutex);
1048 /*
1049 * We need to do this right in the beginning since
1050 * alloc_arraycache's are going to use this list.
1051 * kmalloc_node allows us to add the slab to the right
1052 * kmem_list3 and not this cpu's kmem_list3
1053 */
1054
1da177e4 1055 list_for_each_entry(cachep, &cache_chain, next) {
1056 /*
1057 * Set up the size64 kmemlist for cpu before we can
1058 * begin anything. Make sure some other cpu on this
1059 * node has not already allocated this
1060 */
1061 if (!cachep->nodelists[node]) {
1062 l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1063 if (!l3)
1064 goto bad;
1065 kmem_list3_init(l3);
1066 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
b28a02de 1067 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
e498be7d 1068
1069 /*
1070 * The l3s don't come and go as CPUs come and
1071 * go. cache_chain_mutex is sufficient
1072 * protection here.
1073 */
1074 cachep->nodelists[node] = l3;
1075 }
1da177e4 1076
1077 spin_lock_irq(&cachep->nodelists[node]->list_lock);
1078 cachep->nodelists[node]->free_limit =
1079 (1 + nr_cpus_node(node)) *
1080 cachep->batchcount + cachep->num;
1081 spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1082 }
1083
1084 /*
1085 * Now we can go ahead with allocating the shared arrays and
1086 * array caches
1087 */
e498be7d 1088 list_for_each_entry(cachep, &cache_chain, next) {
cd105df4 1089 struct array_cache *nc;
1090 struct array_cache *shared;
1091 struct array_cache **alien;
cd105df4 1092
e498be7d 1093 nc = alloc_arraycache(node, cachep->limit,
4484ebf1 1094 cachep->batchcount);
1095 if (!nc)
1096 goto bad;
1097 shared = alloc_arraycache(node,
1098 cachep->shared * cachep->batchcount,
1099 0xbaadf00d);
1100 if (!shared)
1101 goto bad;
7a21ef6f 1102
1103 alien = alloc_alien_cache(node, cachep->limit);
1104 if (!alien)
1105 goto bad;
1da177e4 1106 cachep->array[cpu] = nc;
1107 l3 = cachep->nodelists[node];
1108 BUG_ON(!l3);
e498be7d 1109
1110 spin_lock_irq(&l3->list_lock);
1111 if (!l3->shared) {
1112 /*
1113 * We are serialised from CPU_DEAD or
1114 * CPU_UP_CANCELLED by the cpucontrol lock
1115 */
1116 l3->shared = shared;
1117 shared = NULL;
e498be7d 1118 }
1119#ifdef CONFIG_NUMA
1120 if (!l3->alien) {
1121 l3->alien = alien;
1122 alien = NULL;
1123 }
1124#endif
1125 spin_unlock_irq(&l3->list_lock);
1126 kfree(shared);
1127 free_alien_cache(alien);
1da177e4 1128 }
fc0abb14 1129 mutex_unlock(&cache_chain_mutex);
1130 break;
1131 case CPU_ONLINE:
1132 start_cpu_timer(cpu);
1133 break;
1134#ifdef CONFIG_HOTPLUG_CPU
1135 case CPU_DEAD:
1136 /*
1137 * Even if all the cpus of a node are down, we don't free the
1138 * kmem_list3 of any cache. This to avoid a race between
1139 * cpu_down, and a kmalloc allocation from another cpu for
1140 * memory from the node of the cpu going down. The list3
1141 * structure is usually allocated from kmem_cache_create() and
1142 * gets destroyed at kmem_cache_destroy().
1143 */
1144 /* fall thru */
1145 case CPU_UP_CANCELED:
fc0abb14 1146 mutex_lock(&cache_chain_mutex);
1147 list_for_each_entry(cachep, &cache_chain, next) {
1148 struct array_cache *nc;
1149 struct array_cache *shared;
1150 struct array_cache **alien;
e498be7d 1151 cpumask_t mask;
1da177e4 1152
e498be7d 1153 mask = node_to_cpumask(node);
1154 /* cpu is dead; no one can alloc from it. */
1155 nc = cachep->array[cpu];
1156 cachep->array[cpu] = NULL;
1157 l3 = cachep->nodelists[node];
1158
1159 if (!l3)
4484ebf1 1160 goto free_array_cache;
e498be7d 1161
ca3b9b91 1162 spin_lock_irq(&l3->list_lock);
1163
1164 /* Free limit for this kmem_list3 */
1165 l3->free_limit -= cachep->batchcount;
1166 if (nc)
ff69416e 1167 free_block(cachep, nc->entry, nc->avail, node);
1168
1169 if (!cpus_empty(mask)) {
ca3b9b91 1170 spin_unlock_irq(&l3->list_lock);
4484ebf1 1171 goto free_array_cache;
b28a02de 1172 }
e498be7d 1173
1174 shared = l3->shared;
1175 if (shared) {
e498be7d 1176 free_block(cachep, l3->shared->entry,
b28a02de 1177 l3->shared->avail, node);
1178 l3->shared = NULL;
1179 }
e498be7d 1180
1181 alien = l3->alien;
1182 l3->alien = NULL;
1183
1184 spin_unlock_irq(&l3->list_lock);
1185
1186 kfree(shared);
1187 if (alien) {
1188 drain_alien_cache(cachep, alien);
1189 free_alien_cache(alien);
e498be7d 1190 }
4484ebf1 1191free_array_cache:
1192 kfree(nc);
1193 }
1194 /*
1195 * In the previous loop, all the objects were freed to
1196 * the respective cache's slabs, now we can go ahead and
1197 * shrink each nodelist to its limit.
1198 */
1199 list_for_each_entry(cachep, &cache_chain, next) {
1200 l3 = cachep->nodelists[node];
1201 if (!l3)
1202 continue;
1203 spin_lock_irq(&l3->list_lock);
1204 /* free slabs belonging to this node */
1205 __node_shrink(cachep, node);
1206 spin_unlock_irq(&l3->list_lock);
1207 }
fc0abb14 1208 mutex_unlock(&cache_chain_mutex);
1209 break;
1210#endif
1211 }
1212 return NOTIFY_OK;
a737b3e2 1213bad:
fc0abb14 1214 mutex_unlock(&cache_chain_mutex);
1215 return NOTIFY_BAD;
1216}
1217
1218static struct notifier_block cpucache_notifier = { &cpuup_callback, NULL, 0 };
1219
1220/*
1221 * swap the static kmem_list3 with kmalloced memory
1222 */
1223static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1224 int nodeid)
1225{
1226 struct kmem_list3 *ptr;
1227
1228 BUG_ON(cachep->nodelists[nodeid] != list);
1229 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1230 BUG_ON(!ptr);
1231
1232 local_irq_disable();
1233 memcpy(ptr, list, sizeof(struct kmem_list3));
1234 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1235 cachep->nodelists[nodeid] = ptr;
1236 local_irq_enable();
1237}
1238
1239/*
1240 * Initialisation. Called after the page allocator has been initialised and
1241 * before smp_init().
1242 */
1243void __init kmem_cache_init(void)
1244{
1245 size_t left_over;
1246 struct cache_sizes *sizes;
1247 struct cache_names *names;
e498be7d 1248 int i;
07ed76b2 1249 int order;
1250
1251 for (i = 0; i < NUM_INIT_LISTS; i++) {
1252 kmem_list3_init(&initkmem_list3[i]);
1253 if (i < MAX_NUMNODES)
1254 cache_cache.nodelists[i] = NULL;
1255 }
1256
1257 /*
1258 * Fragmentation resistance on low memory - only use bigger
1259 * page orders on machines with more than 32MB of memory.
1260 */
1261 if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1262 slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1263
1264 /* Bootstrap is tricky, because several objects are allocated
1265 * from caches that do not exist yet:
1266 * 1) initialize the cache_cache cache: it contains the struct
1267 * kmem_cache structures of all caches, except cache_cache itself:
1268 * cache_cache is statically allocated.
1269 * Initially an __init data area is used for the head array and the
1270 * kmem_list3 structures, it's replaced with a kmalloc allocated
1271 * array at the end of the bootstrap.
1da177e4 1272 * 2) Create the first kmalloc cache.
343e0d7a 1273 * The struct kmem_cache for the new cache is allocated normally.
1274 * An __init data area is used for the head array.
1275 * 3) Create the remaining kmalloc caches, with minimally sized
1276 * head arrays.
1277 * 4) Replace the __init data head arrays for cache_cache and the first
1278 * kmalloc cache with kmalloc allocated arrays.
1279 * 5) Replace the __init data for kmem_list3 for cache_cache and
1280 * the other cache's with kmalloc allocated memory.
1281 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1282 */
1283
1284 /* 1) create the cache_cache */
1285 INIT_LIST_HEAD(&cache_chain);
1286 list_add(&cache_cache.next, &cache_chain);
1287 cache_cache.colour_off = cache_line_size();
1288 cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
e498be7d 1289 cache_cache.nodelists[numa_node_id()] = &initkmem_list3[CACHE_CACHE];
1da177e4 1290
1291 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1292 cache_line_size());
1da177e4 1293
1294 for (order = 0; order < MAX_ORDER; order++) {
1295 cache_estimate(order, cache_cache.buffer_size,
1296 cache_line_size(), 0, &left_over, &cache_cache.num);
1297 if (cache_cache.num)
1298 break;
1299 }
40094fa6 1300 BUG_ON(!cache_cache.num);
07ed76b2 1301 cache_cache.gfporder = order;
b28a02de 1302 cache_cache.colour = left_over / cache_cache.colour_off;
1303 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1304 sizeof(struct slab), cache_line_size());
1305
1306 /* 2+3) create the kmalloc caches */
1307 sizes = malloc_sizes;
1308 names = cache_names;
1309
1310 /*
1311 * Initialize the caches that provide memory for the array cache and the
1312 * kmem_list3 structures first. Without this, further allocations will
1313 * bug.
1314 */
1315
1316 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1317 sizes[INDEX_AC].cs_size,
1318 ARCH_KMALLOC_MINALIGN,
1319 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1320 NULL, NULL);
e498be7d 1321
a737b3e2 1322 if (INDEX_AC != INDEX_L3) {
e498be7d 1323 sizes[INDEX_L3].cs_cachep =
1324 kmem_cache_create(names[INDEX_L3].name,
1325 sizes[INDEX_L3].cs_size,
1326 ARCH_KMALLOC_MINALIGN,
1327 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1328 NULL, NULL);
1329 }
e498be7d 1330
1da177e4 1331 while (sizes->cs_size != ULONG_MAX) {
1332 /*
1333 * For performance, all the general caches are L1 aligned.
1334 * This should be particularly beneficial on SMP boxes, as it
1335 * eliminates "false sharing".
1336 * Note for systems short on memory removing the alignment will
1337 * allow tighter packing of the smaller caches.
1338 */
a737b3e2 1339 if (!sizes->cs_cachep) {
e498be7d 1340 sizes->cs_cachep = kmem_cache_create(names->name,
1341 sizes->cs_size,
1342 ARCH_KMALLOC_MINALIGN,
1343 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1344 NULL, NULL);
1345 }
1346
1347 /* Inc off-slab bufctl limit until the ceiling is hit. */
1348 if (!(OFF_SLAB(sizes->cs_cachep))) {
b28a02de 1349 offslab_limit = sizes->cs_size - sizeof(struct slab);
1350 offslab_limit /= sizeof(kmem_bufctl_t);
1351 }
1352
1353 sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
1354 sizes->cs_size,
1355 ARCH_KMALLOC_MINALIGN,
1356 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1357 SLAB_PANIC,
1358 NULL, NULL);
1359 sizes++;
1360 names++;
1361 }
1362 /* 4) Replace the bootstrap head arrays */
1363 {
b28a02de 1364 void *ptr;
e498be7d 1365
1da177e4 1366 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
e498be7d 1367
1da177e4 1368 local_irq_disable();
1369 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1370 memcpy(ptr, cpu_cache_get(&cache_cache),
b28a02de 1371 sizeof(struct arraycache_init));
1372 cache_cache.array[smp_processor_id()] = ptr;
1373 local_irq_enable();
e498be7d 1374
1da177e4 1375 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
e498be7d 1376
1da177e4 1377 local_irq_disable();
9a2dba4b 1378 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
b28a02de 1379 != &initarray_generic.cache);
9a2dba4b 1380 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
b28a02de 1381 sizeof(struct arraycache_init));
e498be7d 1382 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
b28a02de 1383 ptr;
1384 local_irq_enable();
1385 }
1386 /* 5) Replace the bootstrap kmem_list3's */
1387 {
1388 int node;
1389 /* Replace the static kmem_list3 structures for the boot cpu */
1390 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE],
b28a02de 1391 numa_node_id());
1392
1393 for_each_online_node(node) {
1394 init_list(malloc_sizes[INDEX_AC].cs_cachep,
b28a02de 1395 &initkmem_list3[SIZE_AC + node], node);
1396
1397 if (INDEX_AC != INDEX_L3) {
1398 init_list(malloc_sizes[INDEX_L3].cs_cachep,
1399 &initkmem_list3[SIZE_L3 + node],
1400 node);
1401 }
1402 }
1403 }
1da177e4 1404
e498be7d 1405 /* 6) resize the head arrays to their final sizes */
1da177e4 1406 {
343e0d7a 1407 struct kmem_cache *cachep;
fc0abb14 1408 mutex_lock(&cache_chain_mutex);
1da177e4 1409 list_for_each_entry(cachep, &cache_chain, next)
a737b3e2 1410 enable_cpucache(cachep);
fc0abb14 1411 mutex_unlock(&cache_chain_mutex);
1412 }
1413
1414 /* Done! */
1415 g_cpucache_up = FULL;
1416
1417 /*
1418 * Register a cpu startup notifier callback that initializes
1419 * cpu_cache_get for all new cpus
1420 */
1421 register_cpu_notifier(&cpucache_notifier);
1da177e4 1422
1423 /*
1424 * The reap timers are started later, with a module init call: That part
1425 * of the kernel is not yet operational.
1426 */
1427}
1428
1429static int __init cpucache_init(void)
1430{
1431 int cpu;
1432
1433 /*
1434 * Register the timers that return unneeded pages to the page allocator
1da177e4 1435 */
e498be7d 1436 for_each_online_cpu(cpu)
a737b3e2 1437 start_cpu_timer(cpu);
1438 return 0;
1439}
1440__initcall(cpucache_init);
1441
1442/*
1443 * Interface to system's page allocator. No need to hold the cache-lock.
1444 *
1445 * If we requested dmaable memory, we will get it. Even if we
1446 * did not request dmaable memory, we might get it, but that
1447 * would be relatively rare and ignorable.
1448 */
343e0d7a 1449static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1450{
1451 struct page *page;
1452 void *addr;
1453 int i;
1454
1455 flags |= cachep->gfpflags;
50c85a19 1456 page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1457 if (!page)
1458 return NULL;
1459 addr = page_address(page);
1460
1461 i = (1 << cachep->gfporder);
1462 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1463 atomic_add(i, &slab_reclaim_pages);
1464 add_page_state(nr_slab, i);
1465 while (i--) {
f205b2fe 1466 __SetPageSlab(page);
1467 page++;
1468 }
1469 return addr;
1470}
1471
1472/*
1473 * Interface to system's page release.
1474 */
343e0d7a 1475static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1da177e4 1476{
b28a02de 1477 unsigned long i = (1 << cachep->gfporder);
1478 struct page *page = virt_to_page(addr);
1479 const unsigned long nr_freed = i;
1480
1481 while (i--) {
1482 BUG_ON(!PageSlab(page));
1483 __ClearPageSlab(page);
1484 page++;
1485 }
1486 sub_page_state(nr_slab, nr_freed);
1487 if (current->reclaim_state)
1488 current->reclaim_state->reclaimed_slab += nr_freed;
1489 free_pages((unsigned long)addr, cachep->gfporder);
1490 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1491 atomic_sub(1 << cachep->gfporder, &slab_reclaim_pages);
1492}
1493
1494static void kmem_rcu_free(struct rcu_head *head)
1495{
b28a02de 1496 struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
343e0d7a 1497 struct kmem_cache *cachep = slab_rcu->cachep;
1498
1499 kmem_freepages(cachep, slab_rcu->addr);
1500 if (OFF_SLAB(cachep))
1501 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1502}
1503
1504#if DEBUG
1505
1506#ifdef CONFIG_DEBUG_PAGEALLOC
343e0d7a 1507static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
b28a02de 1508 unsigned long caller)
1da177e4 1509{
3dafccf2 1510 int size = obj_size(cachep);
1da177e4 1511
3dafccf2 1512 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1da177e4 1513
b28a02de 1514 if (size < 5 * sizeof(unsigned long))
1515 return;
1516
1517 *addr++ = 0x12345678;
1518 *addr++ = caller;
1519 *addr++ = smp_processor_id();
1520 size -= 3 * sizeof(unsigned long);
1521 {
1522 unsigned long *sptr = &caller;
1523 unsigned long svalue;
1524
1525 while (!kstack_end(sptr)) {
1526 svalue = *sptr++;
1527 if (kernel_text_address(svalue)) {
b28a02de 1528 *addr++ = svalue;
1529 size -= sizeof(unsigned long);
1530 if (size <= sizeof(unsigned long))
1531 break;
1532 }
1533 }
1534
1535 }
b28a02de 1536 *addr++ = 0x87654321;
1537}
1538#endif
1539
343e0d7a 1540static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1da177e4 1541{
1542 int size = obj_size(cachep);
1543 addr = &((char *)addr)[obj_offset(cachep)];
1544
1545 memset(addr, val, size);
b28a02de 1546 *(unsigned char *)(addr + size - 1) = POISON_END;
1547}
1548
1549static void dump_line(char *data, int offset, int limit)
1550{
1551 int i;
1552 printk(KERN_ERR "%03x:", offset);
a737b3e2 1553 for (i = 0; i < limit; i++)
b28a02de 1554 printk(" %02x", (unsigned char)data[offset + i]);
1555 printk("\n");
1556}
1557#endif
1558
1559#if DEBUG
1560
343e0d7a 1561static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1562{
1563 int i, size;
1564 char *realobj;
1565
1566 if (cachep->flags & SLAB_RED_ZONE) {
1567 printk(KERN_ERR "Redzone: 0x%lx/0x%lx.\n",
1568 *dbg_redzone1(cachep, objp),
1569 *dbg_redzone2(cachep, objp));
1570 }
1571
1572 if (cachep->flags & SLAB_STORE_USER) {
1573 printk(KERN_ERR "Last user: [<%p>]",
a737b3e2 1574 *dbg_userword(cachep, objp));
1da177e4 1575 print_symbol("(%s)",
a737b3e2 1576 (unsigned long)*dbg_userword(cachep, objp));
1577 printk("\n");
1578 }
1579 realobj = (char *)objp + obj_offset(cachep);
1580 size = obj_size(cachep);
b28a02de 1581 for (i = 0; i < size && lines; i += 16, lines--) {
1582 int limit;
1583 limit = 16;
1584 if (i + limit > size)
1585 limit = size - i;
1586 dump_line(realobj, i, limit);
1587 }
1588}
1589
343e0d7a 1590static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1591{
1592 char *realobj;
1593 int size, i;
1594 int lines = 0;
1595
1596 realobj = (char *)objp + obj_offset(cachep);
1597 size = obj_size(cachep);
1da177e4 1598
b28a02de 1599 for (i = 0; i < size; i++) {
1da177e4 1600 char exp = POISON_FREE;
b28a02de 1601 if (i == size - 1)
1602 exp = POISON_END;
1603 if (realobj[i] != exp) {
1604 int limit;
1605 /* Mismatch ! */
1606 /* Print header */
1607 if (lines == 0) {
b28a02de 1608 printk(KERN_ERR
1609 "Slab corruption: start=%p, len=%d\n",
1610 realobj, size);
1611 print_objinfo(cachep, objp, 0);
1612 }
1613 /* Hexdump the affected line */
b28a02de 1614 i = (i / 16) * 16;
1da177e4 1615 limit = 16;
1616 if (i + limit > size)
1617 limit = size - i;
1618 dump_line(realobj, i, limit);
1619 i += 16;
1620 lines++;
1621 /* Limit to 5 lines */
1622 if (lines > 5)
1623 break;
1624 }
1625 }
1626 if (lines != 0) {
1627 /* Print some data about the neighboring objects, if they
1628 * exist:
1629 */
6ed5eb22 1630 struct slab *slabp = virt_to_slab(objp);
8fea4e96 1631 unsigned int objnr;
1da177e4 1632
8fea4e96 1633 objnr = obj_to_index(cachep, slabp, objp);
1da177e4 1634 if (objnr) {
8fea4e96 1635 objp = index_to_obj(cachep, slabp, objnr - 1);
3dafccf2 1636 realobj = (char *)objp + obj_offset(cachep);
1da177e4 1637 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
b28a02de 1638 realobj, size);
1639 print_objinfo(cachep, objp, 2);
1640 }
b28a02de 1641 if (objnr + 1 < cachep->num) {
8fea4e96 1642 objp = index_to_obj(cachep, slabp, objnr + 1);
3dafccf2 1643 realobj = (char *)objp + obj_offset(cachep);
1da177e4 1644 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
b28a02de 1645 realobj, size);
1646 print_objinfo(cachep, objp, 2);
1647 }
1648 }
1649}
1650#endif
1651
1652#if DEBUG
1653/**
1654 * slab_destroy_objs - destroy a slab and its objects
1655 * @cachep: cache pointer being destroyed
1656 * @slabp: slab pointer being destroyed
1657 *
1658 * Call the registered destructor for each object in a slab that is being
1659 * destroyed.
1da177e4 1660 */
343e0d7a 1661static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
1da177e4 1662{
1663 int i;
1664 for (i = 0; i < cachep->num; i++) {
8fea4e96 1665 void *objp = index_to_obj(cachep, slabp, i);
1666
1667 if (cachep->flags & SLAB_POISON) {
1668#ifdef CONFIG_DEBUG_PAGEALLOC
1669 if (cachep->buffer_size % PAGE_SIZE == 0 &&
1670 OFF_SLAB(cachep))
b28a02de 1671 kernel_map_pages(virt_to_page(objp),
a737b3e2 1672 cachep->buffer_size / PAGE_SIZE, 1);
1673 else
1674 check_poison_obj(cachep, objp);
1675#else
1676 check_poison_obj(cachep, objp);
1677#endif
1678 }
1679 if (cachep->flags & SLAB_RED_ZONE) {
1680 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1681 slab_error(cachep, "start of a freed object "
b28a02de 1682 "was overwritten");
1da177e4
LT
1683 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1684 slab_error(cachep, "end of a freed object "
b28a02de 1685 "was overwritten");
1da177e4
LT
1686 }
1687 if (cachep->dtor && !(cachep->flags & SLAB_POISON))
3dafccf2 1688 (cachep->dtor) (objp + obj_offset(cachep), cachep, 0);
1da177e4 1689 }
12dd36fa 1690}
1da177e4 1691#else
343e0d7a 1692static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
12dd36fa 1693{
1da177e4
LT
1694 if (cachep->dtor) {
1695 int i;
1696 for (i = 0; i < cachep->num; i++) {
8fea4e96 1697 void *objp = index_to_obj(cachep, slabp, i);
b28a02de 1698 (cachep->dtor) (objp, cachep, 0);
1da177e4
LT
1699 }
1700 }
12dd36fa 1701}
1da177e4
LT
1702#endif
1703
911851e6
RD
1704/**
1705 * slab_destroy - destroy and release all objects in a slab
1706 * @cachep: cache pointer being destroyed
1707 * @slabp: slab pointer being destroyed
1708 *
12dd36fa 1709 * Destroy all the objs in a slab, and release the mem back to the system.
a737b3e2
AM
1710 * Before calling the slab must have been unlinked from the cache. The
1711 * cache-lock is not held/needed.
12dd36fa 1712 */
343e0d7a 1713static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
12dd36fa
MD
1714{
1715 void *addr = slabp->s_mem - slabp->colouroff;
1716
1717 slab_destroy_objs(cachep, slabp);
1da177e4
LT
1718 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1719 struct slab_rcu *slab_rcu;
1720
b28a02de 1721 slab_rcu = (struct slab_rcu *)slabp;
1da177e4
LT
1722 slab_rcu->cachep = cachep;
1723 slab_rcu->addr = addr;
1724 call_rcu(&slab_rcu->head, kmem_rcu_free);
1725 } else {
1726 kmem_freepages(cachep, addr);
1727 if (OFF_SLAB(cachep))
1728 kmem_cache_free(cachep->slabp_cache, slabp);
1729 }
1730}
1731
a737b3e2
AM
1732/*
1733 * For setting up all the kmem_list3s for cache whose buffer_size is same as
1734 * size of kmem_list3.
1735 */
343e0d7a 1736static void set_up_list3s(struct kmem_cache *cachep, int index)
e498be7d
CL
1737{
1738 int node;
1739
1740 for_each_online_node(node) {
b28a02de 1741 cachep->nodelists[node] = &initkmem_list3[index + node];
e498be7d 1742 cachep->nodelists[node]->next_reap = jiffies +
b28a02de
PE
1743 REAPTIMEOUT_LIST3 +
1744 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
e498be7d
CL
1745 }
1746}
1747
4d268eba 1748/**
a70773dd
RD
1749 * calculate_slab_order - calculate size (page order) of slabs
1750 * @cachep: pointer to the cache that is being created
1751 * @size: size of objects to be created in this cache.
1752 * @align: required alignment for the objects.
1753 * @flags: slab allocation flags
1754 *
1755 * Also calculates the number of objects per slab.
4d268eba
PE
1756 *
1757 * This could be made much more intelligent. For now, try to avoid using
1758 * high order pages for slabs. When the gfp() functions are more friendly
1759 * towards high-order requests, this should be changed.
1760 */
a737b3e2 1761static size_t calculate_slab_order(struct kmem_cache *cachep,
ee13d785 1762 size_t size, size_t align, unsigned long flags)
4d268eba
PE
1763{
1764 size_t left_over = 0;
9888e6fa 1765 int gfporder;
4d268eba 1766
a737b3e2 1767 for (gfporder = 0; gfporder <= MAX_GFP_ORDER; gfporder++) {
4d268eba
PE
1768 unsigned int num;
1769 size_t remainder;
1770
9888e6fa 1771 cache_estimate(gfporder, size, align, flags, &remainder, &num);
4d268eba
PE
1772 if (!num)
1773 continue;
9888e6fa 1774
4d268eba 1775 /* More than offslab_limit objects will cause problems */
9888e6fa 1776 if ((flags & CFLGS_OFF_SLAB) && num > offslab_limit)
4d268eba
PE
1777 break;
1778
9888e6fa 1779 /* Found something acceptable - save it away */
4d268eba 1780 cachep->num = num;
9888e6fa 1781 cachep->gfporder = gfporder;
4d268eba
PE
1782 left_over = remainder;
1783
f78bb8ad
LT
1784 /*
1785 * A VFS-reclaimable slab tends to have most allocations
1786 * as GFP_NOFS and we really don't want to have to be allocating
1787 * higher-order pages when we are unable to shrink dcache.
1788 */
1789 if (flags & SLAB_RECLAIM_ACCOUNT)
1790 break;
1791
4d268eba
PE
1792 /*
1793 * Large number of objects is good, but very large slabs are
1794 * currently bad for the gfp()s.
1795 */
9888e6fa 1796 if (gfporder >= slab_break_gfp_order)
4d268eba
PE
1797 break;
1798
9888e6fa
LT
1799 /*
1800 * Acceptable internal fragmentation?
1801 */
a737b3e2 1802 if (left_over * 8 <= (PAGE_SIZE << gfporder))
4d268eba
PE
1803 break;
1804 }
1805 return left_over;
1806}
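/*
 * Illustrative worked example (not part of the original source), ignoring
 * the slab-management overhead that cache_estimate() also accounts for:
 * with 4 KB pages and 1500-byte objects, gfporder 0 holds two objects
 * with roughly 1096 bytes left over; 1096 * 8 = 8768 > 4096, so the loop
 * keeps going.  At gfporder 1 five objects fit with roughly 692 bytes
 * left over, and 692 * 8 = 5536 <= 8192, so order 1 is accepted.
 */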
1807
f30cf7d1
PE
1808static void setup_cpu_cache(struct kmem_cache *cachep)
1809{
1810 if (g_cpucache_up == FULL) {
1811 enable_cpucache(cachep);
1812 return;
1813 }
1814 if (g_cpucache_up == NONE) {
1815 /*
1816 * Note: the first kmem_cache_create must create the cache
1817 * that's used by kmalloc(24), otherwise the creation of
1818 * further caches will BUG().
1819 */
1820 cachep->array[smp_processor_id()] = &initarray_generic.cache;
1821
1822 /*
1823 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
1824 * the first cache, then we need to set up all its list3s,
1825 * otherwise the creation of further caches will BUG().
1826 */
1827 set_up_list3s(cachep, SIZE_AC);
1828 if (INDEX_AC == INDEX_L3)
1829 g_cpucache_up = PARTIAL_L3;
1830 else
1831 g_cpucache_up = PARTIAL_AC;
1832 } else {
1833 cachep->array[smp_processor_id()] =
1834 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1835
1836 if (g_cpucache_up == PARTIAL_AC) {
1837 set_up_list3s(cachep, SIZE_L3);
1838 g_cpucache_up = PARTIAL_L3;
1839 } else {
1840 int node;
1841 for_each_online_node(node) {
1842 cachep->nodelists[node] =
1843 kmalloc_node(sizeof(struct kmem_list3),
1844 GFP_KERNEL, node);
1845 BUG_ON(!cachep->nodelists[node]);
1846 kmem_list3_init(cachep->nodelists[node]);
1847 }
1848 }
1849 }
1850 cachep->nodelists[numa_node_id()]->next_reap =
1851 jiffies + REAPTIMEOUT_LIST3 +
1852 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1853
1854 cpu_cache_get(cachep)->avail = 0;
1855 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
1856 cpu_cache_get(cachep)->batchcount = 1;
1857 cpu_cache_get(cachep)->touched = 0;
1858 cachep->batchcount = 1;
1859 cachep->limit = BOOT_CPUCACHE_ENTRIES;
1860}
1861
1da177e4
LT
1862/**
1863 * kmem_cache_create - Create a cache.
1864 * @name: A string which is used in /proc/slabinfo to identify this cache.
1865 * @size: The size of objects to be created in this cache.
1866 * @align: The required alignment for the objects.
1867 * @flags: SLAB flags
1868 * @ctor: A constructor for the objects.
1869 * @dtor: A destructor for the objects.
1870 *
1871 * Returns a ptr to the cache on success, NULL on failure.
1872 * Cannot be called within an interrupt, but can be interrupted.
1873 * The @ctor is run when new pages are allocated by the cache
1874 * and the @dtor is run before the pages are handed back.
1875 *
1876 * @name must be valid until the cache is destroyed. This implies that
a737b3e2
AM
1877 * the module calling this has to destroy the cache before getting unloaded.
1878 *
1da177e4
LT
1879 * The flags are
1880 *
1881 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
1882 * to catch references to uninitialised memory.
1883 *
1884 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
1885 * for buffer overruns.
1886 *
1da177e4
LT
1887 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
1888 * cacheline. This can be beneficial if you're counting cycles as closely
1889 * as davem.
1890 */
343e0d7a 1891struct kmem_cache *
1da177e4 1892kmem_cache_create (const char *name, size_t size, size_t align,
a737b3e2
AM
1893 unsigned long flags,
1894 void (*ctor)(void*, struct kmem_cache *, unsigned long),
343e0d7a 1895 void (*dtor)(void*, struct kmem_cache *, unsigned long))
1da177e4
LT
1896{
1897 size_t left_over, slab_size, ralign;
343e0d7a 1898 struct kmem_cache *cachep = NULL;
4f12bb4f 1899 struct list_head *p;
1da177e4
LT
1900
1901 /*
1902 * Sanity checks... these are all serious usage bugs.
1903 */
a737b3e2 1904 if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
b28a02de 1905 (size > (1 << MAX_OBJ_ORDER) * PAGE_SIZE) || (dtor && !ctor)) {
a737b3e2
AM
1906 printk(KERN_ERR "%s: Early error in slab %s\n", __FUNCTION__,
1907 name);
b28a02de
PE
1908 BUG();
1909 }
1da177e4 1910
f0188f47
RT
1911 /*
1912 * Prevent CPUs from coming and going.
1913 * lock_cpu_hotplug() nests outside cache_chain_mutex
1914 */
1915 lock_cpu_hotplug();
1916
fc0abb14 1917 mutex_lock(&cache_chain_mutex);
4f12bb4f
AM
1918
1919 list_for_each(p, &cache_chain) {
343e0d7a 1920 struct kmem_cache *pc = list_entry(p, struct kmem_cache, next);
4f12bb4f
AM
1921 mm_segment_t old_fs = get_fs();
1922 char tmp;
1923 int res;
1924
1925 /*
1926 * This happens when the module gets unloaded and doesn't
1927 * destroy its slab cache and no-one else reuses the vmalloc
1928 * area of the module. Print a warning.
1929 */
1930 set_fs(KERNEL_DS);
1931 res = __get_user(tmp, pc->name);
1932 set_fs(old_fs);
1933 if (res) {
1934 printk("SLAB: cache with size %d has lost its name\n",
3dafccf2 1935 pc->buffer_size);
4f12bb4f
AM
1936 continue;
1937 }
1938
b28a02de 1939 if (!strcmp(pc->name, name)) {
4f12bb4f
AM
1940 printk("kmem_cache_create: duplicate cache %s\n", name);
1941 dump_stack();
1942 goto oops;
1943 }
1944 }
1945
1da177e4
LT
1946#if DEBUG
1947 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
1948 if ((flags & SLAB_DEBUG_INITIAL) && !ctor) {
1949 /* No constructor, but initial state check requested */
1950 printk(KERN_ERR "%s: No con, but init state check "
b28a02de 1951 "requested - %s\n", __FUNCTION__, name);
1da177e4
LT
1952 flags &= ~SLAB_DEBUG_INITIAL;
1953 }
1da177e4
LT
1954#if FORCED_DEBUG
1955 /*
1956 * Enable redzoning and last user accounting, except for caches with
1957 * large objects, if the increased size would increase the object size
1958 * above the next power of two: caches with object sizes just above a
1959 * power of two have a significant amount of internal fragmentation.
1960 */
a737b3e2 1961 if (size < 4096 || fls(size - 1) == fls(size-1 + 3 * BYTES_PER_WORD))
b28a02de 1962 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1da177e4
LT
1963 if (!(flags & SLAB_DESTROY_BY_RCU))
1964 flags |= SLAB_POISON;
1965#endif
1966 if (flags & SLAB_DESTROY_BY_RCU)
1967 BUG_ON(flags & SLAB_POISON);
1968#endif
1969 if (flags & SLAB_DESTROY_BY_RCU)
1970 BUG_ON(dtor);
1971
1972 /*
a737b3e2
AM
1973 * Always checks flags, a caller might be expecting debug support which
1974 * isn't available.
1da177e4 1975 */
40094fa6 1976 BUG_ON(flags & ~CREATE_MASK);
1da177e4 1977
a737b3e2
AM
1978 /*
1979 * Check that size is in terms of words. This is needed to avoid
1da177e4
LT
1980 * unaligned accesses for some archs when redzoning is used, and makes
1981 * sure any on-slab bufctl's are also correctly aligned.
1982 */
b28a02de
PE
1983 if (size & (BYTES_PER_WORD - 1)) {
1984 size += (BYTES_PER_WORD - 1);
1985 size &= ~(BYTES_PER_WORD - 1);
1da177e4
LT
1986 }
1987
a737b3e2
AM
1988 /* calculate the final buffer alignment: */
1989
1da177e4
LT
1990 /* 1) arch recommendation: can be overridden for debug */
1991 if (flags & SLAB_HWCACHE_ALIGN) {
a737b3e2
AM
1992 /*
1993 * Default alignment: as specified by the arch code. Except if
1994 * an object is really small, then squeeze multiple objects into
1995 * one cacheline.
1da177e4
LT
1996 */
1997 ralign = cache_line_size();
b28a02de 1998 while (size <= ralign / 2)
1da177e4
LT
1999 ralign /= 2;
2000 } else {
2001 ralign = BYTES_PER_WORD;
2002 }
2003 /* 2) arch mandated alignment: disables debug if necessary */
2004 if (ralign < ARCH_SLAB_MINALIGN) {
2005 ralign = ARCH_SLAB_MINALIGN;
2006 if (ralign > BYTES_PER_WORD)
b28a02de 2007 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1da177e4
LT
2008 }
2009 /* 3) caller mandated alignment: disables debug if necessary */
2010 if (ralign < align) {
2011 ralign = align;
2012 if (ralign > BYTES_PER_WORD)
b28a02de 2013 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
1da177e4 2014 }
a737b3e2
AM
2015 /*
2016 * 4) Store it. Note that the debug code below can reduce
1da177e4
LT
2017 * the alignment to BYTES_PER_WORD.
2018 */
2019 align = ralign;
2020
2021 /* Get cache's description obj. */
c5e3b83e 2022 cachep = kmem_cache_zalloc(&cache_cache, SLAB_KERNEL);
1da177e4 2023 if (!cachep)
4f12bb4f 2024 goto oops;
1da177e4
LT
2025
2026#if DEBUG
3dafccf2 2027 cachep->obj_size = size;
1da177e4
LT
2028
2029 if (flags & SLAB_RED_ZONE) {
2030 /* redzoning only works with word aligned caches */
2031 align = BYTES_PER_WORD;
2032
2033 /* add space for red zone words */
3dafccf2 2034 cachep->obj_offset += BYTES_PER_WORD;
b28a02de 2035 size += 2 * BYTES_PER_WORD;
1da177e4
LT
2036 }
2037 if (flags & SLAB_STORE_USER) {
2038 /* user store requires word alignment and
2039 * one word storage behind the end of the real
2040 * object.
2041 */
2042 align = BYTES_PER_WORD;
2043 size += BYTES_PER_WORD;
2044 }
2045#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
b28a02de 2046 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
3dafccf2
MS
2047 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2048 cachep->obj_offset += PAGE_SIZE - size;
1da177e4
LT
2049 size = PAGE_SIZE;
2050 }
2051#endif
2052#endif
2053
2054 /* Determine if the slab management is 'on' or 'off' slab. */
b28a02de 2055 if (size >= (PAGE_SIZE >> 3))
1da177e4
LT
2056 /*
2057 * Size is large, assume best to place the slab management obj
2058 * off-slab (should allow better packing of objs).
2059 */
2060 flags |= CFLGS_OFF_SLAB;
2061
2062 size = ALIGN(size, align);
2063
f78bb8ad 2064 left_over = calculate_slab_order(cachep, size, align, flags);
1da177e4
LT
2065
2066 if (!cachep->num) {
2067 printk("kmem_cache_create: couldn't create cache %s.\n", name);
2068 kmem_cache_free(&cache_cache, cachep);
2069 cachep = NULL;
4f12bb4f 2070 goto oops;
1da177e4 2071 }
b28a02de
PE
2072 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2073 + sizeof(struct slab), align);
1da177e4
LT
2074
2075 /*
2076 * If the slab has been placed off-slab, and we have enough space then
2077 * move it on-slab. This is at the expense of any extra colouring.
2078 */
2079 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2080 flags &= ~CFLGS_OFF_SLAB;
2081 left_over -= slab_size;
2082 }
2083
2084 if (flags & CFLGS_OFF_SLAB) {
2085 /* really off slab. No need for manual alignment */
b28a02de
PE
2086 slab_size =
2087 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
1da177e4
LT
2088 }
2089
2090 cachep->colour_off = cache_line_size();
2091 /* Offset must be a multiple of the alignment. */
2092 if (cachep->colour_off < align)
2093 cachep->colour_off = align;
b28a02de 2094 cachep->colour = left_over / cachep->colour_off;
1da177e4
LT
2095 cachep->slab_size = slab_size;
2096 cachep->flags = flags;
2097 cachep->gfpflags = 0;
2098 if (flags & SLAB_CACHE_DMA)
2099 cachep->gfpflags |= GFP_DMA;
3dafccf2 2100 cachep->buffer_size = size;
1da177e4
LT
2101
2102 if (flags & CFLGS_OFF_SLAB)
b2d55073 2103 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
1da177e4
LT
2104 cachep->ctor = ctor;
2105 cachep->dtor = dtor;
2106 cachep->name = name;
2107
1da177e4 2108
f30cf7d1 2109 setup_cpu_cache(cachep);
1da177e4 2110
1da177e4
LT
2111 /* cache setup completed, link it into the list */
2112 list_add(&cachep->next, &cache_chain);
a737b3e2 2113oops:
1da177e4
LT
2114 if (!cachep && (flags & SLAB_PANIC))
2115 panic("kmem_cache_create(): failed to create slab `%s'\n",
b28a02de 2116 name);
fc0abb14 2117 mutex_unlock(&cache_chain_mutex);
f0188f47 2118 unlock_cpu_hotplug();
1da177e4
LT
2119 return cachep;
2120}
2121EXPORT_SYMBOL(kmem_cache_create);
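/*
 * Illustrative usage sketch (not part of the original file).  The names
 * foo, foo_ctor and foo_cache are hypothetical.  A cache is typically
 * created once, e.g. from a module init path, and objects are then taken
 * from it with kmem_cache_alloc() and returned with kmem_cache_free().
 */
#if 0
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>

struct foo {
        int id;
        struct list_head list;
};

static struct kmem_cache *foo_cache;

/* Constructor: runs whenever the cache populates a fresh slab of objects. */
static void foo_ctor(void *obj, struct kmem_cache *cachep, unsigned long flags)
{
        struct foo *f = obj;

        f->id = -1;
        INIT_LIST_HEAD(&f->list);
}

static int __init foo_init(void)
{
        foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
                                      0, SLAB_HWCACHE_ALIGN, foo_ctor, NULL);
        if (!foo_cache)
                return -ENOMEM;
        return 0;
}
#endif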
2122
2123#if DEBUG
2124static void check_irq_off(void)
2125{
2126 BUG_ON(!irqs_disabled());
2127}
2128
2129static void check_irq_on(void)
2130{
2131 BUG_ON(irqs_disabled());
2132}
2133
343e0d7a 2134static void check_spinlock_acquired(struct kmem_cache *cachep)
1da177e4
LT
2135{
2136#ifdef CONFIG_SMP
2137 check_irq_off();
e498be7d 2138 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
1da177e4
LT
2139#endif
2140}
e498be7d 2141
343e0d7a 2142static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
e498be7d
CL
2143{
2144#ifdef CONFIG_SMP
2145 check_irq_off();
2146 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2147#endif
2148}
2149
1da177e4
LT
2150#else
2151#define check_irq_off() do { } while(0)
2152#define check_irq_on() do { } while(0)
2153#define check_spinlock_acquired(x) do { } while(0)
e498be7d 2154#define check_spinlock_acquired_node(x, y) do { } while(0)
1da177e4
LT
2155#endif
2156
aab2207c
CL
2157static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2158 struct array_cache *ac,
2159 int force, int node);
2160
1da177e4
LT
2161static void do_drain(void *arg)
2162{
a737b3e2 2163 struct kmem_cache *cachep = arg;
1da177e4 2164 struct array_cache *ac;
ff69416e 2165 int node = numa_node_id();
1da177e4
LT
2166
2167 check_irq_off();
9a2dba4b 2168 ac = cpu_cache_get(cachep);
ff69416e
CL
2169 spin_lock(&cachep->nodelists[node]->list_lock);
2170 free_block(cachep, ac->entry, ac->avail, node);
2171 spin_unlock(&cachep->nodelists[node]->list_lock);
1da177e4
LT
2172 ac->avail = 0;
2173}
2174
343e0d7a 2175static void drain_cpu_caches(struct kmem_cache *cachep)
1da177e4 2176{
e498be7d
CL
2177 struct kmem_list3 *l3;
2178 int node;
2179
a07fa394 2180 on_each_cpu(do_drain, cachep, 1, 1);
1da177e4 2181 check_irq_on();
b28a02de 2182 for_each_online_node(node) {
e498be7d
CL
2183 l3 = cachep->nodelists[node];
2184 if (l3) {
aab2207c 2185 drain_array(cachep, l3, l3->shared, 1, node);
e498be7d 2186 if (l3->alien)
4484ebf1 2187 drain_alien_cache(cachep, l3->alien);
e498be7d
CL
2188 }
2189 }
1da177e4
LT
2190}
2191
343e0d7a 2192static int __node_shrink(struct kmem_cache *cachep, int node)
1da177e4
LT
2193{
2194 struct slab *slabp;
e498be7d 2195 struct kmem_list3 *l3 = cachep->nodelists[node];
1da177e4
LT
2196 int ret;
2197
e498be7d 2198 for (;;) {
1da177e4
LT
2199 struct list_head *p;
2200
e498be7d
CL
2201 p = l3->slabs_free.prev;
2202 if (p == &l3->slabs_free)
1da177e4
LT
2203 break;
2204
e498be7d 2205 slabp = list_entry(l3->slabs_free.prev, struct slab, list);
1da177e4 2206#if DEBUG
40094fa6 2207 BUG_ON(slabp->inuse);
1da177e4
LT
2208#endif
2209 list_del(&slabp->list);
2210
e498be7d
CL
2211 l3->free_objects -= cachep->num;
2212 spin_unlock_irq(&l3->list_lock);
1da177e4 2213 slab_destroy(cachep, slabp);
e498be7d 2214 spin_lock_irq(&l3->list_lock);
1da177e4 2215 }
b28a02de 2216 ret = !list_empty(&l3->slabs_full) || !list_empty(&l3->slabs_partial);
1da177e4
LT
2217 return ret;
2218}
2219
343e0d7a 2220static int __cache_shrink(struct kmem_cache *cachep)
e498be7d
CL
2221{
2222 int ret = 0, i = 0;
2223 struct kmem_list3 *l3;
2224
2225 drain_cpu_caches(cachep);
2226
2227 check_irq_on();
2228 for_each_online_node(i) {
2229 l3 = cachep->nodelists[i];
2230 if (l3) {
2231 spin_lock_irq(&l3->list_lock);
2232 ret += __node_shrink(cachep, i);
2233 spin_unlock_irq(&l3->list_lock);
2234 }
2235 }
2236 return (ret ? 1 : 0);
2237}
2238
1da177e4
LT
2239/**
2240 * kmem_cache_shrink - Shrink a cache.
2241 * @cachep: The cache to shrink.
2242 *
2243 * Releases as many slabs as possible for a cache.
2244 * To help debugging, a zero exit status indicates all slabs were released.
2245 */
343e0d7a 2246int kmem_cache_shrink(struct kmem_cache *cachep)
1da177e4 2247{
40094fa6 2248 BUG_ON(!cachep || in_interrupt());
1da177e4
LT
2249
2250 return __cache_shrink(cachep);
2251}
2252EXPORT_SYMBOL(kmem_cache_shrink);
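/*
 * Illustrative sketch (not part of the original file): shrinking a cache
 * from a hypothetical housekeeping path after many objects have been
 * freed.  A zero return means every slab could be released.
 */
#if 0
static void foo_trim(struct kmem_cache *foo_cache)
{
        if (kmem_cache_shrink(foo_cache) == 0)
                printk(KERN_DEBUG "foo_cache: all slabs released\n");
}
#endif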
2253
2254/**
2255 * kmem_cache_destroy - delete a cache
2256 * @cachep: the cache to destroy
2257 *
343e0d7a 2258 * Remove a struct kmem_cache object from the slab cache.
1da177e4
LT
2259 * Returns 0 on success.
2260 *
2261 * It is expected this function will be called by a module when it is
2262 * unloaded. This will remove the cache completely, and avoid a duplicate
2263 * cache being allocated each time a module is loaded and unloaded, if the
2264 * module doesn't have persistent in-kernel storage across loads and unloads.
2265 *
2266 * The cache must be empty before calling this function.
2267 *
2268 * The caller must guarantee that no one will allocate memory from the cache
2269 * during the kmem_cache_destroy().
2270 */
343e0d7a 2271int kmem_cache_destroy(struct kmem_cache *cachep)
1da177e4
LT
2272{
2273 int i;
e498be7d 2274 struct kmem_list3 *l3;
1da177e4 2275
40094fa6 2276 BUG_ON(!cachep || in_interrupt());
1da177e4
LT
2277
2278 /* Don't let CPUs to come and go */
2279 lock_cpu_hotplug();
2280
2281 /* Find the cache in the chain of caches. */
fc0abb14 2282 mutex_lock(&cache_chain_mutex);
1da177e4
LT
2283 /*
2284 * the chain is never empty, cache_cache is never destroyed
2285 */
2286 list_del(&cachep->next);
fc0abb14 2287 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
2288
2289 if (__cache_shrink(cachep)) {
2290 slab_error(cachep, "Can't free all objects");
fc0abb14 2291 mutex_lock(&cache_chain_mutex);
b28a02de 2292 list_add(&cachep->next, &cache_chain);
fc0abb14 2293 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
2294 unlock_cpu_hotplug();
2295 return 1;
2296 }
2297
2298 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
fbd568a3 2299 synchronize_rcu();
1da177e4 2300
e498be7d 2301 for_each_online_cpu(i)
b28a02de 2302 kfree(cachep->array[i]);
1da177e4
LT
2303
2304 /* NUMA: free the list3 structures */
e498be7d 2305 for_each_online_node(i) {
a737b3e2
AM
2306 l3 = cachep->nodelists[i];
2307 if (l3) {
e498be7d
CL
2308 kfree(l3->shared);
2309 free_alien_cache(l3->alien);
2310 kfree(l3);
2311 }
2312 }
1da177e4 2313 kmem_cache_free(&cache_cache, cachep);
1da177e4 2314 unlock_cpu_hotplug();
1da177e4
LT
2315 return 0;
2316}
2317EXPORT_SYMBOL(kmem_cache_destroy);
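/*
 * Illustrative sketch (not part of the original file): the usual pairing
 * is to destroy the cache from the module exit path, after every object
 * has been returned with kmem_cache_free().  foo_cache is hypothetical.
 */
#if 0
static void __exit foo_exit(void)
{
        if (kmem_cache_destroy(foo_cache))
                printk(KERN_ERR "foo_cache: objects still in use\n");
}
#endif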
2318
2319/* Get the memory for a slab management obj. */
343e0d7a 2320static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
5b74ada7
RT
2321 int colour_off, gfp_t local_flags,
2322 int nodeid)
1da177e4
LT
2323{
2324 struct slab *slabp;
b28a02de 2325
1da177e4
LT
2326 if (OFF_SLAB(cachep)) {
2327 /* Slab management obj is off-slab. */
5b74ada7
RT
2328 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2329 local_flags, nodeid);
1da177e4
LT
2330 if (!slabp)
2331 return NULL;
2332 } else {
b28a02de 2333 slabp = objp + colour_off;
1da177e4
LT
2334 colour_off += cachep->slab_size;
2335 }
2336 slabp->inuse = 0;
2337 slabp->colouroff = colour_off;
b28a02de 2338 slabp->s_mem = objp + colour_off;
5b74ada7 2339 slabp->nodeid = nodeid;
1da177e4
LT
2340 return slabp;
2341}
2342
2343static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2344{
b28a02de 2345 return (kmem_bufctl_t *) (slabp + 1);
1da177e4
LT
2346}
2347
343e0d7a 2348static void cache_init_objs(struct kmem_cache *cachep,
b28a02de 2349 struct slab *slabp, unsigned long ctor_flags)
1da177e4
LT
2350{
2351 int i;
2352
2353 for (i = 0; i < cachep->num; i++) {
8fea4e96 2354 void *objp = index_to_obj(cachep, slabp, i);
1da177e4
LT
2355#if DEBUG
2356 /* need to poison the objs? */
2357 if (cachep->flags & SLAB_POISON)
2358 poison_obj(cachep, objp, POISON_FREE);
2359 if (cachep->flags & SLAB_STORE_USER)
2360 *dbg_userword(cachep, objp) = NULL;
2361
2362 if (cachep->flags & SLAB_RED_ZONE) {
2363 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2364 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2365 }
2366 /*
a737b3e2
AM
2367 * Constructors are not allowed to allocate memory from the same
2368 * cache which they are a constructor for. Otherwise, deadlock.
2369 * They must also be threaded.
1da177e4
LT
2370 */
2371 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
3dafccf2 2372 cachep->ctor(objp + obj_offset(cachep), cachep,
b28a02de 2373 ctor_flags);
1da177e4
LT
2374
2375 if (cachep->flags & SLAB_RED_ZONE) {
2376 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2377 slab_error(cachep, "constructor overwrote the"
b28a02de 2378 " end of an object");
1da177e4
LT
2379 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2380 slab_error(cachep, "constructor overwrote the"
b28a02de 2381 " start of an object");
1da177e4 2382 }
a737b3e2
AM
2383 if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2384 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
b28a02de 2385 kernel_map_pages(virt_to_page(objp),
3dafccf2 2386 cachep->buffer_size / PAGE_SIZE, 0);
1da177e4
LT
2387#else
2388 if (cachep->ctor)
2389 cachep->ctor(objp, cachep, ctor_flags);
2390#endif
b28a02de 2391 slab_bufctl(slabp)[i] = i + 1;
1da177e4 2392 }
b28a02de 2393 slab_bufctl(slabp)[i - 1] = BUFCTL_END;
1da177e4
LT
2394 slabp->free = 0;
2395}
2396
343e0d7a 2397static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
1da177e4 2398{
a737b3e2
AM
2399 if (flags & SLAB_DMA)
2400 BUG_ON(!(cachep->gfpflags & GFP_DMA));
2401 else
2402 BUG_ON(cachep->gfpflags & GFP_DMA);
1da177e4
LT
2403}
2404
a737b3e2
AM
2405static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2406 int nodeid)
78d382d7 2407{
8fea4e96 2408 void *objp = index_to_obj(cachep, slabp, slabp->free);
78d382d7
MD
2409 kmem_bufctl_t next;
2410
2411 slabp->inuse++;
2412 next = slab_bufctl(slabp)[slabp->free];
2413#if DEBUG
2414 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2415 WARN_ON(slabp->nodeid != nodeid);
2416#endif
2417 slabp->free = next;
2418
2419 return objp;
2420}
2421
a737b3e2
AM
2422static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2423 void *objp, int nodeid)
78d382d7 2424{
8fea4e96 2425 unsigned int objnr = obj_to_index(cachep, slabp, objp);
78d382d7
MD
2426
2427#if DEBUG
2428 /* Verify that the slab belongs to the intended node */
2429 WARN_ON(slabp->nodeid != nodeid);
2430
871751e2 2431 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
78d382d7 2432 printk(KERN_ERR "slab: double free detected in cache "
a737b3e2 2433 "'%s', objp %p\n", cachep->name, objp);
78d382d7
MD
2434 BUG();
2435 }
2436#endif
2437 slab_bufctl(slabp)[objnr] = slabp->free;
2438 slabp->free = objnr;
2439 slabp->inuse--;
2440}
2441
a737b3e2
AM
2442static void set_slab_attr(struct kmem_cache *cachep, struct slab *slabp,
2443 void *objp)
1da177e4
LT
2444{
2445 int i;
2446 struct page *page;
2447
2448 /* Nasty!!!!!! I hope this is OK. */
1da177e4 2449 page = virt_to_page(objp);
84097518
NP
2450
2451 i = 1;
2452 if (likely(!PageCompound(page)))
2453 i <<= cachep->gfporder;
1da177e4 2454 do {
065d41cb
PE
2455 page_set_cache(page, cachep);
2456 page_set_slab(page, slabp);
1da177e4
LT
2457 page++;
2458 } while (--i);
2459}
2460
2461/*
2462 * Grow (by 1) the number of slabs within a cache. This is called by
2463 * kmem_cache_alloc() when there are no active objs left in a cache.
2464 */
343e0d7a 2465static int cache_grow(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1da177e4 2466{
b28a02de
PE
2467 struct slab *slabp;
2468 void *objp;
2469 size_t offset;
2470 gfp_t local_flags;
2471 unsigned long ctor_flags;
e498be7d 2472 struct kmem_list3 *l3;
1da177e4 2473
a737b3e2
AM
2474 /*
2475 * Be lazy and only check for valid flags here, keeping it out of the
2476 * critical path in kmem_cache_alloc().
1da177e4 2477 */
40094fa6 2478 BUG_ON(flags & ~(SLAB_DMA | SLAB_LEVEL_MASK | SLAB_NO_GROW));
1da177e4
LT
2479 if (flags & SLAB_NO_GROW)
2480 return 0;
2481
2482 ctor_flags = SLAB_CTOR_CONSTRUCTOR;
2483 local_flags = (flags & SLAB_LEVEL_MASK);
2484 if (!(local_flags & __GFP_WAIT))
2485 /*
2486 * Not allowed to sleep. Need to tell a constructor about
2487 * this - it might need to know...
2488 */
2489 ctor_flags |= SLAB_CTOR_ATOMIC;
2490
2e1217cf 2491 /* Take the l3 list lock to change the colour_next on this node */
1da177e4 2492 check_irq_off();
2e1217cf
RT
2493 l3 = cachep->nodelists[nodeid];
2494 spin_lock(&l3->list_lock);
1da177e4
LT
2495
2496 /* Get colour for the slab, and calculate the next value. */
2e1217cf
RT
2497 offset = l3->colour_next;
2498 l3->colour_next++;
2499 if (l3->colour_next >= cachep->colour)
2500 l3->colour_next = 0;
2501 spin_unlock(&l3->list_lock);
1da177e4 2502
2e1217cf 2503 offset *= cachep->colour_off;
1da177e4
LT
2504
2505 if (local_flags & __GFP_WAIT)
2506 local_irq_enable();
2507
2508 /*
2509 * The test for missing atomic flag is performed here, rather than
2510 * the more obvious place, simply to reduce the critical path length
2511 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2512 * will eventually be caught here (where it matters).
2513 */
2514 kmem_flagcheck(cachep, flags);
2515
a737b3e2
AM
2516 /*
2517 * Get mem for the objs. Attempt to allocate a physical page from
2518 * 'nodeid'.
e498be7d 2519 */
a737b3e2
AM
2520 objp = kmem_getpages(cachep, flags, nodeid);
2521 if (!objp)
1da177e4
LT
2522 goto failed;
2523
2524 /* Get slab management. */
5b74ada7 2525 slabp = alloc_slabmgmt(cachep, objp, offset, local_flags, nodeid);
a737b3e2 2526 if (!slabp)
1da177e4
LT
2527 goto opps1;
2528
e498be7d 2529 slabp->nodeid = nodeid;
1da177e4
LT
2530 set_slab_attr(cachep, slabp, objp);
2531
2532 cache_init_objs(cachep, slabp, ctor_flags);
2533
2534 if (local_flags & __GFP_WAIT)
2535 local_irq_disable();
2536 check_irq_off();
e498be7d 2537 spin_lock(&l3->list_lock);
1da177e4
LT
2538
2539 /* Make slab active. */
e498be7d 2540 list_add_tail(&slabp->list, &(l3->slabs_free));
1da177e4 2541 STATS_INC_GROWN(cachep);
e498be7d
CL
2542 l3->free_objects += cachep->num;
2543 spin_unlock(&l3->list_lock);
1da177e4 2544 return 1;
a737b3e2 2545opps1:
1da177e4 2546 kmem_freepages(cachep, objp);
a737b3e2 2547failed:
1da177e4
LT
2548 if (local_flags & __GFP_WAIT)
2549 local_irq_disable();
2550 return 0;
2551}
2552
2553#if DEBUG
2554
2555/*
2556 * Perform extra freeing checks:
2557 * - detect bad pointers.
2558 * - POISON/RED_ZONE checking
2559 * - destructor calls, for caches with POISON+dtor
2560 */
2561static void kfree_debugcheck(const void *objp)
2562{
2563 struct page *page;
2564
2565 if (!virt_addr_valid(objp)) {
2566 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
b28a02de
PE
2567 (unsigned long)objp);
2568 BUG();
1da177e4
LT
2569 }
2570 page = virt_to_page(objp);
2571 if (!PageSlab(page)) {
b28a02de
PE
2572 printk(KERN_ERR "kfree_debugcheck: bad ptr %lxh.\n",
2573 (unsigned long)objp);
1da177e4
LT
2574 BUG();
2575 }
2576}
2577
343e0d7a 2578static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
b28a02de 2579 void *caller)
1da177e4
LT
2580{
2581 struct page *page;
2582 unsigned int objnr;
2583 struct slab *slabp;
2584
3dafccf2 2585 objp -= obj_offset(cachep);
1da177e4
LT
2586 kfree_debugcheck(objp);
2587 page = virt_to_page(objp);
2588
065d41cb 2589 if (page_get_cache(page) != cachep) {
a737b3e2
AM
2590 printk(KERN_ERR "mismatch in kmem_cache_free: expected "
2591 "cache %p, got %p\n",
b28a02de 2592 page_get_cache(page), cachep);
1da177e4 2593 printk(KERN_ERR "%p is %s.\n", cachep, cachep->name);
b28a02de
PE
2594 printk(KERN_ERR "%p is %s.\n", page_get_cache(page),
2595 page_get_cache(page)->name);
1da177e4
LT
2596 WARN_ON(1);
2597 }
065d41cb 2598 slabp = page_get_slab(page);
1da177e4
LT
2599
2600 if (cachep->flags & SLAB_RED_ZONE) {
a737b3e2
AM
2601 if (*dbg_redzone1(cachep, objp) != RED_ACTIVE ||
2602 *dbg_redzone2(cachep, objp) != RED_ACTIVE) {
2603 slab_error(cachep, "double free, or memory outside"
2604 " object was overwritten");
2605 printk(KERN_ERR "%p: redzone 1:0x%lx, "
2606 "redzone 2:0x%lx.\n",
b28a02de
PE
2607 objp, *dbg_redzone1(cachep, objp),
2608 *dbg_redzone2(cachep, objp));
1da177e4
LT
2609 }
2610 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2611 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2612 }
2613 if (cachep->flags & SLAB_STORE_USER)
2614 *dbg_userword(cachep, objp) = caller;
2615
8fea4e96 2616 objnr = obj_to_index(cachep, slabp, objp);
1da177e4
LT
2617
2618 BUG_ON(objnr >= cachep->num);
8fea4e96 2619 BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
1da177e4
LT
2620
2621 if (cachep->flags & SLAB_DEBUG_INITIAL) {
a737b3e2
AM
2622 /*
2623 * Need to call the slab's constructor so the caller can
2624 * perform a verify of its state (debugging). Called without
2625 * the cache-lock held.
1da177e4 2626 */
3dafccf2 2627 cachep->ctor(objp + obj_offset(cachep),
b28a02de 2628 cachep, SLAB_CTOR_CONSTRUCTOR | SLAB_CTOR_VERIFY);
1da177e4
LT
2629 }
2630 if (cachep->flags & SLAB_POISON && cachep->dtor) {
2631 /* we want to cache poison the object,
2632 * call the destruction callback
2633 */
3dafccf2 2634 cachep->dtor(objp + obj_offset(cachep), cachep, 0);
1da177e4 2635 }
871751e2
AV
2636#ifdef CONFIG_DEBUG_SLAB_LEAK
2637 slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2638#endif
1da177e4
LT
2639 if (cachep->flags & SLAB_POISON) {
2640#ifdef CONFIG_DEBUG_PAGEALLOC
a737b3e2 2641 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
1da177e4 2642 store_stackinfo(cachep, objp, (unsigned long)caller);
b28a02de 2643 kernel_map_pages(virt_to_page(objp),
3dafccf2 2644 cachep->buffer_size / PAGE_SIZE, 0);
1da177e4
LT
2645 } else {
2646 poison_obj(cachep, objp, POISON_FREE);
2647 }
2648#else
2649 poison_obj(cachep, objp, POISON_FREE);
2650#endif
2651 }
2652 return objp;
2653}
2654
343e0d7a 2655static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
1da177e4
LT
2656{
2657 kmem_bufctl_t i;
2658 int entries = 0;
b28a02de 2659
1da177e4
LT
2660 /* Check slab's freelist to see if this obj is there. */
2661 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2662 entries++;
2663 if (entries > cachep->num || i >= cachep->num)
2664 goto bad;
2665 }
2666 if (entries != cachep->num - slabp->inuse) {
a737b3e2
AM
2667bad:
2668 printk(KERN_ERR "slab: Internal list corruption detected in "
2669 "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2670 cachep->name, cachep->num, slabp, slabp->inuse);
b28a02de 2671 for (i = 0;
264132bc 2672 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
b28a02de 2673 i++) {
a737b3e2 2674 if (i % 16 == 0)
1da177e4 2675 printk("\n%03x:", i);
b28a02de 2676 printk(" %02x", ((unsigned char *)slabp)[i]);
1da177e4
LT
2677 }
2678 printk("\n");
2679 BUG();
2680 }
2681}
2682#else
2683#define kfree_debugcheck(x) do { } while(0)
2684#define cache_free_debugcheck(x,objp,z) (objp)
2685#define check_slabp(x,y) do { } while(0)
2686#endif
2687
343e0d7a 2688static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
1da177e4
LT
2689{
2690 int batchcount;
2691 struct kmem_list3 *l3;
2692 struct array_cache *ac;
2693
2694 check_irq_off();
9a2dba4b 2695 ac = cpu_cache_get(cachep);
a737b3e2 2696retry:
1da177e4
LT
2697 batchcount = ac->batchcount;
2698 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
a737b3e2
AM
2699 /*
2700 * If there was little recent activity on this cache, then
2701 * perform only a partial refill. Otherwise we could generate
2702 * refill bouncing.
1da177e4
LT
2703 */
2704 batchcount = BATCHREFILL_LIMIT;
2705 }
e498be7d
CL
2706 l3 = cachep->nodelists[numa_node_id()];
2707
2708 BUG_ON(ac->avail > 0 || !l3);
2709 spin_lock(&l3->list_lock);
1da177e4 2710
3ded175a
CL
2711 /* See if we can refill from the shared array */
2712 if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2713 goto alloc_done;
2714
1da177e4
LT
2715 while (batchcount > 0) {
2716 struct list_head *entry;
2717 struct slab *slabp;
2718 /* Get slab alloc is to come from. */
2719 entry = l3->slabs_partial.next;
2720 if (entry == &l3->slabs_partial) {
2721 l3->free_touched = 1;
2722 entry = l3->slabs_free.next;
2723 if (entry == &l3->slabs_free)
2724 goto must_grow;
2725 }
2726
2727 slabp = list_entry(entry, struct slab, list);
2728 check_slabp(cachep, slabp);
2729 check_spinlock_acquired(cachep);
2730 while (slabp->inuse < cachep->num && batchcount--) {
1da177e4
LT
2731 STATS_INC_ALLOCED(cachep);
2732 STATS_INC_ACTIVE(cachep);
2733 STATS_SET_HIGH(cachep);
2734
78d382d7
MD
2735 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
2736 numa_node_id());
1da177e4
LT
2737 }
2738 check_slabp(cachep, slabp);
2739
2740 /* move slabp to correct slabp list: */
2741 list_del(&slabp->list);
2742 if (slabp->free == BUFCTL_END)
2743 list_add(&slabp->list, &l3->slabs_full);
2744 else
2745 list_add(&slabp->list, &l3->slabs_partial);
2746 }
2747
a737b3e2 2748must_grow:
1da177e4 2749 l3->free_objects -= ac->avail;
a737b3e2 2750alloc_done:
e498be7d 2751 spin_unlock(&l3->list_lock);
1da177e4
LT
2752
2753 if (unlikely(!ac->avail)) {
2754 int x;
e498be7d
CL
2755 x = cache_grow(cachep, flags, numa_node_id());
2756
a737b3e2 2757 /* cache_grow can reenable interrupts, then ac could change. */
9a2dba4b 2758 ac = cpu_cache_get(cachep);
a737b3e2 2759 if (!x && ac->avail == 0) /* no objects in sight? abort */
1da177e4
LT
2760 return NULL;
2761
a737b3e2 2762 if (!ac->avail) /* objects refilled by interrupt? */
1da177e4
LT
2763 goto retry;
2764 }
2765 ac->touched = 1;
e498be7d 2766 return ac->entry[--ac->avail];
1da177e4
LT
2767}
2768
a737b3e2
AM
2769static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2770 gfp_t flags)
1da177e4
LT
2771{
2772 might_sleep_if(flags & __GFP_WAIT);
2773#if DEBUG
2774 kmem_flagcheck(cachep, flags);
2775#endif
2776}
2777
2778#if DEBUG
a737b3e2
AM
2779static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2780 gfp_t flags, void *objp, void *caller)
1da177e4 2781{
b28a02de 2782 if (!objp)
1da177e4 2783 return objp;
b28a02de 2784 if (cachep->flags & SLAB_POISON) {
1da177e4 2785#ifdef CONFIG_DEBUG_PAGEALLOC
3dafccf2 2786 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
b28a02de 2787 kernel_map_pages(virt_to_page(objp),
3dafccf2 2788 cachep->buffer_size / PAGE_SIZE, 1);
1da177e4
LT
2789 else
2790 check_poison_obj(cachep, objp);
2791#else
2792 check_poison_obj(cachep, objp);
2793#endif
2794 poison_obj(cachep, objp, POISON_INUSE);
2795 }
2796 if (cachep->flags & SLAB_STORE_USER)
2797 *dbg_userword(cachep, objp) = caller;
2798
2799 if (cachep->flags & SLAB_RED_ZONE) {
a737b3e2
AM
2800 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
2801 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
2802 slab_error(cachep, "double free, or memory outside"
2803 " object was overwritten");
b28a02de 2804 printk(KERN_ERR
a737b3e2
AM
2805 "%p: redzone 1:0x%lx, redzone 2:0x%lx\n",
2806 objp, *dbg_redzone1(cachep, objp),
2807 *dbg_redzone2(cachep, objp));
1da177e4
LT
2808 }
2809 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2810 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2811 }
871751e2
AV
2812#ifdef CONFIG_DEBUG_SLAB_LEAK
2813 {
2814 struct slab *slabp;
2815 unsigned objnr;
2816
2817 slabp = page_get_slab(virt_to_page(objp));
2818 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
2819 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
2820 }
2821#endif
3dafccf2 2822 objp += obj_offset(cachep);
1da177e4 2823 if (cachep->ctor && cachep->flags & SLAB_POISON) {
b28a02de 2824 unsigned long ctor_flags = SLAB_CTOR_CONSTRUCTOR;
1da177e4
LT
2825
2826 if (!(flags & __GFP_WAIT))
2827 ctor_flags |= SLAB_CTOR_ATOMIC;
2828
2829 cachep->ctor(objp, cachep, ctor_flags);
b28a02de 2830 }
1da177e4
LT
2831 return objp;
2832}
2833#else
2834#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
2835#endif
2836
343e0d7a 2837static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 2838{
b28a02de 2839 void *objp;
1da177e4
LT
2840 struct array_cache *ac;
2841
dc85da15 2842#ifdef CONFIG_NUMA
b2455396 2843 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
c61afb18
PJ
2844 objp = alternate_node_alloc(cachep, flags);
2845 if (objp != NULL)
2846 return objp;
dc85da15
CL
2847 }
2848#endif
2849
5c382300 2850 check_irq_off();
9a2dba4b 2851 ac = cpu_cache_get(cachep);
1da177e4
LT
2852 if (likely(ac->avail)) {
2853 STATS_INC_ALLOCHIT(cachep);
2854 ac->touched = 1;
e498be7d 2855 objp = ac->entry[--ac->avail];
1da177e4
LT
2856 } else {
2857 STATS_INC_ALLOCMISS(cachep);
2858 objp = cache_alloc_refill(cachep, flags);
2859 }
5c382300
AK
2860 return objp;
2861}
2862
a737b3e2
AM
2863static __always_inline void *__cache_alloc(struct kmem_cache *cachep,
2864 gfp_t flags, void *caller)
5c382300
AK
2865{
2866 unsigned long save_flags;
b28a02de 2867 void *objp;
5c382300
AK
2868
2869 cache_alloc_debugcheck_before(cachep, flags);
2870
2871 local_irq_save(save_flags);
2872 objp = ____cache_alloc(cachep, flags);
1da177e4 2873 local_irq_restore(save_flags);
34342e86 2874 objp = cache_alloc_debugcheck_after(cachep, flags, objp,
7fd6b141 2875 caller);
34342e86 2876 prefetchw(objp);
1da177e4
LT
2877 return objp;
2878}
2879
e498be7d 2880#ifdef CONFIG_NUMA
c61afb18 2881/*
b2455396 2882 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
c61afb18
PJ
2883 *
2884 * If we are in_interrupt, then process context, including cpusets and
2885 * mempolicy, may not apply and should not be used for allocation policy.
2886 */
2887static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
2888{
2889 int nid_alloc, nid_here;
2890
2891 if (in_interrupt())
2892 return NULL;
2893 nid_alloc = nid_here = numa_node_id();
2894 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
2895 nid_alloc = cpuset_mem_spread_node();
2896 else if (current->mempolicy)
2897 nid_alloc = slab_node(current->mempolicy);
2898 if (nid_alloc != nid_here)
2899 return __cache_alloc_node(cachep, flags, nid_alloc);
2900 return NULL;
2901}
2902
e498be7d
CL
2903/*
2904 * An interface to enable slab creation on nodeid
1da177e4 2905 */
a737b3e2
AM
2906static void *__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
2907 int nodeid)
e498be7d
CL
2908{
2909 struct list_head *entry;
b28a02de
PE
2910 struct slab *slabp;
2911 struct kmem_list3 *l3;
2912 void *obj;
b28a02de
PE
2913 int x;
2914
2915 l3 = cachep->nodelists[nodeid];
2916 BUG_ON(!l3);
2917
a737b3e2 2918retry:
ca3b9b91 2919 check_irq_off();
b28a02de
PE
2920 spin_lock(&l3->list_lock);
2921 entry = l3->slabs_partial.next;
2922 if (entry == &l3->slabs_partial) {
2923 l3->free_touched = 1;
2924 entry = l3->slabs_free.next;
2925 if (entry == &l3->slabs_free)
2926 goto must_grow;
2927 }
2928
2929 slabp = list_entry(entry, struct slab, list);
2930 check_spinlock_acquired_node(cachep, nodeid);
2931 check_slabp(cachep, slabp);
2932
2933 STATS_INC_NODEALLOCS(cachep);
2934 STATS_INC_ACTIVE(cachep);
2935 STATS_SET_HIGH(cachep);
2936
2937 BUG_ON(slabp->inuse == cachep->num);
2938
78d382d7 2939 obj = slab_get_obj(cachep, slabp, nodeid);
b28a02de
PE
2940 check_slabp(cachep, slabp);
2941 l3->free_objects--;
2942 /* move slabp to correct slabp list: */
2943 list_del(&slabp->list);
2944
a737b3e2 2945 if (slabp->free == BUFCTL_END)
b28a02de 2946 list_add(&slabp->list, &l3->slabs_full);
a737b3e2 2947 else
b28a02de 2948 list_add(&slabp->list, &l3->slabs_partial);
e498be7d 2949
b28a02de
PE
2950 spin_unlock(&l3->list_lock);
2951 goto done;
e498be7d 2952
a737b3e2 2953must_grow:
b28a02de
PE
2954 spin_unlock(&l3->list_lock);
2955 x = cache_grow(cachep, flags, nodeid);
1da177e4 2956
b28a02de
PE
2957 if (!x)
2958 return NULL;
e498be7d 2959
b28a02de 2960 goto retry;
a737b3e2 2961done:
b28a02de 2962 return obj;
e498be7d
CL
2963}
2964#endif
2965
2966/*
2967 * Caller needs to acquire correct kmem_list's list_lock
2968 */
343e0d7a 2969static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
b28a02de 2970 int node)
1da177e4
LT
2971{
2972 int i;
e498be7d 2973 struct kmem_list3 *l3;
1da177e4
LT
2974
2975 for (i = 0; i < nr_objects; i++) {
2976 void *objp = objpp[i];
2977 struct slab *slabp;
1da177e4 2978
6ed5eb22 2979 slabp = virt_to_slab(objp);
ff69416e 2980 l3 = cachep->nodelists[node];
1da177e4 2981 list_del(&slabp->list);
ff69416e 2982 check_spinlock_acquired_node(cachep, node);
1da177e4 2983 check_slabp(cachep, slabp);
78d382d7 2984 slab_put_obj(cachep, slabp, objp, node);
1da177e4 2985 STATS_DEC_ACTIVE(cachep);
e498be7d 2986 l3->free_objects++;
1da177e4
LT
2987 check_slabp(cachep, slabp);
2988
2989 /* fixup slab chains */
2990 if (slabp->inuse == 0) {
e498be7d
CL
2991 if (l3->free_objects > l3->free_limit) {
2992 l3->free_objects -= cachep->num;
1da177e4
LT
2993 slab_destroy(cachep, slabp);
2994 } else {
e498be7d 2995 list_add(&slabp->list, &l3->slabs_free);
1da177e4
LT
2996 }
2997 } else {
2998 /* Unconditionally move a slab to the end of the
2999 * partial list on free - maximum time for the
3000 * other objects to be freed, too.
3001 */
e498be7d 3002 list_add_tail(&slabp->list, &l3->slabs_partial);
1da177e4
LT
3003 }
3004 }
3005}
3006
343e0d7a 3007static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
1da177e4
LT
3008{
3009 int batchcount;
e498be7d 3010 struct kmem_list3 *l3;
ff69416e 3011 int node = numa_node_id();
1da177e4
LT
3012
3013 batchcount = ac->batchcount;
3014#if DEBUG
3015 BUG_ON(!batchcount || batchcount > ac->avail);
3016#endif
3017 check_irq_off();
ff69416e 3018 l3 = cachep->nodelists[node];
e498be7d
CL
3019 spin_lock(&l3->list_lock);
3020 if (l3->shared) {
3021 struct array_cache *shared_array = l3->shared;
b28a02de 3022 int max = shared_array->limit - shared_array->avail;
1da177e4
LT
3023 if (max) {
3024 if (batchcount > max)
3025 batchcount = max;
e498be7d 3026 memcpy(&(shared_array->entry[shared_array->avail]),
b28a02de 3027 ac->entry, sizeof(void *) * batchcount);
1da177e4
LT
3028 shared_array->avail += batchcount;
3029 goto free_done;
3030 }
3031 }
3032
ff69416e 3033 free_block(cachep, ac->entry, batchcount, node);
a737b3e2 3034free_done:
1da177e4
LT
3035#if STATS
3036 {
3037 int i = 0;
3038 struct list_head *p;
3039
e498be7d
CL
3040 p = l3->slabs_free.next;
3041 while (p != &(l3->slabs_free)) {
1da177e4
LT
3042 struct slab *slabp;
3043
3044 slabp = list_entry(p, struct slab, list);
3045 BUG_ON(slabp->inuse);
3046
3047 i++;
3048 p = p->next;
3049 }
3050 STATS_SET_FREEABLE(cachep, i);
3051 }
3052#endif
e498be7d 3053 spin_unlock(&l3->list_lock);
1da177e4 3054 ac->avail -= batchcount;
a737b3e2 3055 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
1da177e4
LT
3056}
3057
3058/*
a737b3e2
AM
3059 * Release an obj back to its cache. If the obj has a constructed state, it must
3060 * be in this state _before_ it is released. Called with disabled ints.
1da177e4 3061 */
343e0d7a 3062static inline void __cache_free(struct kmem_cache *cachep, void *objp)
1da177e4 3063{
9a2dba4b 3064 struct array_cache *ac = cpu_cache_get(cachep);
1da177e4
LT
3065
3066 check_irq_off();
3067 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3068
e498be7d
CL
3069 /* Make sure we are not freeing an object from another
3070 * node to the array cache on this cpu.
3071 */
3072#ifdef CONFIG_NUMA
3073 {
3074 struct slab *slabp;
6ed5eb22 3075 slabp = virt_to_slab(objp);
e498be7d
CL
3076 if (unlikely(slabp->nodeid != numa_node_id())) {
3077 struct array_cache *alien = NULL;
3078 int nodeid = slabp->nodeid;
a737b3e2 3079 struct kmem_list3 *l3;
e498be7d 3080
a737b3e2 3081 l3 = cachep->nodelists[numa_node_id()];
e498be7d
CL
3082 STATS_INC_NODEFREES(cachep);
3083 if (l3->alien && l3->alien[nodeid]) {
3084 alien = l3->alien[nodeid];
3085 spin_lock(&alien->lock);
3086 if (unlikely(alien->avail == alien->limit))
3087 __drain_alien_cache(cachep,
b28a02de 3088 alien, nodeid);
e498be7d
CL
3089 alien->entry[alien->avail++] = objp;
3090 spin_unlock(&alien->lock);
3091 } else {
3092 spin_lock(&(cachep->nodelists[nodeid])->
b28a02de 3093 list_lock);
ff69416e 3094 free_block(cachep, &objp, 1, nodeid);
e498be7d 3095 spin_unlock(&(cachep->nodelists[nodeid])->
b28a02de 3096 list_lock);
e498be7d
CL
3097 }
3098 return;
3099 }
3100 }
3101#endif
1da177e4
LT
3102 if (likely(ac->avail < ac->limit)) {
3103 STATS_INC_FREEHIT(cachep);
e498be7d 3104 ac->entry[ac->avail++] = objp;
1da177e4
LT
3105 return;
3106 } else {
3107 STATS_INC_FREEMISS(cachep);
3108 cache_flusharray(cachep, ac);
e498be7d 3109 ac->entry[ac->avail++] = objp;
1da177e4
LT
3110 }
3111}
3112
3113/**
3114 * kmem_cache_alloc - Allocate an object
3115 * @cachep: The cache to allocate from.
3116 * @flags: See kmalloc().
3117 *
3118 * Allocate an object from this cache. The flags are only relevant
3119 * if the cache has no available objects.
3120 */
343e0d7a 3121void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 3122{
7fd6b141 3123 return __cache_alloc(cachep, flags, __builtin_return_address(0));
1da177e4
LT
3124}
3125EXPORT_SYMBOL(kmem_cache_alloc);
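/*
 * Illustrative sketch (not part of the original file): allocating and
 * freeing one object from a previously created cache.  foo_cache and
 * struct foo are hypothetical.
 */
#if 0
static struct foo *foo_get(void)
{
        struct foo *f;

        f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
        if (!f)
                return NULL;
        f->id = 0;
        return f;
}

static void foo_put(struct foo *f)
{
        kmem_cache_free(foo_cache, f);
}
#endif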
3126
a8c0f9a4
PE
3127/**
3128 * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
3129 * @cache: The cache to allocate from.
3130 * @flags: See kmalloc().
3131 *
3132 * Allocate an object from this cache and set the allocated memory to zero.
3133 * The flags are only relevant if the cache has no available objects.
3134 */
3135void *kmem_cache_zalloc(struct kmem_cache *cache, gfp_t flags)
3136{
3137 void *ret = __cache_alloc(cache, flags, __builtin_return_address(0));
3138 if (ret)
3139 memset(ret, 0, obj_size(cache));
3140 return ret;
3141}
3142EXPORT_SYMBOL(kmem_cache_zalloc);
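/*
 * Illustrative sketch (not part of the original file): kmem_cache_zalloc()
 * saves an explicit memset() when the whole object must start zeroed.
 * foo_cache is hypothetical.
 */
#if 0
static struct foo *foo_get_zeroed(void)
{
        /* every field of the returned object starts out as zero */
        return kmem_cache_zalloc(foo_cache, GFP_KERNEL);
}
#endif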
3143
1da177e4
LT
3144/**
3145 * kmem_ptr_validate - check if an untrusted pointer might
3146 * be a slab entry.
3147 * @cachep: the cache we're checking against
3148 * @ptr: pointer to validate
3149 *
3150 * This verifies that the untrusted pointer looks sane:
3151 * it is _not_ a guarantee that the pointer is actually
3152 * part of the slab cache in question, but it at least
3153 * validates that the pointer can be dereferenced and
3154 * looks half-way sane.
3155 *
3156 * Currently only used for dentry validation.
3157 */
343e0d7a 3158int fastcall kmem_ptr_validate(struct kmem_cache *cachep, void *ptr)
1da177e4 3159{
b28a02de 3160 unsigned long addr = (unsigned long)ptr;
1da177e4 3161 unsigned long min_addr = PAGE_OFFSET;
b28a02de 3162 unsigned long align_mask = BYTES_PER_WORD - 1;
3dafccf2 3163 unsigned long size = cachep->buffer_size;
1da177e4
LT
3164 struct page *page;
3165
3166 if (unlikely(addr < min_addr))
3167 goto out;
3168 if (unlikely(addr > (unsigned long)high_memory - size))
3169 goto out;
3170 if (unlikely(addr & align_mask))
3171 goto out;
3172 if (unlikely(!kern_addr_valid(addr)))
3173 goto out;
3174 if (unlikely(!kern_addr_valid(addr + size - 1)))
3175 goto out;
3176 page = virt_to_page(ptr);
3177 if (unlikely(!PageSlab(page)))
3178 goto out;
065d41cb 3179 if (unlikely(page_get_cache(page) != cachep))
1da177e4
LT
3180 goto out;
3181 return 1;
a737b3e2 3182out:
1da177e4
LT
3183 return 0;
3184}
3185
3186#ifdef CONFIG_NUMA
3187/**
3188 * kmem_cache_alloc_node - Allocate an object on the specified node
3189 * @cachep: The cache to allocate from.
3190 * @flags: See kmalloc().
3191 * @nodeid: node number of the target node.
3192 *
3193 * Identical to kmem_cache_alloc, except that this function is slow
3194 * and can sleep. It will allocate memory on the given node, which
3195 * can improve performance for cpu-bound structures.
e498be7d
CL
3196 * New and improved: it will now make sure that the object gets
3197 * put on the correct node list so that there is no false sharing.
1da177e4 3198 */
343e0d7a 3199void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1da177e4 3200{
e498be7d
CL
3201 unsigned long save_flags;
3202 void *ptr;
1da177e4 3203
e498be7d
CL
3204 cache_alloc_debugcheck_before(cachep, flags);
3205 local_irq_save(save_flags);
18f820f6
CL
3206
3207 if (nodeid == -1 || nodeid == numa_node_id() ||
a737b3e2 3208 !cachep->nodelists[nodeid])
5c382300
AK
3209 ptr = ____cache_alloc(cachep, flags);
3210 else
3211 ptr = __cache_alloc_node(cachep, flags, nodeid);
e498be7d 3212 local_irq_restore(save_flags);
18f820f6
CL
3213
3214 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr,
3215 __builtin_return_address(0));
1da177e4 3216
e498be7d 3217 return ptr;
1da177e4
LT
3218}
3219EXPORT_SYMBOL(kmem_cache_alloc_node);
3220
dd0fc66f 3221void *kmalloc_node(size_t size, gfp_t flags, int node)
97e2bde4 3222{
343e0d7a 3223 struct kmem_cache *cachep;
97e2bde4
MS
3224
3225 cachep = kmem_find_general_cachep(size, flags);
3226 if (unlikely(cachep == NULL))
3227 return NULL;
3228 return kmem_cache_alloc_node(cachep, flags, node);
3229}
3230EXPORT_SYMBOL(kmalloc_node);
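/*
 * Illustrative sketch (not part of the original file): placing a per-node
 * control structure on the node it will mostly be used from.  The names
 * are hypothetical; a real caller usually derives the node from
 * cpu_to_node() or a device's NUMA node.
 */
#if 0
static struct foo_node_state *foo_alloc_on_node(int node)
{
        struct foo_node_state *s;

        s = kmalloc_node(sizeof(*s), GFP_KERNEL, node);
        if (s)
                memset(s, 0, sizeof(*s));
        return s;
}
#endif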
1da177e4
LT
3231#endif
3232
3233/**
3234 * kmalloc - allocate memory
3235 * @size: how many bytes of memory are required.
3236 * @flags: the type of memory to allocate.
911851e6 3237 * @caller: function caller for debug tracking of the caller
1da177e4
LT
3238 *
3239 * kmalloc is the normal method of allocating memory
3240 * in the kernel.
3241 *
3242 * The @flags argument may be one of:
3243 *
3244 * %GFP_USER - Allocate memory on behalf of user. May sleep.
3245 *
3246 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
3247 *
3248 * %GFP_ATOMIC - Allocation will not sleep. Use inside interrupt handlers.
3249 *
3250 * Additionally, the %GFP_DMA flag may be set to indicate the memory
3251 * must be suitable for DMA. This can mean different things on different
3252 * platforms. For example, on i386, it means that the memory must come
3253 * from the first 16MB.
3254 */
7fd6b141
PE
3255static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3256 void *caller)
1da177e4 3257{
343e0d7a 3258 struct kmem_cache *cachep;
1da177e4 3259
97e2bde4
MS
3260 /* If you want to save a few bytes .text space: replace
3261 * __ with kmem_.
3262 * Then kmalloc uses the uninlined functions instead of the inline
3263 * functions.
3264 */
3265 cachep = __find_general_cachep(size, flags);
dbdb9045
AM
3266 if (unlikely(cachep == NULL))
3267 return NULL;
7fd6b141
PE
3268 return __cache_alloc(cachep, flags, caller);
3269}
3270
7fd6b141
PE
3271
3272void *__kmalloc(size_t size, gfp_t flags)
3273{
871751e2 3274#ifndef CONFIG_DEBUG_SLAB
7fd6b141 3275 return __do_kmalloc(size, flags, NULL);
871751e2
AV
3276#else
3277 return __do_kmalloc(size, flags, __builtin_return_address(0));
3278#endif
1da177e4
LT
3279}
3280EXPORT_SYMBOL(__kmalloc);
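/*
 * Illustrative sketch (not part of the original file): typical kmalloc()
 * callers pick the gfp mask by context - GFP_KERNEL where sleeping is
 * allowed, GFP_ATOMIC from interrupt or spinlocked paths.  The helper and
 * its arguments are hypothetical.
 */
#if 0
static char *foo_copy_blob(const char *src, size_t len, gfp_t gfp)
{
        char *buf = kmalloc(len, gfp);

        if (!buf)
                return NULL;
        memcpy(buf, src, len);
        return buf;             /* caller releases it with kfree(buf) */
}
#endif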
3281
871751e2 3282#ifdef CONFIG_DEBUG_SLAB
7fd6b141
PE
3283void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
3284{
3285 return __do_kmalloc(size, flags, caller);
3286}
3287EXPORT_SYMBOL(__kmalloc_track_caller);
7fd6b141
PE
3288#endif
3289
1da177e4
LT
3290#ifdef CONFIG_SMP
3291/**
3292 * __alloc_percpu - allocate one copy of the object for every present
3293 * cpu in the system, zeroing them.
3294 * Objects should be dereferenced using the per_cpu_ptr macro only.
3295 *
3296 * @size: how many bytes of memory are required.
1da177e4 3297 */
f9f75005 3298void *__alloc_percpu(size_t size)
1da177e4
LT
3299{
3300 int i;
b28a02de 3301 struct percpu_data *pdata = kmalloc(sizeof(*pdata), GFP_KERNEL);
1da177e4
LT
3302
3303 if (!pdata)
3304 return NULL;
3305
e498be7d
CL
3306 /*
3307 * Cannot use for_each_online_cpu since a cpu may come online
3308 * and we have no way of figuring out how to fix the array
3309 * that we have allocated then....
3310 */
0a945022 3311 for_each_possible_cpu(i) {
e498be7d
CL
3312 int node = cpu_to_node(i);
3313
3314 if (node_online(node))
3315 pdata->ptrs[i] = kmalloc_node(size, GFP_KERNEL, node);
3316 else
3317 pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
1da177e4
LT
3318
3319 if (!pdata->ptrs[i])
3320 goto unwind_oom;
3321 memset(pdata->ptrs[i], 0, size);
3322 }
3323
3324 /* Catch derefs w/o wrappers */
b28a02de 3325 return (void *)(~(unsigned long)pdata);
1da177e4 3326
a737b3e2 3327unwind_oom:
1da177e4
LT
3328 while (--i >= 0) {
3329 if (!cpu_possible(i))
3330 continue;
3331 kfree(pdata->ptrs[i]);
3332 }
3333 kfree(pdata);
3334 return NULL;
3335}
3336EXPORT_SYMBOL(__alloc_percpu);
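/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * per-cpu counter.  Each cpu's copy is reached through per_cpu_ptr(),
 * and the whole set is released again with free_percpu() below.
 */
#if 0
#include <linux/percpu.h>

static long *foo_counters;      /* one zeroed long per possible cpu */

static int foo_counters_init(void)
{
        foo_counters = __alloc_percpu(sizeof(long));
        if (!foo_counters)
                return -ENOMEM;
        return 0;
}

static void foo_count_this_cpu(void)
{
        int cpu = get_cpu();    /* disable preemption while touching our copy */

        (*per_cpu_ptr(foo_counters, cpu))++;
        put_cpu();
}

static void foo_counters_exit(void)
{
        free_percpu(foo_counters);
}
#endif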
3337#endif
3338
3339/**
3340 * kmem_cache_free - Deallocate an object
3341 * @cachep: The cache the allocation was from.
3342 * @objp: The previously allocated object.
3343 *
3344 * Free an object which was previously allocated from this
3345 * cache.
3346 */
343e0d7a 3347void kmem_cache_free(struct kmem_cache *cachep, void *objp)
1da177e4
LT
3348{
3349 unsigned long flags;
3350
3351 local_irq_save(flags);
3352 __cache_free(cachep, objp);
3353 local_irq_restore(flags);
3354}
3355EXPORT_SYMBOL(kmem_cache_free);
3356
1da177e4
LT
3357/**
3358 * kfree - free previously allocated memory
3359 * @objp: pointer returned by kmalloc.
3360 *
80e93eff
PE
3361 * If @objp is NULL, no operation is performed.
3362 *
1da177e4
LT
3363 * Don't free memory not originally allocated by kmalloc()
3364 * or you will run into trouble.
3365 */
3366void kfree(const void *objp)
3367{
343e0d7a 3368 struct kmem_cache *c;
1da177e4
LT
3369 unsigned long flags;
3370
3371 if (unlikely(!objp))
3372 return;
3373 local_irq_save(flags);
3374 kfree_debugcheck(objp);
6ed5eb22 3375 c = virt_to_cache(objp);
3dafccf2 3376 mutex_debug_check_no_locks_freed(objp, obj_size(c));
b28a02de 3377 __cache_free(c, (void *)objp);
1da177e4
LT
3378 local_irq_restore(flags);
3379}
3380EXPORT_SYMBOL(kfree);
3381
3382#ifdef CONFIG_SMP
3383/**
3384 * free_percpu - free previously allocated percpu memory
3385 * @objp: pointer returned by alloc_percpu.
3386 *
3387 * Don't free memory not originally allocated by alloc_percpu().
3388 * The complemented objp catches such mistakes.
3389 */
b28a02de 3390void free_percpu(const void *objp)
1da177e4
LT
3391{
3392 int i;
b28a02de 3393 struct percpu_data *p = (struct percpu_data *)(~(unsigned long)objp);
1da177e4 3394
e498be7d
CL
3395 /*
3396 * We allocated for all possible cpus, so we must free for all of them, not just the online ones.
3397 */
0a945022 3398 for_each_possible_cpu(i)
b28a02de 3399 kfree(p->ptrs[i]);
1da177e4
LT
3400 kfree(p);
3401}
3402EXPORT_SYMBOL(free_percpu);
3403#endif
3404
343e0d7a 3405unsigned int kmem_cache_size(struct kmem_cache *cachep)
1da177e4 3406{
3dafccf2 3407 return obj_size(cachep);
1da177e4
LT
3408}
3409EXPORT_SYMBOL(kmem_cache_size);
3410
343e0d7a 3411const char *kmem_cache_name(struct kmem_cache *cachep)
1944972d
ACM
3412{
3413 return cachep->name;
3414}
3415EXPORT_SYMBOL_GPL(kmem_cache_name);
3416
e498be7d 3417/*
0718dc2a 3418 * This initializes kmem_list3 or resizes various caches for all nodes.
e498be7d 3419 */
343e0d7a 3420static int alloc_kmemlist(struct kmem_cache *cachep)
e498be7d
CL
3421{
3422 int node;
3423 struct kmem_list3 *l3;
cafeb02e
CL
3424 struct array_cache *new_shared;
3425 struct array_cache **new_alien;
e498be7d
CL
3426
3427 for_each_online_node(node) {
cafeb02e 3428
a737b3e2
AM
3429 new_alien = alloc_alien_cache(node, cachep->limit);
3430 if (!new_alien)
e498be7d 3431 goto fail;
cafeb02e 3432
0718dc2a
CL
3433 new_shared = alloc_arraycache(node,
3434 cachep->shared*cachep->batchcount,
a737b3e2 3435 0xbaadf00d);
0718dc2a
CL
3436 if (!new_shared) {
3437 free_alien_cache(new_alien);
e498be7d 3438 goto fail;
0718dc2a 3439 }
cafeb02e 3440
a737b3e2
AM
3441 l3 = cachep->nodelists[node];
3442 if (l3) {
cafeb02e
CL
3443 struct array_cache *shared = l3->shared;
3444
e498be7d
CL
3445 spin_lock_irq(&l3->list_lock);
3446
cafeb02e 3447 if (shared)
0718dc2a
CL
3448 free_block(cachep, shared->entry,
3449 shared->avail, node);
e498be7d 3450
cafeb02e
CL
3451 l3->shared = new_shared;
3452 if (!l3->alien) {
e498be7d
CL
3453 l3->alien = new_alien;
3454 new_alien = NULL;
3455 }
b28a02de 3456 l3->free_limit = (1 + nr_cpus_node(node)) *
a737b3e2 3457 cachep->batchcount + cachep->num;
e498be7d 3458 spin_unlock_irq(&l3->list_lock);
cafeb02e 3459 kfree(shared);
e498be7d
CL
3460 free_alien_cache(new_alien);
3461 continue;
3462 }
a737b3e2 3463 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
0718dc2a
CL
3464 if (!l3) {
3465 free_alien_cache(new_alien);
3466 kfree(new_shared);
e498be7d 3467 goto fail;
0718dc2a 3468 }
e498be7d
CL
3469
3470 kmem_list3_init(l3);
3471 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
a737b3e2 3472 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
cafeb02e 3473 l3->shared = new_shared;
e498be7d 3474 l3->alien = new_alien;
b28a02de 3475 l3->free_limit = (1 + nr_cpus_node(node)) *
a737b3e2 3476 cachep->batchcount + cachep->num;
e498be7d
CL
3477 cachep->nodelists[node] = l3;
3478 }
cafeb02e 3479 return 0;
0718dc2a 3480
a737b3e2 3481fail:
0718dc2a
CL
3482 if (!cachep->next.next) {
3483 /* Cache is not active yet. Roll back what we did */
3484 node--;
3485 while (node >= 0) {
3486 if (cachep->nodelists[node]) {
3487 l3 = cachep->nodelists[node];
3488
3489 kfree(l3->shared);
3490 free_alien_cache(l3->alien);
3491 kfree(l3);
3492 cachep->nodelists[node] = NULL;
3493 }
3494 node--;
3495 }
3496 }
cafeb02e 3497 return -ENOMEM;
e498be7d
CL
3498}
3499
1da177e4 3500struct ccupdate_struct {
343e0d7a 3501 struct kmem_cache *cachep;
1da177e4
LT
3502 struct array_cache *new[NR_CPUS];
3503};
3504
3505static void do_ccupdate_local(void *info)
3506{
a737b3e2 3507 struct ccupdate_struct *new = info;
1da177e4
LT
3508 struct array_cache *old;
3509
3510 check_irq_off();
9a2dba4b 3511 old = cpu_cache_get(new->cachep);
e498be7d 3512
1da177e4
LT
3513 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3514 new->new[smp_processor_id()] = old;
3515}
3516
b5d8ca7c 3517/* Always called with the cache_chain_mutex held */
a737b3e2
AM
3518static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3519 int batchcount, int shared)
1da177e4
LT
3520{
3521 struct ccupdate_struct new;
e498be7d 3522 int i, err;
1da177e4 3523
b28a02de 3524 memset(&new.new, 0, sizeof(new.new));
e498be7d 3525 for_each_online_cpu(i) {
a737b3e2
AM
3526 new.new[i] = alloc_arraycache(cpu_to_node(i), limit,
3527 batchcount);
e498be7d 3528 if (!new.new[i]) {
b28a02de
PE
3529 for (i--; i >= 0; i--)
3530 kfree(new.new[i]);
e498be7d 3531 return -ENOMEM;
1da177e4
LT
3532 }
3533 }
3534 new.cachep = cachep;
3535
a07fa394 3536 on_each_cpu(do_ccupdate_local, (void *)&new, 1, 1);
e498be7d 3537
1da177e4 3538 check_irq_on();
1da177e4
LT
3539 cachep->batchcount = batchcount;
3540 cachep->limit = limit;
e498be7d 3541 cachep->shared = shared;
1da177e4 3542
e498be7d 3543 for_each_online_cpu(i) {
1da177e4
LT
3544 struct array_cache *ccold = new.new[i];
3545 if (!ccold)
3546 continue;
e498be7d 3547 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
ff69416e 3548 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
e498be7d 3549 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
1da177e4
LT
3550 kfree(ccold);
3551 }
1da177e4 3552
e498be7d
CL
3553 err = alloc_kmemlist(cachep);
3554 if (err) {
3555 printk(KERN_ERR "alloc_kmemlist failed for %s, error %d.\n",
b28a02de 3556 cachep->name, -err);
e498be7d 3557 BUG();
1da177e4 3558 }
1da177e4
LT
3559 return 0;
3560}
3561
b5d8ca7c 3562/* Called with cache_chain_mutex held always */
343e0d7a 3563static void enable_cpucache(struct kmem_cache *cachep)
1da177e4
LT
3564{
3565 int err;
3566 int limit, shared;
3567
a737b3e2
AM
3568 /*
3569 * The head array serves three purposes:
1da177e4
LT
3570 * - create a LIFO ordering, i.e. return objects that are cache-warm
3571 * - reduce the number of spinlock operations.
a737b3e2 3572 * - reduce the number of linked list operations on the slab and
1da177e4
LT
3573 * bufctl chains: array operations are cheaper.
3574 * The numbers are guessed; we should auto-tune as described by
3575 * Bonwick.
3576 */
3dafccf2 3577 if (cachep->buffer_size > 131072)
1da177e4 3578 limit = 1;
3dafccf2 3579 else if (cachep->buffer_size > PAGE_SIZE)
1da177e4 3580 limit = 8;
3dafccf2 3581 else if (cachep->buffer_size > 1024)
1da177e4 3582 limit = 24;
3dafccf2 3583 else if (cachep->buffer_size > 256)
1da177e4
LT
3584 limit = 54;
3585 else
3586 limit = 120;
3587
a737b3e2
AM
3588 /*
3589 * CPU bound tasks (e.g. network routing) can exhibit asymmetric
1da177e4
LT
3590 * allocation behaviour: Most allocs on one cpu, most free operations
3591 * on another cpu. For these cases, an efficient object passing between
3592 * cpus is necessary. This is provided by a shared array. The array
3593 * replaces Bonwick's magazine layer.
3594 * On uniprocessor, it's functionally equivalent (but less efficient)
3595 * to a larger limit. Thus disabled by default.
3596 */
3597 shared = 0;
3598#ifdef CONFIG_SMP
3dafccf2 3599 if (cachep->buffer_size <= PAGE_SIZE)
1da177e4
LT
3600 shared = 8;
3601#endif
3602
3603#if DEBUG
a737b3e2
AM
3604 /*
3605 * With debugging enabled, a large batchcount leads to excessively long
3606 * periods with local interrupts disabled. Limit the batchcount.
1da177e4
LT
3607 */
3608 if (limit > 32)
3609 limit = 32;
3610#endif
b28a02de 3611 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
1da177e4
LT
3612 if (err)
3613 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
b28a02de 3614 cachep->name, -err);
1da177e4
LT
3615}
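/*
 * Worked example of the heuristic above (illustrative, assuming a
 * 4096-byte PAGE_SIZE): a cache of 512-byte objects falls into the
 * "> 256" bucket, so limit = 54, batchcount = (54 + 1) / 2 = 27 and,
 * on SMP, shared = 8 because 512 <= PAGE_SIZE.
 */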
3616
1b55253a
CL
3617/*
3618 * Drain an array if it contains any elements, taking the l3 lock only if
b18e7e65
CL
3619 * necessary. Note that the l3 listlock also protects the array_cache
3620 * if drain_array() is used on the shared array.
1b55253a
CL
3621 */
3622void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3623 struct array_cache *ac, int force, int node)
1da177e4
LT
3624{
3625 int tofree;
3626
1b55253a
CL
3627 if (!ac || !ac->avail)
3628 return;
1da177e4
LT
3629 if (ac->touched && !force) {
3630 ac->touched = 0;
b18e7e65 3631 } else {
1b55253a 3632 spin_lock_irq(&l3->list_lock);
b18e7e65
CL
3633 if (ac->avail) {
3634 tofree = force ? ac->avail : (ac->limit + 4) / 5;
3635 if (tofree > ac->avail)
3636 tofree = (ac->avail + 1) / 2;
3637 free_block(cachep, ac->entry, tofree, node);
3638 ac->avail -= tofree;
3639 memmove(ac->entry, &(ac->entry[tofree]),
3640 sizeof(void *) * ac->avail);
3641 }
1b55253a 3642 spin_unlock_irq(&l3->list_lock);
1da177e4
LT
3643 }
3644}
3645
3646/**
3647 * cache_reap - Reclaim memory from caches.
1e5d5331 3648 * @unused: unused parameter
1da177e4
LT
3649 *
3650 * Called from workqueue/eventd every few seconds.
3651 * Purpose:
3652 * - clear the per-cpu caches for this CPU.
3653 * - return freeable pages to the main free memory pool.
3654 *
a737b3e2
AM
3655 * If we cannot acquire the cache chain mutex then just give up - we'll try
3656 * again on the next iteration.
1da177e4
LT
3657 */
3658static void cache_reap(void *unused)
3659{
3660 struct list_head *walk;
e498be7d 3661 struct kmem_list3 *l3;
aab2207c 3662 int node = numa_node_id();
1da177e4 3663
fc0abb14 3664 if (!mutex_trylock(&cache_chain_mutex)) {
1da177e4 3665 /* Give up. Setup the next iteration. */
b28a02de
PE
3666 schedule_delayed_work(&__get_cpu_var(reap_work),
3667 REAPTIMEOUT_CPUC);
1da177e4
LT
3668 return;
3669 }
3670
3671 list_for_each(walk, &cache_chain) {
343e0d7a 3672 struct kmem_cache *searchp;
b28a02de 3673 struct list_head *p;
1da177e4
LT
3674 int tofree;
3675 struct slab *slabp;
3676
343e0d7a 3677 searchp = list_entry(walk, struct kmem_cache, next);
1da177e4
LT
3678 check_irq_on();
3679
35386e3b
CL
3680 /*
3681 * We only take the l3 lock if absolutely necessary and we
3682 * have established with reasonable certainty that
3683 * we can do some work if the lock was obtained.
3684 */
aab2207c 3685 l3 = searchp->nodelists[node];
35386e3b 3686
8fce4d8e 3687 reap_alien(searchp, l3);
1da177e4 3688
aab2207c 3689 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
1da177e4 3690
35386e3b
CL
3691 /*
3692 * These are racy checks but it does not matter
3693 * if we skip one check or scan twice.
3694 */
e498be7d 3695 if (time_after(l3->next_reap, jiffies))
35386e3b 3696 goto next;
1da177e4 3697
e498be7d 3698 l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
1da177e4 3699
aab2207c 3700 drain_array(searchp, l3, l3->shared, 0, node);
1da177e4 3701
e498be7d
CL
3702 if (l3->free_touched) {
3703 l3->free_touched = 0;
35386e3b 3704 goto next;
1da177e4
LT
3705 }
3706
a737b3e2
AM
3707 tofree = (l3->free_limit + 5 * searchp->num - 1) /
3708 (5 * searchp->num);
1da177e4 3709 do {
35386e3b
CL
3710 /*
3711 * Do not lock if there are no free blocks.
3712 */
3713 if (list_empty(&l3->slabs_free))
3714 break;
3715
3716 spin_lock_irq(&l3->list_lock);
e498be7d 3717 p = l3->slabs_free.next;
35386e3b
CL
3718 if (p == &(l3->slabs_free)) {
3719 spin_unlock_irq(&l3->list_lock);
1da177e4 3720 break;
35386e3b 3721 }
1da177e4
LT
3722
3723 slabp = list_entry(p, struct slab, list);
3724 BUG_ON(slabp->inuse);
3725 list_del(&slabp->list);
3726 STATS_INC_REAPED(searchp);
3727
a737b3e2
AM
3728 /*
3729 * Safe to drop the lock. The slab is no longer linked
3730 * to the cache. searchp cannot disappear while we hold
1da177e4
LT
3731 * cache_chain_mutex.
3732 */
e498be7d
CL
3733 l3->free_objects -= searchp->num;
3734 spin_unlock_irq(&l3->list_lock);
1da177e4 3735 slab_destroy(searchp, slabp);
b28a02de 3736 } while (--tofree > 0);
35386e3b 3737next:
1da177e4
LT
3738 cond_resched();
3739 }
3740 check_irq_on();
fc0abb14 3741 mutex_unlock(&cache_chain_mutex);
8fce4d8e 3742 next_reap_node();
a737b3e2 3743 /* Set up the next iteration */
cd61ef62 3744 schedule_delayed_work(&__get_cpu_var(reap_work), REAPTIMEOUT_CPUC);
1da177e4
LT
3745}
3746
3747#ifdef CONFIG_PROC_FS
3748
85289f98 3749static void print_slabinfo_header(struct seq_file *m)
1da177e4 3750{
85289f98
PE
3751 /*
3752 * Output format version, so at least we can change it
3753 * without _too_ many complaints.
3754 */
1da177e4 3755#if STATS
85289f98 3756 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1da177e4 3757#else
85289f98 3758 seq_puts(m, "slabinfo - version: 2.1\n");
1da177e4 3759#endif
85289f98
PE
3760 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
3761 "<objperslab> <pagesperslab>");
3762 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
3763 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1da177e4 3764#if STATS
85289f98
PE
3765 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
3766 "<error> <maxfreeable> <nodeallocs> <remotefrees>");
3767 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1da177e4 3768#endif
85289f98
PE
3769 seq_putc(m, '\n');
3770}
3771
3772static void *s_start(struct seq_file *m, loff_t *pos)
3773{
3774 loff_t n = *pos;
3775 struct list_head *p;
3776
fc0abb14 3777 mutex_lock(&cache_chain_mutex);
85289f98
PE
3778 if (!n)
3779 print_slabinfo_header(m);
1da177e4
LT
3780 p = cache_chain.next;
3781 while (n--) {
3782 p = p->next;
3783 if (p == &cache_chain)
3784 return NULL;
3785 }
343e0d7a 3786 return list_entry(p, struct kmem_cache, next);
1da177e4
LT
3787}
3788
3789static void *s_next(struct seq_file *m, void *p, loff_t *pos)
3790{
343e0d7a 3791 struct kmem_cache *cachep = p;
1da177e4 3792 ++*pos;
a737b3e2
AM
3793 return cachep->next.next == &cache_chain ?
3794 NULL : list_entry(cachep->next.next, struct kmem_cache, next);
1da177e4
LT
3795}
3796
3797static void s_stop(struct seq_file *m, void *p)
3798{
fc0abb14 3799 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
3800}
3801
3802static int s_show(struct seq_file *m, void *p)
3803{
343e0d7a 3804 struct kmem_cache *cachep = p;
1da177e4 3805 struct list_head *q;
b28a02de
PE
3806 struct slab *slabp;
3807 unsigned long active_objs;
3808 unsigned long num_objs;
3809 unsigned long active_slabs = 0;
3810 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
e498be7d 3811 const char *name;
1da177e4 3812 char *error = NULL;
e498be7d
CL
3813 int node;
3814 struct kmem_list3 *l3;
1da177e4 3815
1da177e4
LT
3816 active_objs = 0;
3817 num_slabs = 0;
e498be7d
CL
3818 for_each_online_node(node) {
3819 l3 = cachep->nodelists[node];
3820 if (!l3)
3821 continue;
3822
ca3b9b91
RT
3823 check_irq_on();
3824 spin_lock_irq(&l3->list_lock);
e498be7d 3825
b28a02de 3826 list_for_each(q, &l3->slabs_full) {
e498be7d
CL
3827 slabp = list_entry(q, struct slab, list);
3828 if (slabp->inuse != cachep->num && !error)
3829 error = "slabs_full accounting error";
3830 active_objs += cachep->num;
3831 active_slabs++;
3832 }
b28a02de 3833 list_for_each(q, &l3->slabs_partial) {
e498be7d
CL
3834 slabp = list_entry(q, struct slab, list);
3835 if (slabp->inuse == cachep->num && !error)
3836 error = "slabs_partial inuse accounting error";
3837 if (!slabp->inuse && !error)
3838 error = "slabs_partial/inuse accounting error";
3839 active_objs += slabp->inuse;
3840 active_slabs++;
3841 }
b28a02de 3842 list_for_each(q, &l3->slabs_free) {
e498be7d
CL
3843 slabp = list_entry(q, struct slab, list);
3844 if (slabp->inuse && !error)
3845 error = "slabs_free/inuse accounting error";
3846 num_slabs++;
3847 }
3848 free_objects += l3->free_objects;
4484ebf1
RT
3849 if (l3->shared)
3850 shared_avail += l3->shared->avail;
e498be7d 3851
ca3b9b91 3852 spin_unlock_irq(&l3->list_lock);
1da177e4 3853 }
b28a02de
PE
3854 num_slabs += active_slabs;
3855 num_objs = num_slabs * cachep->num;
e498be7d 3856 if (num_objs - active_objs != free_objects && !error)
1da177e4
LT
3857 error = "free_objects accounting error";
3858
b28a02de 3859 name = cachep->name;
1da177e4
LT
3860 if (error)
3861 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
3862
3863 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
3dafccf2 3864 name, active_objs, num_objs, cachep->buffer_size,
b28a02de 3865 cachep->num, (1 << cachep->gfporder));
1da177e4 3866 seq_printf(m, " : tunables %4u %4u %4u",
b28a02de 3867 cachep->limit, cachep->batchcount, cachep->shared);
e498be7d 3868 seq_printf(m, " : slabdata %6lu %6lu %6lu",
b28a02de 3869 active_slabs, num_slabs, shared_avail);
1da177e4 3870#if STATS
b28a02de 3871 { /* list3 stats */
1da177e4
LT
3872 unsigned long high = cachep->high_mark;
3873 unsigned long allocs = cachep->num_allocations;
3874 unsigned long grown = cachep->grown;
3875 unsigned long reaped = cachep->reaped;
3876 unsigned long errors = cachep->errors;
3877 unsigned long max_freeable = cachep->max_freeable;
1da177e4 3878 unsigned long node_allocs = cachep->node_allocs;
e498be7d 3879 unsigned long node_frees = cachep->node_frees;
1da177e4 3880
e498be7d 3881 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
a737b3e2
AM
3882 %4lu %4lu %4lu %4lu", allocs, high, grown,
3883 reaped, errors, max_freeable, node_allocs,
3884 node_frees);
1da177e4
LT
3885 }
3886 /* cpu stats */
3887 {
3888 unsigned long allochit = atomic_read(&cachep->allochit);
3889 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
3890 unsigned long freehit = atomic_read(&cachep->freehit);
3891 unsigned long freemiss = atomic_read(&cachep->freemiss);
3892
3893 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
b28a02de 3894 allochit, allocmiss, freehit, freemiss);
1da177e4
LT
3895 }
3896#endif
3897 seq_putc(m, '\n');
1da177e4
LT
3898 return 0;
3899}
3900
3901/*
3902 * slabinfo_op - iterator that generates /proc/slabinfo
3903 *
3904 * Output layout:
3905 * cache-name
3906 * num-active-objs
3907 * total-objs
3908 * object size
3909 * num-active-slabs
3910 * total-slabs
3911 * num-pages-per-slab
3912 * + further values on SMP and with statistics enabled
3913 */
3914
3915struct seq_operations slabinfo_op = {
b28a02de
PE
3916 .start = s_start,
3917 .next = s_next,
3918 .stop = s_stop,
3919 .show = s_show,
1da177e4
LT
3920};
3921
3922#define MAX_SLABINFO_WRITE 128
3923/**
3924 * slabinfo_write - Tuning for the slab allocator
3925 * @file: unused
3926 * @buffer: user buffer
3927 * @count: data length
3928 * @ppos: unused
3929 */
b28a02de
PE
3930ssize_t slabinfo_write(struct file *file, const char __user * buffer,
3931 size_t count, loff_t *ppos)
1da177e4 3932{
b28a02de 3933 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
1da177e4
LT
3934 int limit, batchcount, shared, res;
3935 struct list_head *p;
b28a02de 3936
1da177e4
LT
3937 if (count > MAX_SLABINFO_WRITE)
3938 return -EINVAL;
3939 if (copy_from_user(&kbuf, buffer, count))
3940 return -EFAULT;
b28a02de 3941 kbuf[MAX_SLABINFO_WRITE] = '\0';
1da177e4
LT
3942
3943 tmp = strchr(kbuf, ' ');
3944 if (!tmp)
3945 return -EINVAL;
3946 *tmp = '\0';
3947 tmp++;
3948 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
3949 return -EINVAL;
3950
3951 /* Find the cache in the chain of caches. */
fc0abb14 3952 mutex_lock(&cache_chain_mutex);
1da177e4 3953 res = -EINVAL;
b28a02de 3954 list_for_each(p, &cache_chain) {
a737b3e2 3955 struct kmem_cache *cachep;
1da177e4 3956
a737b3e2 3957 cachep = list_entry(p, struct kmem_cache, next);
1da177e4 3958 if (!strcmp(cachep->name, kbuf)) {
a737b3e2
AM
3959 if (limit < 1 || batchcount < 1 ||
3960 batchcount > limit || shared < 0) {
e498be7d 3961 res = 0;
1da177e4 3962 } else {
e498be7d 3963 res = do_tune_cpucache(cachep, limit,
b28a02de 3964 batchcount, shared);
1da177e4
LT
3965 }
3966 break;
3967 }
3968 }
fc0abb14 3969 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
3970 if (res >= 0)
3971 res = count;
3972 return res;
3973}
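/*
 * Example of the tuning interface parsed above (the cache name is
 * illustrative): writing "example_cache 120 60 8" to /proc/slabinfo
 * sets limit = 120, batchcount = 60 and shared = 8 for that cache,
 * e.g. from a shell:
 *
 *	echo "example_cache 120 60 8" > /proc/slabinfo
 */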
871751e2
AV
3974
3975#ifdef CONFIG_DEBUG_SLAB_LEAK
3976
3977static void *leaks_start(struct seq_file *m, loff_t *pos)
3978{
3979 loff_t n = *pos;
3980 struct list_head *p;
3981
3982 mutex_lock(&cache_chain_mutex);
3983 p = cache_chain.next;
3984 while (n--) {
3985 p = p->next;
3986 if (p == &cache_chain)
3987 return NULL;
3988 }
3989 return list_entry(p, struct kmem_cache, next);
3990}
3991
3992static inline int add_caller(unsigned long *n, unsigned long v)
3993{
3994 unsigned long *p;
3995 int l;
3996 if (!v)
3997 return 1;
3998 l = n[1];
3999 p = n + 2;
4000 while (l) {
4001 int i = l/2;
4002 unsigned long *q = p + 2 * i;
4003 if (*q == v) {
4004 q[1]++;
4005 return 1;
4006 }
4007 if (*q > v) {
4008 l = i;
4009 } else {
4010 p = q + 2;
4011 l -= i + 1;
4012 }
4013 }
4014 if (++n[1] == n[0])
4015 return 0;
4016 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4017 p[0] = v;
4018 p[1] = 1;
4019 return 1;
4020}
4021
4022static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4023{
4024 void *p;
4025 int i;
4026 if (n[0] == n[1])
4027 return;
4028 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4029 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4030 continue;
4031 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4032 return;
4033 }
4034}
4035
4036static void show_symbol(struct seq_file *m, unsigned long address)
4037{
4038#ifdef CONFIG_KALLSYMS
4039 char *modname;
4040 const char *name;
4041 unsigned long offset, size;
4042 char namebuf[KSYM_NAME_LEN+1];
4043
4044 name = kallsyms_lookup(address, &size, &offset, &modname, namebuf);
4045
4046 if (name) {
4047 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4048 if (modname)
4049 seq_printf(m, " [%s]", modname);
4050 return;
4051 }
4052#endif
4053 seq_printf(m, "%p", (void *)address);
4054}
4055
4056static int leaks_show(struct seq_file *m, void *p)
4057{
4058 struct kmem_cache *cachep = p;
4059 struct list_head *q;
4060 struct slab *slabp;
4061 struct kmem_list3 *l3;
4062 const char *name;
4063 unsigned long *n = m->private;
4064 int node;
4065 int i;
4066
4067 if (!(cachep->flags & SLAB_STORE_USER))
4068 return 0;
4069 if (!(cachep->flags & SLAB_RED_ZONE))
4070 return 0;
4071
4072 /* OK, we can do it */
4073
4074 n[1] = 0;
4075
4076 for_each_online_node(node) {
4077 l3 = cachep->nodelists[node];
4078 if (!l3)
4079 continue;
4080
4081 check_irq_on();
4082 spin_lock_irq(&l3->list_lock);
4083
4084 list_for_each(q, &l3->slabs_full) {
4085 slabp = list_entry(q, struct slab, list);
4086 handle_slab(n, cachep, slabp);
4087 }
4088 list_for_each(q, &l3->slabs_partial) {
4089 slabp = list_entry(q, struct slab, list);
4090 handle_slab(n, cachep, slabp);
4091 }
4092 spin_unlock_irq(&l3->list_lock);
4093 }
4094 name = cachep->name;
4095 if (n[0] == n[1]) {
4096 /* Increase the buffer size */
4097 mutex_unlock(&cache_chain_mutex);
4098 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4099 if (!m->private) {
4100 /* Too bad, we are really out */
4101 m->private = n;
4102 mutex_lock(&cache_chain_mutex);
4103 return -ENOMEM;
4104 }
4105 *(unsigned long *)m->private = n[0] * 2;
4106 kfree(n);
4107 mutex_lock(&cache_chain_mutex);
4108 /* Now make sure this entry will be retried */
4109 m->count = m->size;
4110 return 0;
4111 }
4112 for (i = 0; i < n[1]; i++) {
4113 seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4114 show_symbol(m, n[2*i+2]);
4115 seq_putc(m, '\n');
4116 }
4117 return 0;
4118}
4119
4120struct seq_operations slabstats_op = {
4121 .start = leaks_start,
4122 .next = s_next,
4123 .stop = s_stop,
4124 .show = leaks_show,
4125};
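/*
 * slabstats_op backs the CONFIG_DEBUG_SLAB_LEAK proc interface (wired up
 * elsewhere, as /proc/slab_allocators): for each cache created with
 * SLAB_STORE_USER it prints, per allocation call site, how many objects
 * that caller currently has outstanding, e.g. "size-4096: 12 <symbol>".
 */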
4126#endif
1da177e4
LT
4127#endif
4128
00e145b6
MS
4129/**
4130 * ksize - get the actual amount of memory allocated for a given object
4131 * @objp: Pointer to the object
4132 *
4133 * kmalloc may internally round up allocations and return more memory
4134 * than requested. ksize() can be used to determine the actual amount of
4135 * memory allocated. The caller may use this additional memory, even though
4136 * a smaller amount of memory was initially specified with the kmalloc call.
4137 * The caller must guarantee that objp points to a valid object previously
4138 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4139 * must not be freed during the duration of the call.
4140 */
1da177e4
LT
4141unsigned int ksize(const void *objp)
4142{
00e145b6
MS
4143 if (unlikely(objp == NULL))
4144 return 0;
1da177e4 4145
6ed5eb22 4146 return obj_size(virt_to_cache(objp));
1da177e4 4147}