1/*
2 * linux/mm/slab.c
3 * Written by Mark Hemment, 1996/97.
4 * (markhe@nextd.demon.co.uk)
5 *
6 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7 *
8 * Major cleanup, different bufctl logic, per-cpu arrays
9 * (c) 2000 Manfred Spraul
10 *
11 * Cleanup, make the head arrays unconditional, preparation for NUMA
12 * (c) 2002 Manfred Spraul
13 *
14 * An implementation of the Slab Allocator as described in outline in:
15 * UNIX Internals: The New Frontiers by Uresh Vahalia
16 * Pub: Prentice Hall ISBN 0-13-101908-2
17 * or with a little more detail in:
18 * The Slab Allocator: An Object-Caching Kernel Memory Allocator
19 * Jeff Bonwick (Sun Microsystems).
20 * Presented at: USENIX Summer 1994 Technical Conference
21 *
22 * The memory is organized in caches, one cache for each object type.
23 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24 * Each cache consists of many slabs (they are small (usually one
25 * page long) and always contiguous), and each slab contains multiple
26 * initialized objects.
27 *
28 * This means that your constructor is used only for newly allocated
29 * slabs and you must pass objects with the same initializations to
30 * kmem_cache_free.
31 *
32 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33 * normal). If you need a special memory type, then you must create a new
34 * cache for that memory type.
35 *
36 * In order to reduce fragmentation, the slabs are sorted into 3 groups:
37 * full slabs with 0 free objects
38 * partial slabs
39 * empty slabs with no allocated objects
40 *
41 * If partial slabs exist, then new allocations come from these slabs,
42 * otherwise from empty slabs or new slabs are allocated.
43 *
44 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46 *
47 * Each cache has a short per-cpu head array; most allocs
48 * and frees go into that array, and if that array overflows, then 1/2
49 * of the entries in the array are given back into the global cache.
50 * The head array is strictly LIFO and should improve the cache hit rates.
51 * On SMP, it additionally reduces the spinlock operations.
52 *
53 * The c_cpuarray may not be read with local interrupts enabled -
54 * it's changed with a smp_call_function().
55 *
56 * SMP synchronization:
57 * constructors and destructors are called without any locking.
58 * Several members in struct kmem_cache and struct slab never change, they
59 * are accessed without any locking.
60 * The per-cpu arrays are never accessed from the wrong cpu, no locking,
61 * and local interrupts are disabled so slab code is preempt-safe.
62 * The non-constant members are protected with a per-cache irq spinlock.
63 *
64 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65 * in 2000 - many ideas in the current implementation are derived from
66 * his patch.
67 *
68 * Further notes from the original documentation:
69 *
70 * 11 April '97. Started multi-threading - markhe
71 * The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72 * The mutex is only needed when accessing/extending the cache-chain, which
73 * can never happen inside an interrupt (kmem_cache_create(),
74 * kmem_cache_shrink() and kmem_cache_reap()).
75 *
76 * At present, each engine can be growing a cache. This should be blocked.
77 *
78 * 15 March 2005. NUMA slab allocator.
79 * Shai Fultheim <shai@scalex86.org>.
80 * Shobhit Dayal <shobhit@calsoftinc.com>
81 * Alok N Kataria <alokk@calsoftinc.com>
82 * Christoph Lameter <christoph@lameter.com>
83 *
84 * Modified the slab allocator to be node aware on NUMA systems.
85 * Each node has its own list of partial, free and full slabs.
86 * All object allocations for a node occur from node specific slab lists.
87 */
88
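/*
 * Illustrative usage sketch of the interface documented above; the cache
 * name and "struct my_obj" are hypothetical, only the slab calls are real:
 *
 *	static struct kmem_cache *my_cachep;
 *
 *	my_cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(my_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cachep, obj);
 *	kmem_cache_destroy(my_cachep);
 */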
89#include <linux/slab.h>
90#include <linux/mm.h>
91#include <linux/poison.h>
92#include <linux/swap.h>
93#include <linux/cache.h>
94#include <linux/interrupt.h>
95#include <linux/init.h>
96#include <linux/compiler.h>
97#include <linux/cpuset.h>
98#include <linux/proc_fs.h>
99#include <linux/seq_file.h>
100#include <linux/notifier.h>
101#include <linux/kallsyms.h>
102#include <linux/cpu.h>
103#include <linux/sysctl.h>
104#include <linux/module.h>
105#include <linux/rcupdate.h>
106#include <linux/string.h>
107#include <linux/uaccess.h>
108#include <linux/nodemask.h>
109#include <linux/mempolicy.h>
110#include <linux/mutex.h>
111#include <linux/fault-inject.h>
112#include <linux/rtmutex.h>
113#include <linux/reciprocal_div.h>
114#include <linux/debugobjects.h>
115
116#include <asm/cacheflush.h>
117#include <asm/tlbflush.h>
118#include <asm/page.h>
119
120/*
121 * DEBUG - 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
122 * 0 for faster, smaller code (especially in the critical paths).
123 *
124 * STATS - 1 to collect stats for /proc/slabinfo.
125 * 0 for faster, smaller code (especially in the critical paths).
126 *
127 * FORCED_DEBUG - 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
128 */
129
130#ifdef CONFIG_DEBUG_SLAB
131#define DEBUG 1
132#define STATS 1
133#define FORCED_DEBUG 1
134#else
135#define DEBUG 0
136#define STATS 0
137#define FORCED_DEBUG 0
138#endif
139
140/* Shouldn't this be in a header file somewhere? */
141#define BYTES_PER_WORD sizeof(void *)
142#define REDZONE_ALIGN max(BYTES_PER_WORD, __alignof__(unsigned long long))
143
144#ifndef ARCH_KMALLOC_MINALIGN
145/*
146 * Enforce a minimum alignment for the kmalloc caches.
147 * Usually, the kmalloc caches are cache_line_size() aligned, except when
148 * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
149 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
150 * alignment larger than the alignment of a 64-bit integer.
151 * ARCH_KMALLOC_MINALIGN allows that.
152 * Note that increasing this value may disable some debug features.
153 */
154#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
155#endif
156
157#ifndef ARCH_SLAB_MINALIGN
158/*
159 * Enforce a minimum alignment for all caches.
160 * Intended for archs that get misalignment faults even for BYTES_PER_WORD
161 * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
162 * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
163 * some debug features.
164 */
165#define ARCH_SLAB_MINALIGN 0
166#endif
167
168#ifndef ARCH_KMALLOC_FLAGS
169#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
170#endif
171
172/* Legal flag mask for kmem_cache_create(). */
173#if DEBUG
174# define CREATE_MASK (SLAB_RED_ZONE | \
175 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
176 SLAB_CACHE_DMA | \
177 SLAB_STORE_USER | \
178 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
179 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
180 SLAB_DEBUG_OBJECTS)
181#else
182# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
183 SLAB_CACHE_DMA | \
184 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
185 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
186 SLAB_DEBUG_OBJECTS)
187#endif
188
189/*
190 * kmem_bufctl_t:
191 *
192 * Bufctl's are used for linking objs within a slab
193 * linked offsets.
194 *
195 * This implementation relies on "struct page" for locating the cache &
196 * slab an object belongs to.
197 * This allows the bufctl structure to be small (one int), but limits
198 * the number of objects a slab (not a cache) can contain when off-slab
199 * bufctls are used. The limit is the size of the largest general cache
200 * that does not use off-slab slabs.
201 * For 32bit archs with 4 kB pages, this is 56.
202 * This is not serious, as it is only for large objects, when it is unwise
203 * to have too many per slab.
204 * Note: This limit can be raised by introducing a general cache whose size
205 * is less than 512 (PAGE_SIZE>>3), but greater than 256.
206 */
207
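/*
 * Example (illustrative): off-slab slab management is itself allocated
 * with kmalloc(), so it must come from a general cache that still keeps
 * its own management on-slab (smaller than 512 bytes with 4 kB pages).
 * The number of kmem_bufctl_t entries that fit next to struct slab in
 * such a cache is what bounds the objects-per-slab figure quoted above.
 */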
208typedef unsigned int kmem_bufctl_t;
209#define BUFCTL_END (((kmem_bufctl_t)(~0U))-0)
210#define BUFCTL_FREE (((kmem_bufctl_t)(~0U))-1)
211#define BUFCTL_ACTIVE (((kmem_bufctl_t)(~0U))-2)
212#define SLAB_LIMIT (((kmem_bufctl_t)(~0U))-3)
213
214/*
215 * struct slab
216 *
217 * Manages the objs in a slab. Placed either at the beginning of mem allocated
218 * for a slab, or allocated from a general cache.
219 * Slabs are chained into three lists: fully used, partial, fully free slabs.
220 */
221struct slab {
222 struct list_head list;
223 unsigned long colouroff;
224 void *s_mem; /* including colour offset */
225 unsigned int inuse; /* num of objs active in slab */
226 kmem_bufctl_t free;
227 unsigned short nodeid;
228};
229
230/*
231 * struct slab_rcu
232 *
233 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
234 * arrange for kmem_freepages to be called via RCU. This is useful if
235 * we need to approach a kernel structure obliquely, from its address
236 * obtained without the usual locking. We can lock the structure to
237 * stabilize it and check it's still at the given address, only if we
238 * can be sure that the memory has not been meanwhile reused for some
239 * other kind of object (which our subsystem's lock might corrupt).
240 *
241 * rcu_read_lock before reading the address, then rcu_read_unlock after
242 * taking the spinlock within the structure expected at that address.
243 *
244 * We assume struct slab_rcu can overlay struct slab when destroying.
245 */
246struct slab_rcu {
247 struct rcu_head head;
248 struct kmem_cache *cachep;
249 void *addr;
250};
251
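/*
 * Reader-side sketch of the RCU pattern described above (illustrative;
 * "obj", "obj->lock" and "table" are hypothetical):
 *
 *	rcu_read_lock();
 *	obj = table[i];			// address obtained without locking
 *	spin_lock(&obj->lock);		// memory cannot have been reused yet
 *	rcu_read_unlock();
 *	if (obj is still the object we expected)
 *		... use obj ...
 *	spin_unlock(&obj->lock);
 */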
252/*
253 * struct array_cache
254 *
255 * Purpose:
256 * - LIFO ordering, to hand out cache-warm objects from _alloc
257 * - reduce the number of linked list operations
258 * - reduce spinlock operations
259 *
260 * The limit is stored in the per-cpu structure to reduce the data cache
261 * footprint.
262 *
263 */
264struct array_cache {
265 unsigned int avail;
266 unsigned int limit;
267 unsigned int batchcount;
268 unsigned int touched;
269 spinlock_t lock;
270 void *entry[]; /*
271 * Must have this definition in here for the proper
272 * alignment of array_cache. Also simplifies accessing
273 * the entries.
274 */
275};
276
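/*
 * Sketch of the fast path this structure enables (illustrative): on
 * kmem_cache_alloc() the object at entry[--avail] is handed out when
 * avail > 0; on kmem_cache_free() the pointer is pushed at entry[avail++]
 * while avail < limit.  Only when the array is empty or full does the
 * slow path touch the per-node slab lists under list_lock.
 */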
277/*
278 * bootstrap: The caches do not work without cpuarrays anymore, but the
279 * cpuarrays are allocated from the generic caches...
280 */
281#define BOOT_CPUCACHE_ENTRIES 1
282struct arraycache_init {
283 struct array_cache cache;
284 void *entries[BOOT_CPUCACHE_ENTRIES];
285};
286
287/*
288 * The slab lists for all objects.
289 */
290struct kmem_list3 {
291 struct list_head slabs_partial; /* partial list first, better asm code */
292 struct list_head slabs_full;
293 struct list_head slabs_free;
294 unsigned long free_objects;
295 unsigned int free_limit;
296 unsigned int colour_next; /* Per-node cache coloring */
297 spinlock_t list_lock;
298 struct array_cache *shared; /* shared per node */
299 struct array_cache **alien; /* on other nodes */
300 unsigned long next_reap; /* updated without locking */
301 int free_touched; /* updated without locking */
302};
303
304/*
305 * Need this for bootstrapping a per node allocator.
306 */
307#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
308struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
309#define CACHE_CACHE 0
310#define SIZE_AC MAX_NUMNODES
311#define SIZE_L3 (2 * MAX_NUMNODES)
312
313static int drain_freelist(struct kmem_cache *cache,
314 struct kmem_list3 *l3, int tofree);
315static void free_block(struct kmem_cache *cachep, void **objpp, int len,
316 int node);
317static int enable_cpucache(struct kmem_cache *cachep);
318static void cache_reap(struct work_struct *unused);
319
320/*
321 * This function must be completely optimized away if a constant is passed to
322 * it. Mostly the same as what is in linux/slab.h except it returns an index.
323 */
324static __always_inline int index_of(const size_t size)
325{
326 extern void __bad_size(void);
327
328 if (__builtin_constant_p(size)) {
329 int i = 0;
330
331#define CACHE(x) \
332 if (size <=x) \
333 return i; \
334 else \
335 i++;
336#include <linux/kmalloc_sizes.h>
337#undef CACHE
338 __bad_size();
339 } else
340 __bad_size();
341 return 0;
342}
343
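/*
 * Example (illustrative): with the default kmalloc size table, a call such
 * as index_of(24) is folded at compile time into the index of the first
 * general cache whose size is >= 24 (the size-32 cache on many configs);
 * INDEX_AC and INDEX_L3 below rely on exactly this constant folding.
 */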
344static int slab_early_init = 1;
345
346#define INDEX_AC index_of(sizeof(struct arraycache_init))
347#define INDEX_L3 index_of(sizeof(struct kmem_list3))
348
349static void kmem_list3_init(struct kmem_list3 *parent)
350{
351 INIT_LIST_HEAD(&parent->slabs_full);
352 INIT_LIST_HEAD(&parent->slabs_partial);
353 INIT_LIST_HEAD(&parent->slabs_free);
354 parent->shared = NULL;
355 parent->alien = NULL;
356 parent->colour_next = 0;
357 spin_lock_init(&parent->list_lock);
358 parent->free_objects = 0;
359 parent->free_touched = 0;
360}
361
362#define MAKE_LIST(cachep, listp, slab, nodeid) \
363 do { \
364 INIT_LIST_HEAD(listp); \
365 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
366 } while (0)
367
368#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
369 do { \
370 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
371 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
372 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
373 } while (0)
374
375/*
376 * struct kmem_cache
377 *
378 * manages a cache.
379 */
380
381struct kmem_cache {
382/* 1) per-cpu data, touched during every alloc/free */
383 struct array_cache *array[NR_CPUS];
384/* 2) Cache tunables. Protected by cache_chain_mutex */
385 unsigned int batchcount;
386 unsigned int limit;
387 unsigned int shared;
388
389 unsigned int buffer_size;
390 u32 reciprocal_buffer_size;
391/* 3) touched by every alloc & free from the backend */
392
393 unsigned int flags; /* constant flags */
394 unsigned int num; /* # of objs per slab */
395
396/* 4) cache_grow/shrink */
397 /* order of pgs per slab (2^n) */
398 unsigned int gfporder;
399
400 /* force GFP flags, e.g. GFP_DMA */
401 gfp_t gfpflags;
402
403 size_t colour; /* cache colouring range */
404 unsigned int colour_off; /* colour offset */
405 struct kmem_cache *slabp_cache;
406 unsigned int slab_size;
407 unsigned int dflags; /* dynamic flags */
408
409 /* constructor func */
410 void (*ctor)(void *obj);
411
412/* 5) cache creation/removal */
413 const char *name;
414 struct list_head next;
415
416/* 6) statistics */
417#if STATS
418 unsigned long num_active;
419 unsigned long num_allocations;
420 unsigned long high_mark;
421 unsigned long grown;
422 unsigned long reaped;
423 unsigned long errors;
424 unsigned long max_freeable;
425 unsigned long node_allocs;
426 unsigned long node_frees;
427 unsigned long node_overflow;
428 atomic_t allochit;
429 atomic_t allocmiss;
430 atomic_t freehit;
431 atomic_t freemiss;
432#endif
433#if DEBUG
434 /*
435 * If debugging is enabled, then the allocator can add additional
436 * fields and/or padding to every object. buffer_size contains the total
437 * object size including these internal fields; the following two
438 * variables contain the offset to the user object and its size.
439 */
440 int obj_offset;
441 int obj_size;
442#endif
443 /*
444 * We put nodelists[] at the end of kmem_cache, because we want to size
445 * this array to nr_node_ids slots instead of MAX_NUMNODES
446 * (see kmem_cache_init())
447 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
448 * is statically defined, so we reserve the max number of nodes.
449 */
450 struct kmem_list3 *nodelists[MAX_NUMNODES];
451 /*
452 * Do not add fields after nodelists[]
453 */
454};
455
456#define CFLGS_OFF_SLAB (0x80000000UL)
457#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
458
459#define BATCHREFILL_LIMIT 16
460/*
461 * Optimization question: fewer reaps means less probability for unnecessary
462 * cpucache drain/refill cycles.
463 *
464 * OTOH the cpuarrays can contain lots of objects,
465 * which could lock up otherwise freeable slabs.
466 */
467#define REAPTIMEOUT_CPUC (2*HZ)
468#define REAPTIMEOUT_LIST3 (4*HZ)
469
470#if STATS
471#define STATS_INC_ACTIVE(x) ((x)->num_active++)
472#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
473#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
474#define STATS_INC_GROWN(x) ((x)->grown++)
475#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
476#define STATS_SET_HIGH(x) \
477 do { \
478 if ((x)->num_active > (x)->high_mark) \
479 (x)->high_mark = (x)->num_active; \
480 } while (0)
481#define STATS_INC_ERR(x) ((x)->errors++)
482#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
483#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
484#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
485#define STATS_SET_FREEABLE(x, i) \
486 do { \
487 if ((x)->max_freeable < i) \
488 (x)->max_freeable = i; \
489 } while (0)
490#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
491#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
492#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
493#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
494#else
495#define STATS_INC_ACTIVE(x) do { } while (0)
496#define STATS_DEC_ACTIVE(x) do { } while (0)
497#define STATS_INC_ALLOCED(x) do { } while (0)
498#define STATS_INC_GROWN(x) do { } while (0)
499#define STATS_ADD_REAPED(x,y) do { } while (0)
500#define STATS_SET_HIGH(x) do { } while (0)
501#define STATS_INC_ERR(x) do { } while (0)
502#define STATS_INC_NODEALLOCS(x) do { } while (0)
503#define STATS_INC_NODEFREES(x) do { } while (0)
504#define STATS_INC_ACOVERFLOW(x) do { } while (0)
505#define STATS_SET_FREEABLE(x, i) do { } while (0)
506#define STATS_INC_ALLOCHIT(x) do { } while (0)
507#define STATS_INC_ALLOCMISS(x) do { } while (0)
508#define STATS_INC_FREEHIT(x) do { } while (0)
509#define STATS_INC_FREEMISS(x) do { } while (0)
510#endif
511
512#if DEBUG
513
514/*
515 * memory layout of objects:
516 * 0 : objp
517 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
518 * the end of an object is aligned with the end of the real
519 * allocation. Catches writes behind the end of the allocation.
520 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
521 * redzone word.
522 * cachep->obj_offset: The real object.
523 * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
524 * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
525 * [BYTES_PER_WORD long]
526 */
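/*
 * Worked example of the layout above (illustrative numbers): for a cache
 * with a 40-byte object and SLAB_RED_ZONE|SLAB_STORE_USER on a 64-bit
 * machine, each buffer roughly becomes
 *
 *	[redzone1][40-byte object][redzone2][last caller address]
 *
 * with obj_offset pointing just past redzone1; dbg_redzone1/2() and
 * dbg_userword() below compute exactly these positions.
 */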
527static int obj_offset(struct kmem_cache *cachep)
528{
529 return cachep->obj_offset;
530}
531
343e0d7a 532static int obj_size(struct kmem_cache *cachep)
1da177e4 533{
3dafccf2 534 return cachep->obj_size;
1da177e4
LT
535}
536
b46b8f19 537static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
1da177e4
LT
538{
539 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
b46b8f19
DW
540 return (unsigned long long*) (objp + obj_offset(cachep) -
541 sizeof(unsigned long long));
1da177e4
LT
542}
543
b46b8f19 544static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
1da177e4
LT
545{
546 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
547 if (cachep->flags & SLAB_STORE_USER)
b46b8f19
DW
548 return (unsigned long long *)(objp + cachep->buffer_size -
549 sizeof(unsigned long long) -
87a927c7 550 REDZONE_ALIGN);
b46b8f19
DW
551 return (unsigned long long *) (objp + cachep->buffer_size -
552 sizeof(unsigned long long));
1da177e4
LT
553}
554
343e0d7a 555static void **dbg_userword(struct kmem_cache *cachep, void *objp)
1da177e4
LT
556{
557 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
3dafccf2 558 return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
1da177e4
LT
559}
560
561#else
562
3dafccf2
MS
563#define obj_offset(x) 0
564#define obj_size(cachep) (cachep->buffer_size)
b46b8f19
DW
565#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
566#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
1da177e4
LT
567#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
568
569#endif
570
1da177e4
LT
571/*
572 * Do not go above this order unless 0 objects fit into the slab.
573 */
574#define BREAK_GFP_ORDER_HI 1
575#define BREAK_GFP_ORDER_LO 0
576static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
577
578/*
579 * Functions for storing/retrieving the cachep and/or slab from the page
580 * allocator. These are used to find the slab an obj belongs to. With kfree(),
581 * these are used to find the cache which an obj belongs to.
582 */
065d41cb
PE
583static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
584{
585 page->lru.next = (struct list_head *)cache;
586}
587
588static inline struct kmem_cache *page_get_cache(struct page *page)
589{
d85f3385 590 page = compound_head(page);
ddc2e812 591 BUG_ON(!PageSlab(page));
065d41cb
PE
592 return (struct kmem_cache *)page->lru.next;
593}
594
595static inline void page_set_slab(struct page *page, struct slab *slab)
596{
597 page->lru.prev = (struct list_head *)slab;
598}
599
600static inline struct slab *page_get_slab(struct page *page)
601{
ddc2e812 602 BUG_ON(!PageSlab(page));
065d41cb
PE
603 return (struct slab *)page->lru.prev;
604}
1da177e4 605
6ed5eb22
PE
606static inline struct kmem_cache *virt_to_cache(const void *obj)
607{
b49af68f 608 struct page *page = virt_to_head_page(obj);
6ed5eb22
PE
609 return page_get_cache(page);
610}
611
612static inline struct slab *virt_to_slab(const void *obj)
613{
b49af68f 614 struct page *page = virt_to_head_page(obj);
6ed5eb22
PE
615 return page_get_slab(page);
616}
617
8fea4e96
PE
618static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
619 unsigned int idx)
620{
621 return slab->s_mem + cache->buffer_size * idx;
622}
623
624/*
625 * We want to avoid an expensive divide : (offset / cache->buffer_size)
626 * Using the fact that buffer_size is a constant for a particular cache,
627 * we can replace (offset / cache->buffer_size) by
628 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
629 */
630static inline unsigned int obj_to_index(const struct kmem_cache *cache,
631 const struct slab *slab, void *obj)
632{
633 u32 offset = (obj - slab->s_mem);
634 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
635}
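/*
 * Example (illustrative): for a cache with buffer_size 256,
 * reciprocal_buffer_size is precomputed once via reciprocal_value(256);
 * obj_to_index() then turns (offset / 256) into a multiply and shift, so
 * an object at offset 1280 from s_mem yields index 5 without a division
 * instruction on the hot path.
 */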
636
637/*
638 * These are the default caches for kmalloc. Custom caches can have other sizes.
639 */
1da177e4
LT
640struct cache_sizes malloc_sizes[] = {
641#define CACHE(x) { .cs_size = (x) },
642#include <linux/kmalloc_sizes.h>
643 CACHE(ULONG_MAX)
644#undef CACHE
645};
646EXPORT_SYMBOL(malloc_sizes);
647
648/* Must match cache_sizes above. Out of line to keep cache footprint low. */
649struct cache_names {
650 char *name;
651 char *name_dma;
652};
653
654static struct cache_names __initdata cache_names[] = {
655#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
656#include <linux/kmalloc_sizes.h>
b28a02de 657 {NULL,}
1da177e4
LT
658#undef CACHE
659};
660
661static struct arraycache_init initarray_cache __initdata =
b28a02de 662 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
1da177e4 663static struct arraycache_init initarray_generic =
b28a02de 664 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
1da177e4
LT
665
666/* internal cache of cache description objs */
343e0d7a 667static struct kmem_cache cache_cache = {
b28a02de
PE
668 .batchcount = 1,
669 .limit = BOOT_CPUCACHE_ENTRIES,
670 .shared = 1,
343e0d7a 671 .buffer_size = sizeof(struct kmem_cache),
b28a02de 672 .name = "kmem_cache",
1da177e4
LT
673};
674
056c6241
RT
675#define BAD_ALIEN_MAGIC 0x01020304ul
676
f1aaee53
AV
677#ifdef CONFIG_LOCKDEP
678
679/*
680 * Slab sometimes uses the kmalloc slabs to store the slab headers
681 * for other slabs "off slab".
682 * The locking for this is tricky in that it nests within the locks
683 * of all other slabs in a few places; to deal with this special
684 * locking we put on-slab caches into a separate lock-class.
056c6241
RT
685 *
686 * We set lock class for alien array caches which are up during init.
687 * The lock annotation will be lost if all cpus of a node goes down and
688 * then comes back up during hotplug
f1aaee53 689 */
056c6241
RT
690static struct lock_class_key on_slab_l3_key;
691static struct lock_class_key on_slab_alc_key;
692
693static inline void init_lock_keys(void)
f1aaee53 694
f1aaee53
AV
695{
696 int q;
056c6241
RT
697 struct cache_sizes *s = malloc_sizes;
698
699 while (s->cs_size != ULONG_MAX) {
700 for_each_node(q) {
701 struct array_cache **alc;
702 int r;
703 struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
704 if (!l3 || OFF_SLAB(s->cs_cachep))
705 continue;
706 lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
707 alc = l3->alien;
708 /*
709 * FIXME: This check for BAD_ALIEN_MAGIC
710 * should go away when common slab code is taught to
711 * work even without alien caches.
712 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
713 * for alloc_alien_cache,
714 */
715 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
716 continue;
717 for_each_node(r) {
718 if (alc[r])
719 lockdep_set_class(&alc[r]->lock,
720 &on_slab_alc_key);
721 }
722 }
723 s++;
f1aaee53
AV
724 }
725}
f1aaee53 726#else
056c6241 727static inline void init_lock_keys(void)
f1aaee53
AV
728{
729}
730#endif
731
8f5be20b 732/*
95402b38 733 * Guard access to the cache-chain.
8f5be20b 734 */
fc0abb14 735static DEFINE_MUTEX(cache_chain_mutex);
1da177e4
LT
736static struct list_head cache_chain;
737
1da177e4
LT
738/*
739 * chicken and egg problem: delay the per-cpu array allocation
740 * until the general caches are up.
741 */
742static enum {
743 NONE,
e498be7d
CL
744 PARTIAL_AC,
745 PARTIAL_L3,
1da177e4
LT
746 FULL
747} g_cpucache_up;
748
39d24e64
MK
749/*
750 * used by boot code to determine if it can use slab based allocator
751 */
752int slab_is_available(void)
753{
754 return g_cpucache_up == FULL;
755}
756
52bad64d 757static DEFINE_PER_CPU(struct delayed_work, reap_work);
1da177e4 758
343e0d7a 759static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
1da177e4
LT
760{
761 return cachep->array[smp_processor_id()];
762}
763
764static inline struct kmem_cache *__find_general_cachep(size_t size,
765 gfp_t gfpflags)
1da177e4
LT
766{
767 struct cache_sizes *csizep = malloc_sizes;
768
769#if DEBUG
770 /* This happens if someone tries to call
771 * kmem_cache_create(), or __kmalloc(), before
772 * the generic caches are initialized.
773 */
c7e43c78 774 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
1da177e4 775#endif
6cb8f913
CL
776 if (!size)
777 return ZERO_SIZE_PTR;
778
1da177e4
LT
779 while (size > csizep->cs_size)
780 csizep++;
781
782 /*
783 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
784 * has cs_{dma,}cachep==NULL. Thus no special case
785 * for large kmalloc calls required.
786 */
4b51d669 787#ifdef CONFIG_ZONE_DMA
1da177e4
LT
788 if (unlikely(gfpflags & GFP_DMA))
789 return csizep->cs_dmacachep;
4b51d669 790#endif
1da177e4
LT
791 return csizep->cs_cachep;
792}
793
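/*
 * Example (illustrative): a kmalloc(100, GFP_KERNEL) call is satisfied
 * from the first general cache whose cs_size is >= 100 (the size-128
 * cache on common configurations); with GFP_DMA the matching
 * cs_dmacachep is used instead when CONFIG_ZONE_DMA is enabled.
 */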
b221385b 794static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
97e2bde4
MS
795{
796 return __find_general_cachep(size, gfpflags);
797}
97e2bde4 798
fbaccacf 799static size_t slab_mgmt_size(size_t nr_objs, size_t align)
1da177e4 800{
fbaccacf
SR
801 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
802}
1da177e4 803
804/*
805 * Calculate the number of objects and left-over bytes for a given buffer size.
806 */
807static void cache_estimate(unsigned long gfporder, size_t buffer_size,
808 size_t align, int flags, size_t *left_over,
809 unsigned int *num)
810{
811 int nr_objs;
812 size_t mgmt_size;
813 size_t slab_size = PAGE_SIZE << gfporder;
814
815 /*
816 * The slab management structure can be either off the slab or
817 * on it. For the latter case, the memory allocated for a
818 * slab is used for:
819 *
820 * - The struct slab
821 * - One kmem_bufctl_t for each object
822 * - Padding to respect alignment of @align
823 * - @buffer_size bytes for each object
824 *
825 * If the slab management structure is off the slab, then the
826 * alignment will already be calculated into the size. Because
827 * the slabs are all pages aligned, the objects will be at the
828 * correct alignment when allocated.
829 */
830 if (flags & CFLGS_OFF_SLAB) {
831 mgmt_size = 0;
832 nr_objs = slab_size / buffer_size;
833
834 if (nr_objs > SLAB_LIMIT)
835 nr_objs = SLAB_LIMIT;
836 } else {
837 /*
838 * Ignore padding for the initial guess. The padding
839 * is at most @align-1 bytes, and @buffer_size is at
840 * least @align. In the worst case, this result will
841 * be one greater than the number of objects that fit
842 * into the memory allocation when taking the padding
843 * into account.
844 */
845 nr_objs = (slab_size - sizeof(struct slab)) /
846 (buffer_size + sizeof(kmem_bufctl_t));
847
848 /*
849 * This calculated number will be either the right
850 * amount, or one greater than what we want.
851 */
852 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
853 > slab_size)
854 nr_objs--;
855
856 if (nr_objs > SLAB_LIMIT)
857 nr_objs = SLAB_LIMIT;
858
859 mgmt_size = slab_mgmt_size(nr_objs, align);
860 }
861 *num = nr_objs;
862 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
863}
864
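/*
 * Worked example of the estimate above (illustrative): for an order-0
 * slab (4096 bytes) holding 128-byte objects with on-slab management,
 * the first guess is (4096 - sizeof(struct slab)) /
 * (128 + sizeof(kmem_bufctl_t)), i.e. about 30 objects; the check against
 * slab_mgmt_size() then trims that guess if the aligned management area
 * no longer fits, and *left_over is whatever tail remains for colouring.
 */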
d40cee24 865#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
1da177e4 866
a737b3e2
AM
867static void __slab_error(const char *function, struct kmem_cache *cachep,
868 char *msg)
1da177e4
LT
869{
870 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
b28a02de 871 function, cachep->name, msg);
1da177e4
LT
872 dump_stack();
873}
874
3395ee05
PM
875/*
876 * By default on NUMA we use alien caches to stage the freeing of
877 * objects allocated from other nodes. This causes massive memory
878 * inefficiencies when using fake NUMA setup to split memory into a
879 * large number of small nodes, so it can be disabled on the command
880 * line
881 */
882
883static int use_alien_caches __read_mostly = 1;
1807a1aa 884static int numa_platform __read_mostly = 1;
3395ee05
PM
885static int __init noaliencache_setup(char *s)
886{
887 use_alien_caches = 0;
888 return 1;
889}
890__setup("noaliencache", noaliencache_setup);
891
8fce4d8e
CL
892#ifdef CONFIG_NUMA
893/*
894 * Special reaping functions for NUMA systems called from cache_reap().
895 * These take care of doing round robin flushing of alien caches (containing
896 * objects freed on different nodes from which they were allocated) and the
897 * flushing of remote pcps by calling drain_node_pages.
898 */
899static DEFINE_PER_CPU(unsigned long, reap_node);
900
901static void init_reap_node(int cpu)
902{
903 int node;
904
905 node = next_node(cpu_to_node(cpu), node_online_map);
906 if (node == MAX_NUMNODES)
442295c9 907 node = first_node(node_online_map);
8fce4d8e 908
7f6b8876 909 per_cpu(reap_node, cpu) = node;
8fce4d8e
CL
910}
911
912static void next_reap_node(void)
913{
914 int node = __get_cpu_var(reap_node);
915
8fce4d8e
CL
916 node = next_node(node, node_online_map);
917 if (unlikely(node >= MAX_NUMNODES))
918 node = first_node(node_online_map);
919 __get_cpu_var(reap_node) = node;
920}
921
922#else
923#define init_reap_node(cpu) do { } while (0)
924#define next_reap_node(void) do { } while (0)
925#endif
926
1da177e4
LT
927/*
928 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
929 * via the workqueue/eventd.
930 * Add the CPU number into the expiration time to minimize the possibility of
931 * the CPUs getting into lockstep and contending for the global cache chain
932 * lock.
933 */
897e679b 934static void __cpuinit start_cpu_timer(int cpu)
1da177e4 935{
52bad64d 936 struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
1da177e4
LT
937
938 /*
939 * When this gets called from do_initcalls via cpucache_init(),
940 * init_workqueues() has already run, so keventd will be setup
941 * at that time.
942 */
52bad64d 943 if (keventd_up() && reap_work->work.func == NULL) {
8fce4d8e 944 init_reap_node(cpu);
65f27f38 945 INIT_DELAYED_WORK(reap_work, cache_reap);
2b284214
AV
946 schedule_delayed_work_on(cpu, reap_work,
947 __round_jiffies_relative(HZ, cpu));
1da177e4
LT
948 }
949}
950
e498be7d 951static struct array_cache *alloc_arraycache(int node, int entries,
b28a02de 952 int batchcount)
1da177e4 953{
b28a02de 954 int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
1da177e4
LT
955 struct array_cache *nc = NULL;
956
e498be7d 957 nc = kmalloc_node(memsize, GFP_KERNEL, node);
1da177e4
LT
958 if (nc) {
959 nc->avail = 0;
960 nc->limit = entries;
961 nc->batchcount = batchcount;
962 nc->touched = 0;
e498be7d 963 spin_lock_init(&nc->lock);
1da177e4
LT
964 }
965 return nc;
966}
967
3ded175a
CL
968/*
969 * Transfer objects in one arraycache to another.
970 * Locking must be handled by the caller.
971 *
972 * Return the number of entries transferred.
973 */
974static int transfer_objects(struct array_cache *to,
975 struct array_cache *from, unsigned int max)
976{
977 /* Figure out how many entries to transfer */
978 int nr = min(min(from->avail, max), to->limit - to->avail);
979
980 if (!nr)
981 return 0;
982
983 memcpy(to->entry + to->avail, from->entry + from->avail -nr,
984 sizeof(void *) *nr);
985
986 from->avail -= nr;
987 to->avail += nr;
988 to->touched = 1;
989 return nr;
990}
991
765c4507
CL
992#ifndef CONFIG_NUMA
993
994#define drain_alien_cache(cachep, alien) do { } while (0)
995#define reap_alien(cachep, l3) do { } while (0)
996
997static inline struct array_cache **alloc_alien_cache(int node, int limit)
998{
999 return (struct array_cache **)BAD_ALIEN_MAGIC;
1000}
1001
1002static inline void free_alien_cache(struct array_cache **ac_ptr)
1003{
1004}
1005
1006static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1007{
1008 return 0;
1009}
1010
1011static inline void *alternate_node_alloc(struct kmem_cache *cachep,
1012 gfp_t flags)
1013{
1014 return NULL;
1015}
1016
8b98c169 1017static inline void *____cache_alloc_node(struct kmem_cache *cachep,
765c4507
CL
1018 gfp_t flags, int nodeid)
1019{
1020 return NULL;
1021}
1022
1023#else /* CONFIG_NUMA */
1024
8b98c169 1025static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
c61afb18 1026static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
dc85da15 1027
5295a74c 1028static struct array_cache **alloc_alien_cache(int node, int limit)
e498be7d
CL
1029{
1030 struct array_cache **ac_ptr;
8ef82866 1031 int memsize = sizeof(void *) * nr_node_ids;
e498be7d
CL
1032 int i;
1033
1034 if (limit > 1)
1035 limit = 12;
1036 ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
1037 if (ac_ptr) {
1038 for_each_node(i) {
1039 if (i == node || !node_online(i)) {
1040 ac_ptr[i] = NULL;
1041 continue;
1042 }
1043 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
1044 if (!ac_ptr[i]) {
cc550def 1045 for (i--; i >= 0; i--)
e498be7d
CL
1046 kfree(ac_ptr[i]);
1047 kfree(ac_ptr);
1048 return NULL;
1049 }
1050 }
1051 }
1052 return ac_ptr;
1053}
1054
5295a74c 1055static void free_alien_cache(struct array_cache **ac_ptr)
e498be7d
CL
1056{
1057 int i;
1058
1059 if (!ac_ptr)
1060 return;
e498be7d 1061 for_each_node(i)
b28a02de 1062 kfree(ac_ptr[i]);
e498be7d
CL
1063 kfree(ac_ptr);
1064}
1065
343e0d7a 1066static void __drain_alien_cache(struct kmem_cache *cachep,
5295a74c 1067 struct array_cache *ac, int node)
e498be7d
CL
1068{
1069 struct kmem_list3 *rl3 = cachep->nodelists[node];
1070
1071 if (ac->avail) {
1072 spin_lock(&rl3->list_lock);
e00946fe
CL
1073 /*
1074 * Stuff objects into the remote nodes shared array first.
1075 * That way we could avoid the overhead of putting the objects
1076 * into the free lists and getting them back later.
1077 */
693f7d36
JS
1078 if (rl3->shared)
1079 transfer_objects(rl3->shared, ac, ac->limit);
e00946fe 1080
ff69416e 1081 free_block(cachep, ac->entry, ac->avail, node);
e498be7d
CL
1082 ac->avail = 0;
1083 spin_unlock(&rl3->list_lock);
1084 }
1085}
1086
8fce4d8e
CL
1087/*
1088 * Called from cache_reap() to regularly drain alien caches round robin.
1089 */
1090static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1091{
1092 int node = __get_cpu_var(reap_node);
1093
1094 if (l3->alien) {
1095 struct array_cache *ac = l3->alien[node];
e00946fe
CL
1096
1097 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
8fce4d8e
CL
1098 __drain_alien_cache(cachep, ac, node);
1099 spin_unlock_irq(&ac->lock);
1100 }
1101 }
1102}
1103
a737b3e2
AM
1104static void drain_alien_cache(struct kmem_cache *cachep,
1105 struct array_cache **alien)
e498be7d 1106{
b28a02de 1107 int i = 0;
e498be7d
CL
1108 struct array_cache *ac;
1109 unsigned long flags;
1110
1111 for_each_online_node(i) {
4484ebf1 1112 ac = alien[i];
e498be7d
CL
1113 if (ac) {
1114 spin_lock_irqsave(&ac->lock, flags);
1115 __drain_alien_cache(cachep, ac, i);
1116 spin_unlock_irqrestore(&ac->lock, flags);
1117 }
1118 }
1119}
729bd0b7 1120
873623df 1121static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
729bd0b7
PE
1122{
1123 struct slab *slabp = virt_to_slab(objp);
1124 int nodeid = slabp->nodeid;
1125 struct kmem_list3 *l3;
1126 struct array_cache *alien = NULL;
1ca4cb24
PE
1127 int node;
1128
1129 node = numa_node_id();
1130
1131 /*
1132 * Make sure we are not freeing an object from another node to the array
1133 * cache on this cpu.
1134 */
62918a03 1135 if (likely(slabp->nodeid == node))
729bd0b7
PE
1136 return 0;
1137
1ca4cb24 1138 l3 = cachep->nodelists[node];
729bd0b7
PE
1139 STATS_INC_NODEFREES(cachep);
1140 if (l3->alien && l3->alien[nodeid]) {
1141 alien = l3->alien[nodeid];
873623df 1142 spin_lock(&alien->lock);
729bd0b7
PE
1143 if (unlikely(alien->avail == alien->limit)) {
1144 STATS_INC_ACOVERFLOW(cachep);
1145 __drain_alien_cache(cachep, alien, nodeid);
1146 }
1147 alien->entry[alien->avail++] = objp;
1148 spin_unlock(&alien->lock);
1149 } else {
1150 spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1151 free_block(cachep, &objp, 1, nodeid);
1152 spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1153 }
1154 return 1;
1155}
e498be7d
CL
1156#endif
1157
fbf1e473
AM
1158static void __cpuinit cpuup_canceled(long cpu)
1159{
1160 struct kmem_cache *cachep;
1161 struct kmem_list3 *l3 = NULL;
1162 int node = cpu_to_node(cpu);
c5f59f08 1163 node_to_cpumask_ptr(mask, node);
fbf1e473
AM
1164
1165 list_for_each_entry(cachep, &cache_chain, next) {
1166 struct array_cache *nc;
1167 struct array_cache *shared;
1168 struct array_cache **alien;
fbf1e473 1169
fbf1e473
AM
1170 /* cpu is dead; no one can alloc from it. */
1171 nc = cachep->array[cpu];
1172 cachep->array[cpu] = NULL;
1173 l3 = cachep->nodelists[node];
1174
1175 if (!l3)
1176 goto free_array_cache;
1177
1178 spin_lock_irq(&l3->list_lock);
1179
1180 /* Free limit for this kmem_list3 */
1181 l3->free_limit -= cachep->batchcount;
1182 if (nc)
1183 free_block(cachep, nc->entry, nc->avail, node);
1184
c5f59f08 1185 if (!cpus_empty(*mask)) {
fbf1e473
AM
1186 spin_unlock_irq(&l3->list_lock);
1187 goto free_array_cache;
1188 }
1189
1190 shared = l3->shared;
1191 if (shared) {
1192 free_block(cachep, shared->entry,
1193 shared->avail, node);
1194 l3->shared = NULL;
1195 }
1196
1197 alien = l3->alien;
1198 l3->alien = NULL;
1199
1200 spin_unlock_irq(&l3->list_lock);
1201
1202 kfree(shared);
1203 if (alien) {
1204 drain_alien_cache(cachep, alien);
1205 free_alien_cache(alien);
1206 }
1207free_array_cache:
1208 kfree(nc);
1209 }
1210 /*
1211 * In the previous loop, all the objects were freed to
1212 * the respective cache's slabs, now we can go ahead and
1213 * shrink each nodelist to its limit.
1214 */
1215 list_for_each_entry(cachep, &cache_chain, next) {
1216 l3 = cachep->nodelists[node];
1217 if (!l3)
1218 continue;
1219 drain_freelist(cachep, l3, l3->free_objects);
1220 }
1221}
1222
1223static int __cpuinit cpuup_prepare(long cpu)
1da177e4 1224{
343e0d7a 1225 struct kmem_cache *cachep;
e498be7d
CL
1226 struct kmem_list3 *l3 = NULL;
1227 int node = cpu_to_node(cpu);
ea02e3dd 1228 const int memsize = sizeof(struct kmem_list3);
1da177e4 1229
fbf1e473
AM
1230 /*
1231 * We need to do this right in the beginning since
1232 * alloc_arraycache's are going to use this list.
1233 * kmalloc_node allows us to add the slab to the right
1234 * kmem_list3 and not this cpu's kmem_list3
1235 */
1236
1237 list_for_each_entry(cachep, &cache_chain, next) {
a737b3e2 1238 /*
fbf1e473
AM
1239 * Set up the size64 kmemlist for cpu before we can
1240 * begin anything. Make sure some other cpu on this
1241 * node has not already allocated this
e498be7d 1242 */
fbf1e473
AM
1243 if (!cachep->nodelists[node]) {
1244 l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1245 if (!l3)
1246 goto bad;
1247 kmem_list3_init(l3);
1248 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1249 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
e498be7d 1250
a737b3e2 1251 /*
fbf1e473
AM
1252 * The l3s don't come and go as CPUs come and
1253 * go. cache_chain_mutex is sufficient
1254 * protection here.
e498be7d 1255 */
fbf1e473 1256 cachep->nodelists[node] = l3;
e498be7d
CL
1257 }
1258
fbf1e473
AM
1259 spin_lock_irq(&cachep->nodelists[node]->list_lock);
1260 cachep->nodelists[node]->free_limit =
1261 (1 + nr_cpus_node(node)) *
1262 cachep->batchcount + cachep->num;
1263 spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1264 }
1265
1266 /*
1267 * Now we can go ahead with allocating the shared arrays and
1268 * array caches
1269 */
1270 list_for_each_entry(cachep, &cache_chain, next) {
1271 struct array_cache *nc;
1272 struct array_cache *shared = NULL;
1273 struct array_cache **alien = NULL;
1274
1275 nc = alloc_arraycache(node, cachep->limit,
1276 cachep->batchcount);
1277 if (!nc)
1278 goto bad;
1279 if (cachep->shared) {
1280 shared = alloc_arraycache(node,
1281 cachep->shared * cachep->batchcount,
1282 0xbaadf00d);
12d00f6a
AM
1283 if (!shared) {
1284 kfree(nc);
1da177e4 1285 goto bad;
12d00f6a 1286 }
fbf1e473
AM
1287 }
1288 if (use_alien_caches) {
1289 alien = alloc_alien_cache(node, cachep->limit);
12d00f6a
AM
1290 if (!alien) {
1291 kfree(shared);
1292 kfree(nc);
fbf1e473 1293 goto bad;
12d00f6a 1294 }
fbf1e473
AM
1295 }
1296 cachep->array[cpu] = nc;
1297 l3 = cachep->nodelists[node];
1298 BUG_ON(!l3);
1299
1300 spin_lock_irq(&l3->list_lock);
1301 if (!l3->shared) {
1302 /*
1303 * We are serialised from CPU_DEAD or
1304 * CPU_UP_CANCELLED by the cpucontrol lock
1305 */
1306 l3->shared = shared;
1307 shared = NULL;
1308 }
4484ebf1 1309#ifdef CONFIG_NUMA
fbf1e473
AM
1310 if (!l3->alien) {
1311 l3->alien = alien;
1312 alien = NULL;
1da177e4 1313 }
fbf1e473
AM
1314#endif
1315 spin_unlock_irq(&l3->list_lock);
1316 kfree(shared);
1317 free_alien_cache(alien);
1318 }
1319 return 0;
1320bad:
12d00f6a 1321 cpuup_canceled(cpu);
fbf1e473
AM
1322 return -ENOMEM;
1323}
1324
1325static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1326 unsigned long action, void *hcpu)
1327{
1328 long cpu = (long)hcpu;
1329 int err = 0;
1330
1331 switch (action) {
fbf1e473
AM
1332 case CPU_UP_PREPARE:
1333 case CPU_UP_PREPARE_FROZEN:
95402b38 1334 mutex_lock(&cache_chain_mutex);
fbf1e473 1335 err = cpuup_prepare(cpu);
95402b38 1336 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
1337 break;
1338 case CPU_ONLINE:
8bb78442 1339 case CPU_ONLINE_FROZEN:
1da177e4
LT
1340 start_cpu_timer(cpu);
1341 break;
1342#ifdef CONFIG_HOTPLUG_CPU
5830c590 1343 case CPU_DOWN_PREPARE:
8bb78442 1344 case CPU_DOWN_PREPARE_FROZEN:
5830c590
CL
1345 /*
1346 * Shutdown cache reaper. Note that the cache_chain_mutex is
1347 * held so that if cache_reap() is invoked it cannot do
1348 * anything expensive but will only modify reap_work
1349 * and reschedule the timer.
1350 */
1351 cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
1352 /* Now the cache_reaper is guaranteed to be not running. */
1353 per_cpu(reap_work, cpu).work.func = NULL;
1354 break;
1355 case CPU_DOWN_FAILED:
8bb78442 1356 case CPU_DOWN_FAILED_FROZEN:
5830c590
CL
1357 start_cpu_timer(cpu);
1358 break;
1da177e4 1359 case CPU_DEAD:
8bb78442 1360 case CPU_DEAD_FROZEN:
4484ebf1
RT
1361 /*
1362 * Even if all the cpus of a node are down, we don't free the
1363 * kmem_list3 of any cache. This to avoid a race between
1364 * cpu_down, and a kmalloc allocation from another cpu for
1365 * memory from the node of the cpu going down. The list3
1366 * structure is usually allocated from kmem_cache_create() and
1367 * gets destroyed at kmem_cache_destroy().
1368 */
183ff22b 1369 /* fall through */
8f5be20b 1370#endif
1da177e4 1371 case CPU_UP_CANCELED:
8bb78442 1372 case CPU_UP_CANCELED_FROZEN:
95402b38 1373 mutex_lock(&cache_chain_mutex);
fbf1e473 1374 cpuup_canceled(cpu);
fc0abb14 1375 mutex_unlock(&cache_chain_mutex);
1da177e4 1376 break;
1da177e4 1377 }
fbf1e473 1378 return err ? NOTIFY_BAD : NOTIFY_OK;
1da177e4
LT
1379}
1380
74b85f37
CS
1381static struct notifier_block __cpuinitdata cpucache_notifier = {
1382 &cpuup_callback, NULL, 0
1383};
1da177e4 1384
e498be7d
CL
1385/*
1386 * swap the static kmem_list3 with kmalloced memory
1387 */
a737b3e2
AM
1388static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1389 int nodeid)
e498be7d
CL
1390{
1391 struct kmem_list3 *ptr;
1392
e498be7d
CL
1393 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1394 BUG_ON(!ptr);
1395
1396 local_irq_disable();
1397 memcpy(ptr, list, sizeof(struct kmem_list3));
2b2d5493
IM
1398 /*
1399 * Do not assume that spinlocks can be initialized via memcpy:
1400 */
1401 spin_lock_init(&ptr->list_lock);
1402
e498be7d
CL
1403 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1404 cachep->nodelists[nodeid] = ptr;
1405 local_irq_enable();
1406}
1407
556a169d
PE
1408/*
1409 * For setting up all the kmem_list3s for cache whose buffer_size is same as
1410 * size of kmem_list3.
1411 */
1412static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1413{
1414 int node;
1415
1416 for_each_online_node(node) {
1417 cachep->nodelists[node] = &initkmem_list3[index + node];
1418 cachep->nodelists[node]->next_reap = jiffies +
1419 REAPTIMEOUT_LIST3 +
1420 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1421 }
1422}
1423
1424/*
1425 * Initialisation. Called after the page allocator has been initialised and
1426 * before smp_init().
1427 */
1428void __init kmem_cache_init(void)
1429{
1430 size_t left_over;
1431 struct cache_sizes *sizes;
1432 struct cache_names *names;
e498be7d 1433 int i;
07ed76b2 1434 int order;
1ca4cb24 1435 int node;
e498be7d 1436
1807a1aa 1437 if (num_possible_nodes() == 1) {
62918a03 1438 use_alien_caches = 0;
1807a1aa
SS
1439 numa_platform = 0;
1440 }
62918a03 1441
e498be7d
CL
1442 for (i = 0; i < NUM_INIT_LISTS; i++) {
1443 kmem_list3_init(&initkmem_list3[i]);
1444 if (i < MAX_NUMNODES)
1445 cache_cache.nodelists[i] = NULL;
1446 }
556a169d 1447 set_up_list3s(&cache_cache, CACHE_CACHE);
1da177e4
LT
1448
1449 /*
1450 * Fragmentation resistance on low memory - only use bigger
1451 * page orders on machines with more than 32MB of memory.
1452 */
1453 if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1454 slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1455
1da177e4
LT
1456 /* Bootstrap is tricky, because several objects are allocated
1457 * from caches that do not exist yet:
1458 * 1) initialize the cache_cache cache: it contains the struct
1459 * kmem_cache structures of all caches, except cache_cache itself:
1460 * cache_cache is statically allocated.
1461 * Initially an __init data area is used for the head array and the
1462 * kmem_list3 structures, it's replaced with a kmalloc allocated
1463 * array at the end of the bootstrap.
1464 * 2) Create the first kmalloc cache.
1465 * The struct kmem_cache for the new cache is allocated normally.
1466 * An __init data area is used for the head array.
1467 * 3) Create the remaining kmalloc caches, with minimally sized
1468 * head arrays.
1469 * 4) Replace the __init data head arrays for cache_cache and the first
1470 * kmalloc cache with kmalloc allocated arrays.
1471 * 5) Replace the __init data for kmem_list3 for cache_cache and
1472 * the other caches with kmalloc allocated memory.
1473 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1474 */
1475
1ca4cb24
PE
1476 node = numa_node_id();
1477
1da177e4 1478 /* 1) create the cache_cache */
1da177e4
LT
1479 INIT_LIST_HEAD(&cache_chain);
1480 list_add(&cache_cache.next, &cache_chain);
1481 cache_cache.colour_off = cache_line_size();
1482 cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
ec1f5eee 1483 cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
1da177e4 1484
8da3430d
ED
1485 /*
1486 * struct kmem_cache size depends on nr_node_ids, which
1487 * can be less than MAX_NUMNODES.
1488 */
1489 cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
1490 nr_node_ids * sizeof(struct kmem_list3 *);
1491#if DEBUG
1492 cache_cache.obj_size = cache_cache.buffer_size;
1493#endif
a737b3e2
AM
1494 cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1495 cache_line_size());
6a2d7a95
ED
1496 cache_cache.reciprocal_buffer_size =
1497 reciprocal_value(cache_cache.buffer_size);
1da177e4 1498
07ed76b2
JS
1499 for (order = 0; order < MAX_ORDER; order++) {
1500 cache_estimate(order, cache_cache.buffer_size,
1501 cache_line_size(), 0, &left_over, &cache_cache.num);
1502 if (cache_cache.num)
1503 break;
1504 }
40094fa6 1505 BUG_ON(!cache_cache.num);
07ed76b2 1506 cache_cache.gfporder = order;
b28a02de 1507 cache_cache.colour = left_over / cache_cache.colour_off;
b28a02de
PE
1508 cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1509 sizeof(struct slab), cache_line_size());
1da177e4
LT
1510
1511 /* 2+3) create the kmalloc caches */
1512 sizes = malloc_sizes;
1513 names = cache_names;
1514
a737b3e2
AM
1515 /*
1516 * Initialize the caches that provide memory for the array cache and the
1517 * kmem_list3 structures first. Without this, further allocations will
1518 * bug.
e498be7d
CL
1519 */
1520
1521 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
a737b3e2
AM
1522 sizes[INDEX_AC].cs_size,
1523 ARCH_KMALLOC_MINALIGN,
1524 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
20c2df83 1525 NULL);
e498be7d 1526
a737b3e2 1527 if (INDEX_AC != INDEX_L3) {
e498be7d 1528 sizes[INDEX_L3].cs_cachep =
a737b3e2
AM
1529 kmem_cache_create(names[INDEX_L3].name,
1530 sizes[INDEX_L3].cs_size,
1531 ARCH_KMALLOC_MINALIGN,
1532 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
20c2df83 1533 NULL);
a737b3e2 1534 }
e498be7d 1535
e0a42726
IM
1536 slab_early_init = 0;
1537
1da177e4 1538 while (sizes->cs_size != ULONG_MAX) {
1539 /*
1540 * For performance, all the general caches are L1 aligned.
1541 * This should be particularly beneficial on SMP boxes, as it
1542 * eliminates "false sharing".
1543 * Note for systems short on memory removing the alignment will
1544 * allow tighter packing of the smaller caches.
1545 */
a737b3e2 1546 if (!sizes->cs_cachep) {
e498be7d 1547 sizes->cs_cachep = kmem_cache_create(names->name,
a737b3e2
AM
1548 sizes->cs_size,
1549 ARCH_KMALLOC_MINALIGN,
1550 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
20c2df83 1551 NULL);
a737b3e2 1552 }
4b51d669
CL
1553#ifdef CONFIG_ZONE_DMA
1554 sizes->cs_dmacachep = kmem_cache_create(
1555 names->name_dma,
a737b3e2
AM
1556 sizes->cs_size,
1557 ARCH_KMALLOC_MINALIGN,
1558 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1559 SLAB_PANIC,
20c2df83 1560 NULL);
4b51d669 1561#endif
1da177e4
LT
1562 sizes++;
1563 names++;
1564 }
1565 /* 4) Replace the bootstrap head arrays */
1566 {
2b2d5493 1567 struct array_cache *ptr;
e498be7d 1568
1da177e4 1569 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
e498be7d 1570
1da177e4 1571 local_irq_disable();
9a2dba4b
PE
1572 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1573 memcpy(ptr, cpu_cache_get(&cache_cache),
b28a02de 1574 sizeof(struct arraycache_init));
2b2d5493
IM
1575 /*
1576 * Do not assume that spinlocks can be initialized via memcpy:
1577 */
1578 spin_lock_init(&ptr->lock);
1579
1da177e4
LT
1580 cache_cache.array[smp_processor_id()] = ptr;
1581 local_irq_enable();
e498be7d 1582
1da177e4 1583 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
e498be7d 1584
1da177e4 1585 local_irq_disable();
9a2dba4b 1586 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
b28a02de 1587 != &initarray_generic.cache);
9a2dba4b 1588 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
b28a02de 1589 sizeof(struct arraycache_init));
2b2d5493
IM
1590 /*
1591 * Do not assume that spinlocks can be initialized via memcpy:
1592 */
1593 spin_lock_init(&ptr->lock);
1594
e498be7d 1595 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
b28a02de 1596 ptr;
1da177e4
LT
1597 local_irq_enable();
1598 }
e498be7d
CL
1599 /* 5) Replace the bootstrap kmem_list3's */
1600 {
1ca4cb24
PE
1601 int nid;
1602
9c09a95c 1603 for_each_online_node(nid) {
ec1f5eee 1604 init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
556a169d 1605
e498be7d 1606 init_list(malloc_sizes[INDEX_AC].cs_cachep,
1ca4cb24 1607 &initkmem_list3[SIZE_AC + nid], nid);
e498be7d
CL
1608
1609 if (INDEX_AC != INDEX_L3) {
1610 init_list(malloc_sizes[INDEX_L3].cs_cachep,
1ca4cb24 1611 &initkmem_list3[SIZE_L3 + nid], nid);
e498be7d
CL
1612 }
1613 }
1614 }
1da177e4 1615
e498be7d 1616 /* 6) resize the head arrays to their final sizes */
1da177e4 1617 {
343e0d7a 1618 struct kmem_cache *cachep;
fc0abb14 1619 mutex_lock(&cache_chain_mutex);
1da177e4 1620 list_for_each_entry(cachep, &cache_chain, next)
2ed3a4ef
CL
1621 if (enable_cpucache(cachep))
1622 BUG();
fc0abb14 1623 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
1624 }
1625
056c6241
RT
1626 /* Annotate slab for lockdep -- annotate the malloc caches */
1627 init_lock_keys();
1628
1629
1da177e4
LT
1630 /* Done! */
1631 g_cpucache_up = FULL;
1632
a737b3e2
AM
1633 /*
1634 * Register a cpu startup notifier callback that initializes
1635 * cpu_cache_get for all new cpus
1da177e4
LT
1636 */
1637 register_cpu_notifier(&cpucache_notifier);
1da177e4 1638
a737b3e2
AM
1639 /*
1640 * The reap timers are started later, with a module init call: That part
1641 * of the kernel is not yet operational.
1da177e4
LT
1642 */
1643}
1644
1645static int __init cpucache_init(void)
1646{
1647 int cpu;
1648
a737b3e2
AM
1649 /*
1650 * Register the timers that return unneeded pages to the page allocator
1da177e4 1651 */
e498be7d 1652 for_each_online_cpu(cpu)
a737b3e2 1653 start_cpu_timer(cpu);
1da177e4
LT
1654 return 0;
1655}
1da177e4
LT
1656__initcall(cpucache_init);
1657
1658/*
1659 * Interface to system's page allocator. No need to hold the cache-lock.
1660 *
1661 * If we requested dmaable memory, we will get it. Even if we
1662 * did not request dmaable memory, we might get it, but that
1663 * would be relatively rare and ignorable.
1664 */
343e0d7a 1665static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1da177e4
LT
1666{
1667 struct page *page;
e1b6aa6f 1668 int nr_pages;
1da177e4
LT
1669 int i;
1670
d6fef9da 1671#ifndef CONFIG_MMU
e1b6aa6f
CH
1672 /*
1673 * Nommu uses slabs for process anonymous memory allocations, and thus
1674 * requires __GFP_COMP to properly refcount higher order allocations
d6fef9da 1675 */
e1b6aa6f 1676 flags |= __GFP_COMP;
d6fef9da 1677#endif
765c4507 1678
3c517a61 1679 flags |= cachep->gfpflags;
e12ba74d
MG
1680 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1681 flags |= __GFP_RECLAIMABLE;
e1b6aa6f
CH
1682
1683 page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1da177e4
LT
1684 if (!page)
1685 return NULL;
1da177e4 1686
e1b6aa6f 1687 nr_pages = (1 << cachep->gfporder);
1da177e4 1688 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
972d1a7b
CL
1689 add_zone_page_state(page_zone(page),
1690 NR_SLAB_RECLAIMABLE, nr_pages);
1691 else
1692 add_zone_page_state(page_zone(page),
1693 NR_SLAB_UNRECLAIMABLE, nr_pages);
e1b6aa6f
CH
1694 for (i = 0; i < nr_pages; i++)
1695 __SetPageSlab(page + i);
1696 return page_address(page);
1da177e4
LT
1697}
1698
1699/*
1700 * Interface to system's page release.
1701 */
343e0d7a 1702static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1da177e4 1703{
b28a02de 1704 unsigned long i = (1 << cachep->gfporder);
1da177e4
LT
1705 struct page *page = virt_to_page(addr);
1706 const unsigned long nr_freed = i;
1707
972d1a7b
CL
1708 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1709 sub_zone_page_state(page_zone(page),
1710 NR_SLAB_RECLAIMABLE, nr_freed);
1711 else
1712 sub_zone_page_state(page_zone(page),
1713 NR_SLAB_UNRECLAIMABLE, nr_freed);
1da177e4 1714 while (i--) {
f205b2fe
NP
1715 BUG_ON(!PageSlab(page));
1716 __ClearPageSlab(page);
1da177e4
LT
1717 page++;
1718 }
1da177e4
LT
1719 if (current->reclaim_state)
1720 current->reclaim_state->reclaimed_slab += nr_freed;
1721 free_pages((unsigned long)addr, cachep->gfporder);
1da177e4
LT
1722}
1723
1724static void kmem_rcu_free(struct rcu_head *head)
1725{
b28a02de 1726 struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
343e0d7a 1727 struct kmem_cache *cachep = slab_rcu->cachep;
1da177e4
LT
1728
1729 kmem_freepages(cachep, slab_rcu->addr);
1730 if (OFF_SLAB(cachep))
1731 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1732}
1733
1734#if DEBUG
1735
1736#ifdef CONFIG_DEBUG_PAGEALLOC
343e0d7a 1737static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
b28a02de 1738 unsigned long caller)
1da177e4 1739{
3dafccf2 1740 int size = obj_size(cachep);
1da177e4 1741
3dafccf2 1742 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1da177e4 1743
b28a02de 1744 if (size < 5 * sizeof(unsigned long))
1da177e4
LT
1745 return;
1746
b28a02de
PE
1747 *addr++ = 0x12345678;
1748 *addr++ = caller;
1749 *addr++ = smp_processor_id();
1750 size -= 3 * sizeof(unsigned long);
1da177e4
LT
1751 {
1752 unsigned long *sptr = &caller;
1753 unsigned long svalue;
1754
1755 while (!kstack_end(sptr)) {
1756 svalue = *sptr++;
1757 if (kernel_text_address(svalue)) {
b28a02de 1758 *addr++ = svalue;
1da177e4
LT
1759 size -= sizeof(unsigned long);
1760 if (size <= sizeof(unsigned long))
1761 break;
1762 }
1763 }
1764
1765 }
b28a02de 1766 *addr++ = 0x87654321;
1da177e4
LT
1767}
1768#endif
1769
343e0d7a 1770static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1da177e4 1771{
3dafccf2
MS
1772 int size = obj_size(cachep);
1773 addr = &((char *)addr)[obj_offset(cachep)];
1da177e4
LT
1774
1775 memset(addr, val, size);
b28a02de 1776 *(unsigned char *)(addr + size - 1) = POISON_END;
1da177e4
LT
1777}
1778
1779static void dump_line(char *data, int offset, int limit)
1780{
1781 int i;
aa83aa40
DJ
1782 unsigned char error = 0;
1783 int bad_count = 0;
1784
1da177e4 1785 printk(KERN_ERR "%03x:", offset);
aa83aa40
DJ
1786 for (i = 0; i < limit; i++) {
1787 if (data[offset + i] != POISON_FREE) {
1788 error = data[offset + i];
1789 bad_count++;
1790 }
b28a02de 1791 printk(" %02x", (unsigned char)data[offset + i]);
aa83aa40 1792 }
1da177e4 1793 printk("\n");
aa83aa40
DJ
1794
1795 if (bad_count == 1) {
1796 error ^= POISON_FREE;
1797 if (!(error & (error - 1))) {
1798 printk(KERN_ERR "Single bit error detected. Probably "
1799 "bad RAM.\n");
1800#ifdef CONFIG_X86
1801 printk(KERN_ERR "Run memtest86+ or a similar memory "
1802 "test tool.\n");
1803#else
1804 printk(KERN_ERR "Run a memory test tool.\n");
1805#endif
1806 }
1807 }
1da177e4
LT
1808}
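/*
 * Editor's illustration (not part of slab.c): the power-of-two test above is
 * what spots a single flipped bit. XOR-ing the bad byte with the expected
 * poison value leaves exactly one bit set; POISON_FREE is 0x6b in this kernel,
 * so a one-bit flip such as 0x7b XORs to 0x10.
 */
static inline int example_single_bit_flip(unsigned char read_back)
{
	unsigned char error = 0x6b ^ read_back;	/* 0x6b == POISON_FREE */

	/* assumes the byte really differs from the poison pattern */
	return !(error & (error - 1));		/* true for 0x7b, false for 0x5a */
}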
1809#endif
1810
1811#if DEBUG
1812
343e0d7a 1813static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1da177e4
LT
1814{
1815 int i, size;
1816 char *realobj;
1817
1818 if (cachep->flags & SLAB_RED_ZONE) {
b46b8f19 1819 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
a737b3e2
AM
1820 *dbg_redzone1(cachep, objp),
1821 *dbg_redzone2(cachep, objp));
1da177e4
LT
1822 }
1823
1824 if (cachep->flags & SLAB_STORE_USER) {
1825 printk(KERN_ERR "Last user: [<%p>]",
a737b3e2 1826 *dbg_userword(cachep, objp));
1da177e4 1827 print_symbol("(%s)",
a737b3e2 1828 (unsigned long)*dbg_userword(cachep, objp));
1da177e4
LT
1829 printk("\n");
1830 }
3dafccf2
MS
1831 realobj = (char *)objp + obj_offset(cachep);
1832 size = obj_size(cachep);
b28a02de 1833 for (i = 0; i < size && lines; i += 16, lines--) {
1da177e4
LT
1834 int limit;
1835 limit = 16;
b28a02de
PE
1836 if (i + limit > size)
1837 limit = size - i;
1da177e4
LT
1838 dump_line(realobj, i, limit);
1839 }
1840}
1841
343e0d7a 1842static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1da177e4
LT
1843{
1844 char *realobj;
1845 int size, i;
1846 int lines = 0;
1847
3dafccf2
MS
1848 realobj = (char *)objp + obj_offset(cachep);
1849 size = obj_size(cachep);
1da177e4 1850
b28a02de 1851 for (i = 0; i < size; i++) {
1da177e4 1852 char exp = POISON_FREE;
b28a02de 1853 if (i == size - 1)
1da177e4
LT
1854 exp = POISON_END;
1855 if (realobj[i] != exp) {
1856 int limit;
1857 /* Mismatch ! */
1858 /* Print header */
1859 if (lines == 0) {
b28a02de 1860 printk(KERN_ERR
e94a40c5
DH
1861 "Slab corruption: %s start=%p, len=%d\n",
1862 cachep->name, realobj, size);
1da177e4
LT
1863 print_objinfo(cachep, objp, 0);
1864 }
1865 /* Hexdump the affected line */
b28a02de 1866 i = (i / 16) * 16;
1da177e4 1867 limit = 16;
b28a02de
PE
1868 if (i + limit > size)
1869 limit = size - i;
1da177e4
LT
1870 dump_line(realobj, i, limit);
1871 i += 16;
1872 lines++;
1873 /* Limit to 5 lines */
1874 if (lines > 5)
1875 break;
1876 }
1877 }
1878 if (lines != 0) {
1879 /* Print some data about the neighboring objects, if they
1880 * exist:
1881 */
6ed5eb22 1882 struct slab *slabp = virt_to_slab(objp);
8fea4e96 1883 unsigned int objnr;
1da177e4 1884
8fea4e96 1885 objnr = obj_to_index(cachep, slabp, objp);
1da177e4 1886 if (objnr) {
8fea4e96 1887 objp = index_to_obj(cachep, slabp, objnr - 1);
3dafccf2 1888 realobj = (char *)objp + obj_offset(cachep);
1da177e4 1889 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
b28a02de 1890 realobj, size);
1da177e4
LT
1891 print_objinfo(cachep, objp, 2);
1892 }
b28a02de 1893 if (objnr + 1 < cachep->num) {
8fea4e96 1894 objp = index_to_obj(cachep, slabp, objnr + 1);
3dafccf2 1895 realobj = (char *)objp + obj_offset(cachep);
1da177e4 1896 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
b28a02de 1897 realobj, size);
1da177e4
LT
1898 print_objinfo(cachep, objp, 2);
1899 }
1900 }
1901}
1902#endif
1903
12dd36fa 1904#if DEBUG
e79aec29 1905static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1da177e4 1906{
1da177e4
LT
1907 int i;
1908 for (i = 0; i < cachep->num; i++) {
8fea4e96 1909 void *objp = index_to_obj(cachep, slabp, i);
1da177e4
LT
1910
1911 if (cachep->flags & SLAB_POISON) {
1912#ifdef CONFIG_DEBUG_PAGEALLOC
a737b3e2
AM
1913 if (cachep->buffer_size % PAGE_SIZE == 0 &&
1914 OFF_SLAB(cachep))
b28a02de 1915 kernel_map_pages(virt_to_page(objp),
a737b3e2 1916 cachep->buffer_size / PAGE_SIZE, 1);
1da177e4
LT
1917 else
1918 check_poison_obj(cachep, objp);
1919#else
1920 check_poison_obj(cachep, objp);
1921#endif
1922 }
1923 if (cachep->flags & SLAB_RED_ZONE) {
1924 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1925 slab_error(cachep, "start of a freed object "
b28a02de 1926 "was overwritten");
1da177e4
LT
1927 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1928 slab_error(cachep, "end of a freed object "
b28a02de 1929 "was overwritten");
1da177e4 1930 }
1da177e4 1931 }
12dd36fa 1932}
1da177e4 1933#else
e79aec29 1934static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
12dd36fa 1935{
12dd36fa 1936}
1da177e4
LT
1937#endif
1938
911851e6
RD
1939/**
1940 * slab_destroy - destroy and release all objects in a slab
1941 * @cachep: cache pointer being destroyed
1942 * @slabp: slab pointer being destroyed
1943 *
12dd36fa 1944 * Destroy all the objs in a slab, and release the mem back to the system.
a737b3e2
AM
1945 * Before calling, the slab must have been unlinked from the cache. The
1946 * cache-lock is not held/needed.
12dd36fa 1947 */
343e0d7a 1948static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
12dd36fa
MD
1949{
1950 void *addr = slabp->s_mem - slabp->colouroff;
1951
e79aec29 1952 slab_destroy_debugcheck(cachep, slabp);
1da177e4
LT
1953 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1954 struct slab_rcu *slab_rcu;
1955
b28a02de 1956 slab_rcu = (struct slab_rcu *)slabp;
1da177e4
LT
1957 slab_rcu->cachep = cachep;
1958 slab_rcu->addr = addr;
1959 call_rcu(&slab_rcu->head, kmem_rcu_free);
1960 } else {
1961 kmem_freepages(cachep, addr);
873623df
IM
1962 if (OFF_SLAB(cachep))
1963 kmem_cache_free(cachep->slabp_cache, slabp);
1da177e4
LT
1964 }
1965}
1966
117f6eb1
CL
1967static void __kmem_cache_destroy(struct kmem_cache *cachep)
1968{
1969 int i;
1970 struct kmem_list3 *l3;
1971
1972 for_each_online_cpu(i)
1973 kfree(cachep->array[i]);
1974
1975 /* NUMA: free the list3 structures */
1976 for_each_online_node(i) {
1977 l3 = cachep->nodelists[i];
1978 if (l3) {
1979 kfree(l3->shared);
1980 free_alien_cache(l3->alien);
1981 kfree(l3);
1982 }
1983 }
1984 kmem_cache_free(&cache_cache, cachep);
1985}
1986
1987
4d268eba 1988/**
a70773dd
RD
1989 * calculate_slab_order - calculate size (page order) of slabs
1990 * @cachep: pointer to the cache that is being created
1991 * @size: size of objects to be created in this cache.
1992 * @align: required alignment for the objects.
1993 * @flags: slab allocation flags
1994 *
1995 * Also calculates the number of objects per slab.
4d268eba
PE
1996 *
1997 * This could be made much more intelligent. For now, try to avoid using
1998 * high order pages for slabs. When the gfp() functions are more friendly
1999 * towards high-order requests, this should be changed.
2000 */
a737b3e2 2001static size_t calculate_slab_order(struct kmem_cache *cachep,
ee13d785 2002 size_t size, size_t align, unsigned long flags)
4d268eba 2003{
b1ab41c4 2004 unsigned long offslab_limit;
4d268eba 2005 size_t left_over = 0;
9888e6fa 2006 int gfporder;
4d268eba 2007
0aa817f0 2008 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
4d268eba
PE
2009 unsigned int num;
2010 size_t remainder;
2011
9888e6fa 2012 cache_estimate(gfporder, size, align, flags, &remainder, &num);
4d268eba
PE
2013 if (!num)
2014 continue;
9888e6fa 2015
b1ab41c4
IM
2016 if (flags & CFLGS_OFF_SLAB) {
2017 /*
2018 * Max number of objs-per-slab for caches which
2019 * use off-slab slabs. Needed to avoid a possible
2020 * looping condition in cache_grow().
2021 */
2022 offslab_limit = size - sizeof(struct slab);
2023 offslab_limit /= sizeof(kmem_bufctl_t);
2024
2025 if (num > offslab_limit)
2026 break;
2027 }
4d268eba 2028
9888e6fa 2029 /* Found something acceptable - save it away */
4d268eba 2030 cachep->num = num;
9888e6fa 2031 cachep->gfporder = gfporder;
4d268eba
PE
2032 left_over = remainder;
2033
f78bb8ad
LT
2034 /*
2035 * A VFS-reclaimable slab tends to have most allocations
2036 * as GFP_NOFS and we really don't want to have to be allocating
2037 * higher-order pages when we are unable to shrink dcache.
2038 */
2039 if (flags & SLAB_RECLAIM_ACCOUNT)
2040 break;
2041
4d268eba
PE
2042 /*
2043 * A large number of objects is good, but very large slabs are
2044 * currently bad for the gfp()s.
2045 */
9888e6fa 2046 if (gfporder >= slab_break_gfp_order)
4d268eba
PE
2047 break;
2048
9888e6fa
LT
2049 /*
2050 * Acceptable internal fragmentation?
2051 */
a737b3e2 2052 if (left_over * 8 <= (PAGE_SIZE << gfporder))
4d268eba
PE
2053 break;
2054 }
2055 return left_over;
2056}
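/*
 * Editor's worked example (not part of slab.c): with 4096-byte pages and an
 * 800-byte object, order 0 holds 5 objects with 96 bytes left over, and
 * 96 * 8 = 768 <= 4096, so order 0 is accepted. The hypothetical user-space
 * sketch below mirrors the loop above, with cache_estimate() reduced to a
 * stub that ignores the slab descriptor and bufctl overhead.
 */
#if 0	/* stand-alone sketch, build outside the kernel */
#include <stdio.h>

#define SKETCH_PAGE_SIZE	4096UL
#define SKETCH_BREAK_ORDER	1	/* stand-in for slab_break_gfp_order */

static void sketch_estimate(int order, size_t size,
			    unsigned int *num, size_t *left)
{
	size_t total = SKETCH_PAGE_SIZE << order;

	*num = total / size;
	*left = total - *num * size;
}

int main(void)
{
	size_t size = 800, left_over = 0;
	unsigned int num = 0;
	int order;

	for (order = 0; order <= 10; order++) {
		sketch_estimate(order, size, &num, &left_over);
		if (!num)
			continue;
		/* acceptable internal fragmentation: waste at most 1/8 of the slab */
		if (left_over * 8 <= (SKETCH_PAGE_SIZE << order))
			break;
		if (order >= SKETCH_BREAK_ORDER)
			break;
	}
	printf("order %d: %u objects, %zu bytes wasted\n", order, num, left_over);
	return 0;
}
#endif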
2057
38bdc32a 2058static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
f30cf7d1 2059{
2ed3a4ef
CL
2060 if (g_cpucache_up == FULL)
2061 return enable_cpucache(cachep);
2062
f30cf7d1
PE
2063 if (g_cpucache_up == NONE) {
2064 /*
2065 * Note: the first kmem_cache_create must create the cache
2066 * that's used by kmalloc(24), otherwise the creation of
2067 * further caches will BUG().
2068 */
2069 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2070
2071 /*
2072 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2073 * the first cache, then we need to set up all its list3s,
2074 * otherwise the creation of further caches will BUG().
2075 */
2076 set_up_list3s(cachep, SIZE_AC);
2077 if (INDEX_AC == INDEX_L3)
2078 g_cpucache_up = PARTIAL_L3;
2079 else
2080 g_cpucache_up = PARTIAL_AC;
2081 } else {
2082 cachep->array[smp_processor_id()] =
2083 kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
2084
2085 if (g_cpucache_up == PARTIAL_AC) {
2086 set_up_list3s(cachep, SIZE_L3);
2087 g_cpucache_up = PARTIAL_L3;
2088 } else {
2089 int node;
556a169d 2090 for_each_online_node(node) {
f30cf7d1
PE
2091 cachep->nodelists[node] =
2092 kmalloc_node(sizeof(struct kmem_list3),
2093 GFP_KERNEL, node);
2094 BUG_ON(!cachep->nodelists[node]);
2095 kmem_list3_init(cachep->nodelists[node]);
2096 }
2097 }
2098 }
2099 cachep->nodelists[numa_node_id()]->next_reap =
2100 jiffies + REAPTIMEOUT_LIST3 +
2101 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2102
2103 cpu_cache_get(cachep)->avail = 0;
2104 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2105 cpu_cache_get(cachep)->batchcount = 1;
2106 cpu_cache_get(cachep)->touched = 0;
2107 cachep->batchcount = 1;
2108 cachep->limit = BOOT_CPUCACHE_ENTRIES;
2ed3a4ef 2109 return 0;
f30cf7d1
PE
2110}
2111
1da177e4
LT
2112/**
2113 * kmem_cache_create - Create a cache.
2114 * @name: A string which is used in /proc/slabinfo to identify this cache.
2115 * @size: The size of objects to be created in this cache.
2116 * @align: The required alignment for the objects.
2117 * @flags: SLAB flags
2118 * @ctor: A constructor for the objects.
1da177e4
LT
2119 *
2120 * Returns a ptr to the cache on success, NULL on failure.
2121 * Cannot be called within an interrupt, but can be interrupted.
20c2df83 2122 * The @ctor is run when new pages are allocated by the cache.
1da177e4
LT
2123 *
2124 * @name must be valid until the cache is destroyed. This implies that
a737b3e2 2125 * the module calling this has to destroy the cache before getting unloaded.
249da166
CM
2126 * Note that kmem_cache_name() is not guaranteed to return the same pointer,
2127 * therefore applications must manage it themselves.
a737b3e2 2128 *
1da177e4
LT
2129 * The flags are
2130 *
2131 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2132 * to catch references to uninitialised memory.
2133 *
2134 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2135 * for buffer overruns.
2136 *
1da177e4
LT
2137 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2138 * cacheline. This can be beneficial if you're counting cycles as closely
2139 * as davem.
2140 */
343e0d7a 2141struct kmem_cache *
1da177e4 2142kmem_cache_create (const char *name, size_t size, size_t align,
51cc5068 2143 unsigned long flags, void (*ctor)(void *))
1da177e4
LT
2144{
2145 size_t left_over, slab_size, ralign;
7a7c381d 2146 struct kmem_cache *cachep = NULL, *pc;
1da177e4
LT
2147
2148 /*
2149 * Sanity checks... these are all serious usage bugs.
2150 */
a737b3e2 2151 if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
20c2df83 2152 size > KMALLOC_MAX_SIZE) {
d40cee24 2153 printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
a737b3e2 2154 name);
b28a02de
PE
2155 BUG();
2156 }
1da177e4 2157
f0188f47 2158 /*
8f5be20b 2159 * We use cache_chain_mutex to ensure a consistent view of
174596a0 2160 * cpu_online_mask as well. Please see cpuup_callback
f0188f47 2161 */
95402b38 2162 get_online_cpus();
fc0abb14 2163 mutex_lock(&cache_chain_mutex);
4f12bb4f 2164
7a7c381d 2165 list_for_each_entry(pc, &cache_chain, next) {
4f12bb4f
AM
2166 char tmp;
2167 int res;
2168
2169 /*
2170 * This happens when a module gets unloaded without destroying
2171 * its slab cache, and no one else has reused the module's vmalloc
2172 * area. Print a warning.
2173 */
138ae663 2174 res = probe_kernel_address(pc->name, tmp);
4f12bb4f 2175 if (res) {
b4169525 2176 printk(KERN_ERR
2177 "SLAB: cache with size %d has lost its name\n",
3dafccf2 2178 pc->buffer_size);
4f12bb4f
AM
2179 continue;
2180 }
2181
b28a02de 2182 if (!strcmp(pc->name, name)) {
b4169525 2183 printk(KERN_ERR
2184 "kmem_cache_create: duplicate cache %s\n", name);
4f12bb4f
AM
2185 dump_stack();
2186 goto oops;
2187 }
2188 }
2189
1da177e4
LT
2190#if DEBUG
2191 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
1da177e4
LT
2192#if FORCED_DEBUG
2193 /*
2194 * Enable redzoning and last user accounting, except for caches with
2195 * large objects, if the increased size would increase the object size
2196 * above the next power of two: caches with object sizes just above a
2197 * power of two have a significant amount of internal fragmentation.
2198 */
87a927c7
DW
2199 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2200 2 * sizeof(unsigned long long)))
b28a02de 2201 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1da177e4
LT
2202 if (!(flags & SLAB_DESTROY_BY_RCU))
2203 flags |= SLAB_POISON;
2204#endif
2205 if (flags & SLAB_DESTROY_BY_RCU)
2206 BUG_ON(flags & SLAB_POISON);
2207#endif
1da177e4 2208 /*
a737b3e2
AM
2209 * Always checks flags; a caller might be expecting debug support which
2210 * isn't available.
1da177e4 2211 */
40094fa6 2212 BUG_ON(flags & ~CREATE_MASK);
1da177e4 2213
a737b3e2
AM
2214 /*
2215 * Check that size is in terms of words. This is needed to avoid
1da177e4
LT
2216 * unaligned accesses for some archs when redzoning is used, and makes
2217 * sure any on-slab bufctl's are also correctly aligned.
2218 */
b28a02de
PE
2219 if (size & (BYTES_PER_WORD - 1)) {
2220 size += (BYTES_PER_WORD - 1);
2221 size &= ~(BYTES_PER_WORD - 1);
1da177e4
LT
2222 }
2223
a737b3e2
AM
2224 /* calculate the final buffer alignment: */
2225
1da177e4
LT
2226 /* 1) arch recommendation: can be overridden for debug */
2227 if (flags & SLAB_HWCACHE_ALIGN) {
a737b3e2
AM
2228 /*
2229 * Default alignment: as specified by the arch code. Except if
2230 * an object is really small, then squeeze multiple objects into
2231 * one cacheline.
1da177e4
LT
2232 */
2233 ralign = cache_line_size();
b28a02de 2234 while (size <= ralign / 2)
1da177e4
LT
2235 ralign /= 2;
2236 } else {
2237 ralign = BYTES_PER_WORD;
2238 }
ca5f9703
PE
2239
2240 /*
87a927c7
DW
2241 * Redzoning and user store require word alignment or possibly larger.
2242 * Note this will be overridden by architecture or caller mandated
2243 * alignment if either is greater than BYTES_PER_WORD.
ca5f9703 2244 */
87a927c7
DW
2245 if (flags & SLAB_STORE_USER)
2246 ralign = BYTES_PER_WORD;
2247
2248 if (flags & SLAB_RED_ZONE) {
2249 ralign = REDZONE_ALIGN;
2250 /* If redzoning, ensure that the second redzone is suitably
2251 * aligned, by adjusting the object size accordingly. */
2252 size += REDZONE_ALIGN - 1;
2253 size &= ~(REDZONE_ALIGN - 1);
2254 }
ca5f9703 2255
a44b56d3 2256 /* 2) arch mandated alignment */
1da177e4
LT
2257 if (ralign < ARCH_SLAB_MINALIGN) {
2258 ralign = ARCH_SLAB_MINALIGN;
1da177e4 2259 }
a44b56d3 2260 /* 3) caller mandated alignment */
1da177e4
LT
2261 if (ralign < align) {
2262 ralign = align;
1da177e4 2263 }
a44b56d3 2264 /* disable debug if necessary */
b46b8f19 2265 if (ralign > __alignof__(unsigned long long))
a44b56d3 2266 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
a737b3e2 2267 /*
ca5f9703 2268 * 4) Store it.
1da177e4
LT
2269 */
2270 align = ralign;
2271
2272 /* Get cache's description obj. */
e94b1766 2273 cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
1da177e4 2274 if (!cachep)
4f12bb4f 2275 goto oops;
1da177e4
LT
2276
2277#if DEBUG
3dafccf2 2278 cachep->obj_size = size;
1da177e4 2279
ca5f9703
PE
2280 /*
2281 * Both debugging options require word-alignment which is calculated
2282 * into align above.
2283 */
1da177e4 2284 if (flags & SLAB_RED_ZONE) {
1da177e4 2285 /* add space for red zone words */
b46b8f19
DW
2286 cachep->obj_offset += sizeof(unsigned long long);
2287 size += 2 * sizeof(unsigned long long);
1da177e4
LT
2288 }
2289 if (flags & SLAB_STORE_USER) {
ca5f9703 2290 /* user store requires one word of storage behind the end of
87a927c7
DW
2291 * the real object. But if the second red zone needs to be
2292 * aligned to 64 bits, we must allow that much space.
1da177e4 2293 */
87a927c7
DW
2294 if (flags & SLAB_RED_ZONE)
2295 size += REDZONE_ALIGN;
2296 else
2297 size += BYTES_PER_WORD;
1da177e4
LT
2298 }
2299#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
b28a02de 2300 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
3dafccf2
MS
2301 && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2302 cachep->obj_offset += PAGE_SIZE - size;
1da177e4
LT
2303 size = PAGE_SIZE;
2304 }
2305#endif
2306#endif
2307
e0a42726
IM
2308 /*
2309 * Determine if the slab management is 'on' or 'off' slab.
2310 * (bootstrapping cannot cope with offslab caches so don't do
2311 * it too early on.)
2312 */
2313 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
1da177e4
LT
2314 /*
2315 * Size is large, assume best to place the slab management obj
2316 * off-slab (should allow better packing of objs).
2317 */
2318 flags |= CFLGS_OFF_SLAB;
2319
2320 size = ALIGN(size, align);
2321
f78bb8ad 2322 left_over = calculate_slab_order(cachep, size, align, flags);
1da177e4
LT
2323
2324 if (!cachep->num) {
b4169525 2325 printk(KERN_ERR
2326 "kmem_cache_create: couldn't create cache %s.\n", name);
1da177e4
LT
2327 kmem_cache_free(&cache_cache, cachep);
2328 cachep = NULL;
4f12bb4f 2329 goto oops;
1da177e4 2330 }
b28a02de
PE
2331 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2332 + sizeof(struct slab), align);
1da177e4
LT
2333
2334 /*
2335 * If the slab has been placed off-slab, and we have enough space then
2336 * move it on-slab. This is at the expense of any extra colouring.
2337 */
2338 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2339 flags &= ~CFLGS_OFF_SLAB;
2340 left_over -= slab_size;
2341 }
2342
2343 if (flags & CFLGS_OFF_SLAB) {
2344 /* really off slab. No need for manual alignment */
b28a02de
PE
2345 slab_size =
2346 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
1da177e4
LT
2347 }
2348
2349 cachep->colour_off = cache_line_size();
2350 /* Offset must be a multiple of the alignment. */
2351 if (cachep->colour_off < align)
2352 cachep->colour_off = align;
b28a02de 2353 cachep->colour = left_over / cachep->colour_off;
1da177e4
LT
2354 cachep->slab_size = slab_size;
2355 cachep->flags = flags;
2356 cachep->gfpflags = 0;
4b51d669 2357 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
1da177e4 2358 cachep->gfpflags |= GFP_DMA;
3dafccf2 2359 cachep->buffer_size = size;
6a2d7a95 2360 cachep->reciprocal_buffer_size = reciprocal_value(size);
1da177e4 2361
e5ac9c5a 2362 if (flags & CFLGS_OFF_SLAB) {
b2d55073 2363 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
e5ac9c5a
RT
2364 /*
2365 * This is a possibility for one of the malloc_sizes caches.
2366 * But since we go off slab only for object size greater than
2367 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2368 * this should not happen at all.
2369 * But leave a BUG_ON for some lucky dude.
2370 */
6cb8f913 2371 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
e5ac9c5a 2372 }
1da177e4 2373 cachep->ctor = ctor;
1da177e4
LT
2374 cachep->name = name;
2375
2ed3a4ef
CL
2376 if (setup_cpu_cache(cachep)) {
2377 __kmem_cache_destroy(cachep);
2378 cachep = NULL;
2379 goto oops;
2380 }
1da177e4 2381
1da177e4
LT
2382 /* cache setup completed, link it into the list */
2383 list_add(&cachep->next, &cache_chain);
a737b3e2 2384oops:
1da177e4
LT
2385 if (!cachep && (flags & SLAB_PANIC))
2386 panic("kmem_cache_create(): failed to create slab `%s'\n",
b28a02de 2387 name);
fc0abb14 2388 mutex_unlock(&cache_chain_mutex);
95402b38 2389 put_online_cpus();
1da177e4
LT
2390 return cachep;
2391}
2392EXPORT_SYMBOL(kmem_cache_create);
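/*
 * Editor's usage sketch (hypothetical, not part of slab.c): a typical
 * module-side caller of kmem_cache_create(). The struct, cache name and
 * helper functions are invented for illustration.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/list.h>

struct foo {
	struct list_head link;
	unsigned long data;
};

static struct kmem_cache *foo_cachep;

/* Runs once per object when a new slab page is populated; objects must be
 * handed back to kmem_cache_free() in this same initialized state. */
static void foo_ctor(void *obj)
{
	struct foo *f = obj;

	INIT_LIST_HEAD(&f->link);
}

static int __init foo_init(void)
{
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN | SLAB_PANIC,
				       foo_ctor);
	return 0;	/* SLAB_PANIC: a failure would have panicked instead */
}

static struct foo *foo_alloc(gfp_t gfp)
{
	return kmem_cache_alloc(foo_cachep, gfp);
}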
2393
2394#if DEBUG
2395static void check_irq_off(void)
2396{
2397 BUG_ON(!irqs_disabled());
2398}
2399
2400static void check_irq_on(void)
2401{
2402 BUG_ON(irqs_disabled());
2403}
2404
343e0d7a 2405static void check_spinlock_acquired(struct kmem_cache *cachep)
1da177e4
LT
2406{
2407#ifdef CONFIG_SMP
2408 check_irq_off();
e498be7d 2409 assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
1da177e4
LT
2410#endif
2411}
e498be7d 2412
343e0d7a 2413static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
e498be7d
CL
2414{
2415#ifdef CONFIG_SMP
2416 check_irq_off();
2417 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2418#endif
2419}
2420
1da177e4
LT
2421#else
2422#define check_irq_off() do { } while(0)
2423#define check_irq_on() do { } while(0)
2424#define check_spinlock_acquired(x) do { } while(0)
e498be7d 2425#define check_spinlock_acquired_node(x, y) do { } while(0)
1da177e4
LT
2426#endif
2427
aab2207c
CL
2428static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2429 struct array_cache *ac,
2430 int force, int node);
2431
1da177e4
LT
2432static void do_drain(void *arg)
2433{
a737b3e2 2434 struct kmem_cache *cachep = arg;
1da177e4 2435 struct array_cache *ac;
ff69416e 2436 int node = numa_node_id();
1da177e4
LT
2437
2438 check_irq_off();
9a2dba4b 2439 ac = cpu_cache_get(cachep);
ff69416e
CL
2440 spin_lock(&cachep->nodelists[node]->list_lock);
2441 free_block(cachep, ac->entry, ac->avail, node);
2442 spin_unlock(&cachep->nodelists[node]->list_lock);
1da177e4
LT
2443 ac->avail = 0;
2444}
2445
343e0d7a 2446static void drain_cpu_caches(struct kmem_cache *cachep)
1da177e4 2447{
e498be7d
CL
2448 struct kmem_list3 *l3;
2449 int node;
2450
15c8b6c1 2451 on_each_cpu(do_drain, cachep, 1);
1da177e4 2452 check_irq_on();
b28a02de 2453 for_each_online_node(node) {
e498be7d 2454 l3 = cachep->nodelists[node];
a4523a8b
RD
2455 if (l3 && l3->alien)
2456 drain_alien_cache(cachep, l3->alien);
2457 }
2458
2459 for_each_online_node(node) {
2460 l3 = cachep->nodelists[node];
2461 if (l3)
aab2207c 2462 drain_array(cachep, l3, l3->shared, 1, node);
e498be7d 2463 }
1da177e4
LT
2464}
2465
ed11d9eb
CL
2466/*
2467 * Remove slabs from the list of free slabs.
2468 * Specify the number of slabs to drain in tofree.
2469 *
2470 * Returns the actual number of slabs released.
2471 */
2472static int drain_freelist(struct kmem_cache *cache,
2473 struct kmem_list3 *l3, int tofree)
1da177e4 2474{
ed11d9eb
CL
2475 struct list_head *p;
2476 int nr_freed;
1da177e4 2477 struct slab *slabp;
1da177e4 2478
ed11d9eb
CL
2479 nr_freed = 0;
2480 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
1da177e4 2481
ed11d9eb 2482 spin_lock_irq(&l3->list_lock);
e498be7d 2483 p = l3->slabs_free.prev;
ed11d9eb
CL
2484 if (p == &l3->slabs_free) {
2485 spin_unlock_irq(&l3->list_lock);
2486 goto out;
2487 }
1da177e4 2488
ed11d9eb 2489 slabp = list_entry(p, struct slab, list);
1da177e4 2490#if DEBUG
40094fa6 2491 BUG_ON(slabp->inuse);
1da177e4
LT
2492#endif
2493 list_del(&slabp->list);
ed11d9eb
CL
2494 /*
2495 * Safe to drop the lock. The slab is no longer linked
2496 * to the cache.
2497 */
2498 l3->free_objects -= cache->num;
e498be7d 2499 spin_unlock_irq(&l3->list_lock);
ed11d9eb
CL
2500 slab_destroy(cache, slabp);
2501 nr_freed++;
1da177e4 2502 }
ed11d9eb
CL
2503out:
2504 return nr_freed;
1da177e4
LT
2505}
2506
8f5be20b 2507/* Called with cache_chain_mutex held to protect against cpu hotplug */
343e0d7a 2508static int __cache_shrink(struct kmem_cache *cachep)
e498be7d
CL
2509{
2510 int ret = 0, i = 0;
2511 struct kmem_list3 *l3;
2512
2513 drain_cpu_caches(cachep);
2514
2515 check_irq_on();
2516 for_each_online_node(i) {
2517 l3 = cachep->nodelists[i];
ed11d9eb
CL
2518 if (!l3)
2519 continue;
2520
2521 drain_freelist(cachep, l3, l3->free_objects);
2522
2523 ret += !list_empty(&l3->slabs_full) ||
2524 !list_empty(&l3->slabs_partial);
e498be7d
CL
2525 }
2526 return (ret ? 1 : 0);
2527}
2528
1da177e4
LT
2529/**
2530 * kmem_cache_shrink - Shrink a cache.
2531 * @cachep: The cache to shrink.
2532 *
2533 * Releases as many slabs as possible for a cache.
2534 * To help debugging, a zero exit status indicates all slabs were released.
2535 */
343e0d7a 2536int kmem_cache_shrink(struct kmem_cache *cachep)
1da177e4 2537{
8f5be20b 2538 int ret;
40094fa6 2539 BUG_ON(!cachep || in_interrupt());
1da177e4 2540
95402b38 2541 get_online_cpus();
8f5be20b
RT
2542 mutex_lock(&cache_chain_mutex);
2543 ret = __cache_shrink(cachep);
2544 mutex_unlock(&cache_chain_mutex);
95402b38 2545 put_online_cpus();
8f5be20b 2546 return ret;
1da177e4
LT
2547}
2548EXPORT_SYMBOL(kmem_cache_shrink);
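/*
 * Editor's usage sketch (hypothetical, not part of slab.c): foo_cachep is the
 * cache from the kmem_cache_create() example earlier. A non-zero return only
 * means some slabs are still in use; it is not an error code.
 */
static void foo_trim(void)
{
	if (kmem_cache_shrink(foo_cachep))
		pr_debug("foo_cache still has objects allocated\n");
}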
2549
2550/**
2551 * kmem_cache_destroy - delete a cache
2552 * @cachep: the cache to destroy
2553 *
72fd4a35 2554 * Remove a &struct kmem_cache object from the slab cache.
1da177e4
LT
2555 *
2556 * It is expected this function will be called by a module when it is
2557 * unloaded. This will remove the cache completely, and avoid a duplicate
2558 * cache being allocated each time a module is loaded and unloaded, if the
2559 * module doesn't have persistent in-kernel storage across loads and unloads.
2560 *
2561 * The cache must be empty before calling this function.
2562 *
2563 * The caller must guarantee that no one will allocate memory from the cache
2564 * during the kmem_cache_destroy().
2565 */
133d205a 2566void kmem_cache_destroy(struct kmem_cache *cachep)
1da177e4 2567{
40094fa6 2568 BUG_ON(!cachep || in_interrupt());
1da177e4 2569
1da177e4 2570 /* Find the cache in the chain of caches. */
95402b38 2571 get_online_cpus();
fc0abb14 2572 mutex_lock(&cache_chain_mutex);
1da177e4
LT
2573 /*
2574 * the chain is never empty, cache_cache is never destroyed
2575 */
2576 list_del(&cachep->next);
1da177e4
LT
2577 if (__cache_shrink(cachep)) {
2578 slab_error(cachep, "Can't free all objects");
b28a02de 2579 list_add(&cachep->next, &cache_chain);
fc0abb14 2580 mutex_unlock(&cache_chain_mutex);
95402b38 2581 put_online_cpus();
133d205a 2582 return;
1da177e4
LT
2583 }
2584
2585 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
fbd568a3 2586 synchronize_rcu();
1da177e4 2587
117f6eb1 2588 __kmem_cache_destroy(cachep);
8f5be20b 2589 mutex_unlock(&cache_chain_mutex);
95402b38 2590 put_online_cpus();
1da177e4
LT
2591}
2592EXPORT_SYMBOL(kmem_cache_destroy);
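/*
 * Editor's usage sketch (hypothetical, not part of slab.c): the matching
 * teardown for the foo_cachep example. Every object must already have been
 * freed, and the caller must make sure nothing can allocate from the cache
 * concurrently.
 */
static void __exit foo_exit(void)
{
	kmem_cache_destroy(foo_cachep);
}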
2593
e5ac9c5a
RT
2594/*
2595 * Get the memory for a slab management obj.
2596 * When a cache's slab descriptor is off-slab, the slab descriptors
2597 * always come from malloc_sizes caches. The slab descriptor cannot
2598 * come from the same cache which is getting created because,
2599 * when we are searching for an appropriate cache for these
2600 * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2601 * If we are creating a malloc_sizes cache here it would not be visible to
2602 * kmem_find_general_cachep till the initialization is complete.
2603 * Hence slabp_cache can never be the same as the cache being created.
2604 */
343e0d7a 2605static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
5b74ada7
RT
2606 int colour_off, gfp_t local_flags,
2607 int nodeid)
1da177e4
LT
2608{
2609 struct slab *slabp;
b28a02de 2610
1da177e4
LT
2611 if (OFF_SLAB(cachep)) {
2612 /* Slab management obj is off-slab. */
5b74ada7 2613 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
8759ec50 2614 local_flags, nodeid);
1da177e4
LT
2615 if (!slabp)
2616 return NULL;
2617 } else {
b28a02de 2618 slabp = objp + colour_off;
1da177e4
LT
2619 colour_off += cachep->slab_size;
2620 }
2621 slabp->inuse = 0;
2622 slabp->colouroff = colour_off;
b28a02de 2623 slabp->s_mem = objp + colour_off;
5b74ada7 2624 slabp->nodeid = nodeid;
e51bfd0a 2625 slabp->free = 0;
1da177e4
LT
2626 return slabp;
2627}
2628
2629static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2630{
b28a02de 2631 return (kmem_bufctl_t *) (slabp + 1);
1da177e4
LT
2632}
2633
343e0d7a 2634static void cache_init_objs(struct kmem_cache *cachep,
a35afb83 2635 struct slab *slabp)
1da177e4
LT
2636{
2637 int i;
2638
2639 for (i = 0; i < cachep->num; i++) {
8fea4e96 2640 void *objp = index_to_obj(cachep, slabp, i);
1da177e4
LT
2641#if DEBUG
2642 /* need to poison the objs? */
2643 if (cachep->flags & SLAB_POISON)
2644 poison_obj(cachep, objp, POISON_FREE);
2645 if (cachep->flags & SLAB_STORE_USER)
2646 *dbg_userword(cachep, objp) = NULL;
2647
2648 if (cachep->flags & SLAB_RED_ZONE) {
2649 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2650 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2651 }
2652 /*
a737b3e2
AM
2653 * Constructors are not allowed to allocate memory from the same
2654 * cache which they are a constructor for. Otherwise, deadlock.
2655 * They must also be threaded.
1da177e4
LT
2656 */
2657 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
51cc5068 2658 cachep->ctor(objp + obj_offset(cachep));
1da177e4
LT
2659
2660 if (cachep->flags & SLAB_RED_ZONE) {
2661 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2662 slab_error(cachep, "constructor overwrote the"
b28a02de 2663 " end of an object");
1da177e4
LT
2664 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2665 slab_error(cachep, "constructor overwrote the"
b28a02de 2666 " start of an object");
1da177e4 2667 }
a737b3e2
AM
2668 if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2669 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
b28a02de 2670 kernel_map_pages(virt_to_page(objp),
3dafccf2 2671 cachep->buffer_size / PAGE_SIZE, 0);
1da177e4
LT
2672#else
2673 if (cachep->ctor)
51cc5068 2674 cachep->ctor(objp);
1da177e4 2675#endif
b28a02de 2676 slab_bufctl(slabp)[i] = i + 1;
1da177e4 2677 }
b28a02de 2678 slab_bufctl(slabp)[i - 1] = BUFCTL_END;
1da177e4
LT
2679}
2680
343e0d7a 2681static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
1da177e4 2682{
4b51d669
CL
2683 if (CONFIG_ZONE_DMA_FLAG) {
2684 if (flags & GFP_DMA)
2685 BUG_ON(!(cachep->gfpflags & GFP_DMA));
2686 else
2687 BUG_ON(cachep->gfpflags & GFP_DMA);
2688 }
1da177e4
LT
2689}
2690
a737b3e2
AM
2691static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2692 int nodeid)
78d382d7 2693{
8fea4e96 2694 void *objp = index_to_obj(cachep, slabp, slabp->free);
78d382d7
MD
2695 kmem_bufctl_t next;
2696
2697 slabp->inuse++;
2698 next = slab_bufctl(slabp)[slabp->free];
2699#if DEBUG
2700 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2701 WARN_ON(slabp->nodeid != nodeid);
2702#endif
2703 slabp->free = next;
2704
2705 return objp;
2706}
2707
a737b3e2
AM
2708static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2709 void *objp, int nodeid)
78d382d7 2710{
8fea4e96 2711 unsigned int objnr = obj_to_index(cachep, slabp, objp);
78d382d7
MD
2712
2713#if DEBUG
2714 /* Verify that the slab belongs to the intended node */
2715 WARN_ON(slabp->nodeid != nodeid);
2716
871751e2 2717 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
78d382d7 2718 printk(KERN_ERR "slab: double free detected in cache "
a737b3e2 2719 "'%s', objp %p\n", cachep->name, objp);
78d382d7
MD
2720 BUG();
2721 }
2722#endif
2723 slab_bufctl(slabp)[objnr] = slabp->free;
2724 slabp->free = objnr;
2725 slabp->inuse--;
2726}
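/*
 * Editor's note: the kmem_bufctl_t array touched by slab_get_obj() and
 * slab_put_obj() is a singly linked free list indexed by object number, with
 * slabp->free as the head and BUFCTL_END as the terminator. Below is a
 * hypothetical stand-alone model of the same bookkeeping (names and sizes are
 * illustrative only).
 */
#if 0	/* user-space sketch, build outside the kernel */
#include <assert.h>
#include <stdio.h>

#define NUM		4		/* objects per slab in this model */
#define BUFCTL_END	(~0U)

static unsigned int bufctl[NUM];
static unsigned int free_head;

static void init_objs(void)		/* cf. cache_init_objs() */
{
	unsigned int i;

	for (i = 0; i < NUM; i++)
		bufctl[i] = i + 1;
	bufctl[NUM - 1] = BUFCTL_END;
	free_head = 0;
}

static unsigned int get_obj(void)	/* cf. slab_get_obj() */
{
	unsigned int objnr = free_head;

	assert(objnr != BUFCTL_END);	/* the slab must not be full */
	free_head = bufctl[objnr];
	return objnr;
}

static void put_obj(unsigned int objnr)	/* cf. slab_put_obj() */
{
	bufctl[objnr] = free_head;
	free_head = objnr;
}

int main(void)
{
	unsigned int a, b;

	init_objs();
	a = get_obj();				/* 0 */
	b = get_obj();				/* 1 */
	put_obj(a);
	printf("%u %u %u\n", a, b, get_obj());	/* prints "0 1 0": LIFO reuse */
	return 0;
}
#endif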
2727
4776874f
PE
2728/*
2729 * Map pages beginning at addr to the given cache and slab. This is required
2730 * for the slab allocator to be able to lookup the cache and slab of a
2731 * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2732 */
2733static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2734 void *addr)
1da177e4 2735{
4776874f 2736 int nr_pages;
1da177e4
LT
2737 struct page *page;
2738
4776874f 2739 page = virt_to_page(addr);
84097518 2740
4776874f 2741 nr_pages = 1;
84097518 2742 if (likely(!PageCompound(page)))
4776874f
PE
2743 nr_pages <<= cache->gfporder;
2744
1da177e4 2745 do {
4776874f
PE
2746 page_set_cache(page, cache);
2747 page_set_slab(page, slab);
1da177e4 2748 page++;
4776874f 2749 } while (--nr_pages);
1da177e4
LT
2750}
2751
2752/*
2753 * Grow (by 1) the number of slabs within a cache. This is called by
2754 * kmem_cache_alloc() when there are no free objs left in a cache.
2755 */
3c517a61
CL
2756static int cache_grow(struct kmem_cache *cachep,
2757 gfp_t flags, int nodeid, void *objp)
1da177e4 2758{
b28a02de 2759 struct slab *slabp;
b28a02de
PE
2760 size_t offset;
2761 gfp_t local_flags;
e498be7d 2762 struct kmem_list3 *l3;
1da177e4 2763
a737b3e2
AM
2764 /*
2765 * Be lazy and only check for valid flags here, keeping it out of the
2766 * critical path in kmem_cache_alloc().
1da177e4 2767 */
6cb06229
CL
2768 BUG_ON(flags & GFP_SLAB_BUG_MASK);
2769 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
1da177e4 2770
2e1217cf 2771 /* Take the l3 list lock to change the colour_next on this node */
1da177e4 2772 check_irq_off();
2e1217cf
RT
2773 l3 = cachep->nodelists[nodeid];
2774 spin_lock(&l3->list_lock);
1da177e4
LT
2775
2776 /* Get colour for the slab, and calculate the next value. */
2e1217cf
RT
2777 offset = l3->colour_next;
2778 l3->colour_next++;
2779 if (l3->colour_next >= cachep->colour)
2780 l3->colour_next = 0;
2781 spin_unlock(&l3->list_lock);
1da177e4 2782
2e1217cf 2783 offset *= cachep->colour_off;
1da177e4
LT
2784
2785 if (local_flags & __GFP_WAIT)
2786 local_irq_enable();
2787
2788 /*
2789 * The test for missing atomic flag is performed here, rather than
2790 * the more obvious place, simply to reduce the critical path length
2791 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2792 * will eventually be caught here (where it matters).
2793 */
2794 kmem_flagcheck(cachep, flags);
2795
a737b3e2
AM
2796 /*
2797 * Get mem for the objs. Attempt to allocate a physical page from
2798 * 'nodeid'.
e498be7d 2799 */
3c517a61 2800 if (!objp)
b8c1c5da 2801 objp = kmem_getpages(cachep, local_flags, nodeid);
a737b3e2 2802 if (!objp)
1da177e4
LT
2803 goto failed;
2804
2805 /* Get slab management. */
3c517a61 2806 slabp = alloc_slabmgmt(cachep, objp, offset,
6cb06229 2807 local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
a737b3e2 2808 if (!slabp)
1da177e4
LT
2809 goto opps1;
2810
4776874f 2811 slab_map_pages(cachep, slabp, objp);
1da177e4 2812
a35afb83 2813 cache_init_objs(cachep, slabp);
1da177e4
LT
2814
2815 if (local_flags & __GFP_WAIT)
2816 local_irq_disable();
2817 check_irq_off();
e498be7d 2818 spin_lock(&l3->list_lock);
1da177e4
LT
2819
2820 /* Make slab active. */
e498be7d 2821 list_add_tail(&slabp->list, &(l3->slabs_free));
1da177e4 2822 STATS_INC_GROWN(cachep);
e498be7d
CL
2823 l3->free_objects += cachep->num;
2824 spin_unlock(&l3->list_lock);
1da177e4 2825 return 1;
a737b3e2 2826opps1:
1da177e4 2827 kmem_freepages(cachep, objp);
a737b3e2 2828failed:
1da177e4
LT
2829 if (local_flags & __GFP_WAIT)
2830 local_irq_disable();
2831 return 0;
2832}
2833
2834#if DEBUG
2835
2836/*
2837 * Perform extra freeing checks:
2838 * - detect bad pointers.
2839 * - POISON/RED_ZONE checking
1da177e4
LT
2840 */
2841static void kfree_debugcheck(const void *objp)
2842{
1da177e4
LT
2843 if (!virt_addr_valid(objp)) {
2844 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
b28a02de
PE
2845 (unsigned long)objp);
2846 BUG();
1da177e4 2847 }
1da177e4
LT
2848}
2849
58ce1fd5
PE
2850static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2851{
b46b8f19 2852 unsigned long long redzone1, redzone2;
58ce1fd5
PE
2853
2854 redzone1 = *dbg_redzone1(cache, obj);
2855 redzone2 = *dbg_redzone2(cache, obj);
2856
2857 /*
2858 * Redzone is ok.
2859 */
2860 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2861 return;
2862
2863 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2864 slab_error(cache, "double free detected");
2865 else
2866 slab_error(cache, "memory outside object was overwritten");
2867
b46b8f19 2868 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
58ce1fd5
PE
2869 obj, redzone1, redzone2);
2870}
2871
343e0d7a 2872static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
b28a02de 2873 void *caller)
1da177e4
LT
2874{
2875 struct page *page;
2876 unsigned int objnr;
2877 struct slab *slabp;
2878
80cbd911
MW
2879 BUG_ON(virt_to_cache(objp) != cachep);
2880
3dafccf2 2881 objp -= obj_offset(cachep);
1da177e4 2882 kfree_debugcheck(objp);
b49af68f 2883 page = virt_to_head_page(objp);
1da177e4 2884
065d41cb 2885 slabp = page_get_slab(page);
1da177e4
LT
2886
2887 if (cachep->flags & SLAB_RED_ZONE) {
58ce1fd5 2888 verify_redzone_free(cachep, objp);
1da177e4
LT
2889 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2890 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2891 }
2892 if (cachep->flags & SLAB_STORE_USER)
2893 *dbg_userword(cachep, objp) = caller;
2894
8fea4e96 2895 objnr = obj_to_index(cachep, slabp, objp);
1da177e4
LT
2896
2897 BUG_ON(objnr >= cachep->num);
8fea4e96 2898 BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
1da177e4 2899
871751e2
AV
2900#ifdef CONFIG_DEBUG_SLAB_LEAK
2901 slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2902#endif
1da177e4
LT
2903 if (cachep->flags & SLAB_POISON) {
2904#ifdef CONFIG_DEBUG_PAGEALLOC
a737b3e2 2905 if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
1da177e4 2906 store_stackinfo(cachep, objp, (unsigned long)caller);
b28a02de 2907 kernel_map_pages(virt_to_page(objp),
3dafccf2 2908 cachep->buffer_size / PAGE_SIZE, 0);
1da177e4
LT
2909 } else {
2910 poison_obj(cachep, objp, POISON_FREE);
2911 }
2912#else
2913 poison_obj(cachep, objp, POISON_FREE);
2914#endif
2915 }
2916 return objp;
2917}
2918
343e0d7a 2919static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
1da177e4
LT
2920{
2921 kmem_bufctl_t i;
2922 int entries = 0;
b28a02de 2923
1da177e4
LT
2924 /* Check slab's freelist to see if this obj is there. */
2925 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2926 entries++;
2927 if (entries > cachep->num || i >= cachep->num)
2928 goto bad;
2929 }
2930 if (entries != cachep->num - slabp->inuse) {
a737b3e2
AM
2931bad:
2932 printk(KERN_ERR "slab: Internal list corruption detected in "
2933 "cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2934 cachep->name, cachep->num, slabp, slabp->inuse);
b28a02de 2935 for (i = 0;
264132bc 2936 i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
b28a02de 2937 i++) {
a737b3e2 2938 if (i % 16 == 0)
1da177e4 2939 printk("\n%03x:", i);
b28a02de 2940 printk(" %02x", ((unsigned char *)slabp)[i]);
1da177e4
LT
2941 }
2942 printk("\n");
2943 BUG();
2944 }
2945}
2946#else
2947#define kfree_debugcheck(x) do { } while(0)
2948#define cache_free_debugcheck(x,objp,z) (objp)
2949#define check_slabp(x,y) do { } while(0)
2950#endif
2951
343e0d7a 2952static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
1da177e4
LT
2953{
2954 int batchcount;
2955 struct kmem_list3 *l3;
2956 struct array_cache *ac;
1ca4cb24
PE
2957 int node;
2958
6d2144d3 2959retry:
1da177e4 2960 check_irq_off();
6d2144d3 2961 node = numa_node_id();
9a2dba4b 2962 ac = cpu_cache_get(cachep);
1da177e4
LT
2963 batchcount = ac->batchcount;
2964 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
a737b3e2
AM
2965 /*
2966 * If there was little recent activity on this cache, then
2967 * perform only a partial refill. Otherwise we could generate
2968 * refill bouncing.
1da177e4
LT
2969 */
2970 batchcount = BATCHREFILL_LIMIT;
2971 }
1ca4cb24 2972 l3 = cachep->nodelists[node];
e498be7d
CL
2973
2974 BUG_ON(ac->avail > 0 || !l3);
2975 spin_lock(&l3->list_lock);
1da177e4 2976
3ded175a
CL
2977 /* See if we can refill from the shared array */
2978 if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2979 goto alloc_done;
2980
1da177e4
LT
2981 while (batchcount > 0) {
2982 struct list_head *entry;
2983 struct slab *slabp;
2984 /* Get slab alloc is to come from. */
2985 entry = l3->slabs_partial.next;
2986 if (entry == &l3->slabs_partial) {
2987 l3->free_touched = 1;
2988 entry = l3->slabs_free.next;
2989 if (entry == &l3->slabs_free)
2990 goto must_grow;
2991 }
2992
2993 slabp = list_entry(entry, struct slab, list);
2994 check_slabp(cachep, slabp);
2995 check_spinlock_acquired(cachep);
714b8171
PE
2996
2997 /*
2998 * The slab was either on partial or free list so
2999 * there must be at least one object available for
3000 * allocation.
3001 */
249b9f33 3002 BUG_ON(slabp->inuse >= cachep->num);
714b8171 3003
1da177e4 3004 while (slabp->inuse < cachep->num && batchcount--) {
1da177e4
LT
3005 STATS_INC_ALLOCED(cachep);
3006 STATS_INC_ACTIVE(cachep);
3007 STATS_SET_HIGH(cachep);
3008
78d382d7 3009 ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
1ca4cb24 3010 node);
1da177e4
LT
3011 }
3012 check_slabp(cachep, slabp);
3013
3014 /* move slabp to correct slabp list: */
3015 list_del(&slabp->list);
3016 if (slabp->free == BUFCTL_END)
3017 list_add(&slabp->list, &l3->slabs_full);
3018 else
3019 list_add(&slabp->list, &l3->slabs_partial);
3020 }
3021
a737b3e2 3022must_grow:
1da177e4 3023 l3->free_objects -= ac->avail;
a737b3e2 3024alloc_done:
e498be7d 3025 spin_unlock(&l3->list_lock);
1da177e4
LT
3026
3027 if (unlikely(!ac->avail)) {
3028 int x;
3c517a61 3029 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
e498be7d 3030
a737b3e2 3031 /* cache_grow can reenable interrupts, then ac could change. */
9a2dba4b 3032 ac = cpu_cache_get(cachep);
a737b3e2 3033 if (!x && ac->avail == 0) /* no objects in sight? abort */
1da177e4
LT
3034 return NULL;
3035
a737b3e2 3036 if (!ac->avail) /* objects refilled by interrupt? */
1da177e4
LT
3037 goto retry;
3038 }
3039 ac->touched = 1;
e498be7d 3040 return ac->entry[--ac->avail];
1da177e4
LT
3041}
3042
a737b3e2
AM
3043static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3044 gfp_t flags)
1da177e4
LT
3045{
3046 might_sleep_if(flags & __GFP_WAIT);
3047#if DEBUG
3048 kmem_flagcheck(cachep, flags);
3049#endif
3050}
3051
3052#if DEBUG
a737b3e2
AM
3053static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3054 gfp_t flags, void *objp, void *caller)
1da177e4 3055{
b28a02de 3056 if (!objp)
1da177e4 3057 return objp;
b28a02de 3058 if (cachep->flags & SLAB_POISON) {
1da177e4 3059#ifdef CONFIG_DEBUG_PAGEALLOC
3dafccf2 3060 if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
b28a02de 3061 kernel_map_pages(virt_to_page(objp),
3dafccf2 3062 cachep->buffer_size / PAGE_SIZE, 1);
1da177e4
LT
3063 else
3064 check_poison_obj(cachep, objp);
3065#else
3066 check_poison_obj(cachep, objp);
3067#endif
3068 poison_obj(cachep, objp, POISON_INUSE);
3069 }
3070 if (cachep->flags & SLAB_STORE_USER)
3071 *dbg_userword(cachep, objp) = caller;
3072
3073 if (cachep->flags & SLAB_RED_ZONE) {
a737b3e2
AM
3074 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3075 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3076 slab_error(cachep, "double free, or memory outside"
3077 " object was overwritten");
b28a02de 3078 printk(KERN_ERR
b46b8f19 3079 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
a737b3e2
AM
3080 objp, *dbg_redzone1(cachep, objp),
3081 *dbg_redzone2(cachep, objp));
1da177e4
LT
3082 }
3083 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3084 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3085 }
871751e2
AV
3086#ifdef CONFIG_DEBUG_SLAB_LEAK
3087 {
3088 struct slab *slabp;
3089 unsigned objnr;
3090
b49af68f 3091 slabp = page_get_slab(virt_to_head_page(objp));
871751e2
AV
3092 objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3093 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3094 }
3095#endif
3dafccf2 3096 objp += obj_offset(cachep);
4f104934 3097 if (cachep->ctor && cachep->flags & SLAB_POISON)
51cc5068 3098 cachep->ctor(objp);
a44b56d3
KH
3099#if ARCH_SLAB_MINALIGN
3100 if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3101 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3102 objp, ARCH_SLAB_MINALIGN);
3103 }
3104#endif
1da177e4
LT
3105 return objp;
3106}
3107#else
3108#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3109#endif
3110
773ff60e 3111static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
8a8b6502
AM
3112{
3113 if (cachep == &cache_cache)
773ff60e 3114 return false;
8a8b6502 3115
773ff60e 3116 return should_failslab(obj_size(cachep), flags);
8a8b6502
AM
3117}
3118
343e0d7a 3119static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 3120{
b28a02de 3121 void *objp;
1da177e4
LT
3122 struct array_cache *ac;
3123
5c382300 3124 check_irq_off();
8a8b6502 3125
9a2dba4b 3126 ac = cpu_cache_get(cachep);
1da177e4
LT
3127 if (likely(ac->avail)) {
3128 STATS_INC_ALLOCHIT(cachep);
3129 ac->touched = 1;
e498be7d 3130 objp = ac->entry[--ac->avail];
1da177e4
LT
3131 } else {
3132 STATS_INC_ALLOCMISS(cachep);
3133 objp = cache_alloc_refill(cachep, flags);
3134 }
5c382300
AK
3135 return objp;
3136}
3137
e498be7d 3138#ifdef CONFIG_NUMA
c61afb18 3139/*
b2455396 3140 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
c61afb18
PJ
3141 *
3142 * If we are in_interrupt, then process context, including cpusets and
3143 * mempolicy, may not apply and should not be used for allocation policy.
3144 */
3145static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3146{
3147 int nid_alloc, nid_here;
3148
765c4507 3149 if (in_interrupt() || (flags & __GFP_THISNODE))
c61afb18
PJ
3150 return NULL;
3151 nid_alloc = nid_here = numa_node_id();
3152 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3153 nid_alloc = cpuset_mem_spread_node();
3154 else if (current->mempolicy)
3155 nid_alloc = slab_node(current->mempolicy);
3156 if (nid_alloc != nid_here)
8b98c169 3157 return ____cache_alloc_node(cachep, flags, nid_alloc);
c61afb18
PJ
3158 return NULL;
3159}
3160
765c4507
CL
3161/*
3162 * Fallback function if there was no memory available and no objects on a
3c517a61
CL
3163 * certain node when falling back is permitted. First we scan all the
3164 * available nodelists for usable objects. If that fails then we
3165 * perform an allocation without specifying a node. This allows the page
3166 * allocator to do its reclaim / fallback magic. We then insert the
3167 * slab into the proper nodelist and then allocate from it.
765c4507 3168 */
8c8cc2c1 3169static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
765c4507 3170{
8c8cc2c1
PE
3171 struct zonelist *zonelist;
3172 gfp_t local_flags;
dd1a239f 3173 struct zoneref *z;
54a6eb5c
MG
3174 struct zone *zone;
3175 enum zone_type high_zoneidx = gfp_zone(flags);
765c4507 3176 void *obj = NULL;
3c517a61 3177 int nid;
8c8cc2c1
PE
3178
3179 if (flags & __GFP_THISNODE)
3180 return NULL;
3181
0e88460d 3182 zonelist = node_zonelist(slab_node(current->mempolicy), flags);
6cb06229 3183 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
765c4507 3184
3c517a61
CL
3185retry:
3186 /*
3187 * Look through allowed nodes for objects available
3188 * from existing per node queues.
3189 */
54a6eb5c
MG
3190 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3191 nid = zone_to_nid(zone);
aedb0eb1 3192
54a6eb5c 3193 if (cpuset_zone_allowed_hardwall(zone, flags) &&
3c517a61 3194 cache->nodelists[nid] &&
481c5346 3195 cache->nodelists[nid]->free_objects) {
3c517a61
CL
3196 obj = ____cache_alloc_node(cache,
3197 flags | GFP_THISNODE, nid);
481c5346
CL
3198 if (obj)
3199 break;
3200 }
3c517a61
CL
3201 }
3202
cfce6604 3203 if (!obj) {
3c517a61
CL
3204 /*
3205 * This allocation will be performed within the constraints
3206 * of the current cpuset / memory policy requirements.
3207 * We may trigger various forms of reclaim on the allowed
3208 * set and go into memory reserves if necessary.
3209 */
dd47ea75
CL
3210 if (local_flags & __GFP_WAIT)
3211 local_irq_enable();
3212 kmem_flagcheck(cache, flags);
9ac33b2b 3213 obj = kmem_getpages(cache, local_flags, -1);
dd47ea75
CL
3214 if (local_flags & __GFP_WAIT)
3215 local_irq_disable();
3c517a61
CL
3216 if (obj) {
3217 /*
3218 * Insert into the appropriate per node queues
3219 */
3220 nid = page_to_nid(virt_to_page(obj));
3221 if (cache_grow(cache, flags, nid, obj)) {
3222 obj = ____cache_alloc_node(cache,
3223 flags | GFP_THISNODE, nid);
3224 if (!obj)
3225 /*
3226 * Another processor may allocate the
3227 * objects in the slab since we are
3228 * not holding any locks.
3229 */
3230 goto retry;
3231 } else {
b6a60451 3232 /* cache_grow already freed obj */
3c517a61
CL
3233 obj = NULL;
3234 }
3235 }
aedb0eb1 3236 }
765c4507
CL
3237 return obj;
3238}
3239
e498be7d
CL
3240/*
3241 * An interface to enable slab creation on nodeid
1da177e4 3242 */
8b98c169 3243static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
a737b3e2 3244 int nodeid)
e498be7d
CL
3245{
3246 struct list_head *entry;
b28a02de
PE
3247 struct slab *slabp;
3248 struct kmem_list3 *l3;
3249 void *obj;
b28a02de
PE
3250 int x;
3251
3252 l3 = cachep->nodelists[nodeid];
3253 BUG_ON(!l3);
3254
a737b3e2 3255retry:
ca3b9b91 3256 check_irq_off();
b28a02de
PE
3257 spin_lock(&l3->list_lock);
3258 entry = l3->slabs_partial.next;
3259 if (entry == &l3->slabs_partial) {
3260 l3->free_touched = 1;
3261 entry = l3->slabs_free.next;
3262 if (entry == &l3->slabs_free)
3263 goto must_grow;
3264 }
3265
3266 slabp = list_entry(entry, struct slab, list);
3267 check_spinlock_acquired_node(cachep, nodeid);
3268 check_slabp(cachep, slabp);
3269
3270 STATS_INC_NODEALLOCS(cachep);
3271 STATS_INC_ACTIVE(cachep);
3272 STATS_SET_HIGH(cachep);
3273
3274 BUG_ON(slabp->inuse == cachep->num);
3275
78d382d7 3276 obj = slab_get_obj(cachep, slabp, nodeid);
b28a02de
PE
3277 check_slabp(cachep, slabp);
3278 l3->free_objects--;
3279 /* move slabp to correct slabp list: */
3280 list_del(&slabp->list);
3281
a737b3e2 3282 if (slabp->free == BUFCTL_END)
b28a02de 3283 list_add(&slabp->list, &l3->slabs_full);
a737b3e2 3284 else
b28a02de 3285 list_add(&slabp->list, &l3->slabs_partial);
e498be7d 3286
b28a02de
PE
3287 spin_unlock(&l3->list_lock);
3288 goto done;
e498be7d 3289
a737b3e2 3290must_grow:
b28a02de 3291 spin_unlock(&l3->list_lock);
3c517a61 3292 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
765c4507
CL
3293 if (x)
3294 goto retry;
1da177e4 3295
8c8cc2c1 3296 return fallback_alloc(cachep, flags);
e498be7d 3297
a737b3e2 3298done:
b28a02de 3299 return obj;
e498be7d 3300}
8c8cc2c1
PE
3301
3302/**
3303 * kmem_cache_alloc_node - Allocate an object on the specified node
3304 * @cachep: The cache to allocate from.
3305 * @flags: See kmalloc().
3306 * @nodeid: node number of the target node.
3307 * @caller: return address of caller, used for debug information
3308 *
3309 * Identical to kmem_cache_alloc but it will allocate memory on the given
3310 * node, which can improve the performance for cpu bound structures.
3311 *
3312 * Fallback to other node is possible if __GFP_THISNODE is not set.
3313 */
3314static __always_inline void *
3315__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3316 void *caller)
3317{
3318 unsigned long save_flags;
3319 void *ptr;
3320
773ff60e 3321 if (slab_should_failslab(cachep, flags))
824ebef1
AM
3322 return NULL;
3323
8c8cc2c1
PE
3324 cache_alloc_debugcheck_before(cachep, flags);
3325 local_irq_save(save_flags);
3326
3327 if (unlikely(nodeid == -1))
3328 nodeid = numa_node_id();
3329
3330 if (unlikely(!cachep->nodelists[nodeid])) {
3331 /* Node not bootstrapped yet */
3332 ptr = fallback_alloc(cachep, flags);
3333 goto out;
3334 }
3335
3336 if (nodeid == numa_node_id()) {
3337 /*
3338 * Use the locally cached objects if possible.
3339 * However ____cache_alloc does not allow fallback
3340 * to other nodes. It may fail while we still have
3341 * objects on other nodes available.
3342 */
3343 ptr = ____cache_alloc(cachep, flags);
3344 if (ptr)
3345 goto out;
3346 }
3347 /* ___cache_alloc_node can fall back to other nodes */
3348 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3349 out:
3350 local_irq_restore(save_flags);
3351 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3352
d07dbea4
CL
3353 if (unlikely((flags & __GFP_ZERO) && ptr))
3354 memset(ptr, 0, obj_size(cachep));
3355
8c8cc2c1
PE
3356 return ptr;
3357}
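/*
 * Editor's usage sketch (hypothetical, not part of slab.c): the exported
 * kmem_cache_alloc_node() documented above lets callers keep per-node data on
 * the node that will use it. foo_cachep is the illustrative cache from the
 * earlier kmem_cache_create() example.
 */
static struct foo *foo_alloc_on_node(int nid)
{
	return kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, nid);
}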
3358
3359static __always_inline void *
3360__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3361{
3362 void *objp;
3363
3364 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3365 objp = alternate_node_alloc(cache, flags);
3366 if (objp)
3367 goto out;
3368 }
3369 objp = ____cache_alloc(cache, flags);
3370
3371 /*
3372 * We may just have run out of memory on the local node.
3373 * ____cache_alloc_node() knows how to locate memory on other nodes
3374 */
3375 if (!objp)
3376 objp = ____cache_alloc_node(cache, flags, numa_node_id());
3377
3378 out:
3379 return objp;
3380}
3381#else
3382
3383static __always_inline void *
3384__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3385{
3386 return ____cache_alloc(cachep, flags);
3387}
3388
3389#endif /* CONFIG_NUMA */
3390
3391static __always_inline void *
3392__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3393{
3394 unsigned long save_flags;
3395 void *objp;
3396
773ff60e 3397 if (slab_should_failslab(cachep, flags))
824ebef1
AM
3398 return NULL;
3399
8c8cc2c1
PE
3400 cache_alloc_debugcheck_before(cachep, flags);
3401 local_irq_save(save_flags);
3402 objp = __do_cache_alloc(cachep, flags);
3403 local_irq_restore(save_flags);
3404 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3405 prefetchw(objp);
3406
d07dbea4
CL
3407 if (unlikely((flags & __GFP_ZERO) && objp))
3408 memset(objp, 0, obj_size(cachep));
3409
8c8cc2c1
PE
3410 return objp;
3411}
e498be7d
CL
3412
3413/*
3414 * Caller needs to acquire correct kmem_list's list_lock
3415 */
343e0d7a 3416static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
b28a02de 3417 int node)
1da177e4
LT
3418{
3419 int i;
e498be7d 3420 struct kmem_list3 *l3;
1da177e4
LT
3421
3422 for (i = 0; i < nr_objects; i++) {
3423 void *objp = objpp[i];
3424 struct slab *slabp;
1da177e4 3425
6ed5eb22 3426 slabp = virt_to_slab(objp);
ff69416e 3427 l3 = cachep->nodelists[node];
1da177e4 3428 list_del(&slabp->list);
ff69416e 3429 check_spinlock_acquired_node(cachep, node);
1da177e4 3430 check_slabp(cachep, slabp);
78d382d7 3431 slab_put_obj(cachep, slabp, objp, node);
1da177e4 3432 STATS_DEC_ACTIVE(cachep);
e498be7d 3433 l3->free_objects++;
1da177e4
LT
3434 check_slabp(cachep, slabp);
3435
3436 /* fixup slab chains */
3437 if (slabp->inuse == 0) {
e498be7d
CL
3438 if (l3->free_objects > l3->free_limit) {
3439 l3->free_objects -= cachep->num;
e5ac9c5a
RT
3440 /* No need to drop any previously held
3441 * lock here: even if we have an off-slab slab
3442 * descriptor, it is guaranteed to come from
3443 * a different cache; refer to the comments before
3444 * alloc_slabmgmt.
3445 */
1da177e4
LT
3446 slab_destroy(cachep, slabp);
3447 } else {
e498be7d 3448 list_add(&slabp->list, &l3->slabs_free);
1da177e4
LT
3449 }
3450 } else {
3451 /* Unconditionally move a slab to the end of the
3452 * partial list on free - this gives the other
3453 * objects in the slab the maximum time to be freed, too.
3454 */
e498be7d 3455 list_add_tail(&slabp->list, &l3->slabs_partial);
1da177e4
LT
3456 }
3457 }
3458}
3459
343e0d7a 3460static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
1da177e4
LT
3461{
3462 int batchcount;
e498be7d 3463 struct kmem_list3 *l3;
ff69416e 3464 int node = numa_node_id();
1da177e4
LT
3465
3466 batchcount = ac->batchcount;
3467#if DEBUG
3468 BUG_ON(!batchcount || batchcount > ac->avail);
3469#endif
3470 check_irq_off();
ff69416e 3471 l3 = cachep->nodelists[node];
873623df 3472 spin_lock(&l3->list_lock);
e498be7d
CL
3473 if (l3->shared) {
3474 struct array_cache *shared_array = l3->shared;
b28a02de 3475 int max = shared_array->limit - shared_array->avail;
1da177e4
LT
3476 if (max) {
3477 if (batchcount > max)
3478 batchcount = max;
e498be7d 3479 memcpy(&(shared_array->entry[shared_array->avail]),
b28a02de 3480 ac->entry, sizeof(void *) * batchcount);
1da177e4
LT
3481 shared_array->avail += batchcount;
3482 goto free_done;
3483 }
3484 }
3485
ff69416e 3486 free_block(cachep, ac->entry, batchcount, node);
a737b3e2 3487free_done:
1da177e4
LT
3488#if STATS
3489 {
3490 int i = 0;
3491 struct list_head *p;
3492
e498be7d
CL
3493 p = l3->slabs_free.next;
3494 while (p != &(l3->slabs_free)) {
1da177e4
LT
3495 struct slab *slabp;
3496
3497 slabp = list_entry(p, struct slab, list);
3498 BUG_ON(slabp->inuse);
3499
3500 i++;
3501 p = p->next;
3502 }
3503 STATS_SET_FREEABLE(cachep, i);
3504 }
3505#endif
e498be7d 3506 spin_unlock(&l3->list_lock);
1da177e4 3507 ac->avail -= batchcount;
a737b3e2 3508 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
1da177e4
LT
3509}
3510
3511/*
a737b3e2
AM
3512 * Release an obj back to its cache. If the obj has a constructed state, it must
3513 * be in this state _before_ it is released. Called with interrupts disabled.
1da177e4 3514 */
873623df 3515static inline void __cache_free(struct kmem_cache *cachep, void *objp)
1da177e4 3516{
9a2dba4b 3517 struct array_cache *ac = cpu_cache_get(cachep);
1da177e4
LT
3518
3519 check_irq_off();
3520 objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3521
1807a1aa
SS
3522 /*
3523 * Skip calling cache_free_alien() when the platform is not NUMA.
3524 * This avoids the cache misses that happen while accessing slabp (which
3525 * is a per-page memory reference) to get the nodeid. Instead, use a global
3526 * variable to skip the call, which is most likely to be present in
3527 * the cache.
3528 */
3529 if (numa_platform && cache_free_alien(cachep, objp))
729bd0b7
PE
3530 return;
3531
1da177e4
LT
3532 if (likely(ac->avail < ac->limit)) {
3533 STATS_INC_FREEHIT(cachep);
e498be7d 3534 ac->entry[ac->avail++] = objp;
1da177e4
LT
3535 return;
3536 } else {
3537 STATS_INC_FREEMISS(cachep);
3538 cache_flusharray(cachep, ac);
e498be7d 3539 ac->entry[ac->avail++] = objp;
1da177e4
LT
3540 }
3541}
3542
3543/**
3544 * kmem_cache_alloc - Allocate an object
3545 * @cachep: The cache to allocate from.
3546 * @flags: See kmalloc().
3547 *
3548 * Allocate an object from this cache. The flags are only relevant
3549 * if the cache has no available objects.
3550 */
343e0d7a 3551void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 3552{
7fd6b141 3553 return __cache_alloc(cachep, flags, __builtin_return_address(0));
1da177e4
LT
3554}
3555EXPORT_SYMBOL(kmem_cache_alloc);
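/*
 * Illustrative sketch (not part of the original file): a minimal user of
 * kmem_cache_alloc()/kmem_cache_free().  The cache, struct and function
 * names below are hypothetical.
 */
struct foo {
	unsigned long id;
	void *payload;
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* One cache per object type, as described at the top of this file. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
				       0, SLAB_HWCACHE_ALIGN, NULL);
	return foo_cachep ? 0 : -ENOMEM;
}

static struct foo *foo_alloc(void)
{
	/* The gfp flags only matter if no cached object is available. */
	return kmem_cache_alloc(foo_cachep, GFP_KERNEL);
}

static void foo_free(struct foo *f)
{
	kmem_cache_free(foo_cachep, f);
}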
3556
3557/**
7682486b 3558 * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
1da177e4
LT
3559 * @cachep: the cache we're checking against
3560 * @ptr: pointer to validate
3561 *
7682486b 3562 * This verifies that the untrusted pointer looks sane;
1da177e4
LT
3563 * it is _not_ a guarantee that the pointer is actually
3564 * part of the slab cache in question, but it at least
3565 * validates that the pointer can be dereferenced and
3566 * looks half-way sane.
3567 *
3568 * Currently only used for dentry validation.
3569 */
b7f869a2 3570int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
1da177e4 3571{
b28a02de 3572 unsigned long addr = (unsigned long)ptr;
1da177e4 3573 unsigned long min_addr = PAGE_OFFSET;
b28a02de 3574 unsigned long align_mask = BYTES_PER_WORD - 1;
3dafccf2 3575 unsigned long size = cachep->buffer_size;
1da177e4
LT
3576 struct page *page;
3577
3578 if (unlikely(addr < min_addr))
3579 goto out;
3580 if (unlikely(addr > (unsigned long)high_memory - size))
3581 goto out;
3582 if (unlikely(addr & align_mask))
3583 goto out;
3584 if (unlikely(!kern_addr_valid(addr)))
3585 goto out;
3586 if (unlikely(!kern_addr_valid(addr + size - 1)))
3587 goto out;
3588 page = virt_to_page(ptr);
3589 if (unlikely(!PageSlab(page)))
3590 goto out;
065d41cb 3591 if (unlikely(page_get_cache(page) != cachep))
1da177e4
LT
3592 goto out;
3593 return 1;
a737b3e2 3594out:
1da177e4
LT
3595 return 0;
3596}
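/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use kmem_ptr_validate() to sanity-check an untrusted pointer before
 * dereferencing it.  Remember the check is a heuristic, not a guarantee.
 */
static void *validate_or_drop(struct kmem_cache *cachep, void *ptr)
{
	if (!kmem_ptr_validate(cachep, ptr))
		return NULL;	/* looks bogus - do not dereference */
	return ptr;		/* plausibly a live object from this cache */
}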
3597
3598#ifdef CONFIG_NUMA
8b98c169
CH
3599void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3600{
3601 return __cache_alloc_node(cachep, flags, nodeid,
3602 __builtin_return_address(0));
3603}
1da177e4
LT
3604EXPORT_SYMBOL(kmem_cache_alloc_node);
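/*
 * Illustrative sketch (not part of the original file): allocating an object
 * on a specific NUMA node, e.g. the node a device is attached to.  The
 * function and variable names are hypothetical.
 */
static void *alloc_near_device(struct kmem_cache *cachep, int dev_node)
{
	/* Fall back to the local node when the device node is unknown. */
	if (dev_node < 0)
		dev_node = numa_node_id();
	return kmem_cache_alloc_node(cachep, GFP_KERNEL, dev_node);
}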
3605
8b98c169
CH
3606static __always_inline void *
3607__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
97e2bde4 3608{
343e0d7a 3609 struct kmem_cache *cachep;
97e2bde4
MS
3610
3611 cachep = kmem_find_general_cachep(size, flags);
6cb8f913
CL
3612 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3613 return cachep;
97e2bde4
MS
3614 return kmem_cache_alloc_node(cachep, flags, node);
3615}
8b98c169
CH
3616
3617#ifdef CONFIG_DEBUG_SLAB
3618void *__kmalloc_node(size_t size, gfp_t flags, int node)
3619{
3620 return __do_kmalloc_node(size, flags, node,
3621 __builtin_return_address(0));
3622}
dbe5e69d 3623EXPORT_SYMBOL(__kmalloc_node);
8b98c169
CH
3624
3625void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
ce71e27c 3626 int node, unsigned long caller)
8b98c169 3627{
ce71e27c 3628 return __do_kmalloc_node(size, flags, node, (void *)caller);
8b98c169
CH
3629}
3630EXPORT_SYMBOL(__kmalloc_node_track_caller);
3631#else
3632void *__kmalloc_node(size_t size, gfp_t flags, int node)
3633{
3634 return __do_kmalloc_node(size, flags, node, NULL);
3635}
3636EXPORT_SYMBOL(__kmalloc_node);
3637#endif /* CONFIG_DEBUG_SLAB */
3638#endif /* CONFIG_NUMA */
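/*
 * Illustrative sketch (not part of the original file): kmalloc_node() from
 * <linux/slab.h> ends up in __kmalloc_node() above.  A driver might use it
 * to keep a per-node scratch buffer close to that node's CPUs; the size and
 * names below are made up.
 */
static void *alloc_pernode_scratch(int node)
{
	return kmalloc_node(PAGE_SIZE, GFP_KERNEL, node);
}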
1da177e4
LT
3639
3640/**
800590f5 3641 * __do_kmalloc - allocate memory
1da177e4 3642 * @size: how many bytes of memory are required.
800590f5 3643 * @flags: the type of memory to allocate (see kmalloc).
911851e6 3644 * @caller: function caller for debug tracking of the caller
1da177e4 3645 */
7fd6b141
PE
3646static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3647 void *caller)
1da177e4 3648{
343e0d7a 3649 struct kmem_cache *cachep;
1da177e4 3650
97e2bde4
MS
3651 /* If you want to save a few bytes of .text space: replace
3652 * __ with kmem_.
3653 * Then kmalloc uses the uninlined functions instead of the inline
3654 * functions.
3655 */
3656 cachep = __find_general_cachep(size, flags);
a5c96d8a
LT
3657 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3658 return cachep;
7fd6b141
PE
3659 return __cache_alloc(cachep, flags, caller);
3660}
3661
7fd6b141 3662
1d2c8eea 3663#ifdef CONFIG_DEBUG_SLAB
7fd6b141
PE
3664void *__kmalloc(size_t size, gfp_t flags)
3665{
871751e2 3666 return __do_kmalloc(size, flags, __builtin_return_address(0));
1da177e4
LT
3667}
3668EXPORT_SYMBOL(__kmalloc);
3669
ce71e27c 3670void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
7fd6b141 3671{
ce71e27c 3672 return __do_kmalloc(size, flags, (void *)caller);
7fd6b141
PE
3673}
3674EXPORT_SYMBOL(__kmalloc_track_caller);
1d2c8eea
CH
3675
3676#else
3677void *__kmalloc(size_t size, gfp_t flags)
3678{
3679 return __do_kmalloc(size, flags, NULL);
3680}
3681EXPORT_SYMBOL(__kmalloc);
7fd6b141
PE
3682#endif
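/*
 * Illustrative sketch (not part of the original file): the common
 * kmalloc()/kfree() pattern served by __do_kmalloc() above; kmalloc() itself
 * is the inline wrapper from <linux/slab.h>.  The function name is made up.
 */
static int copy_blob(const void *src, size_t len)
{
	void *buf = kmalloc(len, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;
	memcpy(buf, src, len);
	/* ... hand buf to whoever needs it ... */
	kfree(buf);
	return 0;
}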
3683
1da177e4
LT
3684/**
3685 * kmem_cache_free - Deallocate an object
3686 * @cachep: The cache the allocation was from.
3687 * @objp: The previously allocated object.
3688 *
3689 * Free an object which was previously allocated from this
3690 * cache.
3691 */
343e0d7a 3692void kmem_cache_free(struct kmem_cache *cachep, void *objp)
1da177e4
LT
3693{
3694 unsigned long flags;
3695
3696 local_irq_save(flags);
898552c9 3697 debug_check_no_locks_freed(objp, obj_size(cachep));
3ac7fe5a
TG
3698 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3699 debug_check_no_obj_freed(objp, obj_size(cachep));
873623df 3700 __cache_free(cachep, objp);
1da177e4
LT
3701 local_irq_restore(flags);
3702}
3703EXPORT_SYMBOL(kmem_cache_free);
3704
1da177e4
LT
3705/**
3706 * kfree - free previously allocated memory
3707 * @objp: pointer returned by kmalloc.
3708 *
80e93eff
PE
3709 * If @objp is NULL, no operation is performed.
3710 *
1da177e4
LT
3711 * Don't free memory not originally allocated by kmalloc()
3712 * or you will run into trouble.
3713 */
3714void kfree(const void *objp)
3715{
343e0d7a 3716 struct kmem_cache *c;
1da177e4
LT
3717 unsigned long flags;
3718
6cb8f913 3719 if (unlikely(ZERO_OR_NULL_PTR(objp)))
1da177e4
LT
3720 return;
3721 local_irq_save(flags);
3722 kfree_debugcheck(objp);
6ed5eb22 3723 c = virt_to_cache(objp);
f9b8404c 3724 debug_check_no_locks_freed(objp, obj_size(c));
3ac7fe5a 3725 debug_check_no_obj_freed(objp, obj_size(c));
873623df 3726 __cache_free(c, (void *)objp);
1da177e4
LT
3727 local_irq_restore(flags);
3728}
3729EXPORT_SYMBOL(kfree);
3730
343e0d7a 3731unsigned int kmem_cache_size(struct kmem_cache *cachep)
1da177e4 3732{
3dafccf2 3733 return obj_size(cachep);
1da177e4
LT
3734}
3735EXPORT_SYMBOL(kmem_cache_size);
3736
343e0d7a 3737const char *kmem_cache_name(struct kmem_cache *cachep)
1944972d
ACM
3738{
3739 return cachep->name;
3740}
3741EXPORT_SYMBOL_GPL(kmem_cache_name);
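/*
 * Illustrative sketch (not part of the original file): kmem_cache_size() and
 * kmem_cache_name() are mainly useful for diagnostics, e.g. a debug printout
 * like the hypothetical one below.
 */
static void report_cache(struct kmem_cache *cachep)
{
	printk(KERN_DEBUG "cache %s: object size %u bytes\n",
	       kmem_cache_name(cachep), kmem_cache_size(cachep));
}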
3742
e498be7d 3743/*
183ff22b 3744 * This initializes kmem_list3 or resizes various caches for all nodes.
e498be7d 3745 */
343e0d7a 3746static int alloc_kmemlist(struct kmem_cache *cachep)
e498be7d
CL
3747{
3748 int node;
3749 struct kmem_list3 *l3;
cafeb02e 3750 struct array_cache *new_shared;
3395ee05 3751 struct array_cache **new_alien = NULL;
e498be7d 3752
9c09a95c 3753 for_each_online_node(node) {
cafeb02e 3754
3395ee05
PM
3755 if (use_alien_caches) {
3756 new_alien = alloc_alien_cache(node, cachep->limit);
3757 if (!new_alien)
3758 goto fail;
3759 }
cafeb02e 3760
63109846
ED
3761 new_shared = NULL;
3762 if (cachep->shared) {
3763 new_shared = alloc_arraycache(node,
0718dc2a 3764 cachep->shared*cachep->batchcount,
a737b3e2 3765 0xbaadf00d);
63109846
ED
3766 if (!new_shared) {
3767 free_alien_cache(new_alien);
3768 goto fail;
3769 }
0718dc2a 3770 }
cafeb02e 3771
a737b3e2
AM
3772 l3 = cachep->nodelists[node];
3773 if (l3) {
cafeb02e
CL
3774 struct array_cache *shared = l3->shared;
3775
e498be7d
CL
3776 spin_lock_irq(&l3->list_lock);
3777
cafeb02e 3778 if (shared)
0718dc2a
CL
3779 free_block(cachep, shared->entry,
3780 shared->avail, node);
e498be7d 3781
cafeb02e
CL
3782 l3->shared = new_shared;
3783 if (!l3->alien) {
e498be7d
CL
3784 l3->alien = new_alien;
3785 new_alien = NULL;
3786 }
b28a02de 3787 l3->free_limit = (1 + nr_cpus_node(node)) *
a737b3e2 3788 cachep->batchcount + cachep->num;
e498be7d 3789 spin_unlock_irq(&l3->list_lock);
cafeb02e 3790 kfree(shared);
e498be7d
CL
3791 free_alien_cache(new_alien);
3792 continue;
3793 }
a737b3e2 3794 l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
0718dc2a
CL
3795 if (!l3) {
3796 free_alien_cache(new_alien);
3797 kfree(new_shared);
e498be7d 3798 goto fail;
0718dc2a 3799 }
e498be7d
CL
3800
3801 kmem_list3_init(l3);
3802 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
a737b3e2 3803 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
cafeb02e 3804 l3->shared = new_shared;
e498be7d 3805 l3->alien = new_alien;
b28a02de 3806 l3->free_limit = (1 + nr_cpus_node(node)) *
a737b3e2 3807 cachep->batchcount + cachep->num;
e498be7d
CL
3808 cachep->nodelists[node] = l3;
3809 }
cafeb02e 3810 return 0;
0718dc2a 3811
a737b3e2 3812fail:
0718dc2a
CL
3813 if (!cachep->next.next) {
3814 /* Cache is not active yet. Roll back what we did */
3815 node--;
3816 while (node >= 0) {
3817 if (cachep->nodelists[node]) {
3818 l3 = cachep->nodelists[node];
3819
3820 kfree(l3->shared);
3821 free_alien_cache(l3->alien);
3822 kfree(l3);
3823 cachep->nodelists[node] = NULL;
3824 }
3825 node--;
3826 }
3827 }
cafeb02e 3828 return -ENOMEM;
e498be7d
CL
3829}
3830
1da177e4 3831struct ccupdate_struct {
343e0d7a 3832 struct kmem_cache *cachep;
1da177e4
LT
3833 struct array_cache *new[NR_CPUS];
3834};
3835
3836static void do_ccupdate_local(void *info)
3837{
a737b3e2 3838 struct ccupdate_struct *new = info;
1da177e4
LT
3839 struct array_cache *old;
3840
3841 check_irq_off();
9a2dba4b 3842 old = cpu_cache_get(new->cachep);
e498be7d 3843
1da177e4
LT
3844 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3845 new->new[smp_processor_id()] = old;
3846}
3847
b5d8ca7c 3848/* Always called with the cache_chain_mutex held */
a737b3e2
AM
3849static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3850 int batchcount, int shared)
1da177e4 3851{
d2e7b7d0 3852 struct ccupdate_struct *new;
2ed3a4ef 3853 int i;
1da177e4 3854
d2e7b7d0
SS
3855 new = kzalloc(sizeof(*new), GFP_KERNEL);
3856 if (!new)
3857 return -ENOMEM;
3858
e498be7d 3859 for_each_online_cpu(i) {
d2e7b7d0 3860 new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
a737b3e2 3861 batchcount);
d2e7b7d0 3862 if (!new->new[i]) {
b28a02de 3863 for (i--; i >= 0; i--)
d2e7b7d0
SS
3864 kfree(new->new[i]);
3865 kfree(new);
e498be7d 3866 return -ENOMEM;
1da177e4
LT
3867 }
3868 }
d2e7b7d0 3869 new->cachep = cachep;
1da177e4 3870
15c8b6c1 3871 on_each_cpu(do_ccupdate_local, (void *)new, 1);
e498be7d 3872
1da177e4 3873 check_irq_on();
1da177e4
LT
3874 cachep->batchcount = batchcount;
3875 cachep->limit = limit;
e498be7d 3876 cachep->shared = shared;
1da177e4 3877
e498be7d 3878 for_each_online_cpu(i) {
d2e7b7d0 3879 struct array_cache *ccold = new->new[i];
1da177e4
LT
3880 if (!ccold)
3881 continue;
e498be7d 3882 spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
ff69416e 3883 free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
e498be7d 3884 spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
1da177e4
LT
3885 kfree(ccold);
3886 }
d2e7b7d0 3887 kfree(new);
2ed3a4ef 3888 return alloc_kmemlist(cachep);
1da177e4
LT
3889}
3890
b5d8ca7c 3891/* Called with cache_chain_mutex held always */
2ed3a4ef 3892static int enable_cpucache(struct kmem_cache *cachep)
1da177e4
LT
3893{
3894 int err;
3895 int limit, shared;
3896
a737b3e2
AM
3897 /*
3898 * The head array serves three purposes:
1da177e4
LT
3899 * - create a LIFO ordering, i.e. return objects that are cache-warm
3900 * - reduce the number of spinlock operations.
a737b3e2 3901 * - reduce the number of linked list operations on the slab and
1da177e4
LT
3902 * bufctl chains: array operations are cheaper.
3903 * The numbers are guessed; we should auto-tune as described by
3904 * Bonwick. A worked numeric example follows this function.
3905 */
3dafccf2 3906 if (cachep->buffer_size > 131072)
1da177e4 3907 limit = 1;
3dafccf2 3908 else if (cachep->buffer_size > PAGE_SIZE)
1da177e4 3909 limit = 8;
3dafccf2 3910 else if (cachep->buffer_size > 1024)
1da177e4 3911 limit = 24;
3dafccf2 3912 else if (cachep->buffer_size > 256)
1da177e4
LT
3913 limit = 54;
3914 else
3915 limit = 120;
3916
a737b3e2
AM
3917 /*
3918 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
1da177e4
LT
3919 * allocation behaviour: most allocs on one cpu, most free operations
3920 * on another cpu. For these cases, an efficient object passing between
3921 * cpus is necessary. This is provided by a shared array. The array
3922 * replaces Bonwick's magazine layer.
3923 * On uniprocessor, it's functionally equivalent (but less efficient)
3924 * to a larger limit. Thus disabled by default.
3925 */
3926 shared = 0;
364fbb29 3927 if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
1da177e4 3928 shared = 8;
1da177e4
LT
3929
3930#if DEBUG
a737b3e2
AM
3931 /*
3932 * With debugging enabled, a large batchcount leads to excessively long
3933 * periods with local interrupts disabled. Limit the batchcount.
1da177e4
LT
3934 */
3935 if (limit > 32)
3936 limit = 32;
3937#endif
b28a02de 3938 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
1da177e4
LT
3939 if (err)
3940 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
b28a02de 3941 cachep->name, -err);
2ed3a4ef 3942 return err;
1da177e4
LT
3943}
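/*
 * Worked example (values not from the original source): for a hypothetical
 * cache with buffer_size == 512 the table above selects limit = 54, so the
 * do_tune_cpucache() call uses batchcount = (54 + 1) / 2 = 27 and, since
 * 512 <= PAGE_SIZE, shared = 8 on an SMP machine.
 */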
3944
1b55253a
CL
3945/*
3946 * Drain an array if it contains any elements, taking the l3 lock only if
b18e7e65
CL
3947 * necessary. Note that the l3 listlock also protects the array_cache
3948 * if drain_array() is used on the shared array.
1b55253a
CL
3949 */
3950void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3951 struct array_cache *ac, int force, int node)
1da177e4
LT
3952{
3953 int tofree;
3954
1b55253a
CL
3955 if (!ac || !ac->avail)
3956 return;
1da177e4
LT
3957 if (ac->touched && !force) {
3958 ac->touched = 0;
b18e7e65 3959 } else {
1b55253a 3960 spin_lock_irq(&l3->list_lock);
b18e7e65
CL
3961 if (ac->avail) {
3962 tofree = force ? ac->avail : (ac->limit + 4) / 5;
3963 if (tofree > ac->avail)
3964 tofree = (ac->avail + 1) / 2;
3965 free_block(cachep, ac->entry, tofree, node);
3966 ac->avail -= tofree;
3967 memmove(ac->entry, &(ac->entry[tofree]),
3968 sizeof(void *) * ac->avail);
3969 }
1b55253a 3970 spin_unlock_irq(&l3->list_lock);
1da177e4
LT
3971 }
3972}
3973
3974/**
3975 * cache_reap - Reclaim memory from caches.
05fb6bf0 3976 * @w: work descriptor
1da177e4
LT
3977 *
3978 * Called from workqueue/eventd every few seconds.
3979 * Purpose:
3980 * - clear the per-cpu caches for this CPU.
3981 * - return freeable pages to the main free memory pool.
3982 *
a737b3e2
AM
3983 * If we cannot acquire the cache chain mutex then just give up - we'll try
3984 * again on the next iteration.
1da177e4 3985 */
7c5cae36 3986static void cache_reap(struct work_struct *w)
1da177e4 3987{
7a7c381d 3988 struct kmem_cache *searchp;
e498be7d 3989 struct kmem_list3 *l3;
aab2207c 3990 int node = numa_node_id();
7c5cae36
CL
3991 struct delayed_work *work =
3992 container_of(w, struct delayed_work, work);
1da177e4 3993
7c5cae36 3994 if (!mutex_trylock(&cache_chain_mutex))
1da177e4 3995 /* Give up. Setup the next iteration. */
7c5cae36 3996 goto out;
1da177e4 3997
7a7c381d 3998 list_for_each_entry(searchp, &cache_chain, next) {
1da177e4
LT
3999 check_irq_on();
4000
35386e3b
CL
4001 /*
4002 * We only take the l3 lock if absolutely necessary and we
4003 * have established with reasonable certainty that
4004 * we can do some work once the lock is obtained.
4005 */
aab2207c 4006 l3 = searchp->nodelists[node];
35386e3b 4007
8fce4d8e 4008 reap_alien(searchp, l3);
1da177e4 4009
aab2207c 4010 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
1da177e4 4011
35386e3b
CL
4012 /*
4013 * These are racy checks, but it does not matter
4014 * if we skip one check or scan twice.
4015 */
e498be7d 4016 if (time_after(l3->next_reap, jiffies))
35386e3b 4017 goto next;
1da177e4 4018
e498be7d 4019 l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
1da177e4 4020
aab2207c 4021 drain_array(searchp, l3, l3->shared, 0, node);
1da177e4 4022
ed11d9eb 4023 if (l3->free_touched)
e498be7d 4024 l3->free_touched = 0;
ed11d9eb
CL
4025 else {
4026 int freed;
1da177e4 4027
ed11d9eb
CL
4028 freed = drain_freelist(searchp, l3, (l3->free_limit +
4029 5 * searchp->num - 1) / (5 * searchp->num));
4030 STATS_ADD_REAPED(searchp, freed);
4031 }
35386e3b 4032next:
1da177e4
LT
4033 cond_resched();
4034 }
4035 check_irq_on();
fc0abb14 4036 mutex_unlock(&cache_chain_mutex);
8fce4d8e 4037 next_reap_node();
7c5cae36 4038out:
a737b3e2 4039 /* Set up the next iteration */
7c5cae36 4040 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
1da177e4
LT
4041}
4042
158a9624 4043#ifdef CONFIG_SLABINFO
1da177e4 4044
85289f98 4045static void print_slabinfo_header(struct seq_file *m)
1da177e4 4046{
85289f98
PE
4047 /*
4048 * Output format version, so at least we can change it
4049 * without _too_ many complaints.
4050 */
1da177e4 4051#if STATS
85289f98 4052 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1da177e4 4053#else
85289f98 4054 seq_puts(m, "slabinfo - version: 2.1\n");
1da177e4 4055#endif
85289f98
PE
4056 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
4057 "<objperslab> <pagesperslab>");
4058 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4059 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1da177e4 4060#if STATS
85289f98 4061 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
fb7faf33 4062 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
85289f98 4063 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1da177e4 4064#endif
85289f98
PE
4065 seq_putc(m, '\n');
4066}
4067
4068static void *s_start(struct seq_file *m, loff_t *pos)
4069{
4070 loff_t n = *pos;
85289f98 4071
fc0abb14 4072 mutex_lock(&cache_chain_mutex);
85289f98
PE
4073 if (!n)
4074 print_slabinfo_header(m);
b92151ba
PE
4075
4076 return seq_list_start(&cache_chain, *pos);
1da177e4
LT
4077}
4078
4079static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4080{
b92151ba 4081 return seq_list_next(p, &cache_chain, pos);
1da177e4
LT
4082}
4083
4084static void s_stop(struct seq_file *m, void *p)
4085{
fc0abb14 4086 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
4087}
4088
4089static int s_show(struct seq_file *m, void *p)
4090{
b92151ba 4091 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
b28a02de
PE
4092 struct slab *slabp;
4093 unsigned long active_objs;
4094 unsigned long num_objs;
4095 unsigned long active_slabs = 0;
4096 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
e498be7d 4097 const char *name;
1da177e4 4098 char *error = NULL;
e498be7d
CL
4099 int node;
4100 struct kmem_list3 *l3;
1da177e4 4101
1da177e4
LT
4102 active_objs = 0;
4103 num_slabs = 0;
e498be7d
CL
4104 for_each_online_node(node) {
4105 l3 = cachep->nodelists[node];
4106 if (!l3)
4107 continue;
4108
ca3b9b91
RT
4109 check_irq_on();
4110 spin_lock_irq(&l3->list_lock);
e498be7d 4111
7a7c381d 4112 list_for_each_entry(slabp, &l3->slabs_full, list) {
e498be7d
CL
4113 if (slabp->inuse != cachep->num && !error)
4114 error = "slabs_full accounting error";
4115 active_objs += cachep->num;
4116 active_slabs++;
4117 }
7a7c381d 4118 list_for_each_entry(slabp, &l3->slabs_partial, list) {
e498be7d
CL
4119 if (slabp->inuse == cachep->num && !error)
4120 error = "slabs_partial inuse accounting error";
4121 if (!slabp->inuse && !error)
4122 error = "slabs_partial/inuse accounting error";
4123 active_objs += slabp->inuse;
4124 active_slabs++;
4125 }
7a7c381d 4126 list_for_each_entry(slabp, &l3->slabs_free, list) {
e498be7d
CL
4127 if (slabp->inuse && !error)
4128 error = "slabs_free/inuse accounting error";
4129 num_slabs++;
4130 }
4131 free_objects += l3->free_objects;
4484ebf1
RT
4132 if (l3->shared)
4133 shared_avail += l3->shared->avail;
e498be7d 4134
ca3b9b91 4135 spin_unlock_irq(&l3->list_lock);
1da177e4 4136 }
b28a02de
PE
4137 num_slabs += active_slabs;
4138 num_objs = num_slabs * cachep->num;
e498be7d 4139 if (num_objs - active_objs != free_objects && !error)
1da177e4
LT
4140 error = "free_objects accounting error";
4141
b28a02de 4142 name = cachep->name;
1da177e4
LT
4143 if (error)
4144 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4145
4146 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
3dafccf2 4147 name, active_objs, num_objs, cachep->buffer_size,
b28a02de 4148 cachep->num, (1 << cachep->gfporder));
1da177e4 4149 seq_printf(m, " : tunables %4u %4u %4u",
b28a02de 4150 cachep->limit, cachep->batchcount, cachep->shared);
e498be7d 4151 seq_printf(m, " : slabdata %6lu %6lu %6lu",
b28a02de 4152 active_slabs, num_slabs, shared_avail);
1da177e4 4153#if STATS
b28a02de 4154 { /* list3 stats */
1da177e4
LT
4155 unsigned long high = cachep->high_mark;
4156 unsigned long allocs = cachep->num_allocations;
4157 unsigned long grown = cachep->grown;
4158 unsigned long reaped = cachep->reaped;
4159 unsigned long errors = cachep->errors;
4160 unsigned long max_freeable = cachep->max_freeable;
1da177e4 4161 unsigned long node_allocs = cachep->node_allocs;
e498be7d 4162 unsigned long node_frees = cachep->node_frees;
fb7faf33 4163 unsigned long overflows = cachep->node_overflow;
1da177e4 4164
e498be7d 4165 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu \
fb7faf33 4166 %4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
a737b3e2 4167 reaped, errors, max_freeable, node_allocs,
fb7faf33 4168 node_frees, overflows);
1da177e4
LT
4169 }
4170 /* cpu stats */
4171 {
4172 unsigned long allochit = atomic_read(&cachep->allochit);
4173 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4174 unsigned long freehit = atomic_read(&cachep->freehit);
4175 unsigned long freemiss = atomic_read(&cachep->freemiss);
4176
4177 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
b28a02de 4178 allochit, allocmiss, freehit, freemiss);
1da177e4
LT
4179 }
4180#endif
4181 seq_putc(m, '\n');
1da177e4
LT
4182 return 0;
4183}
4184
4185/*
4186 * slabinfo_op - iterator that generates /proc/slabinfo
4187 *
4188 * Output layout:
4189 * cache-name
4190 * num-active-objs
4191 * total-objs
4192 * object size
4193 * num-active-slabs
4194 * total-slabs
4195 * num-pages-per-slab
4196 * + further values on SMP and with statistics enabled
4197 */
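/*
 * Example of a single line in the 2.1 format described above (the numbers
 * are illustrative only, for a 192-byte cache with 20 objects per one-page
 * slab and no statistics compiled in):
 *
 *   dentry  20412  20460  192  20  1 : tunables  120  60  8 : slabdata  1023  1023  0
 */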
4198
7b3c3a50 4199static const struct seq_operations slabinfo_op = {
b28a02de
PE
4200 .start = s_start,
4201 .next = s_next,
4202 .stop = s_stop,
4203 .show = s_show,
1da177e4
LT
4204};
4205
4206#define MAX_SLABINFO_WRITE 128
4207/**
4208 * slabinfo_write - Tuning for the slab allocator
4209 * @file: unused
4210 * @buffer: user buffer
4211 * @count: data length
4212 * @ppos: unused
4213 */
b28a02de
PE
4214ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4215 size_t count, loff_t *ppos)
1da177e4 4216{
b28a02de 4217 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
1da177e4 4218 int limit, batchcount, shared, res;
7a7c381d 4219 struct kmem_cache *cachep;
b28a02de 4220
1da177e4
LT
4221 if (count > MAX_SLABINFO_WRITE)
4222 return -EINVAL;
4223 if (copy_from_user(&kbuf, buffer, count))
4224 return -EFAULT;
b28a02de 4225 kbuf[MAX_SLABINFO_WRITE] = '\0';
1da177e4
LT
4226
4227 tmp = strchr(kbuf, ' ');
4228 if (!tmp)
4229 return -EINVAL;
4230 *tmp = '\0';
4231 tmp++;
4232 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4233 return -EINVAL;
4234
4235 /* Find the cache in the chain of caches. */
fc0abb14 4236 mutex_lock(&cache_chain_mutex);
1da177e4 4237 res = -EINVAL;
7a7c381d 4238 list_for_each_entry(cachep, &cache_chain, next) {
1da177e4 4239 if (!strcmp(cachep->name, kbuf)) {
a737b3e2
AM
4240 if (limit < 1 || batchcount < 1 ||
4241 batchcount > limit || shared < 0) {
e498be7d 4242 res = 0;
1da177e4 4243 } else {
e498be7d 4244 res = do_tune_cpucache(cachep, limit,
b28a02de 4245 batchcount, shared);
1da177e4
LT
4246 }
4247 break;
4248 }
4249 }
fc0abb14 4250 mutex_unlock(&cache_chain_mutex);
1da177e4
LT
4251 if (res >= 0)
4252 res = count;
4253 return res;
4254}
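/*
 * Illustrative use of the tuning interface above (cache name and values are
 * made up): writing "<name> <limit> <batchcount> <shared>" retunes one cache,
 * e.g. from a root shell:
 *
 *   echo "dentry 120 60 8" > /proc/slabinfo
 *
 * Values that do not satisfy 1 <= batchcount <= limit and shared >= 0 are
 * silently ignored; an unknown cache name returns -EINVAL.
 */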
871751e2 4255
7b3c3a50
AD
4256static int slabinfo_open(struct inode *inode, struct file *file)
4257{
4258 return seq_open(file, &slabinfo_op);
4259}
4260
4261static const struct file_operations proc_slabinfo_operations = {
4262 .open = slabinfo_open,
4263 .read = seq_read,
4264 .write = slabinfo_write,
4265 .llseek = seq_lseek,
4266 .release = seq_release,
4267};
4268
871751e2
AV
4269#ifdef CONFIG_DEBUG_SLAB_LEAK
4270
4271static void *leaks_start(struct seq_file *m, loff_t *pos)
4272{
871751e2 4273 mutex_lock(&cache_chain_mutex);
b92151ba 4274 return seq_list_start(&cache_chain, *pos);
871751e2
AV
4275}
4276
4277static inline int add_caller(unsigned long *n, unsigned long v)
4278{
4279 unsigned long *p;
4280 int l;
4281 if (!v)
4282 return 1;
4283 l = n[1];
4284 p = n + 2;
4285 while (l) {
4286 int i = l/2;
4287 unsigned long *q = p + 2 * i;
4288 if (*q == v) {
4289 q[1]++;
4290 return 1;
4291 }
4292 if (*q > v) {
4293 l = i;
4294 } else {
4295 p = q + 2;
4296 l -= i + 1;
4297 }
4298 }
4299 if (++n[1] == n[0])
4300 return 0;
4301 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4302 p[0] = v;
4303 p[1] = 1;
4304 return 1;
4305}
4306
4307static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4308{
4309 void *p;
4310 int i;
4311 if (n[0] == n[1])
4312 return;
4313 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4314 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4315 continue;
4316 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4317 return;
4318 }
4319}
4320
4321static void show_symbol(struct seq_file *m, unsigned long address)
4322{
4323#ifdef CONFIG_KALLSYMS
871751e2 4324 unsigned long offset, size;
9281acea 4325 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
871751e2 4326
a5c43dae 4327 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
871751e2 4328 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
a5c43dae 4329 if (modname[0])
871751e2
AV
4330 seq_printf(m, " [%s]", modname);
4331 return;
4332 }
4333#endif
4334 seq_printf(m, "%p", (void *)address);
4335}
4336
4337static int leaks_show(struct seq_file *m, void *p)
4338{
b92151ba 4339 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
871751e2
AV
4340 struct slab *slabp;
4341 struct kmem_list3 *l3;
4342 const char *name;
4343 unsigned long *n = m->private;
4344 int node;
4345 int i;
4346
4347 if (!(cachep->flags & SLAB_STORE_USER))
4348 return 0;
4349 if (!(cachep->flags & SLAB_RED_ZONE))
4350 return 0;
4351
4352 /* OK, we can do it */
4353
4354 n[1] = 0;
4355
4356 for_each_online_node(node) {
4357 l3 = cachep->nodelists[node];
4358 if (!l3)
4359 continue;
4360
4361 check_irq_on();
4362 spin_lock_irq(&l3->list_lock);
4363
7a7c381d 4364 list_for_each_entry(slabp, &l3->slabs_full, list)
871751e2 4365 handle_slab(n, cachep, slabp);
7a7c381d 4366 list_for_each_entry(slabp, &l3->slabs_partial, list)
871751e2 4367 handle_slab(n, cachep, slabp);
871751e2
AV
4368 spin_unlock_irq(&l3->list_lock);
4369 }
4370 name = cachep->name;
4371 if (n[0] == n[1]) {
4372 /* Increase the buffer size */
4373 mutex_unlock(&cache_chain_mutex);
4374 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4375 if (!m->private) {
4376 /* Too bad, we are really out */
4377 m->private = n;
4378 mutex_lock(&cache_chain_mutex);
4379 return -ENOMEM;
4380 }
4381 *(unsigned long *)m->private = n[0] * 2;
4382 kfree(n);
4383 mutex_lock(&cache_chain_mutex);
4384 /* Now make sure this entry will be retried */
4385 m->count = m->size;
4386 return 0;
4387 }
4388 for (i = 0; i < n[1]; i++) {
4389 seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4390 show_symbol(m, n[2*i+2]);
4391 seq_putc(m, '\n');
4392 }
d2e7b7d0 4393
871751e2
AV
4394 return 0;
4395}
4396
a0ec95a8 4397static const struct seq_operations slabstats_op = {
871751e2
AV
4398 .start = leaks_start,
4399 .next = s_next,
4400 .stop = s_stop,
4401 .show = leaks_show,
4402};
a0ec95a8
AD
4403
4404static int slabstats_open(struct inode *inode, struct file *file)
4405{
4406 unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4407 int ret = -ENOMEM;
4408 if (n) {
4409 ret = seq_open(file, &slabstats_op);
4410 if (!ret) {
4411 struct seq_file *m = file->private_data;
4412 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4413 m->private = n;
4414 n = NULL;
4415 }
4416 kfree(n);
4417 }
4418 return ret;
4419}
4420
4421static const struct file_operations proc_slabstats_operations = {
4422 .open = slabstats_open,
4423 .read = seq_read,
4424 .llseek = seq_lseek,
4425 .release = seq_release_private,
4426};
4427#endif
4428
4429static int __init slab_proc_init(void)
4430{
7b3c3a50 4431 proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
a0ec95a8
AD
4432#ifdef CONFIG_DEBUG_SLAB_LEAK
4433 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
871751e2 4434#endif
a0ec95a8
AD
4435 return 0;
4436}
4437module_init(slab_proc_init);
1da177e4
LT
4438#endif
4439
00e145b6
MS
4440/**
4441 * ksize - get the actual amount of memory allocated for a given object
4442 * @objp: Pointer to the object
4443 *
4444 * kmalloc may internally round up allocations and return more memory
4445 * than requested. ksize() can be used to determine the actual amount of
4446 * memory allocated. The caller may use this additional memory, even though
4447 * a smaller amount of memory was initially specified with the kmalloc call.
4448 * The caller must guarantee that objp points to a valid object previously
4449 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4450 * must not be freed during the duration of the call.
4451 */
fd76bab2 4452size_t ksize(const void *objp)
1da177e4 4453{
ef8b4520
CL
4454 BUG_ON(!objp);
4455 if (unlikely(objp == ZERO_SIZE_PTR))
00e145b6 4456 return 0;
1da177e4 4457
6ed5eb22 4458 return obj_size(virt_to_cache(objp));
1da177e4 4459}
b1aabecd 4460EXPORT_SYMBOL(ksize);
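/*
 * Illustrative sketch (not part of the original file): using ksize() to take
 * advantage of the slack space kmalloc() may have rounded an allocation up
 * to.  The helper name is hypothetical.
 */
static void *grow_buffer(void *buf, size_t want)
{
	/* The existing object may already be large enough. */
	if (buf && ksize(buf) >= want)
		return buf;
	kfree(buf);
	return kmalloc(want, GFP_KERNEL);
}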