mm/slub.c
1/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
5 * The allocator synchronizes using per slab locks and only
6 * uses a centralized lock to manage a pool of partial slabs.
7 *
8 * (C) 2007 SGI, Christoph Lameter
9 */
10
11#include <linux/mm.h>
12#include <linux/module.h>
13#include <linux/bit_spinlock.h>
14#include <linux/interrupt.h>
15#include <linux/bitops.h>
16#include <linux/slab.h>
17#include <linux/seq_file.h>
18#include <linux/cpu.h>
19#include <linux/cpuset.h>
20#include <linux/mempolicy.h>
21#include <linux/ctype.h>
22#include <linux/debugobjects.h>
23#include <linux/kallsyms.h>
24#include <linux/memory.h>
25#include <linux/math64.h>
26
27/*
28 * Lock order:
29 * 1. slab_lock(page)
30 * 2. slab->list_lock
31 *
32 * The slab_lock protects operations on the object of a particular
33 * slab and its metadata in the page struct. If the slab lock
34 * has been taken then no allocations nor frees can be performed
35 * on the objects in the slab nor can the slab be added or removed
36 * from the partial or full lists since this would mean modifying
37 * the page_struct of the slab.
38 *
39 * The list_lock protects the partial and full list on each node and
40 * the partial slab counter. If taken then no new slabs may be added or
41 * removed from the lists, nor may the number of partial slabs be modified.
42 * (Note that the total number of slabs is an atomic value that may be
43 * modified without taking the list lock).
44 *
45 * The list_lock is a centralized lock and thus we avoid taking it as
46 * much as possible. As long as SLUB does not have to handle partial
47 * slabs, operations can continue without any centralized lock. F.e.
48 * allocating a long series of objects that fill up slabs does not require
49 * the list lock.
50 *
51 * The lock order is sometimes inverted when we are trying to get a slab
52 * off a list. We take the list_lock and then look for a page on the list
53 * to use. While we do that objects in the slabs may be freed. We can
54 * only operate on the slab if we have also taken the slab_lock. So we use
55 * a slab_trylock() on the slab. If trylock was successful then no frees
56 * can occur anymore and we can use the slab for allocations etc. If the
57 * slab_trylock() does not succeed then frees are in progress in the slab and
58 * we must stay away from it for a while since we may cause a bouncing
59 * cacheline if we try to acquire the lock. So go onto the next slab.
60 * If all pages are busy then we may allocate a new slab instead of reusing
61 * a partial slab. A new slab has no one operating on it and thus there is
62 * no danger of cacheline contention.
63 *
64 * Interrupts are disabled during allocation and deallocation in order to
65 * make the slab allocator safe to use in the context of an irq. In addition
66 * interrupts are disabled to ensure that the processor does not change
67 * while handling per_cpu slabs, due to kernel preemption.
68 *
69 * SLUB assigns one slab for allocation to each processor.
70 * Allocations only occur from these slabs called cpu slabs.
71 *
72 * Slabs with free elements are kept on a partial list and during regular
73 * operations no list for full slabs is used. If an object in a full slab is
74 * freed then the slab will show up again on the partial lists.
75 * We track full slabs for debugging purposes though because otherwise we
76 * cannot scan all objects.
77 *
78 * Slabs are freed when they become empty. Teardown and setup is
79 * minimal so we rely on the page allocators per cpu caches for
80 * fast frees and allocs.
81 *
82 * Overloading of page flags that are otherwise used for LRU management.
83 *
84 * PageActive The slab is frozen and exempt from list processing.
85 * This means that the slab is dedicated to a purpose
86 * such as satisfying allocations for a specific
87 * processor. Objects may be freed in the slab while
88 * it is frozen but slab_free will then skip the usual
89 * list operations. It is up to the processor holding
90 * the slab to integrate the slab into the slab lists
91 * when the slab is no longer needed.
92 *
93 * One use of this flag is to mark slabs that are
94 * used for allocations. Then such a slab becomes a cpu
95 * slab. The cpu slab may be equipped with an additional
96 * freelist that allows lockless access to
97 * free objects in addition to the regular freelist
98 * that requires the slab lock.
99 *
100 * PageError Slab requires special handling due to debug
101 * options set. This moves slab handling out of
102 * the fast path and disables lockless freelists.
103 */
104
105#define FROZEN (1 << PG_active)
106
107#ifdef CONFIG_SLUB_DEBUG
108#define SLABDEBUG (1 << PG_error)
109#else
110#define SLABDEBUG 0
111#endif
112
113static inline int SlabFrozen(struct page *page)
114{
115 return page->flags & FROZEN;
116}
117
118static inline void SetSlabFrozen(struct page *page)
119{
120 page->flags |= FROZEN;
121}
122
123static inline void ClearSlabFrozen(struct page *page)
124{
125 page->flags &= ~FROZEN;
126}
127
128static inline int SlabDebug(struct page *page)
129{
130 return page->flags & SLABDEBUG;
131}
132
133static inline void SetSlabDebug(struct page *page)
134{
135 page->flags |= SLABDEBUG;
136}
137
138static inline void ClearSlabDebug(struct page *page)
139{
140 page->flags &= ~SLABDEBUG;
141}
142
143/*
144 * Issues still to be resolved:
145 *
146 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
147 *
148 * - Variable sizing of the per node arrays
149 */
150
151/* Enable to test recovery from slab corruption on boot */
152#undef SLUB_RESILIENCY_TEST
153
154/*
155 * Minimum number of partial slabs. These will be left on the partial
156 * lists even if they are empty. kmem_cache_shrink may reclaim them.
157 */
158#define MIN_PARTIAL 5
159
160/*
161 * Maximum number of desirable partial slabs.
162 * The existence of more partial slabs makes kmem_cache_shrink
163 * sort the partial list by the number of objects in use.
164 */
165#define MAX_PARTIAL 10
166
167#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
168 SLAB_POISON | SLAB_STORE_USER)
169
170/*
171 * Set of flags that will prevent slab merging
172 */
173#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
174 SLAB_TRACE | SLAB_DESTROY_BY_RCU)
175
176#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
177 SLAB_CACHE_DMA)
178
179#ifndef ARCH_KMALLOC_MINALIGN
180#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
181#endif
182
183#ifndef ARCH_SLAB_MINALIGN
184#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
185#endif
186
187/* Internal SLUB flags */
188#define __OBJECT_POISON 0x80000000 /* Poison object */
189#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
190
191static int kmem_size = sizeof(struct kmem_cache);
192
193#ifdef CONFIG_SMP
194static struct notifier_block slab_notifier;
195#endif
196
197static enum {
198 DOWN, /* No slab functionality available */
199 PARTIAL, /* kmem_cache_open() works but kmalloc does not */
200 UP, /* Everything works but does not show up in sysfs */
201 SYSFS /* Sysfs up */
202} slab_state = DOWN;
203
204/* A list of all slab caches on the system */
205static DECLARE_RWSEM(slub_lock);
206static LIST_HEAD(slab_caches);
207
208/*
209 * Tracking user of a slab.
210 */
211struct track {
212 void *addr; /* Called from address */
213 int cpu; /* Was running on cpu */
214 int pid; /* Pid context */
215 unsigned long when; /* When did the operation occur */
216};
217
218enum track_item { TRACK_ALLOC, TRACK_FREE };
219
220#ifdef CONFIG_SLUB_DEBUG
221static int sysfs_slab_add(struct kmem_cache *);
222static int sysfs_slab_alias(struct kmem_cache *, const char *);
223static void sysfs_slab_remove(struct kmem_cache *);
224
225#else
226static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
227static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
228 { return 0; }
229static inline void sysfs_slab_remove(struct kmem_cache *s)
230{
231 kfree(s);
232}
233
234#endif
235
236static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
237{
238#ifdef CONFIG_SLUB_STATS
239 c->stat[si]++;
240#endif
241}
242
243/********************************************************************
244 * Core slab cache functions
245 *******************************************************************/
246
247int slab_is_available(void)
248{
249 return slab_state >= UP;
250}
251
252static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
253{
254#ifdef CONFIG_NUMA
255 return s->node[node];
256#else
257 return &s->local_node;
258#endif
259}
260
261static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
262{
263#ifdef CONFIG_SMP
264 return s->cpu_slab[cpu];
265#else
266 return &s->cpu_slab;
267#endif
268}
269
270/* Verify that a pointer has an address that is valid within a slab page */
271static inline int check_valid_pointer(struct kmem_cache *s,
272 struct page *page, const void *object)
273{
274 void *base;
275
276 if (!object)
277 return 1;
278
279 base = page_address(page);
280 if (object < base || object >= base + page->objects * s->size ||
281 (object - base) % s->size) {
282 return 0;
283 }
284
285 return 1;
286}
287
288/*
289 * Slow version of get and set free pointer.
290 *
291 * This version requires touching the cache lines of kmem_cache, which
292 * we avoid doing in the fast alloc/free paths. There we obtain the offset
293 * from the page struct.
294 */
295static inline void *get_freepointer(struct kmem_cache *s, void *object)
296{
297 return *(void **)(object + s->offset);
298}
299
300static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
301{
302 *(void **)(object + s->offset) = fp;
303}
304
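/*
 * Editor's note (illustrative, not part of the original source): SLUB keeps
 * the freelist inside the free objects themselves. Each free object stores
 * the address of the next free object at offset s->offset, so walking the
 * chain is just repeated get_freepointer() calls, e.g.:
 *
 *	void *p = page->freelist;
 *	while (p)
 *		p = get_freepointer(s, p);
 *
 * With s->offset == 0 the link simply overlays the first word of the free
 * object's payload.
 */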
305/* Loop over all objects in a slab */
306#define for_each_object(__p, __s, __addr, __objects) \
307 for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
308 __p += (__s)->size)
309
310/* Scan freelist */
311#define for_each_free_object(__p, __s, __free) \
312 for (__p = (__free); __p; __p = get_freepointer((__s), __p))
313
314/* Determine object index from a given position */
315static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
316{
317 return (p - addr) / s->size;
318}
319
320static inline struct kmem_cache_order_objects oo_make(int order,
321 unsigned long size)
322{
323 struct kmem_cache_order_objects x = {
324 (order << 16) + (PAGE_SIZE << order) / size
325 };
326
327 return x;
328}
329
330static inline int oo_order(struct kmem_cache_order_objects x)
331{
332 return x.x >> 16;
333}
334
335static inline int oo_objects(struct kmem_cache_order_objects x)
336{
337 return x.x & ((1 << 16) - 1);
338}
339
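/*
 * Editor's note (illustrative, not part of the original source): the order
 * and the object count are packed into a single word above, with the order
 * in the upper 16 bits and the objects-per-slab count in the lower 16 bits.
 * Assuming PAGE_SIZE of 4096, oo_make(2, 256) stores
 * (2 << 16) + (16384 / 256) = 0x20040, from which oo_order() recovers 2 and
 * oo_objects() recovers 64.
 */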
340#ifdef CONFIG_SLUB_DEBUG
341/*
342 * Debug settings:
343 */
344#ifdef CONFIG_SLUB_DEBUG_ON
345static int slub_debug = DEBUG_DEFAULT_FLAGS;
346#else
347static int slub_debug;
348#endif
349
350static char *slub_debug_slabs;
351
352/*
353 * Object debugging
354 */
355static void print_section(char *text, u8 *addr, unsigned int length)
356{
357 int i, offset;
358 int newline = 1;
359 char ascii[17];
360
361 ascii[16] = 0;
362
363 for (i = 0; i < length; i++) {
364 if (newline) {
365 printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
366 newline = 0;
367 }
368 printk(KERN_CONT " %02x", addr[i]);
369 offset = i % 16;
370 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
371 if (offset == 15) {
372 printk(KERN_CONT " %s\n", ascii);
373 newline = 1;
374 }
375 }
376 if (!newline) {
377 i %= 16;
378 while (i < 16) {
379 printk(KERN_CONT " ");
380 ascii[i] = ' ';
381 i++;
382 }
383 printk(KERN_CONT " %s\n", ascii);
384 }
385}
386
387static struct track *get_track(struct kmem_cache *s, void *object,
388 enum track_item alloc)
389{
390 struct track *p;
391
392 if (s->offset)
393 p = object + s->offset + sizeof(void *);
394 else
395 p = object + s->inuse;
396
397 return p + alloc;
398}
399
400static void set_track(struct kmem_cache *s, void *object,
401 enum track_item alloc, void *addr)
402{
403 struct track *p;
404
405 if (s->offset)
406 p = object + s->offset + sizeof(void *);
407 else
408 p = object + s->inuse;
409
410 p += alloc;
411 if (addr) {
412 p->addr = addr;
413 p->cpu = smp_processor_id();
414 p->pid = current->pid;
415 p->when = jiffies;
416 } else
417 memset(p, 0, sizeof(struct track));
418}
419
420static void init_tracking(struct kmem_cache *s, void *object)
421{
422 if (!(s->flags & SLAB_STORE_USER))
423 return;
424
425 set_track(s, object, TRACK_FREE, NULL);
426 set_track(s, object, TRACK_ALLOC, NULL);
427}
428
429static void print_track(const char *s, struct track *t)
430{
431 if (!t->addr)
432 return;
433
434 printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
435 s, t->addr, jiffies - t->when, t->cpu, t->pid);
436}
437
438static void print_tracking(struct kmem_cache *s, void *object)
439{
440 if (!(s->flags & SLAB_STORE_USER))
441 return;
442
443 print_track("Allocated", get_track(s, object, TRACK_ALLOC));
444 print_track("Freed", get_track(s, object, TRACK_FREE));
445}
446
447static void print_page_info(struct page *page)
448{
449 printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
450 page, page->objects, page->inuse, page->freelist, page->flags);
451
452}
453
454static void slab_bug(struct kmem_cache *s, char *fmt, ...)
455{
456 va_list args;
457 char buf[100];
458
459 va_start(args, fmt);
460 vsnprintf(buf, sizeof(buf), fmt, args);
461 va_end(args);
462 printk(KERN_ERR "========================================"
463 "=====================================\n");
464 printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
465 printk(KERN_ERR "----------------------------------------"
466 "-------------------------------------\n\n");
467}
468
469static void slab_fix(struct kmem_cache *s, char *fmt, ...)
470{
471 va_list args;
472 char buf[100];
473
474 va_start(args, fmt);
475 vsnprintf(buf, sizeof(buf), fmt, args);
476 va_end(args);
477 printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
478}
479
480static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
481{
482 unsigned int off; /* Offset of last byte */
483 u8 *addr = page_address(page);
484
485 print_tracking(s, p);
486
487 print_page_info(page);
488
489 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
490 p, p - addr, get_freepointer(s, p));
491
492 if (p > addr + 16)
493 print_section("Bytes b4", p - 16, 16);
494
495 print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));
496
497 if (s->flags & SLAB_RED_ZONE)
498 print_section("Redzone", p + s->objsize,
499 s->inuse - s->objsize);
500
501 if (s->offset)
502 off = s->offset + sizeof(void *);
503 else
504 off = s->inuse;
505
506 if (s->flags & SLAB_STORE_USER)
507 off += 2 * sizeof(struct track);
508
509 if (off != s->size)
510 /* Beginning of the filler is the free pointer */
511 print_section("Padding", p + off, s->size - off);
512
513 dump_stack();
514}
515
516static void object_err(struct kmem_cache *s, struct page *page,
517 u8 *object, char *reason)
518{
519 slab_bug(s, "%s", reason);
520 print_trailer(s, page, object);
521}
522
523static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
524{
525 va_list args;
526 char buf[100];
527
528 va_start(args, fmt);
529 vsnprintf(buf, sizeof(buf), fmt, args);
530 va_end(args);
531 slab_bug(s, "%s", buf);
532 print_page_info(page);
533 dump_stack();
534}
535
536static void init_object(struct kmem_cache *s, void *object, int active)
537{
538 u8 *p = object;
539
540 if (s->flags & __OBJECT_POISON) {
541 memset(p, POISON_FREE, s->objsize - 1);
542 p[s->objsize - 1] = POISON_END;
543 }
544
545 if (s->flags & SLAB_RED_ZONE)
546 memset(p + s->objsize,
547 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
548 s->inuse - s->objsize);
549}
550
551static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
552{
553 while (bytes) {
554 if (*start != (u8)value)
555 return start;
556 start++;
557 bytes--;
558 }
559 return NULL;
560}
561
562static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
563 void *from, void *to)
564{
565 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
566 memset(from, data, to - from);
567}
568
569static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
570 u8 *object, char *what,
571 u8 *start, unsigned int value, unsigned int bytes)
572{
573 u8 *fault;
574 u8 *end;
575
576 fault = check_bytes(start, value, bytes);
577 if (!fault)
578 return 1;
579
580 end = start + bytes;
581 while (end > fault && end[-1] == value)
582 end--;
583
584 slab_bug(s, "%s overwritten", what);
585 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
586 fault, end - 1, fault[0], value);
587 print_trailer(s, page, object);
588
589 restore_bytes(s, what, value, fault, end);
590 return 0;
591}
592
593/*
594 * Object layout:
595 *
596 * object address
597 * Bytes of the object to be managed.
598 * If the freepointer may overlay the object then the free
599 * pointer is the first word of the object.
600 *
601 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
602 * 0xa5 (POISON_END)
603 *
604 * object + s->objsize
605 * Padding to reach word boundary. This is also used for Redzoning.
606 * Padding is extended by another word if Redzoning is enabled and
607 * objsize == inuse.
608 *
609 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
610 * 0xcc (RED_ACTIVE) for objects in use.
611 *
612 * object + s->inuse
613 * Meta data starts here.
614 *
615 * A. Free pointer (if we cannot overwrite object on free)
616 * B. Tracking data for SLAB_STORE_USER
617 * C. Padding to reach required alignment boundary or at minimum
618 * one word if debugging is on to be able to detect writes
619 * before the word boundary.
620 *
621 * Padding is done using 0x5a (POISON_INUSE)
622 *
623 * object + s->size
624 * Nothing is used beyond s->size.
625 *
626 * If slabcaches are merged then the objsize and inuse boundaries are mostly
627 * ignored. And therefore no slab options that rely on these boundaries
628 * may be used with merged slabcaches.
629 */
630
631static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
632{
633 unsigned long off = s->inuse; /* The end of info */
634
635 if (s->offset)
636 /* Freepointer is placed after the object. */
637 off += sizeof(void *);
638
639 if (s->flags & SLAB_STORE_USER)
640 /* We also have user information there */
641 off += 2 * sizeof(struct track);
642
643 if (s->size == off)
644 return 1;
645
646 return check_bytes_and_report(s, page, p, "Object padding",
647 p + off, POISON_INUSE, s->size - off);
648}
649
650/* Check the pad bytes at the end of a slab page */
651static int slab_pad_check(struct kmem_cache *s, struct page *page)
652{
653 u8 *start;
654 u8 *fault;
655 u8 *end;
656 int length;
657 int remainder;
658
659 if (!(s->flags & SLAB_POISON))
660 return 1;
661
662 start = page_address(page);
663 length = (PAGE_SIZE << compound_order(page));
664 end = start + length;
665 remainder = length % s->size;
666 if (!remainder)
667 return 1;
668
669 fault = check_bytes(end - remainder, POISON_INUSE, remainder);
670 if (!fault)
671 return 1;
672 while (end > fault && end[-1] == POISON_INUSE)
673 end--;
674
675 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
676 print_section("Padding", end - remainder, remainder);
677
678 restore_bytes(s, "slab padding", POISON_INUSE, start, end);
679 return 0;
680}
681
682static int check_object(struct kmem_cache *s, struct page *page,
683 void *object, int active)
684{
685 u8 *p = object;
686 u8 *endobject = object + s->objsize;
687
688 if (s->flags & SLAB_RED_ZONE) {
689 unsigned int red =
690 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
691
692 if (!check_bytes_and_report(s, page, object, "Redzone",
693 endobject, red, s->inuse - s->objsize))
694 return 0;
695 } else {
696 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
697 check_bytes_and_report(s, page, p, "Alignment padding",
698 endobject, POISON_INUSE, s->inuse - s->objsize);
699 }
700 }
701
702 if (s->flags & SLAB_POISON) {
703 if (!active && (s->flags & __OBJECT_POISON) &&
704 (!check_bytes_and_report(s, page, p, "Poison", p,
705 POISON_FREE, s->objsize - 1) ||
706 !check_bytes_and_report(s, page, p, "Poison",
707 p + s->objsize - 1, POISON_END, 1)))
708 return 0;
709 /*
710 * check_pad_bytes cleans up on its own.
711 */
712 check_pad_bytes(s, page, p);
713 }
714
715 if (!s->offset && active)
716 /*
717 * Object and freepointer overlap. Cannot check
718 * freepointer while object is allocated.
719 */
720 return 1;
721
722 /* Check free pointer validity */
723 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
724 object_err(s, page, p, "Freepointer corrupt");
725 /*
726 * No choice but to zap it and thus lose the remainder
727 * of the free objects in this slab. May cause
728 * another error because the object count is now wrong.
729 */
730 set_freepointer(s, p, NULL);
731 return 0;
732 }
733 return 1;
734}
735
736static int check_slab(struct kmem_cache *s, struct page *page)
737{
738 int maxobj;
739
740 VM_BUG_ON(!irqs_disabled());
741
742 if (!PageSlab(page)) {
743 slab_err(s, page, "Not a valid slab page");
744 return 0;
745 }
746
747 maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
748 if (page->objects > maxobj) {
749 slab_err(s, page, "objects %u > max %u",
750 page->objects, maxobj);
751 return 0;
752 }
753 if (page->inuse > page->objects) {
754 slab_err(s, page, "inuse %u > max %u",
755 page->inuse, page->objects);
756 return 0;
757 }
758 /* Slab_pad_check fixes things up after itself */
759 slab_pad_check(s, page);
760 return 1;
761}
762
763/*
764 * Determine if a certain object on a page is on the freelist. Must hold the
765 * slab lock to guarantee that the chains are in a consistent state.
766 */
767static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
768{
769 int nr = 0;
770 void *fp = page->freelist;
771 void *object = NULL;
772 unsigned long max_objects;
773
774 while (fp && nr <= page->objects) {
775 if (fp == search)
776 return 1;
777 if (!check_valid_pointer(s, page, fp)) {
778 if (object) {
779 object_err(s, page, object,
780 "Freechain corrupt");
781 set_freepointer(s, object, NULL);
782 break;
783 } else {
784 slab_err(s, page, "Freepointer corrupt");
785 page->freelist = NULL;
786 page->inuse = page->objects;
787 slab_fix(s, "Freelist cleared");
788 return 0;
789 }
790 break;
791 }
792 object = fp;
793 fp = get_freepointer(s, object);
794 nr++;
795 }
796
797 max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
798 if (max_objects > 65535)
799 max_objects = 65535;
800
801 if (page->objects != max_objects) {
802 slab_err(s, page, "Wrong number of objects. Found %d but "
803 "should be %d", page->objects, max_objects);
804 page->objects = max_objects;
805 slab_fix(s, "Number of objects adjusted.");
806 }
807 if (page->inuse != page->objects - nr) {
808 slab_err(s, page, "Wrong object count. Counter is %d but "
809 "counted were %d", page->inuse, page->objects - nr);
810 page->inuse = page->objects - nr;
811 slab_fix(s, "Object count adjusted.");
812 }
813 return search == NULL;
814}
815
816static void trace(struct kmem_cache *s, struct page *page, void *object,
817 int alloc)
818{
819 if (s->flags & SLAB_TRACE) {
820 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
821 s->name,
822 alloc ? "alloc" : "free",
823 object, page->inuse,
824 page->freelist);
825
826 if (!alloc)
827 print_section("Object", (void *)object, s->objsize);
828
829 dump_stack();
830 }
831}
832
833/*
834 * Tracking of fully allocated slabs for debugging purposes.
835 */
836static void add_full(struct kmem_cache_node *n, struct page *page)
837{
838 spin_lock(&n->list_lock);
839 list_add(&page->lru, &n->full);
840 spin_unlock(&n->list_lock);
841}
842
843static void remove_full(struct kmem_cache *s, struct page *page)
844{
845 struct kmem_cache_node *n;
846
847 if (!(s->flags & SLAB_STORE_USER))
848 return;
849
850 n = get_node(s, page_to_nid(page));
851
852 spin_lock(&n->list_lock);
853 list_del(&page->lru);
854 spin_unlock(&n->list_lock);
855}
856
857/* Tracking of the number of slabs for debugging purposes */
858static inline unsigned long slabs_node(struct kmem_cache *s, int node)
859{
860 struct kmem_cache_node *n = get_node(s, node);
861
862 return atomic_long_read(&n->nr_slabs);
863}
864
865static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
866{
867 struct kmem_cache_node *n = get_node(s, node);
868
869 /*
870 * May be called early in order to allocate a slab for the
871 * kmem_cache_node structure. Solve the chicken-egg
872 * dilemma by deferring the increment of the count during
873 * bootstrap (see early_kmem_cache_node_alloc).
874 */
875 if (!NUMA_BUILD || n) {
876 atomic_long_inc(&n->nr_slabs);
877 atomic_long_add(objects, &n->total_objects);
878 }
879}
880static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
881{
882 struct kmem_cache_node *n = get_node(s, node);
883
884 atomic_long_dec(&n->nr_slabs);
885 atomic_long_sub(objects, &n->total_objects);
886}
887
888/* Object debug checks for alloc/free paths */
889static void setup_object_debug(struct kmem_cache *s, struct page *page,
890 void *object)
891{
892 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
893 return;
894
895 init_object(s, object, 0);
896 init_tracking(s, object);
897}
898
899static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
900 void *object, void *addr)
901{
902 if (!check_slab(s, page))
903 goto bad;
904
905 if (!on_freelist(s, page, object)) {
906 object_err(s, page, object, "Object already allocated");
907 goto bad;
908 }
909
910 if (!check_valid_pointer(s, page, object)) {
911 object_err(s, page, object, "Freelist Pointer check fails");
912 goto bad;
913 }
914
915 if (!check_object(s, page, object, 0))
916 goto bad;
917
918 /* Success. Perform special debug activities for allocs. */
919 if (s->flags & SLAB_STORE_USER)
920 set_track(s, object, TRACK_ALLOC, addr);
921 trace(s, page, object, 1);
922 init_object(s, object, 1);
923 return 1;
924
925bad:
926 if (PageSlab(page)) {
927 /*
928 * If this is a slab page then lets do the best we can
929 * to avoid issues in the future. Marking all objects
930 * as used avoids touching the remaining objects.
931 */
932 slab_fix(s, "Marking all objects used");
933 page->inuse = page->objects;
934 page->freelist = NULL;
935 }
936 return 0;
937}
938
939static int free_debug_processing(struct kmem_cache *s, struct page *page,
940 void *object, void *addr)
941{
942 if (!check_slab(s, page))
943 goto fail;
944
945 if (!check_valid_pointer(s, page, object)) {
946 slab_err(s, page, "Invalid object pointer 0x%p", object);
947 goto fail;
948 }
949
950 if (on_freelist(s, page, object)) {
951 object_err(s, page, object, "Object already free");
952 goto fail;
953 }
954
955 if (!check_object(s, page, object, 1))
956 return 0;
957
958 if (unlikely(s != page->slab)) {
959 if (!PageSlab(page)) {
960 slab_err(s, page, "Attempt to free object(0x%p) "
961 "outside of slab", object);
962 } else if (!page->slab) {
963 printk(KERN_ERR
964 "SLUB <none>: no slab for object 0x%p.\n",
965 object);
966 dump_stack();
967 } else
968 object_err(s, page, object,
969 "page slab pointer corrupt.");
970 goto fail;
971 }
972
973 /* Special debug activities for freeing objects */
974 if (!SlabFrozen(page) && !page->freelist)
975 remove_full(s, page);
976 if (s->flags & SLAB_STORE_USER)
977 set_track(s, object, TRACK_FREE, addr);
978 trace(s, page, object, 0);
979 init_object(s, object, 0);
980 return 1;
981
982fail:
983 slab_fix(s, "Object at 0x%p not freed", object);
984 return 0;
985}
986
987static int __init setup_slub_debug(char *str)
988{
989 slub_debug = DEBUG_DEFAULT_FLAGS;
990 if (*str++ != '=' || !*str)
991 /*
992 * No options specified. Switch on full debugging.
993 */
994 goto out;
995
996 if (*str == ',')
997 /*
998 * No options but restriction on slabs. This means full
999 * debugging for slabs matching a pattern.
1000 */
1001 goto check_slabs;
1002
1003 slub_debug = 0;
1004 if (*str == '-')
1005 /*
1006 * Switch off all debugging measures.
1007 */
1008 goto out;
1009
1010 /*
1011 * Determine which debug features should be switched on
1012 */
1013 for (; *str && *str != ','; str++) {
1014 switch (tolower(*str)) {
1015 case 'f':
1016 slub_debug |= SLAB_DEBUG_FREE;
1017 break;
1018 case 'z':
1019 slub_debug |= SLAB_RED_ZONE;
1020 break;
1021 case 'p':
1022 slub_debug |= SLAB_POISON;
1023 break;
1024 case 'u':
1025 slub_debug |= SLAB_STORE_USER;
1026 break;
1027 case 't':
1028 slub_debug |= SLAB_TRACE;
1029 break;
1030 default:
1031 printk(KERN_ERR "slub_debug option '%c' "
1032 "unknown. skipped\n", *str);
1033 }
1034 }
1035
1036check_slabs:
1037 if (*str == ',')
1038 slub_debug_slabs = str + 1;
1039out:
1040 return 1;
1041}
1042
1043__setup("slub_debug", setup_slub_debug);
1044
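/*
 * Editor's note (illustrative, not part of the original source): per the
 * parser above, booting with "slub_debug" enables the full default debug
 * flags for all caches, "slub_debug=FZP" selects sanity checks, red zoning
 * and poisoning only, and "slub_debug=,dentry" restricts full debugging to
 * caches whose name starts with "dentry".
 */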
1045static unsigned long kmem_cache_flags(unsigned long objsize,
1046 unsigned long flags, const char *name,
1047 void (*ctor)(struct kmem_cache *, void *))
1048{
1049 /*
1050 * Enable debugging if selected on the kernel commandline.
1051 */
1052 if (slub_debug && (!slub_debug_slabs ||
1053 strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
1054 flags |= slub_debug;
1055
1056 return flags;
1057}
1058#else
1059static inline void setup_object_debug(struct kmem_cache *s,
1060 struct page *page, void *object) {}
1061
1062static inline int alloc_debug_processing(struct kmem_cache *s,
1063 struct page *page, void *object, void *addr) { return 0; }
1064
1065static inline int free_debug_processing(struct kmem_cache *s,
1066 struct page *page, void *object, void *addr) { return 0; }
1067
1068static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1069 { return 1; }
1070static inline int check_object(struct kmem_cache *s, struct page *page,
1071 void *object, int active) { return 1; }
1072static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1073static inline unsigned long kmem_cache_flags(unsigned long objsize,
1074 unsigned long flags, const char *name,
1075 void (*ctor)(struct kmem_cache *, void *))
1076{
1077 return flags;
1078}
1079#define slub_debug 0
1080
1081static inline unsigned long slabs_node(struct kmem_cache *s, int node)
1082 { return 0; }
1083static inline void inc_slabs_node(struct kmem_cache *s, int node,
1084 int objects) {}
1085static inline void dec_slabs_node(struct kmem_cache *s, int node,
1086 int objects) {}
1087#endif
1088
1089/*
1090 * Slab allocation and freeing
1091 */
1092static inline struct page *alloc_slab_page(gfp_t flags, int node,
1093 struct kmem_cache_order_objects oo)
1094{
1095 int order = oo_order(oo);
1096
1097 if (node == -1)
1098 return alloc_pages(flags, order);
1099 else
1100 return alloc_pages_node(node, flags, order);
1101}
1102
1103static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1104{
1105 struct page *page;
1106 struct kmem_cache_order_objects oo = s->oo;
1107
1108 flags |= s->allocflags;
1109
1110 page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
1111 oo);
1112 if (unlikely(!page)) {
1113 oo = s->min;
1114 /*
1115 * Allocation may have failed due to fragmentation.
1116 * Try a lower order alloc if possible
1117 */
1118 page = alloc_slab_page(flags, node, oo);
1119 if (!page)
1120 return NULL;
1121
1122 stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
1123 }
1124 page->objects = oo_objects(oo);
1125 mod_zone_page_state(page_zone(page),
1126 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1127 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1128 1 << oo_order(oo));
1129
1130 return page;
1131}
1132
1133static void setup_object(struct kmem_cache *s, struct page *page,
1134 void *object)
1135{
1136 setup_object_debug(s, page, object);
1137 if (unlikely(s->ctor))
1138 s->ctor(s, object);
1139}
1140
1141static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1142{
1143 struct page *page;
1144 void *start;
1145 void *last;
1146 void *p;
1147
1148 BUG_ON(flags & GFP_SLAB_BUG_MASK);
1149
1150 page = allocate_slab(s,
1151 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1152 if (!page)
1153 goto out;
1154
1155 inc_slabs_node(s, page_to_nid(page), page->objects);
1156 page->slab = s;
1157 page->flags |= 1 << PG_slab;
1158 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1159 SLAB_STORE_USER | SLAB_TRACE))
1160 SetSlabDebug(page);
1161
1162 start = page_address(page);
1163
1164 if (unlikely(s->flags & SLAB_POISON))
1165 memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));
1166
1167 last = start;
1168 for_each_object(p, s, start, page->objects) {
1169 setup_object(s, page, last);
1170 set_freepointer(s, last, p);
1171 last = p;
1172 }
1173 setup_object(s, page, last);
1174 set_freepointer(s, last, NULL);
1175
1176 page->freelist = start;
1177 page->inuse = 0;
1178out:
1179 return page;
1180}
1181
1182static void __free_slab(struct kmem_cache *s, struct page *page)
1183{
1184 int order = compound_order(page);
1185 int pages = 1 << order;
1186
1187 if (unlikely(SlabDebug(page))) {
1188 void *p;
1189
1190 slab_pad_check(s, page);
1191 for_each_object(p, s, page_address(page),
1192 page->objects)
1193 check_object(s, page, p, 0);
1194 ClearSlabDebug(page);
1195 }
1196
1197 mod_zone_page_state(page_zone(page),
1198 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1199 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1200 -pages);
1201
1202 __ClearPageSlab(page);
1203 reset_page_mapcount(page);
1204 __free_pages(page, order);
1205}
1206
1207static void rcu_free_slab(struct rcu_head *h)
1208{
1209 struct page *page;
1210
1211 page = container_of((struct list_head *)h, struct page, lru);
1212 __free_slab(page->slab, page);
1213}
1214
1215static void free_slab(struct kmem_cache *s, struct page *page)
1216{
1217 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1218 /*
1219 * RCU free overloads the RCU head over the LRU
1220 */
1221 struct rcu_head *head = (void *)&page->lru;
1222
1223 call_rcu(head, rcu_free_slab);
1224 } else
1225 __free_slab(s, page);
1226}
1227
1228static void discard_slab(struct kmem_cache *s, struct page *page)
1229{
1230 dec_slabs_node(s, page_to_nid(page), page->objects);
1231 free_slab(s, page);
1232}
1233
1234/*
1235 * Per slab locking using the pagelock
1236 */
1237static __always_inline void slab_lock(struct page *page)
1238{
1239 bit_spin_lock(PG_locked, &page->flags);
1240}
1241
1242static __always_inline void slab_unlock(struct page *page)
1243{
1244 __bit_spin_unlock(PG_locked, &page->flags);
1245}
1246
1247static __always_inline int slab_trylock(struct page *page)
1248{
1249 int rc = 1;
1250
1251 rc = bit_spin_trylock(PG_locked, &page->flags);
1252 return rc;
1253}
1254
1255/*
1256 * Management of partially allocated slabs
1257 */
1258static void add_partial(struct kmem_cache_node *n,
1259 struct page *page, int tail)
1260{
1261 spin_lock(&n->list_lock);
1262 n->nr_partial++;
1263 if (tail)
1264 list_add_tail(&page->lru, &n->partial);
1265 else
1266 list_add(&page->lru, &n->partial);
1267 spin_unlock(&n->list_lock);
1268}
1269
1270static void remove_partial(struct kmem_cache *s, struct page *page)
1271{
1272 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1273
1274 spin_lock(&n->list_lock);
1275 list_del(&page->lru);
1276 n->nr_partial--;
1277 spin_unlock(&n->list_lock);
1278}
1279
1280/*
672bba3a 1281 * Lock slab and remove from the partial list.
81819f0f 1282 *
672bba3a 1283 * Must hold list_lock.
81819f0f 1284 */
0121c619
CL
1285static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
1286 struct page *page)
1287{
1288 if (slab_trylock(page)) {
1289 list_del(&page->lru);
1290 n->nr_partial--;
4b6f0750 1291 SetSlabFrozen(page);
81819f0f
CL
1292 return 1;
1293 }
1294 return 0;
1295}
1296
1297/*
1298 * Try to allocate a partial slab from a specific node.
1299 */
1300static struct page *get_partial_node(struct kmem_cache_node *n)
1301{
1302 struct page *page;
1303
1304 /*
1305 * Racy check. If we mistakenly see no partial slabs then we
1306 * just allocate an empty slab. If we mistakenly try to get a
1307 * partial slab and there is none available then get_partials()
1308 * will return NULL.
1309 */
1310 if (!n || !n->nr_partial)
1311 return NULL;
1312
1313 spin_lock(&n->list_lock);
1314 list_for_each_entry(page, &n->partial, lru)
1315 if (lock_and_freeze_slab(n, page))
1316 goto out;
1317 page = NULL;
1318out:
1319 spin_unlock(&n->list_lock);
1320 return page;
1321}
1322
1323/*
1324 * Get a page from somewhere. Search in increasing NUMA distances.
1325 */
1326static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1327{
1328#ifdef CONFIG_NUMA
1329 struct zonelist *zonelist;
1330 struct zoneref *z;
1331 struct zone *zone;
1332 enum zone_type high_zoneidx = gfp_zone(flags);
1333 struct page *page;
1334
1335 /*
1336 * The defrag ratio allows a configuration of the tradeoffs between
1337 * inter node defragmentation and node local allocations. A lower
1338 * defrag_ratio increases the tendency to do local allocations
1339 * instead of attempting to obtain partial slabs from other nodes.
1340 *
1341 * If the defrag_ratio is set to 0 then kmalloc() always
1342 * returns node local objects. If the ratio is higher then kmalloc()
1343 * may return off node objects because partial slabs are obtained
1344 * from other nodes and filled up.
1345 *
1346 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
1347 * defrag_ratio = 1000) then every (well almost) allocation will
1348 * first attempt to defrag slab caches on other nodes. This means
1349 * scanning over all nodes to look for partial slabs which may be
1350 * expensive if we do it every time we are trying to find a slab
1351 * with available objects.
1352 */
1353 if (!s->remote_node_defrag_ratio ||
1354 get_cycles() % 1024 > s->remote_node_defrag_ratio)
1355 return NULL;
1356
1357 zonelist = node_zonelist(slab_node(current->mempolicy), flags);
1358 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1359 struct kmem_cache_node *n;
1360
1361 n = get_node(s, zone_to_nid(zone));
1362
1363 if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
1364 n->nr_partial > MIN_PARTIAL) {
1365 page = get_partial_node(n);
1366 if (page)
1367 return page;
1368 }
1369 }
1370#endif
1371 return NULL;
1372}
1373
1374/*
1375 * Get a partial page, lock it and return it.
1376 */
1377static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1378{
1379 struct page *page;
1380 int searchnode = (node == -1) ? numa_node_id() : node;
1381
1382 page = get_partial_node(get_node(s, searchnode));
1383 if (page || (flags & __GFP_THISNODE))
1384 return page;
1385
1386 return get_any_partial(s, flags);
1387}
1388
1389/*
1390 * Move a page back to the lists.
1391 *
1392 * Must be called with the slab lock held.
1393 *
1394 * On exit the slab lock will have been dropped.
1395 */
1396static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
1397{
1398 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1399 struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());
1400
1401 ClearSlabFrozen(page);
1402 if (page->inuse) {
1403
1404 if (page->freelist) {
1405 add_partial(n, page, tail);
1406 stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
1407 } else {
1408 stat(c, DEACTIVATE_FULL);
1409 if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
1410 add_full(n, page);
1411 }
1412 slab_unlock(page);
1413 } else {
1414 stat(c, DEACTIVATE_EMPTY);
1415 if (n->nr_partial < MIN_PARTIAL) {
1416 /*
1417 * Adding an empty slab to the partial slabs in order
1418 * to avoid page allocator overhead. This slab needs
1419 * to come after the other slabs with objects in
1420 * so that the others get filled first. That way the
1421 * size of the partial list stays small.
1422 *
1423 * kmem_cache_shrink can reclaim any empty slabs from
1424 * the partial list.
1425 */
1426 add_partial(n, page, 1);
1427 slab_unlock(page);
1428 } else {
1429 slab_unlock(page);
1430 stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
1431 discard_slab(s, page);
1432 }
1433 }
1434}
1435
1436/*
1437 * Remove the cpu slab
1438 */
1439static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1440{
1441 struct page *page = c->page;
1442 int tail = 1;
1443
1444 if (page->freelist)
1445 stat(c, DEACTIVATE_REMOTE_FREES);
1446 /*
1447 * Merge cpu freelist into slab freelist. Typically we get here
1448 * because both freelists are empty. So this is unlikely
1449 * to occur.
1450 */
1451 while (unlikely(c->freelist)) {
1452 void **object;
1453
1454 tail = 0; /* Hot objects. Put the slab first */
1455
1456 /* Retrieve object from cpu_freelist */
1457 object = c->freelist;
1458 c->freelist = c->freelist[c->offset];
1459
1460 /* And put onto the regular freelist */
1461 object[c->offset] = page->freelist;
1462 page->freelist = object;
1463 page->inuse--;
1464 }
1465 c->page = NULL;
1466 unfreeze_slab(s, page, tail);
1467}
1468
1469static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
1470{
1471 stat(c, CPUSLAB_FLUSH);
1472 slab_lock(c->page);
1473 deactivate_slab(s, c);
1474}
1475
1476/*
1477 * Flush cpu slab.
1478 *
1479 * Called from IPI handler with interrupts disabled.
1480 */
1481static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1482{
1483 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1484
1485 if (likely(c && c->page))
1486 flush_slab(s, c);
1487}
1488
1489static void flush_cpu_slab(void *d)
1490{
1491 struct kmem_cache *s = d;
1492
1493 __flush_cpu_slab(s, smp_processor_id());
1494}
1495
1496static void flush_all(struct kmem_cache *s)
1497{
1498 on_each_cpu(flush_cpu_slab, s, 1);
1499}
1500
1501/*
1502 * Check if the objects in a per cpu structure fit numa
1503 * locality expectations.
1504 */
1505static inline int node_match(struct kmem_cache_cpu *c, int node)
1506{
1507#ifdef CONFIG_NUMA
1508 if (node != -1 && c->node != node)
1509 return 0;
1510#endif
1511 return 1;
1512}
1513
1514/*
1515 * Slow path. The lockless freelist is empty or we need to perform
1516 * debugging duties.
1517 *
1518 * Interrupts are disabled.
1519 *
1520 * Processing is still very fast if new objects have been freed to the
1521 * regular freelist. In that case we simply take over the regular freelist
1522 * as the lockless freelist and zap the regular freelist.
1523 *
1524 * If that is not working then we fall back to the partial lists. We take the
1525 * first element of the freelist as the object to allocate now and move the
1526 * rest of the freelist to the lockless freelist.
1527 *
1528 * And if we were unable to get a new slab from the partial slab lists then
1529 * we need to allocate a new slab. This is the slowest path since it involves
1530 * a call to the page allocator and the setup of a new slab.
1531 */
1532static void *__slab_alloc(struct kmem_cache *s,
1533 gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
1534{
1535 void **object;
1536 struct page *new;
1537
1538 /* We handle __GFP_ZERO in the caller */
1539 gfpflags &= ~__GFP_ZERO;
1540
1541 if (!c->page)
1542 goto new_slab;
1543
1544 slab_lock(c->page);
1545 if (unlikely(!node_match(c, node)))
1546 goto another_slab;
1547
1548 stat(c, ALLOC_REFILL);
1549
1550load_freelist:
1551 object = c->page->freelist;
1552 if (unlikely(!object))
1553 goto another_slab;
1554 if (unlikely(SlabDebug(c->page)))
1555 goto debug;
1556
1557 c->freelist = object[c->offset];
1558 c->page->inuse = c->page->objects;
1559 c->page->freelist = NULL;
1560 c->node = page_to_nid(c->page);
1561unlock_out:
1562 slab_unlock(c->page);
1563 stat(c, ALLOC_SLOWPATH);
1564 return object;
1565
1566another_slab:
1567 deactivate_slab(s, c);
1568
1569new_slab:
1570 new = get_partial(s, gfpflags, node);
1571 if (new) {
1572 c->page = new;
1573 stat(c, ALLOC_FROM_PARTIAL);
1574 goto load_freelist;
1575 }
1576
1577 if (gfpflags & __GFP_WAIT)
1578 local_irq_enable();
1579
1580 new = new_slab(s, gfpflags, node);
1581
1582 if (gfpflags & __GFP_WAIT)
1583 local_irq_disable();
1584
1585 if (new) {
1586 c = get_cpu_slab(s, smp_processor_id());
1587 stat(c, ALLOC_SLAB);
1588 if (c->page)
1589 flush_slab(s, c);
1590 slab_lock(new);
1591 SetSlabFrozen(new);
1592 c->page = new;
1593 goto load_freelist;
1594 }
1595 return NULL;
1596debug:
1597 if (!alloc_debug_processing(s, c->page, object, addr))
1598 goto another_slab;
1599
1600 c->page->inuse++;
1601 c->page->freelist = object[c->offset];
1602 c->node = -1;
1603 goto unlock_out;
1604}
1605
1606/*
1607 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1608 * have the fastpath folded into their functions. So no function call
1609 * overhead for requests that can be satisfied on the fastpath.
1610 *
1611 * The fastpath works by first checking if the lockless freelist can be used.
1612 * If not then __slab_alloc is called for slow processing.
1613 *
1614 * Otherwise we can simply pick the next object from the lockless free list.
1615 */
1616static __always_inline void *slab_alloc(struct kmem_cache *s,
1617 gfp_t gfpflags, int node, void *addr)
1618{
1619 void **object;
1620 struct kmem_cache_cpu *c;
1621 unsigned long flags;
1622 unsigned int objsize;
1623
1624 local_irq_save(flags);
1625 c = get_cpu_slab(s, smp_processor_id());
1626 objsize = c->objsize;
1627 if (unlikely(!c->freelist || !node_match(c, node)))
1628
1629 object = __slab_alloc(s, gfpflags, node, addr, c);
1630
1631 else {
1632 object = c->freelist;
1633 c->freelist = object[c->offset];
1634 stat(c, ALLOC_FASTPATH);
1635 }
1636 local_irq_restore(flags);
1637
1638 if (unlikely((gfpflags & __GFP_ZERO) && object))
1639 memset(object, 0, objsize);
1640
1641 return object;
1642}
1643
1644void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1645{
1646 return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
1647}
1648EXPORT_SYMBOL(kmem_cache_alloc);
1649
1650#ifdef CONFIG_NUMA
1651void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1652{
1653 return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
1654}
1655EXPORT_SYMBOL(kmem_cache_alloc_node);
1656#endif
1657
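/*
 * Editor's note (illustrative, not part of the original source): typical use
 * of the interfaces above, assuming a cache "foo_cache" for "struct foo"
 * created elsewhere with kmem_cache_create():
 *
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	if (f) {
 *		... use f ...
 *		kmem_cache_free(foo_cache, f);
 *	}
 *
 * GFP_KERNEL allocations may take the __GFP_WAIT path in __slab_alloc(),
 * which briefly re-enables interrupts around the page allocator call.
 */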
1658/*
1659 * Slow path handling. This may still be called frequently since objects
1660 * have a longer lifetime than the cpu slabs in most processing loads.
1661 *
1662 * So we still attempt to reduce cache line usage. Just take the slab
1663 * lock and free the item. If there is no additional partial page
1664 * handling required then we can return immediately.
1665 */
1666static void __slab_free(struct kmem_cache *s, struct page *page,
1667 void *x, void *addr, unsigned int offset)
1668{
1669 void *prior;
1670 void **object = (void *)x;
1671 struct kmem_cache_cpu *c;
1672
1673 c = get_cpu_slab(s, raw_smp_processor_id());
1674 stat(c, FREE_SLOWPATH);
1675 slab_lock(page);
1676
1677 if (unlikely(SlabDebug(page)))
1678 goto debug;
1679
1680checks_ok:
1681 prior = object[offset] = page->freelist;
1682 page->freelist = object;
1683 page->inuse--;
1684
1685 if (unlikely(SlabFrozen(page))) {
1686 stat(c, FREE_FROZEN);
1687 goto out_unlock;
1688 }
1689
1690 if (unlikely(!page->inuse))
1691 goto slab_empty;
1692
1693 /*
1694 * Objects left in the slab. If it was not on the partial list before
1695 * then add it.
1696 */
1697 if (unlikely(!prior)) {
1698 add_partial(get_node(s, page_to_nid(page)), page, 1);
1699 stat(c, FREE_ADD_PARTIAL);
1700 }
1701
1702out_unlock:
1703 slab_unlock(page);
1704 return;
1705
1706slab_empty:
1707 if (prior) {
1708 /*
1709 * Slab still on the partial list.
1710 */
1711 remove_partial(s, page);
1712 stat(c, FREE_REMOVE_PARTIAL);
1713 }
1714 slab_unlock(page);
1715 stat(c, FREE_SLAB);
1716 discard_slab(s, page);
1717 return;
1718
1719debug:
1720 if (!free_debug_processing(s, page, x, addr))
1721 goto out_unlock;
1722 goto checks_ok;
1723}
1724
1725/*
1726 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1727 * can perform fastpath freeing without additional function calls.
1728 *
1729 * The fastpath is only possible if we are freeing to the current cpu slab
1730 * of this processor. This is typically the case if we have just allocated
1731 * the item before.
1732 *
1733 * If fastpath is not possible then fall back to __slab_free where we deal
1734 * with all sorts of special processing.
1735 */
1736static __always_inline void slab_free(struct kmem_cache *s,
1737 struct page *page, void *x, void *addr)
1738{
1739 void **object = (void *)x;
1740 struct kmem_cache_cpu *c;
1741 unsigned long flags;
1742
1743 local_irq_save(flags);
1744 c = get_cpu_slab(s, smp_processor_id());
1745 debug_check_no_locks_freed(object, c->objsize);
1746 if (!(s->flags & SLAB_DEBUG_OBJECTS))
1747 debug_check_no_obj_freed(object, s->objsize);
1748 if (likely(page == c->page && c->node >= 0)) {
1749 object[c->offset] = c->freelist;
1750 c->freelist = object;
1751 stat(c, FREE_FASTPATH);
1752 } else
1753 __slab_free(s, page, x, addr, c->offset);
1754
1755 local_irq_restore(flags);
1756}
1757
1758void kmem_cache_free(struct kmem_cache *s, void *x)
1759{
1760 struct page *page;
1761
1762 page = virt_to_head_page(x);
1763
1764 slab_free(s, page, x, __builtin_return_address(0));
1765}
1766EXPORT_SYMBOL(kmem_cache_free);
1767
1768/* Figure out on which slab object the object resides */
1769static struct page *get_object_page(const void *x)
1770{
1771 struct page *page = virt_to_head_page(x);
1772
1773 if (!PageSlab(page))
1774 return NULL;
1775
1776 return page;
1777}
1778
1779/*
1780 * Object placement in a slab is made very easy because we always start at
1781 * offset 0. If we tune the size of the object to the alignment then we can
1782 * get the required alignment by putting one properly sized object after
1783 * another.
1784 *
1785 * Notice that the allocation order determines the sizes of the per cpu
1786 * caches. Each processor has always one slab available for allocations.
1787 * Increasing the allocation order reduces the number of times that slabs
1788 * must be moved on and off the partial lists and is therefore a factor in
1789 * locking overhead.
1790 */
1791
1792/*
1793 * Minimum / Maximum order of slab pages. This influences locking overhead
1794 * and slab fragmentation. A higher order reduces the number of partial slabs
1795 * and increases the number of allocations possible without having to
1796 * take the list_lock.
1797 */
1798static int slub_min_order;
114e9e89 1799static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506 1800static int slub_min_objects;
81819f0f
CL
1801
1802/*
1803 * Merge control. If this is set then no merging of slab caches will occur.
672bba3a 1804 * (Could be removed. This was introduced to pacify the merge skeptics.)
81819f0f
CL
1805 */
1806static int slub_nomerge;
1807
81819f0f
CL
1808/*
1809 * Calculate the order of allocation given a slab object size.
1810 *
672bba3a
CL
1811 * The order of allocation has significant impact on performance and other
1812 * system components. Generally order 0 allocations should be preferred since
1813 * order 0 does not cause fragmentation in the page allocator. Larger objects
1814 * can be problematic to put into order 0 slabs because there may be too much
c124f5b5 1815 * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a
CL
1816 * would be wasted.
1817 *
1818 * In order to reach satisfactory performance we must ensure that a minimum
1819 * number of objects is in one slab. Otherwise we may generate too much
1820 * activity on the partial lists which requires taking the list_lock. This is
1821 * less a concern for large slabs though which are rarely used.
81819f0f 1822 *
672bba3a
CL
1823 * slub_max_order specifies the order where we begin to stop considering the
1824 * number of objects in a slab as critical. If we reach slub_max_order then
1825 * we try to keep the page order as low as possible. So we accept more waste
1826 * of space in favor of a small page order.
81819f0f 1827 *
672bba3a
CL
1828 * Higher order allocations also allow the placement of more objects in a
1829 * slab and thereby reduce object handling overhead. If the user has
1830 * requested a higher minimum order then we start with that one instead of
1831 * the smallest order which will fit the object.
81819f0f 1832 */
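/*
 * Worked example (editor's illustration, assuming 4KiB pages and the default
 * waste fraction of 1/16): for an object size of 700 bytes and
 * min_objects = 8 the search starts at order
 * fls(8 * 700 - 1) - PAGE_SHIFT = 13 - 12 = 1.  An order-1 slab holds
 * 8192 / 700 = 11 objects with 492 bytes left over; 492 is below the
 * allowed waste of 8192 / 16 = 512 bytes, so order 1 is accepted.
 */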
5e6d444e
CL
1833static inline int slab_order(int size, int min_objects,
1834 int max_order, int fract_leftover)
81819f0f
CL
1835{
1836 int order;
1837 int rem;
6300ea75 1838 int min_order = slub_min_order;
81819f0f 1839
39b26464
CL
1840 if ((PAGE_SIZE << min_order) / size > 65535)
1841 return get_order(size * 65535) - 1;
1842
6300ea75 1843 for (order = max(min_order,
5e6d444e
CL
1844 fls(min_objects * size - 1) - PAGE_SHIFT);
1845 order <= max_order; order++) {
81819f0f 1846
5e6d444e 1847 unsigned long slab_size = PAGE_SIZE << order;
81819f0f 1848
5e6d444e 1849 if (slab_size < min_objects * size)
81819f0f
CL
1850 continue;
1851
1852 rem = slab_size % size;
1853
5e6d444e 1854 if (rem <= slab_size / fract_leftover)
81819f0f
CL
1855 break;
1856
1857 }
672bba3a 1858
81819f0f
CL
1859 return order;
1860}
1861
5e6d444e
CL
1862static inline int calculate_order(int size)
1863{
1864 int order;
1865 int min_objects;
1866 int fraction;
1867
1868 /*
1869 * Attempt to find best configuration for a slab. This
1870 * works by first attempting to generate a layout with
1871 * the best configuration and backing off gradually.
1872 *
1873 * First we reduce the acceptable waste in a slab. Then
1874 * we reduce the minimum objects required in a slab.
1875 */
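	/*
	 * Example of the backoff (editor's illustration): on an 8-CPU box
	 * with slub_min_objects unset, min_objects starts at
	 * 4 * (fls(8) + 1) = 20 and the waste fraction is tried at 16, 8
	 * and 4 before min_objects is halved through 10, 5 and 2; the
	 * single-object case is handled separately below.
	 */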
1876 min_objects = slub_min_objects;
9b2cd506
CL
1877 if (!min_objects)
1878 min_objects = 4 * (fls(nr_cpu_ids) + 1);
5e6d444e 1879 while (min_objects > 1) {
c124f5b5 1880 fraction = 16;
5e6d444e
CL
1881 while (fraction >= 4) {
1882 order = slab_order(size, min_objects,
1883 slub_max_order, fraction);
1884 if (order <= slub_max_order)
1885 return order;
1886 fraction /= 2;
1887 }
1888 min_objects /= 2;
1889 }
1890
1891 /*
1892 * We were unable to place multiple objects in a slab. Now
1893 * lets see if we can place a single object there.
1894 */
1895 order = slab_order(size, 1, slub_max_order, 1);
1896 if (order <= slub_max_order)
1897 return order;
1898
1899 /*
1900 * Doh this slab cannot be placed using slub_max_order.
1901 */
1902 order = slab_order(size, 1, MAX_ORDER, 1);
1903 if (order <= MAX_ORDER)
1904 return order;
1905 return -ENOSYS;
1906}
1907
81819f0f 1908/*
672bba3a 1909 * Figure out what the alignment of the objects will be.
81819f0f
CL
1910 */
1911static unsigned long calculate_alignment(unsigned long flags,
1912 unsigned long align, unsigned long size)
1913{
1914 /*
6446faa2
CL
1915 * If the user wants hardware cache aligned objects then follow that
1916 * suggestion if the object is sufficiently large.
81819f0f 1917 *
6446faa2
CL
1918 * The hardware cache alignment cannot override the specified
1919 * alignment though. If that is greater, then use it.
81819f0f 1920 */
b6210386
NP
1921 if (flags & SLAB_HWCACHE_ALIGN) {
1922 unsigned long ralign = cache_line_size();
1923 while (size <= ralign / 2)
1924 ralign /= 2;
1925 align = max(align, ralign);
1926 }
81819f0f
CL
1927
1928 if (align < ARCH_SLAB_MINALIGN)
b6210386 1929 align = ARCH_SLAB_MINALIGN;
81819f0f
CL
1930
1931 return ALIGN(align, sizeof(void *));
1932}
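/*
 * Worked example (editor's illustration, assuming 64-byte cache lines):
 * for a 20-byte object created with SLAB_HWCACHE_ALIGN, ralign starts at
 * 64 and is halved once (20 <= 32), giving 32; the object is then aligned
 * to max(align, 32), rounded up to a multiple of sizeof(void *).
 */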
1933
dfb4f096
CL
1934static void init_kmem_cache_cpu(struct kmem_cache *s,
1935 struct kmem_cache_cpu *c)
1936{
1937 c->page = NULL;
a973e9dd 1938 c->freelist = NULL;
dfb4f096 1939 c->node = 0;
42a9fdbb
CL
1940 c->offset = s->offset / sizeof(void *);
1941 c->objsize = s->objsize;
62f75532
PE
1942#ifdef CONFIG_SLUB_STATS
1943 memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
1944#endif
dfb4f096
CL
1945}
1946
81819f0f
CL
1947static void init_kmem_cache_node(struct kmem_cache_node *n)
1948{
1949 n->nr_partial = 0;
81819f0f
CL
1950 spin_lock_init(&n->list_lock);
1951 INIT_LIST_HEAD(&n->partial);
8ab1372f 1952#ifdef CONFIG_SLUB_DEBUG
0f389ec6 1953 atomic_long_set(&n->nr_slabs, 0);
643b1138 1954 INIT_LIST_HEAD(&n->full);
8ab1372f 1955#endif
81819f0f
CL
1956}
1957
4c93c355
CL
1958#ifdef CONFIG_SMP
1959/*
1960 * Per cpu array for per cpu structures.
1961 *
1962 * The per cpu array places all kmem_cache_cpu structures from one processor
1963 * close together meaning that it becomes possible that multiple per cpu
1964 * structures are contained in one cacheline. This may be particularly
1965 * beneficial for the kmalloc caches.
1966 *
1967 * A desktop system typically has around 60-80 slabs. With 100 here we are
1968 * likely able to get per cpu structures for all caches from the array defined
1969 * here. We must be able to cover all kmalloc caches during bootstrap.
1970 *
1971 * If the per cpu array is exhausted then fall back to kmalloc
1972 * of individual cachelines. No sharing is possible then.
1973 */
1974#define NR_KMEM_CACHE_CPU 100
1975
1976static DEFINE_PER_CPU(struct kmem_cache_cpu,
1977 kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1978
1979static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1980static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
1981
1982static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1983 int cpu, gfp_t flags)
1984{
1985 struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
1986
1987 if (c)
1988 per_cpu(kmem_cache_cpu_free, cpu) =
1989 (void *)c->freelist;
1990 else {
1991 /* Table overflow: So allocate ourselves */
1992 c = kmalloc_node(
1993 ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
1994 flags, cpu_to_node(cpu));
1995 if (!c)
1996 return NULL;
1997 }
1998
1999 init_kmem_cache_cpu(s, c);
2000 return c;
2001}
2002
2003static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
2004{
2005 if (c < per_cpu(kmem_cache_cpu, cpu) ||
2006 c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
2007 kfree(c);
2008 return;
2009 }
2010 c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
2011 per_cpu(kmem_cache_cpu_free, cpu) = c;
2012}
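/*
 * Editor's note: the per cpu pool above threads its free entries through
 * the ->freelist member of unused kmem_cache_cpu structures, so
 * kmem_cache_cpu_free acts as the head of a simple singly linked free list.
 * Structures that had to be kmalloc'ed because the pool was exhausted fall
 * outside the static array's address range and are simply kfree'd again.
 */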
2013
2014static void free_kmem_cache_cpus(struct kmem_cache *s)
2015{
2016 int cpu;
2017
2018 for_each_online_cpu(cpu) {
2019 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2020
2021 if (c) {
2022 s->cpu_slab[cpu] = NULL;
2023 free_kmem_cache_cpu(c, cpu);
2024 }
2025 }
2026}
2027
2028static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2029{
2030 int cpu;
2031
2032 for_each_online_cpu(cpu) {
2033 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2034
2035 if (c)
2036 continue;
2037
2038 c = alloc_kmem_cache_cpu(s, cpu, flags);
2039 if (!c) {
2040 free_kmem_cache_cpus(s);
2041 return 0;
2042 }
2043 s->cpu_slab[cpu] = c;
2044 }
2045 return 1;
2046}
2047
2048/*
2049 * Initialize the per cpu array.
2050 */
2051static void init_alloc_cpu_cpu(int cpu)
2052{
2053 int i;
2054
2055 if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
2056 return;
2057
2058 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2059 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2060
2061 cpu_set(cpu, kmem_cach_cpu_free_init_once);
2062}
2063
2064static void __init init_alloc_cpu(void)
2065{
2066 int cpu;
2067
2068 for_each_online_cpu(cpu)
2069 init_alloc_cpu_cpu(cpu);
2070}
2071
2072#else
2073static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
2074static inline void init_alloc_cpu(void) {}
2075
2076static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2077{
2078 init_kmem_cache_cpu(s, &s->cpu_slab);
2079 return 1;
2080}
2081#endif
2082
81819f0f
CL
2083#ifdef CONFIG_NUMA
2084/*
2085 * No kmalloc_node yet so do it by hand. We know that this is the first
2086 * slab on the node for this slabcache. There are no concurrent accesses
2087 * possible.
2088 *
2089 * Note that this function only works on the kmalloc_node_cache
4c93c355
CL
2090 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2091 * memory on a fresh node that has no slab structures yet.
81819f0f 2092 */
1cd7daa5
AB
2093static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
2094 int node)
81819f0f
CL
2095{
2096 struct page *page;
2097 struct kmem_cache_node *n;
ba84c73c 2098 unsigned long flags;
81819f0f
CL
2099
2100 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2101
a2f92ee7 2102 page = new_slab(kmalloc_caches, gfpflags, node);
81819f0f
CL
2103
2104 BUG_ON(!page);
a2f92ee7
CL
2105 if (page_to_nid(page) != node) {
2106 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2107 "node %d\n", node);
2108 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2109 "in order to be able to continue\n");
2110 }
2111
81819f0f
CL
2112 n = page->freelist;
2113 BUG_ON(!n);
2114 page->freelist = get_freepointer(kmalloc_caches, n);
2115 page->inuse++;
2116 kmalloc_caches->node[node] = n;
8ab1372f 2117#ifdef CONFIG_SLUB_DEBUG
d45f39cb
CL
2118 init_object(kmalloc_caches, n, 1);
2119 init_tracking(kmalloc_caches, n);
8ab1372f 2120#endif
81819f0f 2121 init_kmem_cache_node(n);
205ab99d 2122 inc_slabs_node(kmalloc_caches, node, page->objects);
6446faa2 2123
ba84c73c 2124 /*
2125 * lockdep requires consistent irq usage for each lock
2126 * so even though there cannot be a race this early in
2127 * the boot sequence, we still disable irqs.
2128 */
2129 local_irq_save(flags);
7c2e132c 2130 add_partial(n, page, 0);
ba84c73c 2131 local_irq_restore(flags);
81819f0f
CL
2132 return n;
2133}
2134
2135static void free_kmem_cache_nodes(struct kmem_cache *s)
2136{
2137 int node;
2138
f64dc58c 2139 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2140 struct kmem_cache_node *n = s->node[node];
2141 if (n && n != &s->local_node)
2142 kmem_cache_free(kmalloc_caches, n);
2143 s->node[node] = NULL;
2144 }
2145}
2146
2147static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2148{
2149 int node;
2150 int local_node;
2151
2152 if (slab_state >= UP)
2153 local_node = page_to_nid(virt_to_page(s));
2154 else
2155 local_node = 0;
2156
f64dc58c 2157 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2158 struct kmem_cache_node *n;
2159
2160 if (local_node == node)
2161 n = &s->local_node;
2162 else {
2163 if (slab_state == DOWN) {
2164 n = early_kmem_cache_node_alloc(gfpflags,
2165 node);
2166 continue;
2167 }
2168 n = kmem_cache_alloc_node(kmalloc_caches,
2169 gfpflags, node);
2170
2171 if (!n) {
2172 free_kmem_cache_nodes(s);
2173 return 0;
2174 }
2175
2176 }
2177 s->node[node] = n;
2178 init_kmem_cache_node(n);
2179 }
2180 return 1;
2181}
2182#else
2183static void free_kmem_cache_nodes(struct kmem_cache *s)
2184{
2185}
2186
2187static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2188{
2189 init_kmem_cache_node(&s->local_node);
2190 return 1;
2191}
2192#endif
2193
2194/*
2195 * calculate_sizes() determines the order and the distribution of data within
2196 * a slab object.
2197 */
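/*
 * Example layout (editor's illustration, 64-bit, no constructor): for a
 * 32-byte object created with SLAB_RED_ZONE | SLAB_STORE_USER the size
 * grows by one word of red zone after the object (s->inuse = 40), the
 * free pointer stays at offset 0 inside the object, two struct track
 * records are appended for alloc/free tracking, one more word of padding
 * catches writes before the start of the next object, and the result is
 * then rounded up to the calculated alignment.
 */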
06b285dc 2198static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0f
CL
2199{
2200 unsigned long flags = s->flags;
2201 unsigned long size = s->objsize;
2202 unsigned long align = s->align;
834f3d11 2203 int order;
81819f0f 2204
d8b42bf5
CL
2205 /*
2206 * Round up object size to the next word boundary. We can only
2207 * place the free pointer at word boundaries and this determines
2208 * the possible location of the free pointer.
2209 */
2210 size = ALIGN(size, sizeof(void *));
2211
2212#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2213 /*
2214 * Determine if we can poison the object itself. If the user of
2215 * the slab may touch the object after free or before allocation
2216 * then we should never poison the object itself.
2217 */
2218 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f 2219 !s->ctor)
81819f0f
CL
2220 s->flags |= __OBJECT_POISON;
2221 else
2222 s->flags &= ~__OBJECT_POISON;
2223
81819f0f
CL
2224
2225 /*
672bba3a 2226 * If we are Redzoning then check if there is some space between the
81819f0f 2227 * end of the object and the free pointer. If not then add an
672bba3a 2228 * additional word to have some bytes to store Redzone information.
81819f0f
CL
2229 */
2230 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2231 size += sizeof(void *);
41ecc55b 2232#endif
81819f0f
CL
2233
2234 /*
672bba3a
CL
2235 * With that we have determined the number of bytes in actual use
2236 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
2237 */
2238 s->inuse = size;
2239
2240 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f 2241 s->ctor)) {
81819f0f
CL
2242 /*
2243 * Relocate free pointer after the object if it is not
2244 * permitted to overwrite the first word of the object on
2245 * kmem_cache_free.
2246 *
2247 * This is the case if we do RCU, have a constructor or
2248 * destructor or are poisoning the objects.
2249 */
2250 s->offset = size;
2251 size += sizeof(void *);
2252 }
2253
c12b3c62 2254#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2255 if (flags & SLAB_STORE_USER)
2256 /*
2257 * Need to store information about allocs and frees after
2258 * the object.
2259 */
2260 size += 2 * sizeof(struct track);
2261
be7b3fbc 2262 if (flags & SLAB_RED_ZONE)
81819f0f
CL
2263 /*
2264 * Add some empty padding so that we can catch
2265 * overwrites from earlier objects rather than let
2266 * tracking information or the free pointer be
2267 * corrupted if a user writes before the start
2268 * of the object.
2269 */
2270 size += sizeof(void *);
41ecc55b 2271#endif
672bba3a 2272
81819f0f
CL
2273 /*
2274 * Determine the alignment based on various parameters that the
65c02d4c
CL
2275 * user specified and the dynamic determination of cache line size
2276 * on bootup.
81819f0f
CL
2277 */
2278 align = calculate_alignment(flags, align, s->objsize);
2279
2280 /*
2281 * SLUB stores one object immediately after another beginning from
2282 * offset 0. In order to align the objects we have to simply size
2283 * each object to conform to the alignment.
2284 */
2285 size = ALIGN(size, align);
2286 s->size = size;
06b285dc
CL
2287 if (forced_order >= 0)
2288 order = forced_order;
2289 else
2290 order = calculate_order(size);
81819f0f 2291
834f3d11 2292 if (order < 0)
81819f0f
CL
2293 return 0;
2294
b7a49f0d 2295 s->allocflags = 0;
834f3d11 2296 if (order)
b7a49f0d
CL
2297 s->allocflags |= __GFP_COMP;
2298
2299 if (s->flags & SLAB_CACHE_DMA)
2300 s->allocflags |= SLUB_DMA;
2301
2302 if (s->flags & SLAB_RECLAIM_ACCOUNT)
2303 s->allocflags |= __GFP_RECLAIMABLE;
2304
81819f0f
CL
2305 /*
2306 * Determine the number of objects per slab
2307 */
834f3d11 2308 s->oo = oo_make(order, size);
65c3376a 2309 s->min = oo_make(get_order(size), size);
205ab99d
CL
2310 if (oo_objects(s->oo) > oo_objects(s->max))
2311 s->max = s->oo;
81819f0f 2312
834f3d11 2313 return !!oo_objects(s->oo);
81819f0f
CL
2314
2315}
2316
81819f0f
CL
2317static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2318 const char *name, size_t size,
2319 size_t align, unsigned long flags,
4ba9b9d0 2320 void (*ctor)(struct kmem_cache *, void *))
81819f0f
CL
2321{
2322 memset(s, 0, kmem_size);
2323 s->name = name;
2324 s->ctor = ctor;
81819f0f 2325 s->objsize = size;
81819f0f 2326 s->align = align;
ba0268a8 2327 s->flags = kmem_cache_flags(size, flags, name, ctor);
81819f0f 2328
06b285dc 2329 if (!calculate_sizes(s, -1))
81819f0f
CL
2330 goto error;
2331
2332 s->refcount = 1;
2333#ifdef CONFIG_NUMA
9824601e 2334 s->remote_node_defrag_ratio = 100;
81819f0f 2335#endif
dfb4f096
CL
2336 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2337 goto error;
81819f0f 2338
dfb4f096 2339 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
81819f0f 2340 return 1;
4c93c355 2341 free_kmem_cache_nodes(s);
81819f0f
CL
2342error:
2343 if (flags & SLAB_PANIC)
2344 panic("Cannot create slab %s size=%lu realsize=%u "
2345 "order=%u offset=%u flags=%lx\n",
834f3d11 2346 s->name, (unsigned long)size, s->size, oo_order(s->oo),
81819f0f
CL
2347 s->offset, flags);
2348 return 0;
2349}
81819f0f
CL
2350
2351/*
2352 * Check if a given pointer is valid
2353 */
2354int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2355{
06428780 2356 struct page *page;
81819f0f
CL
2357
2358 page = get_object_page(object);
2359
2360 if (!page || s != page->slab)
2361 /* No slab or wrong slab */
2362 return 0;
2363
abcd08a6 2364 if (!check_valid_pointer(s, page, object))
81819f0f
CL
2365 return 0;
2366
2367 /*
2368 * We could also check if the object is on the slab's freelist.
2369 * But this would be too expensive and it seems that the main
6446faa2 2370 * purpose of kmem_ptr_validate() is to check if the object belongs
81819f0f
CL
2371 * to a certain slab.
2372 */
2373 return 1;
2374}
2375EXPORT_SYMBOL(kmem_ptr_validate);
2376
2377/*
2378 * Determine the size of a slab object
2379 */
2380unsigned int kmem_cache_size(struct kmem_cache *s)
2381{
2382 return s->objsize;
2383}
2384EXPORT_SYMBOL(kmem_cache_size);
2385
2386const char *kmem_cache_name(struct kmem_cache *s)
2387{
2388 return s->name;
2389}
2390EXPORT_SYMBOL(kmem_cache_name);
2391
33b12c38
CL
2392static void list_slab_objects(struct kmem_cache *s, struct page *page,
2393 const char *text)
2394{
2395#ifdef CONFIG_SLUB_DEBUG
2396 void *addr = page_address(page);
2397 void *p;
2398 DECLARE_BITMAP(map, page->objects);
2399
2400 bitmap_zero(map, page->objects);
2401 slab_err(s, page, "%s", text);
2402 slab_lock(page);
2403 for_each_free_object(p, s, page->freelist)
2404 set_bit(slab_index(p, s, addr), map);
2405
2406 for_each_object(p, s, addr, page->objects) {
2407
2408 if (!test_bit(slab_index(p, s, addr), map)) {
2409 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2410 p, p - addr);
2411 print_tracking(s, p);
2412 }
2413 }
2414 slab_unlock(page);
2415#endif
2416}
2417
81819f0f 2418/*
599870b1 2419 * Attempt to free all partial slabs on a node.
81819f0f 2420 */
599870b1 2421static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0f 2422{
81819f0f
CL
2423 unsigned long flags;
2424 struct page *page, *h;
2425
2426 spin_lock_irqsave(&n->list_lock, flags);
33b12c38 2427 list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0f
CL
2428 if (!page->inuse) {
2429 list_del(&page->lru);
2430 discard_slab(s, page);
599870b1 2431 n->nr_partial--;
33b12c38
CL
2432 } else {
2433 list_slab_objects(s, page,
2434 "Objects remaining on kmem_cache_close()");
599870b1 2435 }
33b12c38 2436 }
81819f0f 2437 spin_unlock_irqrestore(&n->list_lock, flags);
81819f0f
CL
2438}
2439
2440/*
672bba3a 2441 * Release all resources used by a slab cache.
81819f0f 2442 */
0c710013 2443static inline int kmem_cache_close(struct kmem_cache *s)
81819f0f
CL
2444{
2445 int node;
2446
2447 flush_all(s);
2448
2449 /* Attempt to free all objects */
4c93c355 2450 free_kmem_cache_cpus(s);
f64dc58c 2451 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2452 struct kmem_cache_node *n = get_node(s, node);
2453
599870b1
CL
2454 free_partial(s, n);
2455 if (n->nr_partial || slabs_node(s, node))
81819f0f
CL
2456 return 1;
2457 }
2458 free_kmem_cache_nodes(s);
2459 return 0;
2460}
2461
2462/*
2463 * Close a cache and release the kmem_cache structure
2464 * (must be used for caches created using kmem_cache_create)
2465 */
2466void kmem_cache_destroy(struct kmem_cache *s)
2467{
2468 down_write(&slub_lock);
2469 s->refcount--;
2470 if (!s->refcount) {
2471 list_del(&s->list);
a0e1d1be 2472 up_write(&slub_lock);
d629d819
PE
2473 if (kmem_cache_close(s)) {
2474 printk(KERN_ERR "SLUB %s: %s called for cache that "
2475 "still has objects.\n", s->name, __func__);
2476 dump_stack();
2477 }
81819f0f 2478 sysfs_slab_remove(s);
a0e1d1be
CL
2479 } else
2480 up_write(&slub_lock);
81819f0f
CL
2481}
2482EXPORT_SYMBOL(kmem_cache_destroy);
2483
2484/********************************************************************
2485 * Kmalloc subsystem
2486 *******************************************************************/
2487
331dc558 2488struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
81819f0f
CL
2489EXPORT_SYMBOL(kmalloc_caches);
2490
81819f0f
CL
2491static int __init setup_slub_min_order(char *str)
2492{
06428780 2493 get_option(&str, &slub_min_order);
81819f0f
CL
2494
2495 return 1;
2496}
2497
2498__setup("slub_min_order=", setup_slub_min_order);
2499
2500static int __init setup_slub_max_order(char *str)
2501{
06428780 2502 get_option(&str, &slub_max_order);
81819f0f
CL
2503
2504 return 1;
2505}
2506
2507__setup("slub_max_order=", setup_slub_max_order);
2508
2509static int __init setup_slub_min_objects(char *str)
2510{
06428780 2511 get_option(&str, &slub_min_objects);
81819f0f
CL
2512
2513 return 1;
2514}
2515
2516__setup("slub_min_objects=", setup_slub_min_objects);
2517
2518static int __init setup_slub_nomerge(char *str)
2519{
2520 slub_nomerge = 1;
2521 return 1;
2522}
2523
2524__setup("slub_nomerge", setup_slub_nomerge);
2525
81819f0f
CL
2526static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2527 const char *name, int size, gfp_t gfp_flags)
2528{
2529 unsigned int flags = 0;
2530
2531 if (gfp_flags & SLUB_DMA)
2532 flags = SLAB_CACHE_DMA;
2533
2534 down_write(&slub_lock);
2535 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
319d1e24 2536 flags, NULL))
81819f0f
CL
2537 goto panic;
2538
2539 list_add(&s->list, &slab_caches);
2540 up_write(&slub_lock);
2541 if (sysfs_slab_add(s))
2542 goto panic;
2543 return s;
2544
2545panic:
2546 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2547}
2548
2e443fd0 2549#ifdef CONFIG_ZONE_DMA
4097d601 2550static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
1ceef402
CL
2551
2552static void sysfs_add_func(struct work_struct *w)
2553{
2554 struct kmem_cache *s;
2555
2556 down_write(&slub_lock);
2557 list_for_each_entry(s, &slab_caches, list) {
2558 if (s->flags & __SYSFS_ADD_DEFERRED) {
2559 s->flags &= ~__SYSFS_ADD_DEFERRED;
2560 sysfs_slab_add(s);
2561 }
2562 }
2563 up_write(&slub_lock);
2564}
2565
2566static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2567
2e443fd0
CL
2568static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2569{
2570 struct kmem_cache *s;
2e443fd0
CL
2571 char *text;
2572 size_t realsize;
2573
2574 s = kmalloc_caches_dma[index];
2575 if (s)
2576 return s;
2577
2578 /* Dynamically create dma cache */
1ceef402
CL
2579 if (flags & __GFP_WAIT)
2580 down_write(&slub_lock);
2581 else {
2582 if (!down_write_trylock(&slub_lock))
2583 goto out;
2584 }
2585
2586 if (kmalloc_caches_dma[index])
2587 goto unlock_out;
2e443fd0 2588
7b55f620 2589 realsize = kmalloc_caches[index].objsize;
3adbefee
IM
2590 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2591 (unsigned int)realsize);
1ceef402
CL
2592 s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2593
2594 if (!s || !text || !kmem_cache_open(s, flags, text,
2595 realsize, ARCH_KMALLOC_MINALIGN,
2596 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2597 kfree(s);
2598 kfree(text);
2599 goto unlock_out;
dfce8648 2600 }
1ceef402
CL
2601
2602 list_add(&s->list, &slab_caches);
2603 kmalloc_caches_dma[index] = s;
2604
2605 schedule_work(&sysfs_add_work);
2606
2607unlock_out:
dfce8648 2608 up_write(&slub_lock);
1ceef402 2609out:
dfce8648 2610 return kmalloc_caches_dma[index];
2e443fd0
CL
2611}
2612#endif
2613
f1b26339
CL
2614/*
2615 * Conversion table for small slab sizes / 8 to the index in the
2616 * kmalloc array. This is necessary for slabs < 192 since we have non power
2617 * of two cache sizes there. The size of larger slabs can be determined using
2618 * fls.
2619 */
2620static s8 size_index[24] = {
2621 3, /* 8 */
2622 4, /* 16 */
2623 5, /* 24 */
2624 5, /* 32 */
2625 6, /* 40 */
2626 6, /* 48 */
2627 6, /* 56 */
2628 6, /* 64 */
2629 1, /* 72 */
2630 1, /* 80 */
2631 1, /* 88 */
2632 1, /* 96 */
2633 7, /* 104 */
2634 7, /* 112 */
2635 7, /* 120 */
2636 7, /* 128 */
2637 2, /* 136 */
2638 2, /* 144 */
2639 2, /* 152 */
2640 2, /* 160 */
2641 2, /* 168 */
2642 2, /* 176 */
2643 2, /* 184 */
2644 2 /* 192 */
2645};
2646
81819f0f
CL
2647static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2648{
f1b26339 2649 int index;
81819f0f 2650
f1b26339
CL
2651 if (size <= 192) {
2652 if (!size)
2653 return ZERO_SIZE_PTR;
81819f0f 2654
f1b26339 2655 index = size_index[(size - 1) / 8];
aadb4bc4 2656 } else
f1b26339 2657 index = fls(size - 1);
81819f0f
CL
2658
2659#ifdef CONFIG_ZONE_DMA
f1b26339 2660 if (unlikely((flags & SLUB_DMA)))
2e443fd0 2661 return dma_kmalloc_cache(index, flags);
f1b26339 2662
81819f0f
CL
2663#endif
2664 return &kmalloc_caches[index];
2665}
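/*
 * Worked example (editor's illustration): kmalloc(100, GFP_KERNEL) looks
 * up size_index[(100 - 1) / 8] = size_index[12] = 7 and is served from
 * the kmalloc-128 cache; kmalloc(1000, GFP_KERNEL) is above 192 bytes,
 * so the index is fls(1000 - 1) = 10 and the kmalloc-1024 cache is used.
 */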
2666
2667void *__kmalloc(size_t size, gfp_t flags)
2668{
aadb4bc4 2669 struct kmem_cache *s;
81819f0f 2670
331dc558 2671 if (unlikely(size > PAGE_SIZE))
eada35ef 2672 return kmalloc_large(size, flags);
aadb4bc4
CL
2673
2674 s = get_slab(size, flags);
2675
2676 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
2677 return s;
2678
ce15fea8 2679 return slab_alloc(s, flags, -1, __builtin_return_address(0));
81819f0f
CL
2680}
2681EXPORT_SYMBOL(__kmalloc);
2682
f619cfe1
CL
2683static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2684{
2685 struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2686 get_order(size));
2687
2688 if (page)
2689 return page_address(page);
2690 else
2691 return NULL;
2692}
2693
81819f0f
CL
2694#ifdef CONFIG_NUMA
2695void *__kmalloc_node(size_t size, gfp_t flags, int node)
2696{
aadb4bc4 2697 struct kmem_cache *s;
81819f0f 2698
331dc558 2699 if (unlikely(size > PAGE_SIZE))
f619cfe1 2700 return kmalloc_large_node(size, flags, node);
aadb4bc4
CL
2701
2702 s = get_slab(size, flags);
2703
2704 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
2705 return s;
2706
ce15fea8 2707 return slab_alloc(s, flags, node, __builtin_return_address(0));
81819f0f
CL
2708}
2709EXPORT_SYMBOL(__kmalloc_node);
2710#endif
2711
2712size_t ksize(const void *object)
2713{
272c1d21 2714 struct page *page;
81819f0f
CL
2715 struct kmem_cache *s;
2716
ef8b4520 2717 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
2718 return 0;
2719
294a80a8 2720 page = virt_to_head_page(object);
294a80a8 2721
76994412
PE
2722 if (unlikely(!PageSlab(page))) {
2723 WARN_ON(!PageCompound(page));
294a80a8 2724 return PAGE_SIZE << compound_order(page);
76994412 2725 }
81819f0f 2726 s = page->slab;
81819f0f 2727
ae20bfda 2728#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2729 /*
2730 * Debugging requires use of the padding between object
2731 * and whatever may come after it.
2732 */
2733 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2734 return s->objsize;
2735
ae20bfda 2736#endif
81819f0f
CL
2737 /*
2738 * If we have the need to store the freelist pointer
2739 * back there or track user information then we can
2740 * only use the space before that information.
2741 */
2742 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2743 return s->inuse;
81819f0f
CL
2744 /*
2745 * Else we can use all the padding etc for the allocation
2746 */
2747 return s->size;
2748}
2749EXPORT_SYMBOL(ksize);
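/*
 * Example (editor's illustration): in a plain kmalloc-64 cache without any
 * debug options, kmalloc(40, GFP_KERNEL) followed by ksize() on the result
 * reports 64, since the whole slot up to s->size is usable by the caller.
 */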
2750
2751void kfree(const void *x)
2752{
81819f0f 2753 struct page *page;
5bb983b0 2754 void *object = (void *)x;
81819f0f 2755
2408c550 2756 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
2757 return;
2758
b49af68f 2759 page = virt_to_head_page(x);
aadb4bc4 2760 if (unlikely(!PageSlab(page))) {
0937502a 2761 BUG_ON(!PageCompound(page));
aadb4bc4
CL
2762 put_page(page);
2763 return;
2764 }
5bb983b0 2765 slab_free(page->slab, page, object, __builtin_return_address(0));
81819f0f
CL
2766}
2767EXPORT_SYMBOL(kfree);
2768
2086d26a 2769/*
672bba3a
CL
2770 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2771 * the remaining slabs by the number of items in use. The slabs with the
2772 * most items in use come first. New allocations will then fill those up
2773 * and thus they can be removed from the partial lists.
2774 *
2775 * The slabs with the least items are placed last. This results in them
2776 * being allocated from last, increasing the chance that the last objects
2777 * are freed in them.
2086d26a
CL
2778 */
2779int kmem_cache_shrink(struct kmem_cache *s)
2780{
2781 int node;
2782 int i;
2783 struct kmem_cache_node *n;
2784 struct page *page;
2785 struct page *t;
205ab99d 2786 int objects = oo_objects(s->max);
2086d26a 2787 struct list_head *slabs_by_inuse =
834f3d11 2788 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2086d26a
CL
2789 unsigned long flags;
2790
2791 if (!slabs_by_inuse)
2792 return -ENOMEM;
2793
2794 flush_all(s);
f64dc58c 2795 for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a
CL
2796 n = get_node(s, node);
2797
2798 if (!n->nr_partial)
2799 continue;
2800
834f3d11 2801 for (i = 0; i < objects; i++)
2086d26a
CL
2802 INIT_LIST_HEAD(slabs_by_inuse + i);
2803
2804 spin_lock_irqsave(&n->list_lock, flags);
2805
2806 /*
672bba3a 2807 * Build lists indexed by the items in use in each slab.
2086d26a 2808 *
672bba3a
CL
2809 * Note that concurrent frees may occur while we hold the
2810 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
2811 */
2812 list_for_each_entry_safe(page, t, &n->partial, lru) {
2813 if (!page->inuse && slab_trylock(page)) {
2814 /*
2815 * Must hold slab lock here because slab_free
2816 * may have freed the last object and be
2817 * waiting to release the slab.
2818 */
2819 list_del(&page->lru);
2820 n->nr_partial--;
2821 slab_unlock(page);
2822 discard_slab(s, page);
2823 } else {
fcda3d89
CL
2824 list_move(&page->lru,
2825 slabs_by_inuse + page->inuse);
2086d26a
CL
2826 }
2827 }
2828
2086d26a 2829 /*
672bba3a
CL
2830 * Rebuild the partial list with the slabs filled up most
2831 * first and the least used slabs at the end.
2086d26a 2832 */
834f3d11 2833 for (i = objects - 1; i >= 0; i--)
2086d26a
CL
2834 list_splice(slabs_by_inuse + i, n->partial.prev);
2835
2086d26a
CL
2836 spin_unlock_irqrestore(&n->list_lock, flags);
2837 }
2838
2839 kfree(slabs_by_inuse);
2840 return 0;
2841}
2842EXPORT_SYMBOL(kmem_cache_shrink);
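/*
 * Usage sketch (editor's example): a subsystem that has just released a
 * large number of objects can hand the now empty slabs back to the page
 * allocator with
 *
 *	kmem_cache_shrink(foo_cachep);
 *
 * where "foo_cachep" is the hypothetical cache from the earlier example.
 * The memory hotplug callback below does the same for every cache before
 * memory is taken offline.
 */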
2843
b9049e23
YG
2844#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2845static int slab_mem_going_offline_callback(void *arg)
2846{
2847 struct kmem_cache *s;
2848
2849 down_read(&slub_lock);
2850 list_for_each_entry(s, &slab_caches, list)
2851 kmem_cache_shrink(s);
2852 up_read(&slub_lock);
2853
2854 return 0;
2855}
2856
2857static void slab_mem_offline_callback(void *arg)
2858{
2859 struct kmem_cache_node *n;
2860 struct kmem_cache *s;
2861 struct memory_notify *marg = arg;
2862 int offline_node;
2863
2864 offline_node = marg->status_change_nid;
2865
2866 /*
2867 * If the node still has available memory, we still need its
2868 * kmem_cache_node, so there is nothing to tear down here.
2869 */
2870 if (offline_node < 0)
2871 return;
2872
2873 down_read(&slub_lock);
2874 list_for_each_entry(s, &slab_caches, list) {
2875 n = get_node(s, offline_node);
2876 if (n) {
2877 /*
2878 * if n->nr_slabs > 0, slabs still exist on the node
2879 * that is going down. We were unable to free them,
2880 * and the offline_pages() function shouldn't call this
2881 * callback. So, we must fail.
2882 */
0f389ec6 2883 BUG_ON(slabs_node(s, offline_node));
b9049e23
YG
2884
2885 s->node[offline_node] = NULL;
2886 kmem_cache_free(kmalloc_caches, n);
2887 }
2888 }
2889 up_read(&slub_lock);
2890}
2891
2892static int slab_mem_going_online_callback(void *arg)
2893{
2894 struct kmem_cache_node *n;
2895 struct kmem_cache *s;
2896 struct memory_notify *marg = arg;
2897 int nid = marg->status_change_nid;
2898 int ret = 0;
2899
2900 /*
2901 * If the node's memory is already available, then kmem_cache_node is
2902 * already created. Nothing to do.
2903 */
2904 if (nid < 0)
2905 return 0;
2906
2907 /*
0121c619 2908 * We are bringing a node online. No memory is available yet. We must
b9049e23
YG
2909 * allocate a kmem_cache_node structure in order to bring the node
2910 * online.
2911 */
2912 down_read(&slub_lock);
2913 list_for_each_entry(s, &slab_caches, list) {
2914 /*
2915 * XXX: kmem_cache_alloc_node will fallback to other nodes
2916 * since memory is not yet available from the node that
2917 * is brought up.
2918 */
2919 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2920 if (!n) {
2921 ret = -ENOMEM;
2922 goto out;
2923 }
2924 init_kmem_cache_node(n);
2925 s->node[nid] = n;
2926 }
2927out:
2928 up_read(&slub_lock);
2929 return ret;
2930}
2931
2932static int slab_memory_callback(struct notifier_block *self,
2933 unsigned long action, void *arg)
2934{
2935 int ret = 0;
2936
2937 switch (action) {
2938 case MEM_GOING_ONLINE:
2939 ret = slab_mem_going_online_callback(arg);
2940 break;
2941 case MEM_GOING_OFFLINE:
2942 ret = slab_mem_going_offline_callback(arg);
2943 break;
2944 case MEM_OFFLINE:
2945 case MEM_CANCEL_ONLINE:
2946 slab_mem_offline_callback(arg);
2947 break;
2948 case MEM_ONLINE:
2949 case MEM_CANCEL_OFFLINE:
2950 break;
2951 }
2952
2953 ret = notifier_from_errno(ret);
2954 return ret;
2955}
2956
2957#endif /* CONFIG_MEMORY_HOTPLUG */
2958
81819f0f
CL
2959/********************************************************************
2960 * Basic setup of slabs
2961 *******************************************************************/
2962
2963void __init kmem_cache_init(void)
2964{
2965 int i;
4b356be0 2966 int caches = 0;
81819f0f 2967
4c93c355
CL
2968 init_alloc_cpu();
2969
81819f0f
CL
2970#ifdef CONFIG_NUMA
2971 /*
2972 * Must first have the slab cache available for the allocations of the
672bba3a 2973 * struct kmem_cache_node's. There is special bootstrap code in
81819f0f
CL
2974 * kmem_cache_open for slab_state == DOWN.
2975 */
2976 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2977 sizeof(struct kmem_cache_node), GFP_KERNEL);
8ffa6875 2978 kmalloc_caches[0].refcount = -1;
4b356be0 2979 caches++;
b9049e23 2980
0c40ba4f 2981 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
81819f0f
CL
2982#endif
2983
2984 /* Able to allocate the per node structures */
2985 slab_state = PARTIAL;
2986
2987 /* Caches that are not of the two-to-the-power-of size */
4b356be0
CL
2988 if (KMALLOC_MIN_SIZE <= 64) {
2989 create_kmalloc_cache(&kmalloc_caches[1],
81819f0f 2990 "kmalloc-96", 96, GFP_KERNEL);
4b356be0 2991 caches++;
4b356be0 2992 create_kmalloc_cache(&kmalloc_caches[2],
81819f0f 2993 "kmalloc-192", 192, GFP_KERNEL);
4b356be0
CL
2994 caches++;
2995 }
81819f0f 2996
331dc558 2997 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
81819f0f
CL
2998 create_kmalloc_cache(&kmalloc_caches[i],
2999 "kmalloc", 1 << i, GFP_KERNEL);
4b356be0
CL
3000 caches++;
3001 }
81819f0f 3002
f1b26339
CL
3003
3004 /*
3005 * Patch up the size_index table if we have strange large alignment
3006 * requirements for the kmalloc array. This is only the case for
6446faa2 3007 * MIPS, it seems. The standard arches will not generate any code here.
f1b26339
CL
3008 *
3009 * Largest permitted alignment is 256 bytes due to the way we
3010 * handle the index determination for the smaller caches.
3011 *
3012 * Make sure that nothing crazy happens if someone starts tinkering
3013 * around with ARCH_KMALLOC_MINALIGN
3014 */
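	/*
	 * Example (editor's illustration): with ARCH_KMALLOC_MINALIGN = 64,
	 * KMALLOC_MIN_SIZE is 64 and the loop below rewrites the entries for
	 * sizes 8..56 so that they all map to the 64-byte cache
	 * (KMALLOC_SHIFT_LOW = 6).
	 */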
3015 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
3016 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
3017
12ad6843 3018 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
f1b26339
CL
3019 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
3020
41d54d3b
CL
3021 if (KMALLOC_MIN_SIZE == 128) {
3022 /*
3023 * The 192 byte sized cache is not used if the alignment
3024 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3025 * instead.
3026 */
3027 for (i = 128 + 8; i <= 192; i += 8)
3028 size_index[(i - 1) / 8] = 8;
3029 }
3030
81819f0f
CL
3031 slab_state = UP;
3032
3033 /* Provide the correct kmalloc names now that the caches are up */
331dc558 3034 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
81819f0f
CL
3035 kmalloc_caches[i].name =
3036 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
3037
3038#ifdef CONFIG_SMP
3039 register_cpu_notifier(&slab_notifier);
4c93c355
CL
3040 kmem_size = offsetof(struct kmem_cache, cpu_slab) +
3041 nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
3042#else
3043 kmem_size = sizeof(struct kmem_cache);
81819f0f
CL
3044#endif
3045
3adbefee
IM
3046 printk(KERN_INFO
3047 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be0
CL
3048 " CPUs=%d, Nodes=%d\n",
3049 caches, cache_line_size(),
81819f0f
CL
3050 slub_min_order, slub_max_order, slub_min_objects,
3051 nr_cpu_ids, nr_node_ids);
3052}
3053
3054/*
3055 * Find a mergeable slab cache
3056 */
3057static int slab_unmergeable(struct kmem_cache *s)
3058{
3059 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3060 return 1;
3061
c59def9f 3062 if (s->ctor)
81819f0f
CL
3063 return 1;
3064
8ffa6875
CL
3065 /*
3066 * We may have set a slab to be unmergeable during bootstrap.
3067 */
3068 if (s->refcount < 0)
3069 return 1;
3070
81819f0f
CL
3071 return 0;
3072}
3073
3074static struct kmem_cache *find_mergeable(size_t size,
ba0268a8 3075 size_t align, unsigned long flags, const char *name,
4ba9b9d0 3076 void (*ctor)(struct kmem_cache *, void *))
81819f0f 3077{
5b95a4ac 3078 struct kmem_cache *s;
81819f0f
CL
3079
3080 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3081 return NULL;
3082
c59def9f 3083 if (ctor)
81819f0f
CL
3084 return NULL;
3085
3086 size = ALIGN(size, sizeof(void *));
3087 align = calculate_alignment(flags, align, size);
3088 size = ALIGN(size, align);
ba0268a8 3089 flags = kmem_cache_flags(size, flags, name, NULL);
81819f0f 3090
5b95a4ac 3091 list_for_each_entry(s, &slab_caches, list) {
81819f0f
CL
3092 if (slab_unmergeable(s))
3093 continue;
3094
3095 if (size > s->size)
3096 continue;
3097
ba0268a8 3098 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0f
CL
3099 continue;
3100 /*
3101 * Check if alignment is compatible.
3102 * Courtesy of Adrian Drzewiecki
3103 */
06428780 3104 if ((s->size & ~(align - 1)) != s->size)
81819f0f
CL
3105 continue;
3106
3107 if (s->size - size >= sizeof(void *))
3108 continue;
3109
3110 return s;
3111 }
3112 return NULL;
3113}
3114
3115struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3116 size_t align, unsigned long flags,
4ba9b9d0 3117 void (*ctor)(struct kmem_cache *, void *))
81819f0f
CL
3118{
3119 struct kmem_cache *s;
3120
3121 down_write(&slub_lock);
ba0268a8 3122 s = find_mergeable(size, align, flags, name, ctor);
81819f0f 3123 if (s) {
42a9fdbb
CL
3124 int cpu;
3125
81819f0f
CL
3126 s->refcount++;
3127 /*
3128 * Adjust the object sizes so that we clear
3129 * the complete object on kzalloc.
3130 */
3131 s->objsize = max(s->objsize, (int)size);
42a9fdbb
CL
3132
3133 /*
3134 * And then we need to update the object size in the
3135 * per cpu structures
3136 */
3137 for_each_online_cpu(cpu)
3138 get_cpu_slab(s, cpu)->objsize = s->objsize;
6446faa2 3139
81819f0f 3140 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
a0e1d1be 3141 up_write(&slub_lock);
6446faa2 3142
81819f0f
CL
3143 if (sysfs_slab_alias(s, name))
3144 goto err;
a0e1d1be
CL
3145 return s;
3146 }
6446faa2 3147
a0e1d1be
CL
3148 s = kmalloc(kmem_size, GFP_KERNEL);
3149 if (s) {
3150 if (kmem_cache_open(s, GFP_KERNEL, name,
c59def9f 3151 size, align, flags, ctor)) {
81819f0f 3152 list_add(&s->list, &slab_caches);
a0e1d1be
CL
3153 up_write(&slub_lock);
3154 if (sysfs_slab_add(s))
3155 goto err;
3156 return s;
3157 }
3158 kfree(s);
81819f0f
CL
3159 }
3160 up_write(&slub_lock);
81819f0f
CL
3161
3162err:
81819f0f
CL
3163 if (flags & SLAB_PANIC)
3164 panic("Cannot create slabcache %s\n", name);
3165 else
3166 s = NULL;
3167 return s;
3168}
3169EXPORT_SYMBOL(kmem_cache_create);
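/*
 * Merging example (editor's illustration): two caches created as
 *
 *	kmem_cache_create("foo", 44, 0, 0, NULL);
 *	kmem_cache_create("bar", 42, 0, 0, NULL);
 *
 * typically end up sharing one kmem_cache: find_mergeable() rounds both
 * sizes to the same aligned size and neither has a constructor or
 * incompatible flags, so the second call just bumps the refcount of the
 * first cache (raising objsize if the new size is larger) and registers
 * "bar" as a sysfs alias of it.  Passing a constructor or booting with
 * slub_nomerge keeps the caches separate.
 */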
3170
81819f0f 3171#ifdef CONFIG_SMP
81819f0f 3172/*
672bba3a
CL
3173 * Use the cpu notifier to insure that the cpu slabs are flushed when
3174 * necessary.
81819f0f
CL
3175 */
3176static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3177 unsigned long action, void *hcpu)
3178{
3179 long cpu = (long)hcpu;
5b95a4ac
CL
3180 struct kmem_cache *s;
3181 unsigned long flags;
81819f0f
CL
3182
3183 switch (action) {
4c93c355
CL
3184 case CPU_UP_PREPARE:
3185 case CPU_UP_PREPARE_FROZEN:
3186 init_alloc_cpu_cpu(cpu);
3187 down_read(&slub_lock);
3188 list_for_each_entry(s, &slab_caches, list)
3189 s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3190 GFP_KERNEL);
3191 up_read(&slub_lock);
3192 break;
3193
81819f0f 3194 case CPU_UP_CANCELED:
8bb78442 3195 case CPU_UP_CANCELED_FROZEN:
81819f0f 3196 case CPU_DEAD:
8bb78442 3197 case CPU_DEAD_FROZEN:
5b95a4ac
CL
3198 down_read(&slub_lock);
3199 list_for_each_entry(s, &slab_caches, list) {
4c93c355
CL
3200 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3201
5b95a4ac
CL
3202 local_irq_save(flags);
3203 __flush_cpu_slab(s, cpu);
3204 local_irq_restore(flags);
4c93c355
CL
3205 free_kmem_cache_cpu(c, cpu);
3206 s->cpu_slab[cpu] = NULL;
5b95a4ac
CL
3207 }
3208 up_read(&slub_lock);
81819f0f
CL
3209 break;
3210 default:
3211 break;
3212 }
3213 return NOTIFY_OK;
3214}
3215
06428780 3216static struct notifier_block __cpuinitdata slab_notifier = {
3adbefee 3217 .notifier_call = slab_cpuup_callback
06428780 3218};
81819f0f
CL
3219
3220#endif
3221
81819f0f
CL
3222void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
3223{
aadb4bc4
CL
3224 struct kmem_cache *s;
3225
331dc558 3226 if (unlikely(size > PAGE_SIZE))
eada35ef
PE
3227 return kmalloc_large(size, gfpflags);
3228
aadb4bc4 3229 s = get_slab(size, gfpflags);
81819f0f 3230
2408c550 3231 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 3232 return s;
81819f0f 3233
ce15fea8 3234 return slab_alloc(s, gfpflags, -1, caller);
81819f0f
CL
3235}
3236
3237void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3238 int node, void *caller)
3239{
aadb4bc4
CL
3240 struct kmem_cache *s;
3241
331dc558 3242 if (unlikely(size > PAGE_SIZE))
f619cfe1 3243 return kmalloc_large_node(size, gfpflags, node);
eada35ef 3244
aadb4bc4 3245 s = get_slab(size, gfpflags);
81819f0f 3246
2408c550 3247 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 3248 return s;
81819f0f 3249
ce15fea8 3250 return slab_alloc(s, gfpflags, node, caller);
81819f0f
CL
3251}
3252
f6acb635 3253#ifdef CONFIG_SLUB_DEBUG
205ab99d
CL
3254static unsigned long count_partial(struct kmem_cache_node *n,
3255 int (*get_count)(struct page *))
5b06c853
CL
3256{
3257 unsigned long flags;
3258 unsigned long x = 0;
3259 struct page *page;
3260
3261 spin_lock_irqsave(&n->list_lock, flags);
3262 list_for_each_entry(page, &n->partial, lru)
205ab99d 3263 x += get_count(page);
5b06c853
CL
3264 spin_unlock_irqrestore(&n->list_lock, flags);
3265 return x;
3266}
205ab99d
CL
3267
3268static int count_inuse(struct page *page)
3269{
3270 return page->inuse;
3271}
3272
3273static int count_total(struct page *page)
3274{
3275 return page->objects;
3276}
3277
3278static int count_free(struct page *page)
3279{
3280 return page->objects - page->inuse;
3281}
5b06c853 3282
434e245d
CL
3283static int validate_slab(struct kmem_cache *s, struct page *page,
3284 unsigned long *map)
53e15af0
CL
3285{
3286 void *p;
a973e9dd 3287 void *addr = page_address(page);
53e15af0
CL
3288
3289 if (!check_slab(s, page) ||
3290 !on_freelist(s, page, NULL))
3291 return 0;
3292
3293 /* Now we know that a valid freelist exists */
39b26464 3294 bitmap_zero(map, page->objects);
53e15af0 3295
7656c72b
CL
3296 for_each_free_object(p, s, page->freelist) {
3297 set_bit(slab_index(p, s, addr), map);
53e15af0
CL
3298 if (!check_object(s, page, p, 0))
3299 return 0;
3300 }
3301
224a88be 3302 for_each_object(p, s, addr, page->objects)
7656c72b 3303 if (!test_bit(slab_index(p, s, addr), map))
53e15af0
CL
3304 if (!check_object(s, page, p, 1))
3305 return 0;
3306 return 1;
3307}
3308
434e245d
CL
3309static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3310 unsigned long *map)
53e15af0
CL
3311{
3312 if (slab_trylock(page)) {
434e245d 3313 validate_slab(s, page, map);
53e15af0
CL
3314 slab_unlock(page);
3315 } else
3316 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3317 s->name, page);
3318
3319 if (s->flags & DEBUG_DEFAULT_FLAGS) {
35e5d7ee
CL
3320 if (!SlabDebug(page))
3321 printk(KERN_ERR "SLUB %s: SlabDebug not set "
53e15af0
CL
3322 "on slab 0x%p\n", s->name, page);
3323 } else {
35e5d7ee
CL
3324 if (SlabDebug(page))
3325 printk(KERN_ERR "SLUB %s: SlabDebug set on "
53e15af0
CL
3326 "slab 0x%p\n", s->name, page);
3327 }
3328}
3329
434e245d
CL
3330static int validate_slab_node(struct kmem_cache *s,
3331 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
3332{
3333 unsigned long count = 0;
3334 struct page *page;
3335 unsigned long flags;
3336
3337 spin_lock_irqsave(&n->list_lock, flags);
3338
3339 list_for_each_entry(page, &n->partial, lru) {
434e245d 3340 validate_slab_slab(s, page, map);
53e15af0
CL
3341 count++;
3342 }
3343 if (count != n->nr_partial)
3344 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3345 "counter=%ld\n", s->name, count, n->nr_partial);
3346
3347 if (!(s->flags & SLAB_STORE_USER))
3348 goto out;
3349
3350 list_for_each_entry(page, &n->full, lru) {
434e245d 3351 validate_slab_slab(s, page, map);
53e15af0
CL
3352 count++;
3353 }
3354 if (count != atomic_long_read(&n->nr_slabs))
3355 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3356 "counter=%ld\n", s->name, count,
3357 atomic_long_read(&n->nr_slabs));
3358
3359out:
3360 spin_unlock_irqrestore(&n->list_lock, flags);
3361 return count;
3362}
3363
434e245d 3364static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
3365{
3366 int node;
3367 unsigned long count = 0;
205ab99d 3368 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245d
CL
3369 sizeof(unsigned long), GFP_KERNEL);
3370
3371 if (!map)
3372 return -ENOMEM;
53e15af0
CL
3373
3374 flush_all(s);
f64dc58c 3375 for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af0
CL
3376 struct kmem_cache_node *n = get_node(s, node);
3377
434e245d 3378 count += validate_slab_node(s, n, map);
53e15af0 3379 }
434e245d 3380 kfree(map);
53e15af0
CL
3381 return count;
3382}
3383
b3459709
CL
3384#ifdef SLUB_RESILIENCY_TEST
3385static void resiliency_test(void)
3386{
3387 u8 *p;
3388
3389 printk(KERN_ERR "SLUB resiliency testing\n");
3390 printk(KERN_ERR "-----------------------\n");
3391 printk(KERN_ERR "A. Corruption after allocation\n");
3392
3393 p = kzalloc(16, GFP_KERNEL);
3394 p[16] = 0x12;
3395 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3396 " 0x12->0x%p\n\n", p + 16);
3397
3398 validate_slab_cache(kmalloc_caches + 4);
3399
3400 /* Hmmm... The next two are dangerous */
3401 p = kzalloc(32, GFP_KERNEL);
3402 p[32 + sizeof(void *)] = 0x34;
3403 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3adbefee
IM
3404 " 0x34 -> -0x%p\n", p);
3405 printk(KERN_ERR
3406 "If allocated object is overwritten then not detectable\n\n");
b3459709
CL
3407
3408 validate_slab_cache(kmalloc_caches + 5);
3409 p = kzalloc(64, GFP_KERNEL);
3410 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3411 *p = 0x56;
3412 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3413 p);
3adbefee
IM
3414 printk(KERN_ERR
3415 "If allocated object is overwritten then not detectable\n\n");
b3459709
CL
3416 validate_slab_cache(kmalloc_caches + 6);
3417
3418 printk(KERN_ERR "\nB. Corruption after free\n");
3419 p = kzalloc(128, GFP_KERNEL);
3420 kfree(p);
3421 *p = 0x78;
3422 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3423 validate_slab_cache(kmalloc_caches + 7);
3424
3425 p = kzalloc(256, GFP_KERNEL);
3426 kfree(p);
3427 p[50] = 0x9a;
3adbefee
IM
3428 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3429 p);
b3459709
CL
3430 validate_slab_cache(kmalloc_caches + 8);
3431
3432 p = kzalloc(512, GFP_KERNEL);
3433 kfree(p);
3434 p[512] = 0xab;
3435 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3436 validate_slab_cache(kmalloc_caches + 9);
3437}
3438#else
3439static void resiliency_test(void) {};
3440#endif
3441
88a420e4 3442/*
672bba3a 3443 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
3444 * and freed.
3445 */
3446
3447struct location {
3448 unsigned long count;
3449 void *addr;
45edfa58
CL
3450 long long sum_time;
3451 long min_time;
3452 long max_time;
3453 long min_pid;
3454 long max_pid;
3455 cpumask_t cpus;
3456 nodemask_t nodes;
88a420e4
CL
3457};
3458
3459struct loc_track {
3460 unsigned long max;
3461 unsigned long count;
3462 struct location *loc;
3463};
3464
3465static void free_loc_track(struct loc_track *t)
3466{
3467 if (t->max)
3468 free_pages((unsigned long)t->loc,
3469 get_order(sizeof(struct location) * t->max));
3470}
3471
68dff6a9 3472static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
3473{
3474 struct location *l;
3475 int order;
3476
88a420e4
CL
3477 order = get_order(sizeof(struct location) * max);
3478
68dff6a9 3479 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
3480 if (!l)
3481 return 0;
3482
3483 if (t->count) {
3484 memcpy(l, t->loc, sizeof(struct location) * t->count);
3485 free_loc_track(t);
3486 }
3487 t->max = max;
3488 t->loc = l;
3489 return 1;
3490}
3491
3492static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 3493 const struct track *track)
88a420e4
CL
3494{
3495 long start, end, pos;
3496 struct location *l;
3497 void *caddr;
45edfa58 3498 unsigned long age = jiffies - track->when;
88a420e4
CL
3499
3500 start = -1;
3501 end = t->count;
3502
3503 for ( ; ; ) {
3504 pos = start + (end - start + 1) / 2;
3505
3506 /*
3507 * There is nothing at "end". If we end up there
3508 * we need to add something before end.
3509 */
3510 if (pos == end)
3511 break;
3512
3513 caddr = t->loc[pos].addr;
45edfa58
CL
3514 if (track->addr == caddr) {
3515
3516 l = &t->loc[pos];
3517 l->count++;
3518 if (track->when) {
3519 l->sum_time += age;
3520 if (age < l->min_time)
3521 l->min_time = age;
3522 if (age > l->max_time)
3523 l->max_time = age;
3524
3525 if (track->pid < l->min_pid)
3526 l->min_pid = track->pid;
3527 if (track->pid > l->max_pid)
3528 l->max_pid = track->pid;
3529
3530 cpu_set(track->cpu, l->cpus);
3531 }
3532 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
3533 return 1;
3534 }
3535
45edfa58 3536 if (track->addr < caddr)
88a420e4
CL
3537 end = pos;
3538 else
3539 start = pos;
3540 }
3541
3542 /*
672bba3a 3543 * Not found. Insert new tracking element.
88a420e4 3544 */
68dff6a9 3545 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
3546 return 0;
3547
3548 l = t->loc + pos;
3549 if (pos < t->count)
3550 memmove(l + 1, l,
3551 (t->count - pos) * sizeof(struct location));
3552 t->count++;
3553 l->count = 1;
45edfa58
CL
3554 l->addr = track->addr;
3555 l->sum_time = age;
3556 l->min_time = age;
3557 l->max_time = age;
3558 l->min_pid = track->pid;
3559 l->max_pid = track->pid;
3560 cpus_clear(l->cpus);
3561 cpu_set(track->cpu, l->cpus);
3562 nodes_clear(l->nodes);
3563 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
3564 return 1;
3565}
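/*
 * Editor's note: add_location() keeps t->loc sorted by caller address, so
 * the binary search above either bumps the count of an existing entry or
 * finds the position where memmove() makes room for a new one; the array
 * is doubled via alloc_loc_track() whenever it fills up.
 */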
3566
3567static void process_slab(struct loc_track *t, struct kmem_cache *s,
3568 struct page *page, enum track_item alloc)
3569{
a973e9dd 3570 void *addr = page_address(page);
39b26464 3571 DECLARE_BITMAP(map, page->objects);
88a420e4
CL
3572 void *p;
3573
39b26464 3574 bitmap_zero(map, page->objects);
7656c72b
CL
3575 for_each_free_object(p, s, page->freelist)
3576 set_bit(slab_index(p, s, addr), map);
88a420e4 3577
224a88be 3578 for_each_object(p, s, addr, page->objects)
45edfa58
CL
3579 if (!test_bit(slab_index(p, s, addr), map))
3580 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
3581}
3582
3583static int list_locations(struct kmem_cache *s, char *buf,
3584 enum track_item alloc)
3585{
e374d483 3586 int len = 0;
88a420e4 3587 unsigned long i;
68dff6a9 3588 struct loc_track t = { 0, 0, NULL };
88a420e4
CL
3589 int node;
3590
68dff6a9 3591 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
ea3061d2 3592 GFP_TEMPORARY))
68dff6a9 3593 return sprintf(buf, "Out of memory\n");
88a420e4
CL
3594
3595 /* Push back cpu slabs */
3596 flush_all(s);
3597
f64dc58c 3598 for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4
CL
3599 struct kmem_cache_node *n = get_node(s, node);
3600 unsigned long flags;
3601 struct page *page;
3602
9e86943b 3603 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
3604 continue;
3605
3606 spin_lock_irqsave(&n->list_lock, flags);
3607 list_for_each_entry(page, &n->partial, lru)
3608 process_slab(&t, s, page, alloc);
3609 list_for_each_entry(page, &n->full, lru)
3610 process_slab(&t, s, page, alloc);
3611 spin_unlock_irqrestore(&n->list_lock, flags);
3612 }
3613
3614 for (i = 0; i < t.count; i++) {
45edfa58 3615 struct location *l = &t.loc[i];
88a420e4 3616
e374d483 3617 if (len > PAGE_SIZE - 100)
88a420e4 3618 break;
e374d483 3619 len += sprintf(buf + len, "%7ld ", l->count);
45edfa58
CL
3620
3621 if (l->addr)
e374d483 3622 len += sprint_symbol(buf + len, (unsigned long)l->addr);
88a420e4 3623 else
e374d483 3624 len += sprintf(buf + len, "<not-available>");
45edfa58
CL
3625
3626 if (l->sum_time != l->min_time) {
e374d483 3627 len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258
RZ
3628 l->min_time,
3629 (long)div_u64(l->sum_time, l->count),
3630 l->max_time);
45edfa58 3631 } else
e374d483 3632 len += sprintf(buf + len, " age=%ld",
45edfa58
CL
3633 l->min_time);
3634
3635 if (l->min_pid != l->max_pid)
e374d483 3636 len += sprintf(buf + len, " pid=%ld-%ld",
45edfa58
CL
3637 l->min_pid, l->max_pid);
3638 else
e374d483 3639 len += sprintf(buf + len, " pid=%ld",
45edfa58
CL
3640 l->min_pid);
3641
84966343 3642 if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
e374d483
HH
3643 len < PAGE_SIZE - 60) {
3644 len += sprintf(buf + len, " cpus=");
3645 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa58
CL
3646 l->cpus);
3647 }
3648
84966343 3649 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
e374d483
HH
3650 len < PAGE_SIZE - 60) {
3651 len += sprintf(buf + len, " nodes=");
3652 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa58
CL
3653 l->nodes);
3654 }
3655
e374d483 3656 len += sprintf(buf + len, "\n");
88a420e4
CL
3657 }
3658
3659 free_loc_track(&t);
3660 if (!t.count)
e374d483
HH
3661 len += sprintf(buf, "No data\n");
3662 return len;
88a420e4
CL
3663}
3664
81819f0f 3665enum slab_stat_type {
205ab99d
CL
3666 SL_ALL, /* All slabs */
3667 SL_PARTIAL, /* Only partially allocated slabs */
3668 SL_CPU, /* Only slabs used for cpu caches */
3669 SL_OBJECTS, /* Determine allocated objects not slabs */
3670 SL_TOTAL /* Determine object capacity not slabs */
81819f0f
CL
3671};
3672
205ab99d 3673#define SO_ALL (1 << SL_ALL)
81819f0f
CL
3674#define SO_PARTIAL (1 << SL_PARTIAL)
3675#define SO_CPU (1 << SL_CPU)
3676#define SO_OBJECTS (1 << SL_OBJECTS)
205ab99d 3677#define SO_TOTAL (1 << SL_TOTAL)
81819f0f 3678
62e5c4b4
CG
3679static ssize_t show_slab_objects(struct kmem_cache *s,
3680 char *buf, unsigned long flags)
81819f0f
CL
3681{
3682 unsigned long total = 0;
81819f0f
CL
3683 int node;
3684 int x;
3685 unsigned long *nodes;
3686 unsigned long *per_cpu;
3687
3688 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4
CG
3689 if (!nodes)
3690 return -ENOMEM;
81819f0f
CL
3691 per_cpu = nodes + nr_node_ids;
3692
205ab99d
CL
3693 if (flags & SO_CPU) {
3694 int cpu;
81819f0f 3695
205ab99d
CL
3696 for_each_possible_cpu(cpu) {
3697 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
dfb4f096 3698
205ab99d
CL
3699 if (!c || c->node < 0)
3700 continue;
3701
3702 if (c->page) {
3703 if (flags & SO_TOTAL)
3704 x = c->page->objects;
3705 else if (flags & SO_OBJECTS)
3706 x = c->page->inuse;
81819f0f
CL
3707 else
3708 x = 1;
205ab99d 3709
81819f0f 3710 total += x;
205ab99d 3711 nodes[c->node] += x;
81819f0f 3712 }
205ab99d 3713 per_cpu[c->node]++;
81819f0f
CL
3714 }
3715 }
3716
205ab99d
CL
3717 if (flags & SO_ALL) {
3718 for_each_node_state(node, N_NORMAL_MEMORY) {
3719 struct kmem_cache_node *n = get_node(s, node);
3720
3721 if (flags & SO_TOTAL)
3722 x = atomic_long_read(&n->total_objects);
3723 else if (flags & SO_OBJECTS)
3724 x = atomic_long_read(&n->total_objects) -
3725 count_partial(n, count_free);
81819f0f 3726
81819f0f 3727 else
205ab99d 3728 x = atomic_long_read(&n->nr_slabs);
81819f0f
CL
3729 total += x;
3730 nodes[node] += x;
3731 }
3732
205ab99d
CL
3733 } else if (flags & SO_PARTIAL) {
3734 for_each_node_state(node, N_NORMAL_MEMORY) {
3735 struct kmem_cache_node *n = get_node(s, node);
81819f0f 3736
205ab99d
CL
3737 if (flags & SO_TOTAL)
3738 x = count_partial(n, count_total);
3739 else if (flags & SO_OBJECTS)
3740 x = count_partial(n, count_inuse);
81819f0f 3741 else
205ab99d 3742 x = n->nr_partial;
81819f0f
CL
3743 total += x;
3744 nodes[node] += x;
3745 }
3746 }
81819f0f
CL
3747 x = sprintf(buf, "%lu", total);
3748#ifdef CONFIG_NUMA
f64dc58c 3749 for_each_node_state(node, N_NORMAL_MEMORY)
81819f0f
CL
3750 if (nodes[node])
3751 x += sprintf(buf + x, " N%d=%lu",
3752 node, nodes[node]);
3753#endif
3754 kfree(nodes);
3755 return x + sprintf(buf + x, "\n");
3756}
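/*
 * Illustrative output (not in the original source): on a two node NUMA
 * machine the buffer filled above might read, for example,
 *
 *	"1416 N0=712 N1=704\n"
 *
 * i.e. the total followed by the per-node counts; on non-NUMA builds only
 * the total is printed. The numbers are made up for illustration.
 */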
3757
3758static int any_slab_objects(struct kmem_cache *s)
3759{
3760 int node;
81819f0f 3761
dfb4f096 3762 for_each_online_node(node) {
81819f0f
CL
3763 struct kmem_cache_node *n = get_node(s, node);
3764
dfb4f096
CL
3765 if (!n)
3766 continue;
3767
4ea33e2d 3768 if (atomic_long_read(&n->total_objects))
81819f0f
CL
3769 return 1;
3770 }
3771 return 0;
3772}
3773
3774#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
 3775#define to_slab(n) container_of(n, struct kmem_cache, kobj)
3776
3777struct slab_attribute {
3778 struct attribute attr;
3779 ssize_t (*show)(struct kmem_cache *s, char *buf);
3780 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3781};
3782
3783#define SLAB_ATTR_RO(_name) \
3784 static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3785
3786#define SLAB_ATTR(_name) \
3787 static struct slab_attribute _name##_attr = \
3788 __ATTR(_name, 0644, _name##_show, _name##_store)
3789
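/*
 * Expansion sketch (added for clarity, not in the original source):
 * SLAB_ATTR_RO(slab_size) is expected to expand to roughly
 *
 *	static struct slab_attribute slab_size_attr =
 *		{ .attr = { .name = "slab_size", .mode = 0444 },
 *		  .show = slab_size_show };
 *
 * while SLAB_ATTR(order) additionally wires up order_store with mode 0644.
 * The exact initializer depends on the __ATTR* helpers in sysfs.h.
 */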
81819f0f
CL
3790static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3791{
3792 return sprintf(buf, "%d\n", s->size);
3793}
3794SLAB_ATTR_RO(slab_size);
3795
3796static ssize_t align_show(struct kmem_cache *s, char *buf)
3797{
3798 return sprintf(buf, "%d\n", s->align);
3799}
3800SLAB_ATTR_RO(align);
3801
3802static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3803{
3804 return sprintf(buf, "%d\n", s->objsize);
3805}
3806SLAB_ATTR_RO(object_size);
3807
3808static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3809{
834f3d11 3810 return sprintf(buf, "%d\n", oo_objects(s->oo));
81819f0f
CL
3811}
3812SLAB_ATTR_RO(objs_per_slab);
3813
06b285dc
CL
3814static ssize_t order_store(struct kmem_cache *s,
3815 const char *buf, size_t length)
3816{
0121c619
CL
3817 unsigned long order;
3818 int err;
3819
3820 err = strict_strtoul(buf, 10, &order);
3821 if (err)
3822 return err;
06b285dc
CL
3823
3824 if (order > slub_max_order || order < slub_min_order)
3825 return -EINVAL;
3826
3827 calculate_sizes(s, order);
3828 return length;
3829}
3830
81819f0f
CL
3831static ssize_t order_show(struct kmem_cache *s, char *buf)
3832{
834f3d11 3833 return sprintf(buf, "%d\n", oo_order(s->oo));
81819f0f 3834}
06b285dc 3835SLAB_ATTR(order);
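/*
 * Usage sketch (added for clarity, not in the original source): the slab
 * order can be tuned at runtime through sysfs, within the configured
 * slub_min_order/slub_max_order bounds, e.g.
 *
 *	echo 2 > /sys/kernel/slab/<cache>/order
 *	cat /sys/kernel/slab/<cache>/order
 *
 * Writes outside the allowed range fail with -EINVAL.
 */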
81819f0f
CL
3836
3837static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3838{
3839 if (s->ctor) {
3840 int n = sprint_symbol(buf, (unsigned long)s->ctor);
3841
3842 return n + sprintf(buf + n, "\n");
3843 }
3844 return 0;
3845}
3846SLAB_ATTR_RO(ctor);
3847
81819f0f
CL
3848static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3849{
3850 return sprintf(buf, "%d\n", s->refcount - 1);
3851}
3852SLAB_ATTR_RO(aliases);
3853
3854static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3855{
205ab99d 3856 return show_slab_objects(s, buf, SO_ALL);
81819f0f
CL
3857}
3858SLAB_ATTR_RO(slabs);
3859
3860static ssize_t partial_show(struct kmem_cache *s, char *buf)
3861{
d9acf4b7 3862 return show_slab_objects(s, buf, SO_PARTIAL);
81819f0f
CL
3863}
3864SLAB_ATTR_RO(partial);
3865
3866static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3867{
d9acf4b7 3868 return show_slab_objects(s, buf, SO_CPU);
81819f0f
CL
3869}
3870SLAB_ATTR_RO(cpu_slabs);
3871
3872static ssize_t objects_show(struct kmem_cache *s, char *buf)
3873{
205ab99d 3874 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0f
CL
3875}
3876SLAB_ATTR_RO(objects);
3877
205ab99d
CL
3878static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
3879{
3880 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
3881}
3882SLAB_ATTR_RO(objects_partial);
3883
3884static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
3885{
3886 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
3887}
3888SLAB_ATTR_RO(total_objects);
3889
81819f0f
CL
3890static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3891{
3892 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3893}
3894
3895static ssize_t sanity_checks_store(struct kmem_cache *s,
3896 const char *buf, size_t length)
3897{
3898 s->flags &= ~SLAB_DEBUG_FREE;
3899 if (buf[0] == '1')
3900 s->flags |= SLAB_DEBUG_FREE;
3901 return length;
3902}
3903SLAB_ATTR(sanity_checks);
3904
3905static ssize_t trace_show(struct kmem_cache *s, char *buf)
3906{
3907 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3908}
3909
3910static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3911 size_t length)
3912{
3913 s->flags &= ~SLAB_TRACE;
3914 if (buf[0] == '1')
3915 s->flags |= SLAB_TRACE;
3916 return length;
3917}
3918SLAB_ATTR(trace);
3919
3920static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3921{
3922 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3923}
3924
3925static ssize_t reclaim_account_store(struct kmem_cache *s,
3926 const char *buf, size_t length)
3927{
3928 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3929 if (buf[0] == '1')
3930 s->flags |= SLAB_RECLAIM_ACCOUNT;
3931 return length;
3932}
3933SLAB_ATTR(reclaim_account);
3934
3935static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3936{
5af60839 3937 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
81819f0f
CL
3938}
3939SLAB_ATTR_RO(hwcache_align);
3940
3941#ifdef CONFIG_ZONE_DMA
3942static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3943{
3944 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3945}
3946SLAB_ATTR_RO(cache_dma);
3947#endif
3948
3949static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3950{
3951 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3952}
3953SLAB_ATTR_RO(destroy_by_rcu);
3954
3955static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3956{
3957 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3958}
3959
3960static ssize_t red_zone_store(struct kmem_cache *s,
3961 const char *buf, size_t length)
3962{
3963 if (any_slab_objects(s))
3964 return -EBUSY;
3965
3966 s->flags &= ~SLAB_RED_ZONE;
3967 if (buf[0] == '1')
3968 s->flags |= SLAB_RED_ZONE;
06b285dc 3969 calculate_sizes(s, -1);
81819f0f
CL
3970 return length;
3971}
3972SLAB_ATTR(red_zone);
3973
3974static ssize_t poison_show(struct kmem_cache *s, char *buf)
3975{
3976 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3977}
3978
3979static ssize_t poison_store(struct kmem_cache *s,
3980 const char *buf, size_t length)
3981{
3982 if (any_slab_objects(s))
3983 return -EBUSY;
3984
3985 s->flags &= ~SLAB_POISON;
3986 if (buf[0] == '1')
3987 s->flags |= SLAB_POISON;
06b285dc 3988 calculate_sizes(s, -1);
81819f0f
CL
3989 return length;
3990}
3991SLAB_ATTR(poison);
3992
3993static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3994{
3995 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3996}
3997
3998static ssize_t store_user_store(struct kmem_cache *s,
3999 const char *buf, size_t length)
4000{
4001 if (any_slab_objects(s))
4002 return -EBUSY;
4003
4004 s->flags &= ~SLAB_STORE_USER;
4005 if (buf[0] == '1')
4006 s->flags |= SLAB_STORE_USER;
06b285dc 4007 calculate_sizes(s, -1);
81819f0f
CL
4008 return length;
4009}
4010SLAB_ATTR(store_user);
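/*
 * Usage sketch (added for clarity, not in the original source): the debug
 * attributes above accept "1"/"0" writes through sysfs, e.g.
 *
 *	echo 1 > /sys/kernel/slab/<cache>/sanity_checks
 *	echo 1 > /sys/kernel/slab/<cache>/trace
 *
 * red_zone, poison and store_user change the object layout and therefore
 * refuse the write with -EBUSY while the cache still has objects allocated.
 */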
4011
53e15af0
CL
4012static ssize_t validate_show(struct kmem_cache *s, char *buf)
4013{
4014 return 0;
4015}
4016
4017static ssize_t validate_store(struct kmem_cache *s,
4018 const char *buf, size_t length)
4019{
434e245d
CL
4020 int ret = -EINVAL;
4021
4022 if (buf[0] == '1') {
4023 ret = validate_slab_cache(s);
4024 if (ret >= 0)
4025 ret = length;
4026 }
4027 return ret;
53e15af0
CL
4028}
4029SLAB_ATTR(validate);
4030
2086d26a
CL
4031static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4032{
4033 return 0;
4034}
4035
4036static ssize_t shrink_store(struct kmem_cache *s,
4037 const char *buf, size_t length)
4038{
4039 if (buf[0] == '1') {
4040 int rc = kmem_cache_shrink(s);
4041
4042 if (rc)
4043 return rc;
4044 } else
4045 return -EINVAL;
4046 return length;
4047}
4048SLAB_ATTR(shrink);
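/*
 * Usage sketch (added for clarity, not in the original source): validate
 * and shrink are trigger-style attributes, only a write of "1" is accepted:
 *
 *	echo 1 > /sys/kernel/slab/<cache>/validate
 *	echo 1 > /sys/kernel/slab/<cache>/shrink
 *
 * Reading either file returns nothing.
 */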
4049
88a420e4
CL
4050static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4051{
4052 if (!(s->flags & SLAB_STORE_USER))
4053 return -ENOSYS;
4054 return list_locations(s, buf, TRACK_ALLOC);
4055}
4056SLAB_ATTR_RO(alloc_calls);
4057
4058static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4059{
4060 if (!(s->flags & SLAB_STORE_USER))
4061 return -ENOSYS;
4062 return list_locations(s, buf, TRACK_FREE);
4063}
4064SLAB_ATTR_RO(free_calls);
4065
81819f0f 4066#ifdef CONFIG_NUMA
9824601e 4067static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
81819f0f 4068{
9824601e 4069 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
81819f0f
CL
4070}
4071
9824601e 4072static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
81819f0f
CL
4073 const char *buf, size_t length)
4074{
0121c619
CL
4075 unsigned long ratio;
4076 int err;
4077
4078 err = strict_strtoul(buf, 10, &ratio);
4079 if (err)
4080 return err;
4081
4082 if (ratio < 100)
4083 s->remote_node_defrag_ratio = ratio * 10;
81819f0f 4084
81819f0f
CL
4085 return length;
4086}
9824601e 4087SLAB_ATTR(remote_node_defrag_ratio);
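/*
 * Usage sketch (added for clarity, not in the original source): the value
 * written is a percentage; it is stored internally scaled by ten and shown
 * divided by ten again, e.g.
 *
 *	echo 90 > /sys/kernel/slab/<cache>/remote_node_defrag_ratio
 *	cat /sys/kernel/slab/<cache>/remote_node_defrag_ratio
 *
 * As written above, values of 100 and higher are silently ignored.
 */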
81819f0f
CL
4088#endif
4089
8ff12cfc 4090#ifdef CONFIG_SLUB_STATS
8ff12cfc
CL
4091static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4092{
4093 unsigned long sum = 0;
4094 int cpu;
4095 int len;
4096 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4097
4098 if (!data)
4099 return -ENOMEM;
4100
4101 for_each_online_cpu(cpu) {
4102 unsigned x = get_cpu_slab(s, cpu)->stat[si];
4103
4104 data[cpu] = x;
4105 sum += x;
4106 }
4107
4108 len = sprintf(buf, "%lu", sum);
4109
50ef37b9 4110#ifdef CONFIG_SMP
8ff12cfc
CL
4111 for_each_online_cpu(cpu) {
4112 if (data[cpu] && len < PAGE_SIZE - 20)
50ef37b9 4113 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
8ff12cfc 4114 }
50ef37b9 4115#endif
8ff12cfc
CL
4116 kfree(data);
4117 return len + sprintf(buf + len, "\n");
4118}
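/*
 * Illustrative output (not in the original source): a statistics file
 * produced by show_stat() might read, for example,
 *
 *	"86545 C0=43213 C1=43332\n"
 *
 * i.e. the sum over all cpus followed, on SMP, by the non-zero per-cpu
 * counts. The numbers are made up for illustration.
 */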
4119
4120#define STAT_ATTR(si, text) \
4121static ssize_t text##_show(struct kmem_cache *s, char *buf) \
4122{ \
4123 return show_stat(s, buf, si); \
4124} \
4125SLAB_ATTR_RO(text); \
4126
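/*
 * Expansion sketch (added for clarity, not in the original source):
 * STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath) defines
 *
 *	static ssize_t alloc_fastpath_show(struct kmem_cache *s, char *buf)
 *	{
 *		return show_stat(s, buf, ALLOC_FASTPATH);
 *	}
 *
 * plus a read-only alloc_fastpath attribute via SLAB_ATTR_RO().
 */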
4127STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4128STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4129STAT_ATTR(FREE_FASTPATH, free_fastpath);
4130STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4131STAT_ATTR(FREE_FROZEN, free_frozen);
4132STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4133STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4134STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4135STAT_ATTR(ALLOC_SLAB, alloc_slab);
4136STAT_ATTR(ALLOC_REFILL, alloc_refill);
4137STAT_ATTR(FREE_SLAB, free_slab);
4138STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4139STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4140STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4141STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4142STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4143STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
65c3376a 4144STAT_ATTR(ORDER_FALLBACK, order_fallback);
8ff12cfc
CL
4145#endif
4146
06428780 4147static struct attribute *slab_attrs[] = {
81819f0f
CL
4148 &slab_size_attr.attr,
4149 &object_size_attr.attr,
4150 &objs_per_slab_attr.attr,
4151 &order_attr.attr,
4152 &objects_attr.attr,
205ab99d
CL
4153 &objects_partial_attr.attr,
4154 &total_objects_attr.attr,
81819f0f
CL
4155 &slabs_attr.attr,
4156 &partial_attr.attr,
4157 &cpu_slabs_attr.attr,
4158 &ctor_attr.attr,
81819f0f
CL
4159 &aliases_attr.attr,
4160 &align_attr.attr,
4161 &sanity_checks_attr.attr,
4162 &trace_attr.attr,
4163 &hwcache_align_attr.attr,
4164 &reclaim_account_attr.attr,
4165 &destroy_by_rcu_attr.attr,
4166 &red_zone_attr.attr,
4167 &poison_attr.attr,
4168 &store_user_attr.attr,
53e15af0 4169 &validate_attr.attr,
2086d26a 4170 &shrink_attr.attr,
88a420e4
CL
4171 &alloc_calls_attr.attr,
4172 &free_calls_attr.attr,
81819f0f
CL
4173#ifdef CONFIG_ZONE_DMA
4174 &cache_dma_attr.attr,
4175#endif
4176#ifdef CONFIG_NUMA
9824601e 4177 &remote_node_defrag_ratio_attr.attr,
8ff12cfc
CL
4178#endif
4179#ifdef CONFIG_SLUB_STATS
4180 &alloc_fastpath_attr.attr,
4181 &alloc_slowpath_attr.attr,
4182 &free_fastpath_attr.attr,
4183 &free_slowpath_attr.attr,
4184 &free_frozen_attr.attr,
4185 &free_add_partial_attr.attr,
4186 &free_remove_partial_attr.attr,
4187 &alloc_from_partial_attr.attr,
4188 &alloc_slab_attr.attr,
4189 &alloc_refill_attr.attr,
4190 &free_slab_attr.attr,
4191 &cpuslab_flush_attr.attr,
4192 &deactivate_full_attr.attr,
4193 &deactivate_empty_attr.attr,
4194 &deactivate_to_head_attr.attr,
4195 &deactivate_to_tail_attr.attr,
4196 &deactivate_remote_frees_attr.attr,
65c3376a 4197 &order_fallback_attr.attr,
81819f0f
CL
4198#endif
4199 NULL
4200};
4201
4202static struct attribute_group slab_attr_group = {
4203 .attrs = slab_attrs,
4204};
4205
4206static ssize_t slab_attr_show(struct kobject *kobj,
4207 struct attribute *attr,
4208 char *buf)
4209{
4210 struct slab_attribute *attribute;
4211 struct kmem_cache *s;
4212 int err;
4213
4214 attribute = to_slab_attr(attr);
4215 s = to_slab(kobj);
4216
4217 if (!attribute->show)
4218 return -EIO;
4219
4220 err = attribute->show(s, buf);
4221
4222 return err;
4223}
4224
4225static ssize_t slab_attr_store(struct kobject *kobj,
4226 struct attribute *attr,
4227 const char *buf, size_t len)
4228{
4229 struct slab_attribute *attribute;
4230 struct kmem_cache *s;
4231 int err;
4232
4233 attribute = to_slab_attr(attr);
4234 s = to_slab(kobj);
4235
4236 if (!attribute->store)
4237 return -EIO;
4238
4239 err = attribute->store(s, buf, len);
4240
4241 return err;
4242}
4243
151c602f
CL
4244static void kmem_cache_release(struct kobject *kobj)
4245{
4246 struct kmem_cache *s = to_slab(kobj);
4247
4248 kfree(s);
4249}
4250
81819f0f
CL
4251static struct sysfs_ops slab_sysfs_ops = {
4252 .show = slab_attr_show,
4253 .store = slab_attr_store,
4254};
4255
4256static struct kobj_type slab_ktype = {
4257 .sysfs_ops = &slab_sysfs_ops,
151c602f 4258 .release = kmem_cache_release
81819f0f
CL
4259};
4260
4261static int uevent_filter(struct kset *kset, struct kobject *kobj)
4262{
4263 struct kobj_type *ktype = get_ktype(kobj);
4264
4265 if (ktype == &slab_ktype)
4266 return 1;
4267 return 0;
4268}
4269
4270static struct kset_uevent_ops slab_uevent_ops = {
4271 .filter = uevent_filter,
4272};
4273
27c3a314 4274static struct kset *slab_kset;
81819f0f
CL
4275
4276#define ID_STR_LENGTH 64
4277
4278/* Create a unique string id for a slab cache:
6446faa2
CL
4279 *
4280 * Format :[flags-]size
81819f0f
CL
4281 */
4282static char *create_unique_id(struct kmem_cache *s)
4283{
4284 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
4285 char *p = name;
4286
4287 BUG_ON(!name);
4288
4289 *p++ = ':';
4290 /*
4291 * First flags affecting slabcache operations. We will only
4292 * get here for aliasable slabs so we do not need to support
4293 * too many flags. The flags here must cover all flags that
4294 * are matched during merging to guarantee that the id is
4295 * unique.
4296 */
4297 if (s->flags & SLAB_CACHE_DMA)
4298 *p++ = 'd';
4299 if (s->flags & SLAB_RECLAIM_ACCOUNT)
4300 *p++ = 'a';
4301 if (s->flags & SLAB_DEBUG_FREE)
4302 *p++ = 'F';
4303 if (p != name + 1)
4304 *p++ = '-';
4305 p += sprintf(p, "%07d", s->size);
4306 BUG_ON(p > name + ID_STR_LENGTH - 1);
4307 return name;
4308}
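/*
 * Illustrative examples (not in the original source): a DMA cache of size
 * 4096 would get the id ":d-0004096", a reclaimable cache with free-checking
 * enabled and size 192 would get ":aF-0000192", and a cache with none of
 * these flags simply ":0000192" (no '-' is emitted when no flag characters
 * precede the size).
 */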
4309
4310static int sysfs_slab_add(struct kmem_cache *s)
4311{
4312 int err;
4313 const char *name;
4314 int unmergeable;
4315
4316 if (slab_state < SYSFS)
4317 /* Defer until later */
4318 return 0;
4319
4320 unmergeable = slab_unmergeable(s);
4321 if (unmergeable) {
4322 /*
4323 * Slabcache can never be merged so we can use the name proper.
4324 * This is typically the case for debug situations. In that
4325 * case we can catch duplicate names easily.
4326 */
27c3a314 4327 sysfs_remove_link(&slab_kset->kobj, s->name);
81819f0f
CL
4328 name = s->name;
4329 } else {
4330 /*
4331 * Create a unique name for the slab as a target
4332 * for the symlinks.
4333 */
4334 name = create_unique_id(s);
4335 }
4336
27c3a314 4337 s->kobj.kset = slab_kset;
1eada11c
GKH
4338 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
4339 if (err) {
4340 kobject_put(&s->kobj);
81819f0f 4341 return err;
1eada11c 4342 }
81819f0f
CL
4343
4344 err = sysfs_create_group(&s->kobj, &slab_attr_group);
4345 if (err)
4346 return err;
4347 kobject_uevent(&s->kobj, KOBJ_ADD);
4348 if (!unmergeable) {
4349 /* Setup first alias */
4350 sysfs_slab_alias(s, s->name);
4351 kfree(name);
4352 }
4353 return 0;
4354}
4355
4356static void sysfs_slab_remove(struct kmem_cache *s)
4357{
4358 kobject_uevent(&s->kobj, KOBJ_REMOVE);
4359 kobject_del(&s->kobj);
151c602f 4360 kobject_put(&s->kobj);
81819f0f
CL
4361}
4362
4363/*
4364 * Need to buffer aliases during bootup until sysfs becomes
 4365 * available lest we lose that information.
4366 */
4367struct saved_alias {
4368 struct kmem_cache *s;
4369 const char *name;
4370 struct saved_alias *next;
4371};
4372
5af328a5 4373static struct saved_alias *alias_list;
81819f0f
CL
4374
4375static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
4376{
4377 struct saved_alias *al;
4378
4379 if (slab_state == SYSFS) {
4380 /*
4381 * If we have a leftover link then remove it.
4382 */
27c3a314
GKH
4383 sysfs_remove_link(&slab_kset->kobj, name);
4384 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
81819f0f
CL
4385 }
4386
4387 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
4388 if (!al)
4389 return -ENOMEM;
4390
4391 al->s = s;
4392 al->name = name;
4393 al->next = alias_list;
4394 alias_list = al;
4395 return 0;
4396}
4397
4398static int __init slab_sysfs_init(void)
4399{
5b95a4ac 4400 struct kmem_cache *s;
81819f0f
CL
4401 int err;
4402
0ff21e46 4403 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
27c3a314 4404 if (!slab_kset) {
81819f0f
CL
4405 printk(KERN_ERR "Cannot register slab subsystem.\n");
4406 return -ENOSYS;
4407 }
4408
26a7bd03
CL
4409 slab_state = SYSFS;
4410
5b95a4ac 4411 list_for_each_entry(s, &slab_caches, list) {
26a7bd03 4412 err = sysfs_slab_add(s);
5d540fb7
CL
4413 if (err)
4414 printk(KERN_ERR "SLUB: Unable to add boot slab %s"
4415 " to sysfs\n", s->name);
26a7bd03 4416 }
81819f0f
CL
4417
4418 while (alias_list) {
4419 struct saved_alias *al = alias_list;
4420
4421 alias_list = alias_list->next;
4422 err = sysfs_slab_alias(al->s, al->name);
5d540fb7
CL
4423 if (err)
4424 printk(KERN_ERR "SLUB: Unable to add boot slab alias"
4425 " %s to sysfs\n", s->name);
81819f0f
CL
4426 kfree(al);
4427 }
4428
4429 resiliency_test();
4430 return 0;
4431}
4432
4433__initcall(slab_sysfs_init);
81819f0f 4434#endif
57ed3eda
PE
4435
4436/*
4437 * The /proc/slabinfo ABI
4438 */
158a9624
LT
4439#ifdef CONFIG_SLABINFO
4440
0121c619
CL
4441ssize_t slabinfo_write(struct file *file, const char __user *buffer,
4442 size_t count, loff_t *ppos)
158a9624
LT
4443{
4444 return -EINVAL;
4445}
4446
57ed3eda
PE
4447
4448static void print_slabinfo_header(struct seq_file *m)
4449{
4450 seq_puts(m, "slabinfo - version: 2.1\n");
4451 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
4452 "<objperslab> <pagesperslab>");
4453 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4454 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4455 seq_putc(m, '\n');
4456}
4457
4458static void *s_start(struct seq_file *m, loff_t *pos)
4459{
4460 loff_t n = *pos;
4461
4462 down_read(&slub_lock);
4463 if (!n)
4464 print_slabinfo_header(m);
4465
4466 return seq_list_start(&slab_caches, *pos);
4467}
4468
4469static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4470{
4471 return seq_list_next(p, &slab_caches, pos);
4472}
4473
4474static void s_stop(struct seq_file *m, void *p)
4475{
4476 up_read(&slub_lock);
4477}
4478
4479static int s_show(struct seq_file *m, void *p)
4480{
4481 unsigned long nr_partials = 0;
4482 unsigned long nr_slabs = 0;
4483 unsigned long nr_inuse = 0;
205ab99d
CL
4484 unsigned long nr_objs = 0;
4485 unsigned long nr_free = 0;
57ed3eda
PE
4486 struct kmem_cache *s;
4487 int node;
4488
4489 s = list_entry(p, struct kmem_cache, list);
4490
4491 for_each_online_node(node) {
4492 struct kmem_cache_node *n = get_node(s, node);
4493
4494 if (!n)
4495 continue;
4496
4497 nr_partials += n->nr_partial;
4498 nr_slabs += atomic_long_read(&n->nr_slabs);
205ab99d
CL
4499 nr_objs += atomic_long_read(&n->total_objects);
4500 nr_free += count_partial(n, count_free);
57ed3eda
PE
4501 }
4502
205ab99d 4503 nr_inuse = nr_objs - nr_free;
57ed3eda
PE
4504
4505 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
834f3d11
CL
4506 nr_objs, s->size, oo_objects(s->oo),
4507 (1 << oo_order(s->oo)));
57ed3eda
PE
4508 seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
4509 seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
4510 0UL);
4511 seq_putc(m, '\n');
4512 return 0;
4513}
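/*
 * Illustrative output (not in the original source): with the format used
 * above a /proc/slabinfo line might read, for example,
 *
 *	kmalloc-64     2048  2048   64  64  1 : tunables 0 0 0 : slabdata  32  32 0
 *
 * SLUB has no per-cpu queue tunables, so the tunables and sharedavail
 * columns are always reported as zero. The numbers are made up for
 * illustration.
 */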
4514
4515const struct seq_operations slabinfo_op = {
4516 .start = s_start,
4517 .next = s_next,
4518 .stop = s_stop,
4519 .show = s_show,
4520};
4521
158a9624 4522#endif /* CONFIG_SLABINFO */