slub: Remove static kmem_cache_cpu array for boot
author		Christoph Lameter <cl@linux.com>
		Fri, 20 Aug 2010 17:37:14 +0000 (12:37 -0500)
committer	Pekka Enberg <penberg@kernel.org>
		Sat, 2 Oct 2010 07:24:26 +0000 (10:24 +0300)
The percpu allocator can now handle allocations during early boot.
So drop the static kmem_cache_cpu array.

Cc: Tejun Heo <tj@kernel.org>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
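
The patch relies on alloc_percpu() being usable this early in boot. As a rough
illustration of the pattern (not part of the patch; struct demo_stat and
demo_stats_init() are made-up names for the example), a per-CPU structure is
allocated once and each CPU's copy is then reached through per_cpu_ptr():

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hypothetical per-CPU bookkeeping structure, for illustration only. */
struct demo_stat {
	unsigned long allocs;
};

static struct demo_stat __percpu *demo_stats;

static int demo_stats_init(void)
{
	int cpu;

	/* One instance of the structure per possible CPU. */
	demo_stats = alloc_percpu(struct demo_stat);
	if (!demo_stats)
		return -ENOMEM;

	/* Initialize every CPU's copy via per_cpu_ptr(). */
	for_each_possible_cpu(cpu)
		per_cpu_ptr(demo_stats, cpu)->allocs = 0;

	return 0;
}

free_percpu(demo_stats) would later release all per-CPU copies in one call,
which is what kmem_cache destruction does for s->cpu_slab.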
mm/slub.c

index 38c73a3364c6d1cb2779c94baa9ebcaa14ad85e8..e8c1175953673cf3120758b608a1d41bf4b1c2b5 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2062,23 +2062,14 @@ init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
 #endif
 }
 
-static DEFINE_PER_CPU(struct kmem_cache_cpu, kmalloc_percpu[KMALLOC_CACHES]);
-
 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
 {
-       if (s < kmalloc_caches + KMALLOC_CACHES && s >= kmalloc_caches)
-               /*
-                * Boot time creation of the kmalloc array. Use static per cpu data
-                * since the per cpu allocator is not available yet.
-                */
-               s->cpu_slab = kmalloc_percpu + (s - kmalloc_caches);
-       else
-               s->cpu_slab =  alloc_percpu(struct kmem_cache_cpu);
+       BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
+                       SLUB_PAGE_SHIFT * sizeof(struct kmem_cache_cpu));
 
-       if (!s->cpu_slab)
-               return 0;
+       s->cpu_slab = alloc_percpu(struct kmem_cache_cpu);
 
-       return 1;
+       return s->cpu_slab != NULL;
 }
 
 #ifdef CONFIG_NUMA
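
The new BUILD_BUG_ON() is the safety net that replaces the static array: it
verifies at compile time that the early dynamic percpu reserve
(PERCPU_DYNAMIC_EARLY_SIZE) can hold one struct kmem_cache_cpu for each of the
SLUB_PAGE_SHIFT kmalloc caches, so a too-small reserve breaks the build instead
of failing during early boot. A stand-alone sketch of how such a compile-time
size check works (MY_BUILD_BUG_ON, demo_cpu_slab and the numbers are
illustrative stand-ins, not the kernel's actual definitions or values):

/* Classic negative-array-size trick: if cond is true, the array size
 * becomes -1 and the compiler rejects the translation unit. */
#define MY_BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

struct demo_cpu_slab {
	void **freelist;
	unsigned long tid;
};

static inline void demo_early_reserve_check(void)
{
	/* 12288 stands in for PERCPU_DYNAMIC_EARLY_SIZE and 14 for
	 * SLUB_PAGE_SHIFT; compilation fails if the reserve is too small. */
	MY_BUILD_BUG_ON(12288 < 14 * sizeof(struct demo_cpu_slab));
}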