Merge branch 'slub/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg...
author	Linus Torvalds <torvalds@linux-foundation.org>
	Sun, 30 May 2010 19:46:17 +0000 (12:46 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Sun, 30 May 2010 19:46:17 +0000 (12:46 -0700)
* 'slub/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6:
  SLUB: Allow full duplication of kmalloc array for 390
  slub: move kmem_cache_node into its own cacheline

include/linux/slub_def.h
mm/slub.c

index 55695c8d2f8ad954a7575ac11961d1254347b1b3..4ba59cfc1f7562c0bb8cb900c4f8d444554d25d5 100644 (file)
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -75,12 +75,6 @@ struct kmem_cache {
        int offset;             /* Free pointer offset. */
        struct kmem_cache_order_objects oo;
 
-       /*
-        * Avoid an extra cache line for UP, SMP and for the node local to
-        * struct kmem_cache.
-        */
-       struct kmem_cache_node local_node;
-
        /* Allocation and freeing of slabs */
        struct kmem_cache_order_objects max;
        struct kmem_cache_order_objects min;
@@ -102,6 +96,9 @@ struct kmem_cache {
         */
        int remote_node_defrag_ratio;
        struct kmem_cache_node *node[MAX_NUMNODES];
+#else
+       /* Avoid an extra cache line for UP */
+       struct kmem_cache_node local_node;
 #endif
 };
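Taken together, the two hunks above remove the embedded kmem_cache_node from NUMA builds and keep it only for UP. A sketch of the resulting field layout, reconstructed from this diff (unrelated fields elided; the guarding #ifdef, CONFIG_NUMA in this kernel, sits above the hunk and is assumed):

	struct kmem_cache {
		/* ... */
		int offset;		/* Free pointer offset. */
		struct kmem_cache_order_objects oo;

		/* Allocation and freeing of slabs */
		struct kmem_cache_order_objects max;
		struct kmem_cache_order_objects min;
		/* ... */
	#ifdef CONFIG_NUMA
		int remote_node_defrag_ratio;
		struct kmem_cache_node *node[MAX_NUMNODES];
	#else
		/* Avoid an extra cache line for UP */
		struct kmem_cache_node local_node;
	#endif
	};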
 
@@ -140,7 +137,7 @@ struct kmem_cache {
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
 /* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT - 6)
+#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
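The KMALLOC_CACHES change is what the first commit title refers to: the array must be large enough for every regular kmalloc cache to have a DMA twin, rather than reserving six fewer DMA slots. A runnable sketch of the arithmetic, assuming 4 KiB pages and SLUB_PAGE_SHIFT defined as (PAGE_SHIFT + 2), as in this kernel's slub_def.h:

	#include <stdio.h>

	/* Assumed for illustration: 4 KiB pages; this kernel's slub_def.h
	 * defines SLUB_PAGE_SHIFT as (PAGE_SHIFT + 2). */
	#define PAGE_SHIFT	12
	#define SLUB_PAGE_SHIFT	(PAGE_SHIFT + 2)

	int main(void)
	{
		int regular = SLUB_PAGE_SHIFT;			/* regular kmalloc caches: 14 */
		int old_total = 2 * SLUB_PAGE_SHIFT - 6;	/* before: 22 slots total */
		int new_total = 2 * SLUB_PAGE_SHIFT;		/* after: 28 slots total */

		printf("regular caches:   %d\n", regular);
		printf("DMA slots before: %d\n", old_total - regular);	/* 8  */
		printf("DMA slots after:  %d\n", new_total - regular);	/* 14 */
		return 0;
	}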
index 26f0cb9cc5848d1dcae71761700c365e216f38a0..578f68f3c51f76ed3c2ff21b5ff2545d2ab64c0d 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2137,7 +2137,7 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n = s->node[node];
-               if (n && n != &s->local_node)
+               if (n)
                        kmem_cache_free(kmalloc_caches, n);
                s->node[node] = NULL;
        }
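With local_node gone from NUMA builds, every non-NULL entry in s->node[] was obtained from kmem_cache_alloc_node(), so the special-case test against &s->local_node can be dropped and any entry freed the same way. The loop as it reads after the change, reconstructed from the hunk (the function opening and the declaration of node lie above the hunk and are assumed):

	static void free_kmem_cache_nodes(struct kmem_cache *s)
	{
		int node;	/* assumed: declared outside the hunk */

		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n = s->node[node];
			if (n)
				kmem_cache_free(kmalloc_caches, n);
			s->node[node] = NULL;
		}
	}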
@@ -2146,33 +2146,22 @@ static void free_kmem_cache_nodes(struct kmem_cache *s)
 static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
 {
        int node;
-       int local_node;
-
-       if (slab_state >= UP && (s < kmalloc_caches ||
-                       s >= kmalloc_caches + KMALLOC_CACHES))
-               local_node = page_to_nid(virt_to_page(s));
-       else
-               local_node = 0;
 
        for_each_node_state(node, N_NORMAL_MEMORY) {
                struct kmem_cache_node *n;
 
-               if (local_node == node)
-                       n = &s->local_node;
-               else {
-                       if (slab_state == DOWN) {
-                               early_kmem_cache_node_alloc(gfpflags, node);
-                               continue;
-                       }
-                       n = kmem_cache_alloc_node(kmalloc_caches,
-                                                       gfpflags, node);
-
-                       if (!n) {
-                               free_kmem_cache_nodes(s);
-                               return 0;
-                       }
+               if (slab_state == DOWN) {
+                       early_kmem_cache_node_alloc(gfpflags, node);
+                       continue;
+               }
+               n = kmem_cache_alloc_node(kmalloc_caches,
+                                               gfpflags, node);
 
+               if (!n) {
+                       free_kmem_cache_nodes(s);
+                       return 0;
                }
+
                s->node[node] = n;
                init_kmem_cache_node(n, s);
        }
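For reference, the rewritten allocation loop in full, reconstructed from the hunk above; the flat structure replaces the old local_node/else nesting, and the trailing success return falls outside the hunk and is assumed:

	static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
	{
		int node;

		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n;

			/* Too early to allocate from kmalloc_caches itself:
			 * take the bootstrap path instead. */
			if (slab_state == DOWN) {
				early_kmem_cache_node_alloc(gfpflags, node);
				continue;
			}
			n = kmem_cache_alloc_node(kmalloc_caches,
							gfpflags, node);

			if (!n) {
				free_kmem_cache_nodes(s);
				return 0;
			}

			s->node[node] = n;
			init_kmem_cache_node(n, s);
		}
		return 1;	/* assumed: success return outside the hunk */
	}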