include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
diff --git a/arch/frv/mm/pgalloc.c b/arch/frv/mm/pgalloc.c
index 598a26ab8ad85af99981d8786d08f62362d82f52..c42c83d507bc42c2208b21ff978d92e84c42d161 100644
--- a/arch/frv/mm/pgalloc.c
+++ b/arch/frv/mm/pgalloc.c
  */
 
 #include <linux/sched.h>
-#include <linux/slab.h>
+#include <linux/gfp.h>
 #include <linux/mm.h>
 #include <linux/highmem.h>
+#include <linux/quicklist.h>
 #include <asm/pgalloc.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
-struct kmem_cache *pgd_cache;
 
 pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
 {
@@ -28,7 +28,7 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
        return pte;
 }
 
-struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
        struct page *page;
 
@@ -37,9 +37,11 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 #else
        page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
 #endif
-       if (page)
+       if (page) {
                clear_highpage(page);
-       flush_dcache_page(page);
+               pgtable_page_ctor(page);
+               flush_dcache_page(page);
+       }
        return page;
 }
 
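The hunk above changes two things: flush_dcache_page() is no longer called on a NULL page when the allocation fails, and every page that is handed out is first registered with pgtable_page_ctor(), which sets up the split page-table lock and NR_PAGETABLE accounting implied by the new pgtable_t return type. The matching teardown is not in this file; the snippet below is only a hedged sketch of the usual pairing (the real frv pte_free() lives in the arch's asm/pgalloc.h and may differ in detail):

    /*
     * Hedged sketch, not the verbatim frv code: the counterpart to
     * pte_alloc_one() must undo pgtable_page_ctor() before the page
     * goes back to the page allocator.
     */
    static inline void pte_free_sketch(struct mm_struct *mm, pgtable_t pte)
    {
            pgtable_page_dtor(pte);         /* drop split-ptlock / NR_PAGETABLE state */
            __free_page(pte);               /* release the page itself */
    }
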
@@ -100,7 +102,7 @@ static inline void pgd_list_del(pgd_t *pgd)
                set_page_private(next, (unsigned long) pprev);
 }
 
-void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_ctor(void *pgd)
 {
        unsigned long flags;
 
@@ -120,7 +122,7 @@ void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
 }
 
 /* never called when PTRS_PER_PMD > 1 */
-void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
+void pgd_dtor(void *pgd)
 {
        unsigned long flags; /* can be called from interrupt context */
 
@@ -133,25 +135,25 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
 {
        pgd_t *pgd;
 
-       pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
+       pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
        if (!pgd)
                return pgd;
 
        return pgd;
 }
 
-void pgd_free(pgd_t *pgd)
+void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
        /* in the non-PAE case, clear_page_tables() clears user pgd entries */
-       kmem_cache_free(pgd_cache, pgd);
+       quicklist_free(0, pgd_dtor, pgd);
 }
 
 void __init pgtable_cache_init(void)
 {
-       pgd_cache = kmem_cache_create("pgd",
-                                     PTRS_PER_PGD * sizeof(pgd_t),
-                                     PTRS_PER_PGD * sizeof(pgd_t),
-                                     SLAB_PANIC,
-                                     pgd_ctor,
-                                     pgd_dtor);
 }
+
+void check_pgt_cache(void)
+{
+       quicklist_trim(0, pgd_dtor, 25, 16);
+}
+
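With this change the pgd path runs on per-CPU quicklist 0 instead of a private kmem_cache: quicklist_alloc() invokes pgd_ctor() only when it has to pull a brand-new page from the page allocator, quicklist_free() normally just parks the page back on the list, and pgd_dtor() runs later, from quicklist_trim(), for each page that check_pgt_cache() actually returns to the system (trimming the list back toward 25 cached pages, at most 16 frees per call). A rough lifecycle sketch follows; the driver function is illustrative only and not something this patch adds:

    #include <linux/mm_types.h>     /* struct mm_struct */
    #include <asm/pgalloc.h>        /* pgd_alloc(), pgd_free() */
    #include <asm/pgtable.h>        /* check_pgt_cache() on most arches */

    /* Illustrative only: one pgd passing through the quicklist-backed hooks. */
    static void pgd_lifecycle_sketch(struct mm_struct *mm)
    {
            pgd_t *pgd;

            pgd = pgd_alloc(mm);    /* pops quicklist 0; pgd_ctor() only for a fresh page */
            if (!pgd)
                    return;

            /* ... the mm uses the pgd; lower-level page tables hang off it ... */

            pgd_free(mm, pgd);      /* parks the page on quicklist 0; no destructor here */
            check_pgt_cache();      /* periodic trim (e.g. from the idle loop on some
                                     * arches): pgd_dtor() runs on each page that is
                                     * really handed back to the page allocator */
    }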