x86: cpa: fix the self-test
author     Ingo Molnar <mingo@elte.hu>
           Wed, 30 Jan 2008 12:34:09 +0000 (13:34 +0100)
committer  Ingo Molnar <mingo@elte.hu>
           Wed, 30 Jan 2008 12:34:09 +0000 (13:34 +0100)
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/Kconfig.debug
arch/x86/mm/init_32.c
arch/x86/mm/init_64.c
arch/x86/mm/pageattr-test.c
arch/x86/mm/pageattr.c
include/asm-x86/pgtable.h

diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 2d0bd33b73aabe9f2b9f82476571f29b69337b77..2e1e3af28c3a2d5c455b048a330b720a74d1f8bf 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -40,7 +40,7 @@ comment "Page alloc debug is incompatible with Software Suspend on i386"
 
 config DEBUG_PAGEALLOC
        bool "Debug page memory allocations"
-       depends on DEBUG_KERNEL
+       depends on DEBUG_KERNEL && X86_32
        help
          Unmap pages from the kernel linear mapping after free_pages().
          This results in a large slowdown, but helps to find certain types
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 8d7f723cfc28181786f78c63c894198a6dcc0cb3..8ed5c189d7aab67df506d221dada91aad2d90997 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -781,8 +781,6 @@ void mark_rodata_ro(void)
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
 {
-       unsigned long addr;
-
 #ifdef CONFIG_DEBUG_PAGEALLOC
        /*
         * If debugging page accesses then do not free this memory but
@@ -793,6 +791,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
                begin, PAGE_ALIGN(end));
        set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
 #else
+       unsigned long addr;
+
        /*
         * We just marked the kernel text read only above, now that
         * we are going to free part of that, we need to make that
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index e0c1e98ad1bf4c80935b31010e18299199c5761a..8a7b725ce3c7d082d9433e466041f21c669cdf0a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -569,22 +569,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
                free_page(addr);
                totalram_pages++;
        }
-#ifdef CONFIG_DEBUG_RODATA
-       /*
-        * This will make the __init pages not present and
-        * not executable, so that any attempt to use a
-        * __init function from now on will fault immediately
-        * rather than supriously later when memory gets reused.
-        *
-        * We only do this for DEBUG_RODATA to not break up the
-        * 2Mb kernel mapping just for this debug feature.
-        */
-       if (begin >= __START_KERNEL_map) {
-               set_memory_rw(begin, (end - begin)/PAGE_SIZE);
-               set_memory_np(begin, (end - begin)/PAGE_SIZE);
-               set_memory_nx(begin, (end - begin)/PAGE_SIZE);
-       }
-#endif
 #endif
 }
 
diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
index 554820265b95d1a4334632d09a7aede7564393cc..06353d43f72e073cc01034502d426a37f90d015e 100644
--- a/arch/x86/mm/pageattr-test.c
+++ b/arch/x86/mm/pageattr-test.c
@@ -15,8 +15,7 @@
 #include <asm/kdebug.h>
 
 enum {
-       NTEST                   = 400,
-       LOWEST_LEVEL            = PG_LEVEL_4K,
+       NTEST                   = 4000,
 #ifdef CONFIG_X86_64
        LPS                     = (1 << PMD_SHIFT),
 #elif defined(CONFIG_X86_PAE)
@@ -59,10 +58,10 @@ static __init int print_split(struct split_state *s)
                        continue;
                }
 
-               if (level == 2 && sizeof(long) == 8) {
+               if (level == PG_LEVEL_1G && sizeof(long) == 8) {
                        s->gpg++;
                        i += GPS/PAGE_SIZE;
-               } else if (level != LOWEST_LEVEL) {
+               } else if (level == PG_LEVEL_2M) {
                        if (!(pte_val(*pte) & _PAGE_PSE)) {
                                printk(KERN_ERR
                                        "%lx level %d but not PSE %Lx\n",
@@ -162,7 +161,7 @@ static __init int exercise_pageattr(void)
                        continue;
                }
 
-               err = __change_page_attr_clear(addr[i], len[i],
+               err = change_page_attr_clear(addr[i], len[i],
                                               __pgprot(_PAGE_GLOBAL));
                if (err < 0) {
                        printk(KERN_ERR "CPA %d failed %d\n", i, err);
@@ -175,7 +174,7 @@ static __init int exercise_pageattr(void)
                                pte ? (u64)pte_val(*pte) : 0ULL);
                        failed++;
                }
-               if (level != LOWEST_LEVEL) {
+               if (level != PG_LEVEL_4K) {
                        printk(KERN_ERR "CPA %lx: unexpected level %d\n",
                                addr[i], level);
                        failed++;
@@ -183,7 +182,6 @@ static __init int exercise_pageattr(void)
 
        }
        vfree(bm);
-       cpa_flush_all();
 
        failed += print_split(&sb);
 
@@ -197,7 +195,7 @@ static __init int exercise_pageattr(void)
                        failed++;
                        continue;
                }
-               err = __change_page_attr_set(addr[i], len[i],
+               err = change_page_attr_set(addr[i], len[i],
                                             __pgprot(_PAGE_GLOBAL));
                if (err < 0) {
                        printk(KERN_ERR "CPA reverting failed: %d\n", err);
@@ -211,7 +209,6 @@ static __init int exercise_pageattr(void)
                }
 
        }
-       cpa_flush_all();
 
        failed += print_split(&sc);
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 97ec9e7d29d9ca2b93890163667c443108b9852a..532a40bc0e7e8aca565b64af6044f1efa5cec449 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -197,10 +197,11 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        unsigned long addr;
        pte_t *pbase, *tmp;
        struct page *base;
-       int i, level;
+       unsigned int i, level;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
-       gfp_flags = GFP_ATOMIC;
+       gfp_flags = __GFP_HIGH | __GFP_NOFAIL | __GFP_NOWARN;
+       gfp_flags = GFP_ATOMIC | __GFP_NOWARN;
 #endif
        base = alloc_pages(gfp_flags, 0);
        if (!base)
@@ -224,6 +225,7 @@ static int split_large_page(pte_t *kpte, unsigned long address)
        paravirt_alloc_pt(&init_mm, page_to_pfn(base));
 #endif
 
+       pgprot_val(ref_prot) &= ~_PAGE_NX;
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE)
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT, ref_prot));
 
@@ -248,7 +250,8 @@ out_unlock:
 }
 
 static int
-__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot)
+__change_page_attr(unsigned long address, unsigned long pfn,
+                  pgprot_t mask_set, pgprot_t mask_clr)
 {
        struct page *kpte_page;
        int level, err = 0;
@@ -267,15 +270,20 @@ repeat:
        BUG_ON(PageLRU(kpte_page));
        BUG_ON(PageCompound(kpte_page));
 
-       prot = static_protections(prot, address);
-
        if (level == PG_LEVEL_4K) {
-               WARN_ON_ONCE(pgprot_val(prot) & _PAGE_PSE);
-               set_pte_atomic(kpte, pfn_pte(pfn, canon_pgprot(prot)));
-       } else {
-               /* Clear the PSE bit for the 4k level pages ! */
-               pgprot_val(prot) = pgprot_val(prot) & ~_PAGE_PSE;
+               pgprot_t new_prot = pte_pgprot(*kpte);
+               pte_t new_pte, old_pte = *kpte;
+
+               pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
+               pgprot_val(new_prot) |= pgprot_val(mask_set);
+
+               new_prot = static_protections(new_prot, address);
+
+               new_pte = pfn_pte(pfn, canon_pgprot(new_prot));
+               BUG_ON(pte_pfn(new_pte) != pte_pfn(old_pte));
 
+               set_pte_atomic(kpte, new_pte);
+       } else {
                err = split_large_page(kpte, address);
                if (!err)
                        goto repeat;
@@ -297,22 +305,26 @@ repeat:
  * Modules and drivers should use the set_memory_* APIs instead.
  */
 
-static int change_page_attr_addr(unsigned long address, pgprot_t prot)
+static int
+change_page_attr_addr(unsigned long address, pgprot_t mask_set,
+                                                       pgprot_t mask_clr)
 {
        int err = 0, kernel_map = 0;
-       unsigned long pfn = __pa(address) >> PAGE_SHIFT;
+       unsigned long pfn;
 
 #ifdef CONFIG_X86_64
        if (address >= __START_KERNEL_map &&
                        address < __START_KERNEL_map + KERNEL_TEXT_SIZE) {
 
-               address = (unsigned long)__va(__pa(address));
+               address = (unsigned long)__va(__pa((void *)address));
                kernel_map = 1;
        }
 #endif
 
-       if (!kernel_map || pte_present(pfn_pte(0, prot))) {
-               err = __change_page_attr(address, pfn, prot);
+       pfn = __pa(address) >> PAGE_SHIFT;
+
+       if (!kernel_map || 1) {
+               err = __change_page_attr(address, pfn, mask_set, mask_clr);
                if (err)
                        return err;
        }
@@ -324,12 +336,15 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
         */
        if (__pa(address) < KERNEL_TEXT_SIZE) {
                unsigned long addr2;
-               pgprot_t prot2;
 
-               addr2 = __START_KERNEL_map + __pa(address);
+               addr2 = __pa(address) + __START_KERNEL_map - phys_base;
                /* Make sure the kernel mappings stay executable */
-               prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
-               err = __change_page_attr(addr2, pfn, prot2);
+               pgprot_val(mask_clr) |= _PAGE_NX;
+               /*
+                * Our high aliases are imprecise, so do not propagate
+                * failures back to users:
+                */
+               __change_page_attr(addr2, pfn, mask_set, mask_clr);
        }
 #endif
 
@@ -339,26 +354,13 @@ static int change_page_attr_addr(unsigned long address, pgprot_t prot)
 static int __change_page_attr_set_clr(unsigned long addr, int numpages,
                                      pgprot_t mask_set, pgprot_t mask_clr)
 {
-       pgprot_t new_prot;
-       int level;
-       pte_t *pte;
-       int i, ret;
-
-       for (i = 0; i < numpages ; i++) {
-
-               pte = lookup_address(addr, &level);
-               if (!pte)
-                       return -EINVAL;
-
-               new_prot = pte_pgprot(*pte);
-
-               pgprot_val(new_prot) &= ~pgprot_val(mask_clr);
-               pgprot_val(new_prot) |= pgprot_val(mask_set);
+       unsigned int i;
+       int ret;
 
-               ret = change_page_attr_addr(addr, new_prot);
+       for (i = 0; i < numpages ; i++, addr += PAGE_SIZE) {
+               ret = change_page_attr_addr(addr, mask_set, mask_clr);
                if (ret)
                        return ret;
-               addr += PAGE_SIZE;
        }
 
        return 0;
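
The converted self-test above calls change_page_attr_set()/change_page_attr_clear() instead of the __-prefixed variants. Those wrappers are not part of these hunks; a minimal sketch of what they amount to, assuming they simply pass an empty mask for the unused direction of the new __change_page_attr_set_clr() interface:

	static int change_page_attr_set(unsigned long addr, int numpages,
					pgprot_t mask)
	{
		/* Set the bits in 'mask', clear nothing. */
		return __change_page_attr_set_clr(addr, numpages, mask,
						  __pgprot(0));
	}

	static int change_page_attr_clear(unsigned long addr, int numpages,
					  pgprot_t mask)
	{
		/* Clear the bits in 'mask', set nothing. */
		return __change_page_attr_set_clr(addr, numpages, __pgprot(0),
						  mask);
	}
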
diff --git a/include/asm-x86/pgtable.h b/include/asm-x86/pgtable.h
index ee40a88882f60f824c68fb35f8b32d8ca782a415..269e7e29ea8edaf1cbe936379b6ca55ea32fb300 100644
--- a/include/asm-x86/pgtable.h
+++ b/include/asm-x86/pgtable.h
@@ -240,6 +240,7 @@ enum {
        PG_LEVEL_NONE,
        PG_LEVEL_4K,
        PG_LEVEL_2M,
+       PG_LEVEL_1G,
 };
 
 /*
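
The new PG_LEVEL_1G entry lets lookup_address() callers tell gigabyte mappings apart from 2M/4M and 4k ones, which is what the reworked print_split() in the self-test relies on. For illustration only (report_mapping_level() is a hypothetical helper, not part of the patch), the pattern looks roughly like this:

	static void report_mapping_level(unsigned long addr)
	{
		int level;
		pte_t *pte = lookup_address(addr, &level);

		if (!pte || !pte_present(*pte)) {
			printk(KERN_INFO "%lx: not mapped\n", addr);
			return;
		}

		switch (level) {
		case PG_LEVEL_4K:
			printk(KERN_INFO "%lx: 4k page\n", addr);
			break;
		case PG_LEVEL_2M:
			printk(KERN_INFO "%lx: 2M/4M large page\n", addr);
			break;
		case PG_LEVEL_1G:
			printk(KERN_INFO "%lx: 1G page\n", addr);
			break;
		default:
			printk(KERN_INFO "%lx: unexpected level %d\n",
				addr, level);
		}
	}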