diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8c163f64cf1076ce6b3fed95ad5ee0e30990eada..cc5be788a39fe132c72cbc1d2fb1c03f71708575 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -19,6 +19,8 @@
 #include <linux/sysfs.h>
 #include <linux/slab.h>
 #include <linux/rmap.h>
+#include <linux/swap.h>
+#include <linux/swapops.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -613,6 +615,8 @@ int PageHuge(struct page *page)
        return dtor == free_huge_page;
 }
 
+EXPORT_SYMBOL_GPL(PageHuge);
+
 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
 {
        struct page *page;
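Exporting PageHuge() lets GPL modules (for instance hwpoison test/injection code) tell hugetlb pages apart from other compound pages. A minimal sketch of such a use follows; the helper name and its caller are purely hypothetical, not anything this patch adds:

	#include <linux/module.h>
	#include <linux/mm.h>
	#include <linux/hugetlb.h>

	MODULE_LICENSE("GPL");	/* required to link against a _GPL export */

	/* Hypothetical helper: does this pfn belong to a hugetlb page? */
	static bool demo_pfn_is_hugetlb(unsigned long pfn)
	{
		if (!pfn_valid(pfn))
			return false;
		/* PageHuge() inspects the compound page destructor set by hugetlb */
		return PageHuge(pfn_to_page(pfn));
	}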
@@ -2149,6 +2153,19 @@ nomem:
        return -ENOMEM;
 }
 
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
 void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end, struct page *ref_page)
 {
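is_hugetlb_entry_hwpoisoned() leans on the swap-entry machinery pulled in by the added <linux/swap.h> and <linux/swapops.h> includes. For reference only (paraphrased, not part of the patch), the relevant helpers look roughly like this in kernels of this generation, under CONFIG_MEMORY_FAILURE:

	/* A hwpoison entry is a "non-swap" swap entry: its type lies outside the
	 * range of real swap devices and its offset records the poisoned pfn. */
	static inline int non_swap_entry(swp_entry_t entry)
	{
		return swp_type(entry) >= MAX_SWAPFILES;
	}

	static inline swp_entry_t make_hwpoison_entry(struct page *page)
	{
		BUG_ON(!PageLocked(page));
		return swp_entry(SWP_HWPOISON, page_to_pfn(page));
	}

	static inline int is_hwpoison_entry(swp_entry_t entry)
	{
		return swp_type(entry) == SWP_HWPOISON;
	}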
@@ -2207,6 +2224,12 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                if (huge_pte_none(pte))
                        continue;
 
+               /*
+                * A HWPoisoned hugepage was already unmapped and its reference dropped.
+                */
+               if (unlikely(is_hugetlb_entry_hwpoisoned(pte)))
+                       continue;
+
                page = pte_page(pte);
                if (pte_dirty(pte))
                        set_page_dirty(page);
@@ -2301,9 +2324,11 @@ retry_avoidcopy:
         * and just make the page writable */
        avoidcopy = (page_mapcount(old_page) == 1);
        if (avoidcopy) {
-               if (!trylock_page(old_page))
+               if (!trylock_page(old_page)) {
                        if (PageAnon(old_page))
                                page_move_anon_rmap(old_page, vma, address);
+               } else
+                       unlock_page(old_page);
                set_huge_ptep_writable(vma, address, ptep);
                return 0;
        }
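The added braces and unlock_page() close a page-lock leak in the avoidcopy path: when trylock_page() succeeds, the lock is now released instead of being held forever. As a generic illustration of the balance required (a demo function, not part of the patch):

	/* Illustration only: a successful trylock_page() must be paired with
	 * unlock_page(), otherwise the page lock is leaked. */
	static void demo_trylock_balance(struct page *page)
	{
		if (!trylock_page(page)) {
			/* someone else holds the lock; fall back without it */
			return;
		}
		/* ... work that needs the page lock ... */
		unlock_page(page);
	}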
@@ -2372,6 +2397,9 @@ retry_avoidcopy:
        ptep = huge_pte_offset(mm, address & huge_page_mask(h));
        if (likely(pte_same(huge_ptep_get(ptep), pte))) {
                /* Break COW */
+               mmu_notifier_invalidate_range_start(mm,
+                       address & huge_page_mask(h),
+                       (address & huge_page_mask(h)) + huge_page_size(h));
                huge_ptep_clear_flush(vma, address, ptep);
                set_huge_pte_at(mm, address, ptep,
                                make_huge_pte(vma, new_page, 1));
@@ -2379,6 +2407,9 @@ retry_avoidcopy:
                hugepage_add_anon_rmap(new_page, vma, address);
                /* Make the old page be freed below */
                new_page = old_page;
+               mmu_notifier_invalidate_range_end(mm,
+                       address & huge_page_mask(h),
+                       (address & huge_page_mask(h)) + huge_page_size(h));
        }
        page_cache_release(new_page);
        page_cache_release(old_page);
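The COW break now brackets the pte replacement with mmu notifier calls so that secondary MMUs (KVM and the like) drop their stale translations of the hugepage range. A hedged sketch of that pattern against the three-argument mmu_notifier API this tree uses; the demo function itself is hypothetical:

	#include <linux/hugetlb.h>
	#include <linux/mmu_notifier.h>

	/* Hypothetical demo of the invalidate-range bracketing used above. */
	static void demo_replace_huge_pte(struct mm_struct *mm,
					  struct vm_area_struct *vma,
					  struct hstate *h, unsigned long address,
					  pte_t *ptep, pte_t newpte)
	{
		unsigned long start = address & huge_page_mask(h);
		unsigned long end = start + huge_page_size(h);

		/* secondary MMUs must invalidate their mappings of [start, end) */
		mmu_notifier_invalidate_range_start(mm, start, end);
		huge_ptep_clear_flush(vma, address, ptep);
		set_huge_pte_at(mm, address, ptep, newpte);
		mmu_notifier_invalidate_range_end(mm, start, end);
	}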
@@ -2490,6 +2521,18 @@ retry:
                page_dup_rmap(page);
        }
 
+       /*
+        * The memory error handler replaces the pte with a hwpoison swap
+        * entry at error-handling time, so a process which has reserved the
+        * error hugepage but has not yet mapped it carries no hwpoison swap
+        * entry.  Block accesses from such a process by checking the
+        * PG_hwpoison bit here.
+        */
+       if (unlikely(PageHWPoison(page))) {
+               ret = VM_FAULT_HWPOISON;
+               goto backout_unlocked;
+       }
+
        /*
         * If we are going to COW a private mapping later, we examine the
         * pending reservations for this page now. This will ensure that
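When hugetlb_no_page() (or hugetlb_fault() below) returns VM_FAULT_HWPOISON, the architecture fault handler turns it into a SIGBUS for the faulting task. A hedged userspace sketch of how that surfaces, assuming MAP_HUGETLB and MADV_HWPOISON are available, the caller has CAP_SYS_ADMIN, and the default hugepage size is 2MB:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/mman.h>

	#ifndef MADV_HWPOISON
	#define MADV_HWPOISON	100	/* poison a page, for testing */
	#endif
	#ifndef MAP_HUGETLB
	#define MAP_HUGETLB	0x40000	/* x86 value; arch-specific */
	#endif

	int main(void)
	{
		size_t len = 2UL << 20;	/* assumes 2MB default hugepage size */
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);

		if (p == MAP_FAILED)
			return 1;
		p[0] = 1;				/* fault the hugepage in */
		if (madvise(p, getpagesize(), MADV_HWPOISON))
			return 1;			/* injection unavailable */
		p[0] = 2;				/* expected to die with SIGBUS */
		puts("unexpectedly survived the access to a poisoned hugepage");
		return 0;
	}

On a hwpoison-aware kernel the second write is expected to be killed with SIGBUS (typically reported with si_code BUS_MCEERR_AR) rather than silently consuming corrupted data.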
@@ -2544,6 +2587,13 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        static DEFINE_MUTEX(hugetlb_instantiation_mutex);
        struct hstate *h = hstate_vma(vma);
 
+       ptep = huge_pte_offset(mm, address);
+       if (ptep) {
+               entry = huge_ptep_get(ptep);
+               if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
+                       return VM_FAULT_HWPOISON;
+       }
+
        ptep = huge_pte_alloc(mm, address, huge_page_size(h));
        if (!ptep)
                return VM_FAULT_OOM;