mm: wrap get_locked_pte() using __cond_lock()
diff --git a/mm/memory.c b/mm/memory.c
index 98b58fecedeffc236a9c7285689fe4720409bd30..4ce24a4d5d481abce591576b218b901dde134c83 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -736,7 +736,7 @@ again:
        dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);
        if (!dst_pte)
                return -ENOMEM;
-       src_pte = pte_offset_map_nested(src_pmd, addr);
+       src_pte = pte_offset_map(src_pmd, addr);
        src_ptl = pte_lockptr(src_mm, src_pmd);
        spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
        orig_src_pte = src_pte;
@@ -767,7 +767,7 @@ again:
 
        arch_leave_lazy_mmu_mode();
        spin_unlock(src_ptl);
-       pte_unmap_nested(orig_src_pte);
+       pte_unmap(orig_src_pte);
        add_mm_rss_vec(dst_mm, rss);
        pte_unmap_unlock(orig_dst_pte, dst_ptl);
        cond_resched();
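
Both hunks above are in copy_pte_range(). Once kmap_atomic() slots became
stack-based (a separate series, not shown in this diff), a second concurrent
mapping no longer needs its own KM_PTE1 slot, so the _nested map/unmap
variants can go; the only constraint left is that mappings are torn down in
LIFO order, which the function already obeys. A minimal sketch of the
resulting pattern, with the copy loop and error handling elided:

	dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl);	/* dst mapped first */
	src_pte = pte_offset_map(src_pmd, addr);			/* src second, plain map */
	/* ... copy loop advances src_pte/dst_pte ... */
	pte_unmap(orig_src_pte);			/* src unmapped first: LIFO */
	pte_unmap_unlock(orig_dst_pte, dst_ptl);	/* dst unmapped and unlocked last */
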
@@ -1450,7 +1450,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                        if (ret & VM_FAULT_OOM)
                                                return i ? i : -ENOMEM;
                                        if (ret &
-                                           (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
+                                           (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE|
+                                            VM_FAULT_SIGBUS))
                                                return i ? i : -EFAULT;
                                        BUG();
                                }
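
The widened mask makes __get_user_pages() fail with -EFAULT when the fault
hit a poisoned huge page, exactly as it already did for a poisoned base page.
For context, the relevant VM_FAULT_* bits are assumed to be defined in
include/linux/mm.h roughly as follows (values as of the 2.6.37 era; treat
them as illustrative):

	#define VM_FAULT_OOM		0x0001
	#define VM_FAULT_SIGBUS		0x0002
	#define VM_FAULT_HWPOISON	0x0010	/* hit poisoned small page */
	#define VM_FAULT_HWPOISON_LARGE	0x0020	/* hit poisoned large page; huge
						   page size index encoded in the
						   upper bits of the return value */
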
@@ -1590,7 +1591,7 @@ struct page *get_dump_page(unsigned long addr)
 }
 #endif /* CONFIG_ELF_CORE */
 
-pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
                        spinlock_t **ptl)
 {
        pgd_t * pgd = pgd_offset(mm, addr);
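
This rename is the commit named at the top: sparse cannot see that the
function returns with *ptl held only when it succeeds, so the out-of-line
symbol gains double underscores and a header wrapper carries the annotation.
The companion change in include/linux/mm.h (not part of this file's diff)
presumably looks like:

	pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
				spinlock_t **ptl);
	static inline pte_t *get_locked_pte(struct mm_struct *mm,
					    unsigned long addr, spinlock_t **ptl)
	{
		pte_t *ptep;
		/* tell sparse: *ptl is acquired iff the call returns non-NULL */
		__cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
		return ptep;
	}

Under a sparse build (__CHECKER__), __cond_lock(x, c) expands to
((c) ? ({ __acquire(x); 1; }) : 0); in a normal build it is just (c), so the
generated code is unchanged.
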
@@ -2626,6 +2627,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
        struct page *page, *swapcache = NULL;
        swp_entry_t entry;
        pte_t pte;
+       int locked;
        struct mem_cgroup *ptr = NULL;
        int exclusive = 0;
        int ret = 0;
@@ -2676,8 +2678,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_release;
        }
 
-       lock_page(page);
+       locked = lock_page_or_retry(page, mm, flags);
        delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
+       if (!locked) {
+               ret |= VM_FAULT_RETRY;
+               goto out_release;
+       }
 
        /*
         * Make sure try_to_free_swap or reuse_swap_page or swapoff did not
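
lock_page_or_retry() replaces the unconditional lock_page(): it trylocks
first, and only when the caller allowed retries (FAULT_FLAG_ALLOW_RETRY) does
it drop mm->mmap_sem, sleep waiting for the page lock, and return 0, letting
do_swap_page() give up with VM_FAULT_RETRY instead of holding mmap_sem across
the wait. Its include/linux/pagemap.h definition is assumed to be along these
lines:

	static inline int lock_page_or_retry(struct page *page,
					     struct mm_struct *mm,
					     unsigned int flags)
	{
		might_sleep();
		/* 1: page locked, mmap_sem still held;
		 * 0: mmap_sem dropped, caller must return VM_FAULT_RETRY */
		return trylock_page(page) ||
		       __lock_page_or_retry(page, mm, flags);
	}
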
@@ -2926,7 +2932,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
        vmf.page = NULL;
 
        ret = vma->vm_ops->fault(vma, &vmf);
-       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE)))
+       if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE |
+                           VM_FAULT_RETRY)))
                return ret;
 
        if (unlikely(PageHWPoison(vmf.page))) {
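
Propagating VM_FAULT_RETRY straight out of __do_fault() means a ->fault
handler (filemap_fault() being the first user) may have dropped mmap_sem and
asked for the fault to be restarted. A hedged sketch of the expected caller
pattern, modeled on the x86 do_page_fault() of that era:

	retry:
		down_read(&mm->mmap_sem);
		/* ... look up the vma and call into the fault path ... */
		fault = handle_mm_fault(mm, vma, address, flags);
		if (fault & VM_FAULT_RETRY) {
			/* mmap_sem was already released in __lock_page_or_retry() */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;	/* retry at most once */
			goto retry;
		}
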