rmap: wrap page_check_address() using __cond_lock()
author     Namhyung Kim <namhyung@gmail.com>
           Tue, 26 Oct 2010 21:22:01 +0000 (14:22 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 26 Oct 2010 23:52:09 +0000 (16:52 -0700)
page_check_address() conditionally grabs *@ptlp when it returns non-NULL.
Renaming it to __page_check_address() and wrapping it with __cond_lock()
removes the following sparse warnings:

 mm/rmap.c:472:9: warning: context imbalance in 'page_mapped_in_vma' - unexpected unlock
 mm/rmap.c:524:9: warning: context imbalance in 'page_referenced_one' - unexpected unlock
 mm/rmap.c:706:9: warning: context imbalance in 'page_mkclean_one' - unexpected unlock
 mm/rmap.c:1066:9: warning: context imbalance in 'try_to_unmap_one' - unexpected unlock
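
For reference, __cond_lock() is the sparse helper from include/linux/compiler.h
that tells the checker a lock is acquired only when the wrapped expression is
non-zero.  Roughly (a sketch; the exact definition may vary between kernel
versions):

	#ifdef __CHECKER__
	/* under sparse: mark *ptlp as acquired when the call returns non-NULL */
	# define __cond_lock(x, c)	((c) ? ({ __acquire(x); 1; }) : 0)
	#else
	/* normal build: the annotation compiles away to the bare expression */
	# define __cond_lock(x, c)	(c)
	#endif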

Signed-off-by: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/rmap.h
mm/rmap.c

index 07ea89c167618d5ad2c71311ea983a2e9b7fcab5..bb83c0da207197122c9ef3988cf0b3d843404cd3 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -205,9 +205,20 @@ int try_to_unmap_one(struct page *, struct vm_area_struct *,
 /*
  * Called from mm/filemap_xip.c to unmap empty zero page
  */
-pte_t *page_check_address(struct page *, struct mm_struct *,
+pte_t *__page_check_address(struct page *, struct mm_struct *,
                                unsigned long, spinlock_t **, int);
 
+static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+                                       unsigned long address,
+                                       spinlock_t **ptlp, int sync)
+{
+       pte_t *ptep;
+
+       __cond_lock(*ptlp, ptep = __page_check_address(page, mm, address,
+                                                      ptlp, sync));
+       return ptep;
+}
+
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
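
The wrapper keeps the calling convention unchanged; a caller still looks
roughly like this (illustrative sketch of the existing usage pattern, not code
from this patch):

	spinlock_t *ptl;
	pte_t *pte;

	/* On success, returns with the pte mapped and *ptl held. */
	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		return 0;

	/* ... examine or modify *pte while holding the pte lock ... */

	pte_unmap_unlock(pte, ptl);	/* release the lock taken above */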
index 0995a8f68866a5eb3e6d6404b69010b9a118356c..bfeffbddb71207616a7e4547d5a4dba77bb03d31 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -409,7 +409,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
  *
  * On success returns with pte mapped and locked.
  */
-pte_t *page_check_address(struct page *page, struct mm_struct *mm,
+pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
                          unsigned long address, spinlock_t **ptlp, int sync)
 {
        pgd_t *pgd;