diff --git a/mm/mmap.c b/mm/mmap.c
index e38e910cb75673c528116e9e0a114fb0c8f3b8cb..31003338b978b78ef66d4579033cdd1d3ebd1a16 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -452,12 +452,10 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                spin_lock(&mapping->i_mmap_lock);
                vma->vm_truncate_count = mapping->truncate_count;
        }
-       anon_vma_lock(vma);
 
        __vma_link(mm, vma, prev, rb_link, rb_parent);
        __vma_link_file(vma);
 
-       anon_vma_unlock(vma);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
 
@@ -506,6 +504,7 @@ int vma_adjust(struct vm_area_struct *vma, unsigned long start,
        struct vm_area_struct *importer = NULL;
        struct address_space *mapping = NULL;
        struct prio_tree_root *root = NULL;
+       struct anon_vma *anon_vma = NULL;
        struct file *file = vma->vm_file;
        long adjust_next = 0;
        int remove_next = 0;
@@ -578,6 +577,17 @@ again:                     remove_next = 1 + (end > next->vm_end);
                }
        }
 
+       /*
+        * When changing only vma->vm_end, we don't really need anon_vma
+        * lock. This is a fairly rare case by itself, but the anon_vma
+        * lock may be shared between many sibling processes.  Skipping
+        * the lock for brk adjustments makes a difference sometimes.
+        */
+       if (vma->anon_vma && (insert || importer || start != vma->vm_start)) {
+               anon_vma = vma->anon_vma;
+               anon_vma_lock(anon_vma);
+       }
+
        if (root) {
                flush_dcache_mmap_lock(mapping);
                vma_prio_tree_remove(vma, root);
@@ -617,6 +627,8 @@ again:                      remove_next = 1 + (end > next->vm_end);
                __insert_vm_struct(mm, insert);
        }
 
+       if (anon_vma)
+               anon_vma_unlock(anon_vma);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
 
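Note: the anon_vma_lock()/anon_vma_unlock() calls added to vma_adjust() above take a struct anon_vma pointer and serialize on the root anon_vma's spinlock. A minimal sketch of those helpers, assuming they are defined in include/linux/rmap.h as in mainline of this era (this blobdiff does not show that file):

static inline void anon_vma_lock(struct anon_vma *anon_vma)
{
	/* every anon_vma in a tree shares the root anon_vma's spinlock */
	spin_lock(&anon_vma->root->lock);
}

static inline void anon_vma_unlock(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->root->lock);
}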
@@ -1710,7 +1722,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
         */
        if (unlikely(anon_vma_prepare(vma)))
                return -ENOMEM;
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
@@ -1721,7 +1733,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
        if (address < PAGE_ALIGN(address+4))
                address = PAGE_ALIGN(address+4);
        else {
-               anon_vma_unlock(vma);
+               vma_unlock_anon_vma(vma);
                return -ENOMEM;
        }
        error = 0;
@@ -1739,7 +1751,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                        perf_event_mmap(vma);
                }
        }
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
        return error;
 }
 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
@@ -1764,7 +1776,7 @@ static int expand_downwards(struct vm_area_struct *vma,
        if (error)
                return error;
 
-       anon_vma_lock(vma);
+       vma_lock_anon_vma(vma);
 
        /*
         * vma->vm_start/vm_end cannot change under us because the caller
@@ -1786,7 +1798,7 @@ static int expand_downwards(struct vm_area_struct *vma,
                        perf_event_mmap(vma);
                }
        }
-       anon_vma_unlock(vma);
+       vma_unlock_anon_vma(vma);
        return error;
 }
 
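Note: expand_upwards()/expand_downwards() switch from anon_vma_lock(vma) to vma_lock_anon_vma(vma). A rough sketch of these wrappers, assuming the include/linux/rmap.h side of this series defines them as thin helpers that tolerate a vma with no anon_vma (again, not shown in this blobdiff):

static inline void vma_lock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	/* lock the root anon_vma shared by the whole tree, if any */
	if (anon_vma)
		spin_lock(&anon_vma->root->lock);
}

static inline void vma_unlock_anon_vma(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma)
		spin_unlock(&anon_vma->root->lock);
}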
@@ -2470,23 +2482,23 @@ static DEFINE_MUTEX(mm_all_locks_mutex);
 
 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
 {
-       if (!test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+       if (!test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
                /*
                 * The LSB of head.next can't change from under us
                 * because we hold the mm_all_locks_mutex.
                 */
-               spin_lock_nest_lock(&anon_vma->lock, &mm->mmap_sem);
+               spin_lock_nest_lock(&anon_vma->root->lock, &mm->mmap_sem);
                /*
                 * We can safely modify head.next after taking the
-                * anon_vma->lock. If some other vma in this mm shares
+                * anon_vma->root->lock. If some other vma in this mm shares
                 * the same anon_vma we won't take it again.
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us thanks to the
-                * anon_vma->lock.
+                * anon_vma->root->lock.
                 */
                if (__test_and_set_bit(0, (unsigned long *)
-                                      &anon_vma->head.next))
+                                      &anon_vma->root->head.next))
                        BUG();
        }
 }
@@ -2577,7 +2589,7 @@ out_unlock:
 
 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
 {
-       if (test_bit(0, (unsigned long *) &anon_vma->head.next)) {
+       if (test_bit(0, (unsigned long *) &anon_vma->root->head.next)) {
                /*
                 * The LSB of head.next can't change to 0 from under
                 * us because we hold the mm_all_locks_mutex.
@@ -2588,12 +2600,12 @@ static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
                 *
                 * No need of atomic instructions here, head.next
                 * can't change from under us until we release the
-                * anon_vma->lock.
+                * anon_vma->root->lock.
                 */
                if (!__test_and_clear_bit(0, (unsigned long *)
-                                         &anon_vma->head.next))
+                                         &anon_vma->root->head.next))
                        BUG();
-               spin_unlock(&anon_vma->lock);
+               anon_vma_unlock(anon_vma);
        }
 }
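Note: the mm_take_all_locks()/mm_drop_all_locks() hunks above rely on the root pointer this series adds to struct anon_vma. A hedged sketch of the relevant fields (the authoritative layout is in include/linux/rmap.h and may carry extra config-dependent members):

struct anon_vma {
	struct anon_vma *root;		/* root of this anon_vma tree */
	spinlock_t lock;		/* serializes access to the vma list */
	struct list_head head;		/* chain of private "related" vmas */
};

Because head.next is always pointer-aligned, its low bit is free to act as the "already locked by mm_take_all_locks" marker that the __test_and_set_bit()/__test_and_clear_bit() calls above toggle on anon_vma->root->head.next.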