Btrfs: optimize set extent bit
author     Chris Mason <chris.mason@oracle.com>
           Wed, 5 Aug 2009 16:57:59 +0000 (12:57 -0400)
committer  Chris Mason <chris.mason@oracle.com>
           Fri, 11 Sep 2009 17:31:03 +0000 (13:31 -0400)
The Btrfs set_extent_bit call currently searches the rbtree
every time it needs another extent_state object to cover
the requested range.

This adds a simple test with rb_next to see whether the next object
in the tree is adjacent to the one we just found.  If so,
we skip the search and just use the next object.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
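
The pattern the patch introduces can be sketched as follows.  This is a
condensed illustration, not the kernel code itself: struct range_state and
fill_range() are hypothetical stand-ins for Btrfs' extent_state and
set_extent_bit(), while rb_next() and rb_entry() are the real
<linux/rbtree.h> helpers the patch uses.  The initial rbtree lookup and
the state-splitting/insertion paths are elided.

	struct range_state {
		u64 start;
		u64 end;
		struct rb_node rb_node;
	};

	/* assumes 'node' already covers 'start'; apply bits up to 'end' */
	static void fill_range(struct rb_node *node, u64 start, u64 end)
	{
		struct range_state *state = rb_entry(node, struct range_state,
						     rb_node);

	hit_next:
		/* ... set the requested bits on [state->start, state->end] ... */

		start = state->end + 1;
		if (start < end) {
			/* peek at the in-order successor before re-searching */
			struct rb_node *next_node = rb_next(node);

			if (next_node) {
				state = rb_entry(next_node, struct range_state,
						 rb_node);
				if (state->start == start) {
					node = next_node;
					goto hit_next;	/* adjacent: skip the search */
				}
			}
		}
		/* gap or end of range: the real code falls back to search_again */
	}

In the patch itself the shortcut is additionally gated on a preallocated
extent_state still being available and on need_resched() being false, so
taking it never delays a pending reschedule.
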
fs/btrfs/extent_io.c
fs/btrfs/file.c

index 68260180f5871975b8f673df1234bf4091d91bb4..7e5c5a0749e23b5d3231bfc8714cff1c7b41c34b 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -694,8 +694,8 @@ again:
                BUG_ON(err == -EEXIST);
                goto out;
        }
-
        state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
        last_start = state->start;
        last_end = state->end;
 
@@ -706,6 +706,7 @@ again:
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
+               struct rb_node *next_node;
                set = state->state & bits;
                if (set && exclusive) {
                        *failed_start = state->start;
@@ -716,7 +717,17 @@ again:
                merge_state(tree, state);
                if (last_end == (u64)-1)
                        goto out;
+
                start = last_end + 1;
+               if (start < end && prealloc && !need_resched()) {
+                       next_node = rb_next(node);
+                       if (next_node) {
+                               state = rb_entry(next_node, struct extent_state,
+                                                rb_node);
+                               if (state->start == start)
+                                       goto hit_next;
+                       }
+               }
                goto search_again;
        }
 
@@ -852,7 +863,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
 {
        return set_extent_bit(tree, start, end,
-                             EXTENT_DELALLOC | EXTENT_DIRTY,
+                             EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
                              0, NULL, mask);
 }
 
index 7c3cd248d8d6151ce5a89d876c2541b2abf73179..a760d97279ace0d24ef752f6591afdd602ea6d48 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -136,8 +136,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
        btrfs_set_trans_block_group(trans, inode);
        hint_byte = 0;
 
-       set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-
        /* check for reserved extents on each page, we don't want
         * to reset the delalloc bit on things that already have
         * extents reserved.
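
The set_extent_delalloc() hunk and the fs/btrfs/file.c hunk work together:
with EXTENT_UPTODATE folded into set_extent_delalloc(), the caller no
longer needs a separate set_extent_uptodate() pass over the same range.
Roughly, the caller-side effect looks like this (a hypothetical call site,
not the literal dirty_and_release_pages() code):

	/* before: two walks of the io_tree for the same byte range */
	set_extent_delalloc(io_tree, start_pos, end_of_last_block, GFP_NOFS);
	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);

	/* after: one walk sets DELALLOC, DIRTY and UPTODATE together */
	set_extent_delalloc(io_tree, start_pos, end_of_last_block, GFP_NOFS);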