From: Chris Mason
Date: Wed, 5 Aug 2009 16:57:59 +0000 (-0400)
Subject: Btrfs: optimize set extent bit
X-Git-Tag: v2.6.32-rc1~57^2^2~26^2~14
X-Git-Url: http://bbs.cooldavid.org/git/?a=commitdiff_plain;h=40431d6c1288793a682fc6f5e5b5c9d5cac34608;p=net-next-2.6.git

Btrfs: optimize set extent bit

The Btrfs set_extent_bit call currently searches the rbtree every time
it needs to find more extent_state objects to fill the requested
operation.

This adds a simple test with rb_next to see if the next object in the
tree was adjacent to the one we just found.  If so, we skip the search
and just use the next object.

Signed-off-by: Chris Mason
---

diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 68260180f58..7e5c5a0749e 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -694,8 +694,8 @@ again:
 		BUG_ON(err == -EEXIST);
 		goto out;
 	}
-
 	state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
 	last_start = state->start;
 	last_end = state->end;
 
@@ -706,6 +706,7 @@ again:
 	 * Just lock what we found and keep going
 	 */
 	if (state->start == start && state->end <= end) {
+		struct rb_node *next_node;
 		set = state->state & bits;
 		if (set && exclusive) {
 			*failed_start = state->start;
@@ -716,7 +717,17 @@ again:
 		merge_state(tree, state);
 		if (last_end == (u64)-1)
 			goto out;
+
 		start = last_end + 1;
+		if (start < end && prealloc && !need_resched()) {
+			next_node = rb_next(node);
+			if (next_node) {
+				state = rb_entry(next_node, struct extent_state,
+						 rb_node);
+				if (state->start == start)
+					goto hit_next;
+			}
+		}
 		goto search_again;
 	}
 
@@ -852,7 +863,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
 		       gfp_t mask)
 {
 	return set_extent_bit(tree, start, end,
-			      EXTENT_DELALLOC | EXTENT_DIRTY,
+			      EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
			      0, NULL, mask);
 }
 
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 7c3cd248d8d..a760d97279a 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -136,8 +136,6 @@ static noinline int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	btrfs_set_trans_block_group(trans, inode);
 	hint_byte = 0;
 
-	set_extent_uptodate(io_tree, start_pos, end_of_last_block, GFP_NOFS);
-
 	/* check for reserved extents on each page, we don't want
 	 * to reset the delalloc bit on things that already have
 	 * extents reserved.
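
A standalone userspace sketch of the fast path described in the message above,
for readers outside the kernel tree: it assumes a sorted array of ranges
standing in for the rbtree, and the names (struct range, find_from,
set_bits_in_ranges) are invented for illustration, not taken from extent_io.c.
Gaps between ranges are simply skipped here, whereas the real code would insert
a new extent_state; the point is only the adjacency check that avoids
repeating the search.

/*
 * Sketch of the "peek at the next node before searching again" pattern,
 * using a sorted array instead of the kernel rbtree.
 */
#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct range {
	uint64_t start;
	uint64_t end;		/* inclusive, like extent_state */
	unsigned int bits;
};

/* Stand-in for the rbtree search: first entry that ends at or after 'start'. */
static struct range *find_from(struct range *r, size_t n, uint64_t start)
{
	for (size_t i = 0; i < n; i++)
		if (r[i].end >= start)
			return &r[i];
	return NULL;
}

/*
 * Set 'bits' on every entry overlapping [start, end].  After finishing one
 * entry, peek at the following one (the rb_next() analogue); if it begins
 * exactly where we left off, reuse it directly instead of searching again.
 */
static void set_bits_in_ranges(struct range *r, size_t n,
			       uint64_t start, uint64_t end, unsigned int bits)
{
	struct range *cur = find_from(r, n, start);

	while (cur && cur->start <= end) {
		cur->bits |= bits;
		start = cur->end + 1;
		if (start > end)
			break;

		struct range *next = (cur + 1 < r + n) ? cur + 1 : NULL;
		if (next && next->start == start) {
			cur = next;	/* adjacent: skip the search */
			continue;
		}
		cur = find_from(r, n, start);	/* gap: search from scratch */
	}
}

int main(void)
{
	struct range r[] = {
		{ 0, 4095, 0 }, { 4096, 8191, 0 }, { 16384, 20479, 0 },
	};

	set_bits_in_ranges(r, 3, 0, 20479, 0x1);
	for (size_t i = 0; i < 3; i++)
		printf("[%" PRIu64 ", %" PRIu64 "] bits=%#x\n",
		       r[i].start, r[i].end, r[i].bits);
	return 0;
}

The same idea drives the patch: when neighbouring extent_state objects happen
to be contiguous, each step of the walk costs one rb_next() rather than a full
descent from the root of the tree.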