Btrfs: reduce CPU usage in the extent_state tree
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index eb2bee8b7fbfb19fb37dc26cbe6fe01ec92ea3ef..7c70613eb72c10547ac94fde0b9ea600ef1f2cc6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
 #include "ctree.h"
 #include "btrfs_inode.h"
 
-/* temporary define until extent_map moves out of btrfs */
-struct kmem_cache *btrfs_cache_create(const char *name, size_t size,
-                                      unsigned long extra_flags,
-                                      void (*ctor)(void *, struct kmem_cache *,
-                                                   unsigned long));
-
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
 
@@ -50,20 +44,23 @@ struct extent_page_data {
        /* tells writepage not to lock the state bits for this range;
         * it still does the unlocking
         */
-       int extent_locked;
+       unsigned int extent_locked:1;
+
+       /* tells the submit_bio code to use a WRITE_SYNC */
+       unsigned int sync_io:1;
 };
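
The two control flags are now 1-bit fields sharing a single word. A minimal, runnable toy (the struct name and main() are invented, not the kernel's) showing the packing:

#include <stdio.h>

/* Toy model of the bitfield packing used in extent_page_data above:
 * both flags share one unsigned int instead of costing a full int each. */
struct epd_flags {
	unsigned int extent_locked:1;
	unsigned int sync_io:1;
};

int main(void)
{
	struct epd_flags f = { .extent_locked = 1, .sync_io = 0 };

	printf("sizeof = %zu, extent_locked = %u, sync_io = %u\n",
	       sizeof(f), f.extent_locked, f.sync_io);
	return 0;
}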
 
 int __init extent_io_init(void)
 {
-       extent_state_cache = btrfs_cache_create("extent_state",
-                                           sizeof(struct extent_state), 0,
-                                           NULL);
+       extent_state_cache = kmem_cache_create("extent_state",
+                       sizeof(struct extent_state), 0,
+                       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_state_cache)
                return -ENOMEM;
 
-       extent_buffer_cache = btrfs_cache_create("extent_buffers",
-                                           sizeof(struct extent_buffer), 0,
-                                           NULL);
+       extent_buffer_cache = kmem_cache_create("extent_buffers",
+                       sizeof(struct extent_buffer), 0,
+                       SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
        if (!extent_buffer_cache)
                goto free_state_cache;
        return 0;
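
The private btrfs_cache_create() wrapper is dropped in favor of calling kmem_cache_create() directly. A hedged sketch of the same pattern for a hypothetical cache; the cache name, object type, and init function below are illustrative, not from the patch:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>

/* Illustrative only: a slab cache for a made-up object type, created
 * with the same flags as the caches above.  SLAB_RECLAIM_ACCOUNT tells
 * the VM these objects can be reclaimed, and SLAB_MEM_SPREAD spreads
 * allocations across a cpuset's allowed memory nodes. */
struct demo_state {
	u64 start;
	u64 end;
};

static struct kmem_cache *demo_cache;

static int __init demo_cache_init(void)
{
	demo_cache = kmem_cache_create("demo_state",
			sizeof(struct demo_state), 0,
			SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
	if (!demo_cache)
		return -ENOMEM;
	return 0;
}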
@@ -370,10 +367,10 @@ static int insert_state(struct extent_io_tree *tree,
        }
        if (bits & EXTENT_DIRTY)
                tree->dirty_bytes += end - start + 1;
-       set_state_cb(tree, state, bits);
-       state->state |= bits;
        state->start = start;
        state->end = end;
+       set_state_cb(tree, state, bits);
+       state->state |= bits;
        node = tree_insert(&tree->state, end, &state->rb_node);
        if (node) {
                struct extent_state *found;
@@ -479,6 +476,7 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
+       u64 last_end;
        int err;
        int set = 0;
 
@@ -501,6 +499,7 @@ again:
        if (state->start > end)
                goto out;
        WARN_ON(state->end < start);
+       last_end = state->end;
 
        /*
         *     | ---- desired range ---- |
@@ -527,9 +526,11 @@ again:
                if (err)
                        goto out;
                if (state->end <= end) {
-                       start = state->end + 1;
                        set |= clear_state_bit(tree, state, bits,
                                        wake, delete);
+                       if (last_end == (u64)-1)
+                               goto out;
+                       start = last_end + 1;
                } else {
                        start = state->start;
                }
@@ -555,8 +556,10 @@ again:
                goto out;
        }
 
-       start = state->end + 1;
        set |= clear_state_bit(tree, state, bits, wake, delete);
+       if (last_end == (u64)-1)
+               goto out;
+       start = last_end + 1;
        goto search_again;
 
 out:
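
last_end is captured before clear_state_bit() because that call may free the state, and the (u64)-1 comparisons added above guard the "start = last_end + 1" step against wrapping. A small runnable demonstration of the wrap this prevents:

#include <stdint.h>
#include <stdio.h>

/* Why the (u64)-1 guard exists: for a state that runs to the very top
 * of the keyspace, 'start = last_end + 1' would wrap to 0 and the
 * clear loop would start over instead of terminating. */
int main(void)
{
	uint64_t last_end = UINT64_MAX;

	printf("last_end + 1 wraps to %llu\n",
	       (unsigned long long)(last_end + 1));

	if (last_end == UINT64_MAX)
		printf("guard taken: goto out instead of wrapping\n");
	return 0;
}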
@@ -651,25 +654,24 @@ static void set_state_bits(struct extent_io_tree *tree,
 }
 
 /*
- * set some bits on a range in the tree.  This may require allocations
- * or sleeping, so the gfp mask is used to indicate what is allowed.
+ * set some bits on a range in the tree.  This may require allocations or
+ * sleeping, so the gfp mask is used to indicate what is allowed.
  *
- * If 'exclusive' == 1, this will fail with -EEXIST if some part of the
- * range already has the desired bits set.  The start of the existing
- * range is returned in failed_start in this case.
+ * If any of the exclusive bits are set, this will fail with -EEXIST if some
+ * part of the range already has the desired bits set.  The start of the
+ * existing range is returned in failed_start in this case.
  *
- * [start, end] is inclusive
- * This takes the tree lock.
+ * [start, end] is inclusive.  This takes the tree lock.
  */
+
 static int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
-                         int bits, int exclusive, u64 *failed_start,
+                         int bits, int exclusive_bits, u64 *failed_start,
                          gfp_t mask)
 {
        struct extent_state *state;
        struct extent_state *prealloc = NULL;
        struct rb_node *node;
        int err = 0;
-       int set;
        u64 last_start;
        u64 last_end;
 again:
@@ -691,8 +693,8 @@ again:
                BUG_ON(err == -EEXIST);
                goto out;
        }
-
        state = rb_entry(node, struct extent_state, rb_node);
+hit_next:
        last_start = state->start;
        last_end = state->end;
 
@@ -703,15 +705,27 @@ again:
         * Just lock what we found and keep going
         */
        if (state->start == start && state->end <= end) {
-               set = state->state & bits;
-               if (set && exclusive) {
+               struct rb_node *next_node;
+               if (state->state & exclusive_bits) {
                        *failed_start = state->start;
                        err = -EEXIST;
                        goto out;
                }
                set_state_bits(tree, state, bits);
-               start = state->end + 1;
                merge_state(tree, state);
+               if (last_end == (u64)-1)
+                       goto out;
+
+               start = last_end + 1;
+               if (start < end && prealloc && !need_resched()) {
+                       next_node = rb_next(node);
+                       if (next_node) {
+                               state = rb_entry(next_node, struct extent_state,
+                                                rb_node);
+                               if (state->start == start)
+                                       goto hit_next;
+                       }
+               }
                goto search_again;
        }
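
This hit_next loop is the core of the CPU reduction: when the neighbouring extent_state begins exactly at the new start, rb_next() reaches it in O(1) instead of re-walking the tree from the root for every contiguous state. A simplified, compilable model of the idea (plain linked ranges stand in for the rb-tree; all names are invented):

#include <stdio.h>

/* Toy model of the hit_next fast path: 'next' stands in for rb_next().
 * While successive states stay back-to-back, each neighbour is reached
 * in O(1) instead of re-searching the tree from the root per state. */
struct state {
	unsigned long long start, end;
	struct state *next;
};

static unsigned long long set_bits_range(struct state *s,
					 unsigned long long start,
					 unsigned long long end)
{
	while (s && s->start == start && s->end <= end) {
		/* set_state_bits()/merge_state() would run here */
		if (s->end == (unsigned long long)-1)
			break;		/* the same wrap guard as above */
		start = s->end + 1;
		s = s->next;		/* the rb_next() hop */
	}
	return start;	/* caller falls back to a full search from here */
}

int main(void)
{
	struct state b = { 11, 20, NULL };
	struct state a = { 0, 10, &b };

	printf("resumed at %llu\n", set_bits_range(&a, 0, 20));
	return 0;
}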
 
@@ -732,8 +746,7 @@ again:
         * desired bit on it.
         */
        if (state->start < start) {
-               set = state->state & bits;
-               if (exclusive && set) {
+               if (state->state & exclusive_bits) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
@@ -745,8 +758,10 @@ again:
                        goto out;
                if (state->end <= end) {
                        set_state_bits(tree, state, bits);
-                       start = state->end + 1;
                        merge_state(tree, state);
+                       if (last_end == (u64)-1)
+                               goto out;
+                       start = last_end + 1;
                } else {
                        start = state->start;
                }
@@ -781,8 +796,7 @@ again:
         * on the first half
         */
        if (state->start <= end && state->end > end) {
-               set = state->state & bits;
-               if (exclusive && set) {
+               if (state->state & exclusive_bits) {
                        *failed_start = start;
                        err = -EEXIST;
                        goto out;
@@ -845,7 +859,7 @@ int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
                     gfp_t mask)
 {
        return set_extent_bit(tree, start, end,
-                             EXTENT_DELALLOC | EXTENT_DIRTY,
+                             EXTENT_DELALLOC | EXTENT_DIRTY | EXTENT_UPTODATE,
                              0, NULL, mask);
 }
 
@@ -888,19 +902,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
        return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, mask);
 }
 
-static int set_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end,
-                        gfp_t mask)
-{
-       return set_extent_bit(tree, start, end, EXTENT_WRITEBACK,
-                             0, NULL, mask);
-}
-
-static int clear_extent_writeback(struct extent_io_tree *tree, u64 start,
-                                 u64 end, gfp_t mask)
-{
-       return clear_extent_bit(tree, start, end, EXTENT_WRITEBACK, 1, 0, mask);
-}
-
 int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 {
        return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK);
@@ -910,13 +911,14 @@ int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end)
 * either insert or lock state struct between start and end.  Use mask to
 * tell us if waiting is desired.
  */
-int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
+                    int bits, gfp_t mask)
 {
        int err;
        u64 failed_start;
        while (1) {
-               err = set_extent_bit(tree, start, end, EXTENT_LOCKED, 1,
-                                    &failed_start, mask);
+               err = set_extent_bit(tree, start, end, EXTENT_LOCKED | bits,
+                                    EXTENT_LOCKED, &failed_start, mask);
                if (err == -EEXIST && (mask & __GFP_WAIT)) {
                        wait_extent_bit(tree, failed_start, end, EXTENT_LOCKED);
                        start = failed_start;
@@ -928,6 +930,11 @@ int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
        return err;
 }
 
+int lock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask)
+{
+       return lock_extent_bits(tree, start, end, 0, mask);
+}
+
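
lock_extent() becomes a thin wrapper around lock_extent_bits(), whose loop resumes at failed_start after a conflict rather than rescanning from the original start. A toy, runnable model of that resume-at-conflict pattern; every name and the fake lock table below are invented:

#include <errno.h>
#include <stdio.h>

/* try_lock_range() stands in for set_extent_bit() with EXTENT_LOCKED
 * as the exclusive bit; 'held_from' models a range another task holds. */
static long long held_from = 50;

static int try_lock_range(long long start, long long end,
			  long long *failed_start)
{
	if (end >= held_from) {
		*failed_start = held_from;
		return -EEXIST;
	}
	return 0;
}

int main(void)
{
	long long start = 0, end = 200, failed_start;

	while (try_lock_range(start, end, &failed_start) == -EEXIST) {
		/* wait_extent_bit() would sleep here until the holder
		 * clears EXTENT_LOCKED; simulate the lock going away */
		held_from = 1000;
		start = failed_start;	/* resume at the conflict point */
	}
	printf("locked [%lld, %lld]\n", start, end);
	return 0;
}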
 int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end,
                    gfp_t mask)
 {
@@ -967,7 +974,6 @@ int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end)
                page_cache_release(page);
                index++;
        }
-       set_extent_dirty(tree, start, end, GFP_NOFS);
        return 0;
 }
 
@@ -987,7 +993,6 @@ static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end)
                page_cache_release(page);
                index++;
        }
-       set_extent_writeback(tree, start, end, GFP_NOFS);
        return 0;
 }
 
@@ -1404,69 +1409,6 @@ out:
        return total_bytes;
 }
 
-#if 0
-/*
- * helper function to lock both pages and extents in the tree.
- * pages must be locked first.
- */
-static int lock_range(struct extent_io_tree *tree, u64 start, u64 end)
-{
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-       struct page *page;
-       int err;
-
-       while (index <= end_index) {
-               page = grab_cache_page(tree->mapping, index);
-               if (!page) {
-                       err = -ENOMEM;
-                       goto failed;
-               }
-               if (IS_ERR(page)) {
-                       err = PTR_ERR(page);
-                       goto failed;
-               }
-               index++;
-       }
-       lock_extent(tree, start, end, GFP_NOFS);
-       return 0;
-
-failed:
-       /*
-        * we failed above in getting the page at 'index', so we undo here
-        * up to but not including the page at 'index'
-        */
-       end_index = index;
-       index = start >> PAGE_CACHE_SHIFT;
-       while (index < end_index) {
-               page = find_get_page(tree->mapping, index);
-               unlock_page(page);
-               page_cache_release(page);
-               index++;
-       }
-       return err;
-}
-
-/*
- * helper function to unlock both pages and extents in the tree.
- */
-static int unlock_range(struct extent_io_tree *tree, u64 start, u64 end)
-{
-       unsigned long index = start >> PAGE_CACHE_SHIFT;
-       unsigned long end_index = end >> PAGE_CACHE_SHIFT;
-       struct page *page;
-
-       while (index <= end_index) {
-               page = find_get_page(tree->mapping, index);
-               unlock_page(page);
-               page_cache_release(page);
-               index++;
-       }
-       unlock_extent(tree, start, end, GFP_NOFS);
-       return 0;
-}
-#endif
-
 /*
  * set the private field for a given byte offset in the tree.  If there isn't
  * an extent_state there already, this does nothing.
@@ -1608,10 +1550,7 @@ static int check_page_locked(struct extent_io_tree *tree,
 static int check_page_writeback(struct extent_io_tree *tree,
                             struct page *page)
 {
-       u64 start = (u64)page->index << PAGE_CACHE_SHIFT;
-       u64 end = start + PAGE_CACHE_SIZE - 1;
-       if (!test_range_bit(tree, start, end, EXTENT_WRITEBACK, 0))
-               end_page_writeback(page);
+       end_page_writeback(page);
        return 0;
 }
 
@@ -1669,13 +1608,11 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
                }
 
                if (!uptodate) {
-                       clear_extent_uptodate(tree, start, end, GFP_ATOMIC);
+                       clear_extent_uptodate(tree, start, end, GFP_NOFS);
                        ClearPageUptodate(page);
                        SetPageError(page);
                }
 
-               clear_extent_writeback(tree, start, end, GFP_ATOMIC);
-
                if (whole_page)
                        end_page_writeback(page);
                else
@@ -2101,6 +2038,16 @@ int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
        return ret;
 }
 
+static noinline void update_nr_written(struct page *page,
+                                     struct writeback_control *wbc,
+                                     unsigned long nr_written)
+{
+       wbc->nr_to_write -= nr_written;
+       if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
+           wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
+               page->mapping->writeback_index = page->index + nr_written;
+}
+
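
update_nr_written() hoists the wbc bookkeeping that used to sit at the bottom of __extent_writepage(), so it can run while the page is still locked (the inode must not be touched after unlock). A runnable toy of what it tracks; the struct is simplified and writeback_index, which really lives on the address_space, is folded into it:

#include <stdio.h>

/* Stripped-down stand-in for struct writeback_control: only the
 * fields the helper touches are modeled. */
struct toy_wbc {
	long nr_to_write;
	int range_cyclic;
	unsigned long writeback_index;
};

static void toy_update_nr_written(struct toy_wbc *wbc,
				  unsigned long page_index,
				  unsigned long nr_written)
{
	wbc->nr_to_write -= nr_written;
	if (wbc->range_cyclic)
		wbc->writeback_index = page_index + nr_written;
}

int main(void)
{
	struct toy_wbc wbc = { .nr_to_write = 1024, .range_cyclic = 1 };

	/* writing page 42 kicked off 4 pages in total */
	toy_update_nr_written(&wbc, 42, 4);
	printf("nr_to_write=%ld writeback_index=%lu\n",
	       wbc.nr_to_write, wbc.writeback_index);	/* 1020, 46 */
	return 0;
}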
 /*
  * the writepage semantics are similar to regular writepage.  extent
  * records are inserted to lock ranges in the tree, and as dirty areas
@@ -2136,8 +2083,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        u64 delalloc_end;
        int page_started;
        int compressed;
+       int write_flags;
        unsigned long nr_written = 0;
 
+       if (wbc->sync_mode == WB_SYNC_ALL)
+               write_flags = WRITE_SYNC_PLUG;
+       else
+               write_flags = WRITE;
+
        WARN_ON(!PageLocked(page));
        pg_offset = i_size & (PAGE_CACHE_SIZE - 1);
        if (page->index > end_index ||
@@ -2164,6 +2117,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
        delalloc_end = 0;
        page_started = 0;
        if (!epd->extent_locked) {
+               u64 delalloc_to_write;
+               /*
+                * make sure the wbc mapping index is at least updated
+                * to this page.
+                */
+               update_nr_written(page, wbc, 0);
+
                while (delalloc_end < page_end) {
                        nr_delalloc = find_lock_delalloc_range(inode, tree,
                                                       page,
@@ -2177,6 +2137,14 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                        tree->ops->fill_delalloc(inode, page, delalloc_start,
                                                 delalloc_end, &page_started,
                                                 &nr_written);
+                       delalloc_to_write = (delalloc_end -
+                                       max_t(u64, page_offset(page),
+                                             delalloc_start) + 1) >>
+                                       PAGE_CACHE_SHIFT;
+                       if (wbc->nr_to_write < delalloc_to_write) {
+                               wbc->nr_to_write = min_t(long, 8192,
+                                                delalloc_to_write);
+                       }
                        delalloc_start = delalloc_end + 1;
                }
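
The delalloc_to_write bump keeps writeback from stopping after its original nr_to_write budget when fill_delalloc has just produced far more dirty pages than that; bytes convert to pages by shifting. The arithmetic, runnable, with 4 KiB pages assumed:

#include <stdio.h>

#define PAGE_CACHE_SHIFT 12	/* 4 KiB pages assumed */

/* The bytes-to-pages conversion behind delalloc_to_write: a 7 MiB
 * delalloc range found under a single page must not let writeback
 * stop after the caller's original budget. */
int main(void)
{
	unsigned long long delalloc_start = 1ULL << 20;		/* 1 MiB */
	unsigned long long delalloc_end = (8ULL << 20) - 1;	/* 8 MiB - 1 */
	unsigned long long pages =
		(delalloc_end - delalloc_start + 1) >> PAGE_CACHE_SHIFT;

	printf("%llu pages of delalloc to write\n", pages);	/* 1792 */
	return 0;
}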
 
@@ -2185,7 +2153,13 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 */
                if (page_started) {
                        ret = 0;
-                       goto update_nr_written;
+                       /*
+                        * we've unlocked the page, so we can't update
+                        * the mapping's writeback index, just update
+                        * nr_to_write.
+                        */
+                       wbc->nr_to_write -= nr_written;
+                       goto done_unlocked;
                }
        }
        lock_extent(tree, start, page_end, GFP_NOFS);
@@ -2198,21 +2172,27 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                if (ret == -EAGAIN) {
                        unlock_extent(tree, start, page_end, GFP_NOFS);
                        redirty_page_for_writepage(wbc, page);
+                       update_nr_written(page, wbc, nr_written);
                        unlock_page(page);
                        ret = 0;
-                       goto update_nr_written;
+                       goto done_unlocked;
                }
        }
 
-       nr_written++;
+       /*
+        * we don't want to touch the inode after unlocking the page,
+        * so we update the mapping writeback index now
+        */
+       update_nr_written(page, wbc, nr_written + 1);
 
        end = page_end;
        if (test_range_bit(tree, start, page_end, EXTENT_DELALLOC, 0))
                printk(KERN_ERR "btrfs delalloc bits after lock_extent\n");
 
        if (last_byte <= start) {
-               clear_extent_dirty(tree, start, page_end, GFP_NOFS);
-               unlock_extent(tree, start, page_end, GFP_NOFS);
+               clear_extent_bit(tree, start, page_end,
+                                EXTENT_LOCKED | EXTENT_DIRTY,
+                                1, 0, GFP_NOFS);
                if (tree->ops && tree->ops->writepage_end_io_hook)
                        tree->ops->writepage_end_io_hook(page, start,
                                                         page_end, NULL, 1);
@@ -2220,12 +2200,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                goto done;
        }
 
-       set_extent_uptodate(tree, start, page_end, GFP_NOFS);
        blocksize = inode->i_sb->s_blocksize;
 
        while (cur <= end) {
                if (cur >= last_byte) {
-                       clear_extent_dirty(tree, cur, page_end, GFP_NOFS);
                        unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
                        if (tree->ops && tree->ops->writepage_end_io_hook)
                                tree->ops->writepage_end_io_hook(page, cur,
@@ -2258,9 +2236,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                 */
                if (compressed || block_start == EXTENT_MAP_HOLE ||
                    block_start == EXTENT_MAP_INLINE) {
-                       clear_extent_dirty(tree, cur,
-                                          cur + iosize - 1, GFP_NOFS);
-
                        unlock_extent(tree, unlock_start, cur + iosize - 1,
                                      GFP_NOFS);
 
@@ -2294,7 +2269,6 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                        continue;
                }
 
-               clear_extent_dirty(tree, cur, cur + iosize - 1, GFP_NOFS);
                if (tree->ops && tree->ops->writepage_io_hook) {
                        ret = tree->ops->writepage_io_hook(page, cur,
                                                cur + iosize - 1);
@@ -2314,9 +2288,9 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
                                       (unsigned long long)end);
                        }
 
-                       ret = submit_extent_page(WRITE, tree, page, sector,
-                                                iosize, pg_offset, bdev,
-                                                &epd->bio, max_nr,
+                       ret = submit_extent_page(write_flags, tree, page,
+                                                sector, iosize, pg_offset,
+                                                bdev, &epd->bio, max_nr,
                                                 end_bio_extent_writepage,
                                                 0, 0, 0);
                        if (ret)
@@ -2336,11 +2310,8 @@ done:
                unlock_extent(tree, unlock_start, page_end, GFP_NOFS);
        unlock_page(page);
 
-update_nr_written:
-       wbc->nr_to_write -= nr_written;
-       if (wbc->range_cyclic || (wbc->nr_to_write > 0 &&
-           wbc->range_start == 0 && wbc->range_end == LLONG_MAX))
-               page->mapping->writeback_index = page->index + nr_written;
+done_unlocked:
+
        return 0;
 }
 
@@ -2365,7 +2336,6 @@ static int extent_write_cache_pages(struct extent_io_tree *tree,
                             writepage_t writepage, void *data,
                             void (*flush_fn)(void *))
 {
-       struct backing_dev_info *bdi = mapping->backing_dev_info;
        int ret = 0;
        int done = 0;
        struct pagevec pvec;
@@ -2440,10 +2410,6 @@ retry:
                        }
                        if (ret || wbc->nr_to_write <= 0)
                                done = 1;
-                       if (wbc->nonblocking && bdi_write_congested(bdi)) {
-                               wbc->encountered_congestion = 1;
-                               done = 1;
-                       }
                }
                pagevec_release(&pvec);
                cond_resched();
@@ -2460,15 +2426,23 @@ retry:
        return ret;
 }
 
-static noinline void flush_write_bio(void *data)
+static void flush_epd_write_bio(struct extent_page_data *epd)
 {
-       struct extent_page_data *epd = data;
        if (epd->bio) {
-               submit_one_bio(WRITE, epd->bio, 0, 0);
+               if (epd->sync_io)
+                       submit_one_bio(WRITE_SYNC, epd->bio, 0, 0);
+               else
+                       submit_one_bio(WRITE, epd->bio, 0, 0);
                epd->bio = NULL;
        }
 }
 
+static noinline void flush_write_bio(void *data)
+{
+       struct extent_page_data *epd = data;
+       flush_epd_write_bio(epd);
+}
+
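
flush_epd_write_bio() is where sync_io pays off: WB_SYNC_ALL writeback submits its accumulated bio with WRITE_SYNC so the block layer treats it as latency-sensitive. A runnable toy of the plumbing; the TOY_* names are stand-ins, not kernel symbols:

#include <stdio.h>

/* WB_SYNC_ALL writeback tags the extent_page_data, and the flush path
 * chooses the bio flags from the tag. */
enum rw_flags { TOY_WRITE, TOY_WRITE_SYNC };
enum sync_mode { TOY_WB_SYNC_NONE, TOY_WB_SYNC_ALL };

struct toy_epd {
	unsigned int sync_io:1;
};

static enum rw_flags pick_rw(const struct toy_epd *epd)
{
	return epd->sync_io ? TOY_WRITE_SYNC : TOY_WRITE;
}

int main(void)
{
	enum sync_mode mode = TOY_WB_SYNC_ALL;	/* e.g. fsync-driven */
	struct toy_epd epd = { .sync_io = (mode == TOY_WB_SYNC_ALL) };

	printf("submit with %s\n",
	       pick_rw(&epd) == TOY_WRITE_SYNC ? "WRITE_SYNC" : "WRITE");
	return 0;
}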
 int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
                          get_extent_t *get_extent,
                          struct writeback_control *wbc)
@@ -2480,23 +2454,22 @@ int extent_write_full_page(struct extent_io_tree *tree, struct page *page,
                .tree = tree,
                .get_extent = get_extent,
                .extent_locked = 0,
+               .sync_io = wbc->sync_mode == WB_SYNC_ALL,
        };
        struct writeback_control wbc_writepages = {
                .bdi            = wbc->bdi,
-               .sync_mode      = WB_SYNC_NONE,
+               .sync_mode      = wbc->sync_mode,
                .older_than_this = NULL,
                .nr_to_write    = 64,
                .range_start    = page_offset(page) + PAGE_CACHE_SIZE,
                .range_end      = (loff_t)-1,
        };
 
-
        ret = __extent_writepage(page, wbc, &epd);
 
        extent_write_cache_pages(tree, mapping, &wbc_writepages,
                                 __extent_writepage, &epd, flush_write_bio);
-       if (epd.bio)
-               submit_one_bio(WRITE, epd.bio, 0, 0);
+       flush_epd_write_bio(&epd);
        return ret;
 }
 
@@ -2515,6 +2488,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
                .tree = tree,
                .get_extent = get_extent,
                .extent_locked = 1,
+               .sync_io = mode == WB_SYNC_ALL,
        };
        struct writeback_control wbc_writepages = {
                .bdi            = inode->i_mapping->backing_dev_info,
@@ -2540,8 +2514,7 @@ int extent_write_locked_range(struct extent_io_tree *tree, struct inode *inode,
                start += PAGE_CACHE_SIZE;
        }
 
-       if (epd.bio)
-               submit_one_bio(WRITE, epd.bio, 0, 0);
+       flush_epd_write_bio(&epd);
        return ret;
 }
 
@@ -2556,13 +2529,13 @@ int extent_writepages(struct extent_io_tree *tree,
                .tree = tree,
                .get_extent = get_extent,
                .extent_locked = 0,
+               .sync_io = wbc->sync_mode == WB_SYNC_ALL,
        };
 
        ret = extent_write_cache_pages(tree, mapping, wbc,
                                       __extent_writepage, &epd,
                                       flush_write_bio);
-       if (epd.bio)
-               submit_one_bio(WRITE, epd.bio, 0, 0);
+       flush_epd_write_bio(&epd);
        return ret;
 }
 
@@ -2623,7 +2596,7 @@ int extent_invalidatepage(struct extent_io_tree *tree,
                return 0;
 
        lock_extent(tree, start, end, GFP_NOFS);
-       wait_on_extent_writeback(tree, start, end);
+       wait_on_page_writeback(page);
        clear_extent_bit(tree, start, end,
                         EXTENT_LOCKED | EXTENT_DIRTY | EXTENT_DELALLOC,
                         1, 1, GFP_NOFS);
@@ -2790,15 +2763,15 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                u64 len;
                while (start <= end) {
                        len = end - start + 1;
-                       spin_lock(&map->lock);
+                       write_lock(&map->lock);
                        em = lookup_extent_mapping(map, start, len);
                        if (!em || IS_ERR(em)) {
-                               spin_unlock(&map->lock);
+                               write_unlock(&map->lock);
                                break;
                        }
                        if (test_bit(EXTENT_FLAG_PINNED, &em->flags) ||
                            em->start != start) {
-                               spin_unlock(&map->lock);
+                               write_unlock(&map->lock);
                                free_extent_map(em);
                                break;
                        }
@@ -2812,7 +2785,7 @@ int try_release_extent_mapping(struct extent_map_tree *map,
                                free_extent_map(em);
                        }
                        start = extent_map_end(em);
-                       spin_unlock(&map->lock);
+                       write_unlock(&map->lock);
 
                        /* once for us */
                        free_extent_map(em);
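
The final hunks convert map->lock from a spinlock to a rwlock, so extent_map lookups can proceed concurrently while mutators such as try_release_extent_mapping() take the write side. A runnable userspace analogue using POSIX rwlocks (build with -lpthread; all names invented):

#include <pthread.h>
#include <stdio.h>

/* Userspace model of the spinlock -> rwlock switch: lookups run
 * concurrently under the read lock, while removal (as in
 * try_release_extent_mapping above) takes the write lock. */
static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int tree_size = 100;

static void *reader(void *arg)
{
	(void)arg;
	pthread_rwlock_rdlock(&map_lock);	/* many readers at once */
	printf("lookup sees %d entries\n", tree_size);
	pthread_rwlock_unlock(&map_lock);
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, reader, NULL);
	pthread_create(&t2, NULL, reader, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	pthread_rwlock_wrlock(&map_lock);	/* exclusive writer */
	tree_size--;
	pthread_rwlock_unlock(&map_lock);
	return 0;
}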