/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free);
static int update_reserved_extents(struct btrfs_block_group_cache *cache,
                                   u64 num_bytes, int reserve);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          struct btrfs_path *path,
                          u64 bytenr, u64 num_bytes,
                          int is_data, int reserved,
                          struct extent_buffer **must_clean);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

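/*
 * test whether all of the given allocation bits (block group type and
 * profile flags) are set on this block group
 */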
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                atomic_inc(&ret->count);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

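/*
 * The next few helpers track ranges that must be excluded from free space
 * accounting while a block group is being cached, e.g. the superblock
 * mirrors handled by exclude_super_stripes() below.
 */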
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

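/*
 * grab a reference on the caching control for this block group, or return
 * NULL if no caching is currently in progress
 */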
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents that
 * can't be used yet, because their free space will not be released until
 * the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start == start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

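/*
 * Worker for cache_block_group(): walks the extent tree in the commit
 * root and feeds the gaps between extent items into the block group's
 * free space cache, waking up waiters every couple of megabytes found.
 */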
static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        exclude_super_stripes(extent_root, block_group);
        spin_lock(&block_group->space_info->lock);
        block_group->space_info->bytes_super += block_group->bytes_super;
        spin_unlock(&block_group->space_info->lock);

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                smp_mb();
                if (fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        caching_ctl->progress = last;
                        btrfs_release_path(extent_root, path);
                        up_read(&fs_info->extent_commit_sem);
                        mutex_unlock(&caching_ctl->mutex);
                        if (btrfs_transaction_in_commit(fs_info))
                                schedule_timeout(1);
                        else
                                cond_resched();
                        goto again;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        return 0;
}

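/*
 * start asynchronous caching of a block group's free space.  Returns 0
 * both when the kthread was started and when caching was already under
 * way or finished.
 */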
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct task_struct *tsk;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for caching kthread, one for caching block group list */
        atomic_set(&caching_ctl->count, 2);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count))
                kfree(cache);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags == flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

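/* scale num by factor/10; a factor of 10 leaves num untouched */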
static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

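/*
 * scan the block groups starting at the search hint for a metadata group
 * that is less than factor/10 full, retrying with a wrapped search start
 * and then a full search at factor 10 before giving up
 */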
u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. Implicit back refs are optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. Full back refs are for pointers in tree blocks not
 * referenced by their owner trees. The location of the tree block is
 * recorded in the back refs. Actually the full back ref is generic, and
 * can be used in all cases where the implicit back ref is used. The major
 * shortcoming of the full back ref is its overhead. Every time a tree
 * block gets COWed, we have to update the back ref entries for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it. So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs are used for pointers
 * in the block. Remove these full back refs and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs are used for
 * pointers in the block. Add full back refs for every pointer in the
 * block and increase the lower level extents' reference counts. The
 * original implicit back refs still apply to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key. The key offset for the implicit back refs is
 * the objectid of the block's owner tree. The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * the level of the tree block is required. This information is stored in
 * the tree block info structure.
 */

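/*
 * A worked example of the key layout above, with made-up numbers: a file
 * extent at bytenr 136708096 referenced from inode 257 at file offset 0
 * in subvolume 5 carries an implicit back ref keyed
 *
 *     (136708096, BTRFS_EXTENT_DATA_REF_KEY,
 *      hash_extent_data_ref(5, 257, 0))
 *
 * while the same extent referenced through a leaf at bytenr 30408704 that
 * is no longer owned by its tree would instead carry the full back ref
 *
 *     (136708096, BTRFS_SHARED_DATA_REF_KEY, 30408704)
 */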
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);
        BUG_ON(ret);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

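/*
 * compute the key offset for an implicit data back ref: the hash of the
 * (root objectid, inode objectid, file offset) triple described in the
 * comment above, built from two chained crc32c values
 */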
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

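/*
 * find the data back ref item for the given extent.  With a parent this
 * is a single keyed lookup of the shared ref; without one we walk the
 * leaf for an extent data ref matching root/owner/offset, restarting the
 * search if we had to cross into the next leaf.
 */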
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(root, path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

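/*
 * insert a new data back ref, or bump the count of an existing one.
 * Hash collisions between different (root, owner, offset) triples are
 * handled by linearly probing forward from the hashed key offset until
 * a matching or free slot is found.
 */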
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(root, path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(root, path);
        return ret;
}

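/*
 * drop refs_to_drop references from the data back ref item the path
 * points to, deleting the item once its count reaches zero
 */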
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

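/*
 * return the reference count stored in the data back ref at the current
 * path position, looking inside the extent item when an inline ref is
 * supplied
 */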
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

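/*
 * Tree block back refs carry no count and consist only of a key, so
 * lookup and insert reduce to plain item operations.
 */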
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(root, path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(root, path);
        return ret;
}

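/*
 * pick the back ref key type for a new reference:
 *
 *   tree blocks (owner < BTRFS_FIRST_FREE_OBJECTID):
 *       parent set   -> BTRFS_SHARED_BLOCK_REF_KEY (full)
 *       parent unset -> BTRFS_TREE_BLOCK_REF_KEY   (implicit)
 *   file data (owner >= BTRFS_FIRST_FREE_OBJECTID):
 *       parent set   -> BTRFS_SHARED_DATA_REF_KEY  (full)
 *       parent unset -> BTRFS_EXTENT_DATA_REF_KEY  (implicit)
 */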
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

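/*
 * return the key that follows the current path position, walking up the
 * path until a level with a next slot is found; returns 1 if the path
 * points at the last key of the tree
 */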
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

/*
 * look for an inline back ref. if a back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * if a back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *       items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        u64 flags;
        u64 item_size;
        unsigned long ptr;
        unsigned long end;
        int extra_size;
        int type;
        int want;
        int ret;
        int err = 0;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
                path->keep_locks = 1;
        } else
                extra_size = -1;
        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }
        BUG_ON(ret);

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                if (!insert) {
                        err = -ENOENT;
                        goto out;
                }
                ret = convert_extent_item_v0(trans, root, path, owner,
                                             extra_size);
                if (ret < 0) {
                        err = ret;
                        goto out;
                }
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
        }
#endif
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else {
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

        err = -ENOENT;
        while (1) {
                if (ptr >= end) {
                        WARN_ON(ptr > end);
                        break;
                }
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                if (want < type)
                        break;
                if (want > type) {
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                                                  owner, offset)) {
                                err = 0;
                                break;
                        }
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                                break;
                } else {
                        u64 ref_offset;
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                        if (parent > 0) {
                                if (parent == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < parent)
                                        break;
                        } else {
                                if (root_objectid == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < root_objectid)
                                        break;
                        }
                }
                ptr += btrfs_extent_inline_ref_size(type);
        }
        if (err == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                        err = -EAGAIN;
                        goto out;
                }
                /*
                 * To add a new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add a new inline back
                 * ref if there is any kind of item for this block.
                 */
                if (find_next_key(path, 0, &key) == 0 &&
                    key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        err = -EAGAIN;
                        goto out;
                }
        }
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
        if (insert) {
                path->keep_locks = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
}

1339 /*
1340  * helper to add new inline back ref
1341  */
1342 static noinline_for_stack
1343 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1344                                 struct btrfs_root *root,
1345                                 struct btrfs_path *path,
1346                                 struct btrfs_extent_inline_ref *iref,
1347                                 u64 parent, u64 root_objectid,
1348                                 u64 owner, u64 offset, int refs_to_add,
1349                                 struct btrfs_delayed_extent_op *extent_op)
1350 {
1351         struct extent_buffer *leaf;
1352         struct btrfs_extent_item *ei;
1353         unsigned long ptr;
1354         unsigned long end;
1355         unsigned long item_offset;
1356         u64 refs;
1357         int size;
1358         int type;
1359         int ret;
1360
1361         leaf = path->nodes[0];
1362         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1363         item_offset = (unsigned long)iref - (unsigned long)ei;
1364
1365         type = extent_ref_type(parent, owner);
1366         size = btrfs_extent_inline_ref_size(type);
1367
1368         ret = btrfs_extend_item(trans, root, path, size);
1369         BUG_ON(ret);
1370
1371         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1372         refs = btrfs_extent_refs(leaf, ei);
1373         refs += refs_to_add;
1374         btrfs_set_extent_refs(leaf, ei, refs);
1375         if (extent_op)
1376                 __run_delayed_extent_op(extent_op, leaf, ei);
1377
1378         ptr = (unsigned long)ei + item_offset;
1379         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1380         if (ptr < end - size)
1381                 memmove_extent_buffer(leaf, ptr + size, ptr,
1382                                       end - size - ptr);
1383
1384         iref = (struct btrfs_extent_inline_ref *)ptr;
1385         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1386         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1387                 struct btrfs_extent_data_ref *dref;
1388                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1389                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1390                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1391                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1392                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1393         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1394                 struct btrfs_shared_data_ref *sref;
1395                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1396                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1397                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1398         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1399                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1400         } else {
1401                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1402         }
1403         btrfs_mark_buffer_dirty(leaf);
1404         return 0;
1405 }
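
/*
 * Illustrative layout note (not from the original source): after the
 * insert above, the extent item looks roughly like
 *
 *   [ btrfs_extent_item | (btrfs_tree_block_info, metadata only) |
 *     inline ref | inline ref | ... ]
 *
 * with inline refs kept ordered by type and, for data refs, by
 * hash_extent_data_ref(), matching the walk in
 * lookup_inline_extent_backref().
 */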
1406
1407 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1408                                  struct btrfs_root *root,
1409                                  struct btrfs_path *path,
1410                                  struct btrfs_extent_inline_ref **ref_ret,
1411                                  u64 bytenr, u64 num_bytes, u64 parent,
1412                                  u64 root_objectid, u64 owner, u64 offset)
1413 {
1414         int ret;
1415
1416         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1417                                            bytenr, num_bytes, parent,
1418                                            root_objectid, owner, offset, 0);
1419         if (ret != -ENOENT)
1420                 return ret;
1421
1422         btrfs_release_path(root, path);
1423         *ref_ret = NULL;
1424
1425         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1426                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1427                                             root_objectid);
1428         } else {
1429                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1430                                              root_objectid, owner, offset);
1431         }
1432         return ret;
1433 }
1434
1435 /*
1436  * helper to update or remove an inline back ref
1437  */
1438 static noinline_for_stack
1439 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1440                                  struct btrfs_root *root,
1441                                  struct btrfs_path *path,
1442                                  struct btrfs_extent_inline_ref *iref,
1443                                  int refs_to_mod,
1444                                  struct btrfs_delayed_extent_op *extent_op)
1445 {
1446         struct extent_buffer *leaf;
1447         struct btrfs_extent_item *ei;
1448         struct btrfs_extent_data_ref *dref = NULL;
1449         struct btrfs_shared_data_ref *sref = NULL;
1450         unsigned long ptr;
1451         unsigned long end;
1452         u32 item_size;
1453         int size;
1454         int type;
1455         int ret;
1456         u64 refs;
1457
1458         leaf = path->nodes[0];
1459         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1460         refs = btrfs_extent_refs(leaf, ei);
1461         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1462         refs += refs_to_mod;
1463         btrfs_set_extent_refs(leaf, ei, refs);
1464         if (extent_op)
1465                 __run_delayed_extent_op(extent_op, leaf, ei);
1466
1467         type = btrfs_extent_inline_ref_type(leaf, iref);
1468
1469         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1470                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1471                 refs = btrfs_extent_data_ref_count(leaf, dref);
1472         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1473                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1474                 refs = btrfs_shared_data_ref_count(leaf, sref);
1475         } else {
1476                 refs = 1;
1477                 BUG_ON(refs_to_mod != -1);
1478         }
1479
1480         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1481         refs += refs_to_mod;
1482
1483         if (refs > 0) {
1484                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1485                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1486                 else
1487                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1488         } else {
1489                 size =  btrfs_extent_inline_ref_size(type);
1490                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1491                 ptr = (unsigned long)iref;
1492                 end = (unsigned long)ei + item_size;
1493                 if (ptr + size < end)
1494                         memmove_extent_buffer(leaf, ptr, ptr + size,
1495                                               end - ptr - size);
1496                 item_size -= size;
1497                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1498                 BUG_ON(ret);
1499         }
1500         btrfs_mark_buffer_dirty(leaf);
1501         return 0;
1502 }
1503
1504 static noinline_for_stack
1505 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1506                                  struct btrfs_root *root,
1507                                  struct btrfs_path *path,
1508                                  u64 bytenr, u64 num_bytes, u64 parent,
1509                                  u64 root_objectid, u64 owner,
1510                                  u64 offset, int refs_to_add,
1511                                  struct btrfs_delayed_extent_op *extent_op)
1512 {
1513         struct btrfs_extent_inline_ref *iref;
1514         int ret;
1515
1516         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1517                                            bytenr, num_bytes, parent,
1518                                            root_objectid, owner, offset, 1);
1519         if (ret == 0) {
1520                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1521                 ret = update_inline_extent_backref(trans, root, path, iref,
1522                                                    refs_to_add, extent_op);
1523         } else if (ret == -ENOENT) {
1524                 ret = setup_inline_extent_backref(trans, root, path, iref,
1525                                                   parent, root_objectid,
1526                                                   owner, offset, refs_to_add,
1527                                                   extent_op);
1528         }
1529         return ret;
1530 }
1531
1532 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1533                                  struct btrfs_root *root,
1534                                  struct btrfs_path *path,
1535                                  u64 bytenr, u64 parent, u64 root_objectid,
1536                                  u64 owner, u64 offset, int refs_to_add)
1537 {
1538         int ret;
1539         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1540                 BUG_ON(refs_to_add != 1);
1541                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1542                                             parent, root_objectid);
1543         } else {
1544                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1545                                              parent, root_objectid,
1546                                              owner, offset, refs_to_add);
1547         }
1548         return ret;
1549 }
1550
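/*
 * Drop 'refs_to_drop' back refs for one extent.  Inline refs are updated
 * (or removed) in place; keyed data ref items get their count lowered or
 * are deleted; keyed tree block ref items are simply deleted.
 */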
1551 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1552                                  struct btrfs_root *root,
1553                                  struct btrfs_path *path,
1554                                  struct btrfs_extent_inline_ref *iref,
1555                                  int refs_to_drop, int is_data)
1556 {
1557         int ret;
1558
1559         BUG_ON(!is_data && refs_to_drop != 1);
1560         if (iref) {
1561                 ret = update_inline_extent_backref(trans, root, path, iref,
1562                                                    -refs_to_drop, NULL);
1563         } else if (is_data) {
1564                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1565         } else {
1566                 ret = btrfs_del_item(trans, root, path);
1567         }
1568         return ret;
1569 }
1570
1571 #ifdef BIO_RW_DISCARD
1572 static void btrfs_issue_discard(struct block_device *bdev,
1573                                 u64 start, u64 len)
1574 {
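        /* blkdev_issue_discard() works in 512-byte sectors, hence the >> 9 */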
1575         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
1576 }
1577 #endif
1578
1579 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1580                                 u64 num_bytes)
1581 {
1582 #ifdef BIO_RW_DISCARD
1583         int ret;
1584         u64 map_length = num_bytes;
1585         struct btrfs_multi_bio *multi = NULL;
1586
1587         /* Tell the block device(s) that the sectors can be discarded */
1588         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1589                               bytenr, &map_length, &multi, 0);
1590         if (!ret) {
1591                 struct btrfs_bio_stripe *stripe = multi->stripes;
1592                 int i;
1593
1594                 if (map_length > num_bytes)
1595                         map_length = num_bytes;
1596
1597                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1598                         btrfs_issue_discard(stripe->dev->bdev,
1599                                             stripe->physical,
1600                                             map_length);
1601                 }
1602                 kfree(multi);
1603         }
1604
1605         return ret;
1606 #else
1607         return 0;
1608 #endif
1609 }
1610
1611 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1612                          struct btrfs_root *root,
1613                          u64 bytenr, u64 num_bytes, u64 parent,
1614                          u64 root_objectid, u64 owner, u64 offset)
1615 {
1616         int ret;
1617         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1618                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1619
1620         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1621                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1622                                         parent, root_objectid, (int)owner,
1623                                         BTRFS_ADD_DELAYED_REF, NULL);
1624         } else {
1625                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1626                                         parent, root_objectid, owner, offset,
1627                                         BTRFS_ADD_DELAYED_REF, NULL);
1628         }
1629         return ret;
1630 }
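
/*
 * Usage sketch (illustrative only, kept out of the build): a caller that
 * lets a second file position share an existing on-disk extent would bump
 * the ref count roughly like this.  The bytenr/num_bytes pair would come
 * from the file extent item being copied.
 */
#if 0
static int example_share_file_extent(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 disk_bytenr, u64 disk_num_bytes,
				     u64 ino, u64 file_offset)
{
	/* parent == 0: add a full back ref keyed on root/inode/offset */
	return btrfs_inc_extent_ref(trans, root, disk_bytenr, disk_num_bytes,
				    0, root->root_key.objectid,
				    ino, file_offset);
}
#endif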
1631
1632 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1633                                   struct btrfs_root *root,
1634                                   u64 bytenr, u64 num_bytes,
1635                                   u64 parent, u64 root_objectid,
1636                                   u64 owner, u64 offset, int refs_to_add,
1637                                   struct btrfs_delayed_extent_op *extent_op)
1638 {
1639         struct btrfs_path *path;
1640         struct extent_buffer *leaf;
1641         struct btrfs_extent_item *item;
1642         u64 refs;
1643         int ret;
1644         int err = 0;
1645
1646         path = btrfs_alloc_path();
1647         if (!path)
1648                 return -ENOMEM;
1649
1650         path->reada = 1;
1651         path->leave_spinning = 1;
1652         /* this will set up the path even if it fails to insert the back ref */
1653         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1654                                            path, bytenr, num_bytes, parent,
1655                                            root_objectid, owner, offset,
1656                                            refs_to_add, extent_op);
1657         if (ret == 0)
1658                 goto out;
1659
1660         if (ret != -EAGAIN) {
1661                 err = ret;
1662                 goto out;
1663         }
1664
1665         leaf = path->nodes[0];
1666         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1667         refs = btrfs_extent_refs(leaf, item);
1668         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1669         if (extent_op)
1670                 __run_delayed_extent_op(extent_op, leaf, item);
1671
1672         btrfs_mark_buffer_dirty(leaf);
1673         btrfs_release_path(root->fs_info->extent_root, path);
1674
1675         path->reada = 1;
1676         path->leave_spinning = 1;
1677
1678         /* now insert the actual backref */
1679         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1680                                     path, bytenr, parent, root_objectid,
1681                                     owner, offset, refs_to_add);
1682         BUG_ON(ret);
1683 out:
1684         btrfs_free_path(path);
1685         return err;
1686 }
1687
1688 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1689                                 struct btrfs_root *root,
1690                                 struct btrfs_delayed_ref_node *node,
1691                                 struct btrfs_delayed_extent_op *extent_op,
1692                                 int insert_reserved)
1693 {
1694         int ret = 0;
1695         struct btrfs_delayed_data_ref *ref;
1696         struct btrfs_key ins;
1697         u64 parent = 0;
1698         u64 ref_root = 0;
1699         u64 flags = 0;
1700
1701         ins.objectid = node->bytenr;
1702         ins.offset = node->num_bytes;
1703         ins.type = BTRFS_EXTENT_ITEM_KEY;
1704
1705         ref = btrfs_delayed_node_to_data_ref(node);
1706         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1707                 parent = ref->parent;
1708         else
1709                 ref_root = ref->root;
1710
1711         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1712                 if (extent_op) {
1713                         BUG_ON(extent_op->update_key);
1714                         flags |= extent_op->flags_to_set;
1715                 }
1716                 ret = alloc_reserved_file_extent(trans, root,
1717                                                  parent, ref_root, flags,
1718                                                  ref->objectid, ref->offset,
1719                                                  &ins, node->ref_mod);
1720         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1721                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1722                                              node->num_bytes, parent,
1723                                              ref_root, ref->objectid,
1724                                              ref->offset, node->ref_mod,
1725                                              extent_op);
1726         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1727                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1728                                           node->num_bytes, parent,
1729                                           ref_root, ref->objectid,
1730                                           ref->offset, node->ref_mod,
1731                                           extent_op);
1732         } else {
1733                 BUG();
1734         }
1735         return ret;
1736 }
1737
1738 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1739                                     struct extent_buffer *leaf,
1740                                     struct btrfs_extent_item *ei)
1741 {
1742         u64 flags = btrfs_extent_flags(leaf, ei);
1743         if (extent_op->update_flags) {
1744                 flags |= extent_op->flags_to_set;
1745                 btrfs_set_extent_flags(leaf, ei, flags);
1746         }
1747
1748         if (extent_op->update_key) {
1749                 struct btrfs_tree_block_info *bi;
1750                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1751                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1752                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1753         }
1754 }
1755
1756 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1757                                  struct btrfs_root *root,
1758                                  struct btrfs_delayed_ref_node *node,
1759                                  struct btrfs_delayed_extent_op *extent_op)
1760 {
1761         struct btrfs_key key;
1762         struct btrfs_path *path;
1763         struct btrfs_extent_item *ei;
1764         struct extent_buffer *leaf;
1765         u32 item_size;
1766         int ret;
1767         int err = 0;
1768
1769         path = btrfs_alloc_path();
1770         if (!path)
1771                 return -ENOMEM;
1772
1773         key.objectid = node->bytenr;
1774         key.type = BTRFS_EXTENT_ITEM_KEY;
1775         key.offset = node->num_bytes;
1776
1777         path->reada = 1;
1778         path->leave_spinning = 1;
1779         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1780                                 path, 0, 1);
1781         if (ret < 0) {
1782                 err = ret;
1783                 goto out;
1784         }
1785         if (ret > 0) {
1786                 err = -EIO;
1787                 goto out;
1788         }
1789
1790         leaf = path->nodes[0];
1791         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1792 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1793         if (item_size < sizeof(*ei)) {
1794                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1795                                              path, (u64)-1, 0);
1796                 if (ret < 0) {
1797                         err = ret;
1798                         goto out;
1799                 }
1800                 leaf = path->nodes[0];
1801                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1802         }
1803 #endif
1804         BUG_ON(item_size < sizeof(*ei));
1805         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1806         __run_delayed_extent_op(extent_op, leaf, ei);
1807
1808         btrfs_mark_buffer_dirty(leaf);
1809 out:
1810         btrfs_free_path(path);
1811         return err;
1812 }
1813
1814 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1815                                 struct btrfs_root *root,
1816                                 struct btrfs_delayed_ref_node *node,
1817                                 struct btrfs_delayed_extent_op *extent_op,
1818                                 int insert_reserved)
1819 {
1820         int ret = 0;
1821         struct btrfs_delayed_tree_ref *ref;
1822         struct btrfs_key ins;
1823         u64 parent = 0;
1824         u64 ref_root = 0;
1825
1826         ins.objectid = node->bytenr;
1827         ins.offset = node->num_bytes;
1828         ins.type = BTRFS_EXTENT_ITEM_KEY;
1829
1830         ref = btrfs_delayed_node_to_tree_ref(node);
1831         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1832                 parent = ref->parent;
1833         else
1834                 ref_root = ref->root;
1835
1836         BUG_ON(node->ref_mod != 1);
1837         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1838                 BUG_ON(!extent_op || !extent_op->update_flags ||
1839                        !extent_op->update_key);
1840                 ret = alloc_reserved_tree_block(trans, root,
1841                                                 parent, ref_root,
1842                                                 extent_op->flags_to_set,
1843                                                 &extent_op->key,
1844                                                 ref->level, &ins);
1845         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1846                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1847                                              node->num_bytes, parent, ref_root,
1848                                              ref->level, 0, 1, extent_op);
1849         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1850                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1851                                           node->num_bytes, parent, ref_root,
1852                                           ref->level, 0, 1, extent_op);
1853         } else {
1854                 BUG();
1855         }
1856         return ret;
1857 }
1858
1859
1860 /* helper function to actually process a single delayed ref entry */
1861 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1862                                struct btrfs_root *root,
1863                                struct btrfs_delayed_ref_node *node,
1864                                struct btrfs_delayed_extent_op *extent_op,
1865                                int insert_reserved)
1866 {
1867         int ret;
1868         if (btrfs_delayed_ref_is_head(node)) {
1869                 struct btrfs_delayed_ref_head *head;
1870                 /*
1871                  * we've hit the end of the chain and we were supposed
1872                  * to insert this extent into the tree.  But it got
1873                  * deleted before we ever needed to insert it, so all
1874                  * we have to do is clean up the accounting.
1875                  */
1876                 BUG_ON(extent_op);
1877                 head = btrfs_delayed_node_to_head(node);
1878                 if (insert_reserved) {
1879                         int mark_free = 0;
1880                         struct extent_buffer *must_clean = NULL;
1881
1882                         ret = pin_down_bytes(trans, root, NULL,
1883                                              node->bytenr, node->num_bytes,
1884                                              head->is_data, 1, &must_clean);
1885                         if (ret > 0)
1886                                 mark_free = 1;
1887
1888                         if (must_clean) {
1889                                 clean_tree_block(NULL, root, must_clean);
1890                                 btrfs_tree_unlock(must_clean);
1891                                 free_extent_buffer(must_clean);
1892                         }
1893                         if (head->is_data) {
1894                                 ret = btrfs_del_csums(trans, root,
1895                                                       node->bytenr,
1896                                                       node->num_bytes);
1897                                 BUG_ON(ret);
1898                         }
1899                         if (mark_free) {
1900                                 ret = btrfs_free_reserved_extent(root,
1901                                                         node->bytenr,
1902                                                         node->num_bytes);
1903                                 BUG_ON(ret);
1904                         }
1905                 }
1906                 mutex_unlock(&head->mutex);
1907                 return 0;
1908         }
1909
1910         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1911             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1912                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
1913                                            insert_reserved);
1914         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1915                  node->type == BTRFS_SHARED_DATA_REF_KEY)
1916                 ret = run_delayed_data_ref(trans, root, node, extent_op,
1917                                            insert_reserved);
1918         else
1919                 BUG();
1920         return ret;
1921 }
1922
1923 static noinline struct btrfs_delayed_ref_node *
1924 select_delayed_ref(struct btrfs_delayed_ref_head *head)
1925 {
1926         struct rb_node *node;
1927         struct btrfs_delayed_ref_node *ref;
1928         int action = BTRFS_ADD_DELAYED_REF;
1929 again:
1930         /*
1931          * select delayed refs of type BTRFS_ADD_DELAYED_REF first.
1932          * this prevents the ref count from going down to zero while
1933          * there are still pending delayed refs.
1934          */
1935         node = rb_prev(&head->node.rb_node);
1936         while (1) {
1937                 if (!node)
1938                         break;
1939                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
1940                                 rb_node);
1941                 if (ref->bytenr != head->node.bytenr)
1942                         break;
1943                 if (ref->action == action)
1944                         return ref;
1945                 node = rb_prev(node);
1946         }
1947         if (action == BTRFS_ADD_DELAYED_REF) {
1948                 action = BTRFS_DROP_DELAYED_REF;
1949                 goto again;
1950         }
1951         return NULL;
1952 }
1953
1954 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
1955                                        struct btrfs_root *root,
1956                                        struct list_head *cluster)
1957 {
1958         struct btrfs_delayed_ref_root *delayed_refs;
1959         struct btrfs_delayed_ref_node *ref;
1960         struct btrfs_delayed_ref_head *locked_ref = NULL;
1961         struct btrfs_delayed_extent_op *extent_op;
1962         int ret;
1963         int count = 0;
1964         int must_insert_reserved = 0;
1965
1966         delayed_refs = &trans->transaction->delayed_refs;
1967         while (1) {
1968                 if (!locked_ref) {
1969                         /* pick a new head ref from the cluster list */
1970                         if (list_empty(cluster))
1971                                 break;
1972
1973                         locked_ref = list_entry(cluster->next,
1974                                      struct btrfs_delayed_ref_head, cluster);
1975
1976                         /* grab the lock that says we are going to process
1977                          * all the refs for this head */
1978                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
1979
1980                         /*
1981                          * we may have dropped the spin lock to get the head
1982                          * mutex lock, and that might have given someone else
1983                          * time to free the head.  If that's true, it has been
1984                          * removed from our list and we can move on.
1985                          */
1986                         if (ret == -EAGAIN) {
1987                                 locked_ref = NULL;
1988                                 count++;
1989                                 continue;
1990                         }
1991                 }
1992
1993                 /*
1994                  * record the must insert reserved flag before we
1995                  * drop the spin lock.
1996                  */
1997                 must_insert_reserved = locked_ref->must_insert_reserved;
1998                 locked_ref->must_insert_reserved = 0;
1999
2000                 extent_op = locked_ref->extent_op;
2001                 locked_ref->extent_op = NULL;
2002
2003                 /*
2004                  * locked_ref is the head node, so we have to go one
2005                  * node back for any delayed ref updates
2006                  */
2007                 ref = select_delayed_ref(locked_ref);
2008                 if (!ref) {
2009                         /* All delayed refs have been processed.  Go ahead
2010                          * and send the head node to run_one_delayed_ref,
2011                          * so that any accounting fixes can happen.
2012                          */
2013                         ref = &locked_ref->node;
2014
2015                         if (extent_op && must_insert_reserved) {
2016                                 kfree(extent_op);
2017                                 extent_op = NULL;
2018                         }
2019
2020                         if (extent_op) {
2021                                 spin_unlock(&delayed_refs->lock);
2022
2023                                 ret = run_delayed_extent_op(trans, root,
2024                                                             ref, extent_op);
2025                                 BUG_ON(ret);
2026                                 kfree(extent_op);
2027
2028                                 cond_resched();
2029                                 spin_lock(&delayed_refs->lock);
2030                                 continue;
2031                         }
2032
2033                         list_del_init(&locked_ref->cluster);
2034                         locked_ref = NULL;
2035                 }
2036
2037                 ref->in_tree = 0;
2038                 rb_erase(&ref->rb_node, &delayed_refs->root);
2039                 delayed_refs->num_entries--;
2040
2041                 spin_unlock(&delayed_refs->lock);
2042
2043                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2044                                           must_insert_reserved);
2045                 BUG_ON(ret);
2046
2047                 btrfs_put_delayed_ref(ref);
2048                 kfree(extent_op);
2049                 count++;
2050
2051                 cond_resched();
2052                 spin_lock(&delayed_refs->lock);
2053         }
2054         return count;
2055 }
2056
2057 /*
2058  * this starts processing the delayed reference count updates and
2059  * extent insertions we have queued up so far.  count can be
2060  * 0, which means to process everything in the tree at the start
2061  * of the run (but not newly added entries), or it can be some target
2062  * number you'd like to process.
2063  */
2064 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2065                            struct btrfs_root *root, unsigned long count)
2066 {
2067         struct rb_node *node;
2068         struct btrfs_delayed_ref_root *delayed_refs;
2069         struct btrfs_delayed_ref_node *ref;
2070         struct list_head cluster;
2071         int ret;
2072         int run_all = count == (unsigned long)-1;
2073         int run_most = 0;
2074
2075         if (root == root->fs_info->extent_root)
2076                 root = root->fs_info->tree_root;
2077
2078         delayed_refs = &trans->transaction->delayed_refs;
2079         INIT_LIST_HEAD(&cluster);
2080 again:
2081         spin_lock(&delayed_refs->lock);
2082         if (count == 0) {
2083                 count = delayed_refs->num_entries * 2;
2084                 run_most = 1;
2085         }
2086         while (1) {
2087                 if (!(run_all || run_most) &&
2088                     delayed_refs->num_heads_ready < 64)
2089                         break;
2090
2091                 /*
2092                  * go find something we can process in the rbtree.  We start at
2093                  * the beginning of the tree, and then build a cluster
2094                  * of refs to process starting at the first one we are able to
2095                  * lock
2096                  */
2097                 ret = btrfs_find_ref_cluster(trans, &cluster,
2098                                              delayed_refs->run_delayed_start);
2099                 if (ret)
2100                         break;
2101
2102                 ret = run_clustered_refs(trans, root, &cluster);
2103                 BUG_ON(ret < 0);
2104
2105                 count -= min_t(unsigned long, ret, count);
2106
2107                 if (count == 0)
2108                         break;
2109         }
2110
2111         if (run_all) {
2112                 node = rb_first(&delayed_refs->root);
2113                 if (!node)
2114                         goto out;
2115                 count = (unsigned long)-1;
2116
2117                 while (node) {
2118                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2119                                        rb_node);
2120                         if (btrfs_delayed_ref_is_head(ref)) {
2121                                 struct btrfs_delayed_ref_head *head;
2122
2123                                 head = btrfs_delayed_node_to_head(ref);
2124                                 atomic_inc(&ref->refs);
2125
2126                                 spin_unlock(&delayed_refs->lock);
2127                                 mutex_lock(&head->mutex);
2128                                 mutex_unlock(&head->mutex);
2129
2130                                 btrfs_put_delayed_ref(ref);
2131                                 cond_resched();
2132                                 goto again;
2133                         }
2134                         node = rb_next(node);
2135                 }
2136                 spin_unlock(&delayed_refs->lock);
2137                 schedule_timeout(1);
2138                 goto again;
2139         }
2140 out:
2141         spin_unlock(&delayed_refs->lock);
2142         return 0;
2143 }
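
/*
 * Usage sketch (illustrative only, kept out of the build): callers either
 * flush a rough batch or drain the delayed ref tree completely, as the
 * comment above describes for the two special count values.
 */
#if 0
static void example_flush_delayed_refs(struct btrfs_trans_handle *trans,
				       struct btrfs_root *root)
{
	/* process roughly everything queued at the start of the call */
	btrfs_run_delayed_refs(trans, root, 0);

	/* keep going until the delayed ref tree is completely empty */
	btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
}
#endif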
2144
2145 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2146                                 struct btrfs_root *root,
2147                                 u64 bytenr, u64 num_bytes, u64 flags,
2148                                 int is_data)
2149 {
2150         struct btrfs_delayed_extent_op *extent_op;
2151         int ret;
2152
2153         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2154         if (!extent_op)
2155                 return -ENOMEM;
2156
2157         extent_op->flags_to_set = flags;
2158         extent_op->update_flags = 1;
2159         extent_op->update_key = 0;
2160         extent_op->is_data = is_data ? 1 : 0;
2161
2162         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2163         if (ret)
2164                 kfree(extent_op);
2165         return ret;
2166 }
2167
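/*
 * The two helpers below back btrfs_cross_ref_exist(): check_delayed_ref()
 * scans the pending delayed refs for 'bytenr' and check_committed_ref()
 * scans the committed extent tree.  Each returns 0 when the extent is
 * provably referenced only by the given root/inode/offset, a positive
 * value when it may be shared, and -ENOENT when no reference is found.
 */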
2168 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2169                                       struct btrfs_root *root,
2170                                       struct btrfs_path *path,
2171                                       u64 objectid, u64 offset, u64 bytenr)
2172 {
2173         struct btrfs_delayed_ref_head *head;
2174         struct btrfs_delayed_ref_node *ref;
2175         struct btrfs_delayed_data_ref *data_ref;
2176         struct btrfs_delayed_ref_root *delayed_refs;
2177         struct rb_node *node;
2178         int ret = 0;
2179
2180         ret = -ENOENT;
2181         delayed_refs = &trans->transaction->delayed_refs;
2182         spin_lock(&delayed_refs->lock);
2183         head = btrfs_find_delayed_ref_head(trans, bytenr);
2184         if (!head)
2185                 goto out;
2186
2187         if (!mutex_trylock(&head->mutex)) {
2188                 atomic_inc(&head->node.refs);
2189                 spin_unlock(&delayed_refs->lock);
2190
2191                 btrfs_release_path(root->fs_info->extent_root, path);
2192
2193                 mutex_lock(&head->mutex);
2194                 mutex_unlock(&head->mutex);
2195                 btrfs_put_delayed_ref(&head->node);
2196                 return -EAGAIN;
2197         }
2198
2199         node = rb_prev(&head->node.rb_node);
2200         if (!node)
2201                 goto out_unlock;
2202
2203         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2204
2205         if (ref->bytenr != bytenr)
2206                 goto out_unlock;
2207
2208         ret = 1;
2209         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2210                 goto out_unlock;
2211
2212         data_ref = btrfs_delayed_node_to_data_ref(ref);
2213
2214         node = rb_prev(node);
2215         if (node) {
2216                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2217                 if (ref->bytenr == bytenr)
2218                         goto out_unlock;
2219         }
2220
2221         if (data_ref->root != root->root_key.objectid ||
2222             data_ref->objectid != objectid || data_ref->offset != offset)
2223                 goto out_unlock;
2224
2225         ret = 0;
2226 out_unlock:
2227         mutex_unlock(&head->mutex);
2228 out:
2229         spin_unlock(&delayed_refs->lock);
2230         return ret;
2231 }
2232
2233 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2234                                         struct btrfs_root *root,
2235                                         struct btrfs_path *path,
2236                                         u64 objectid, u64 offset, u64 bytenr)
2237 {
2238         struct btrfs_root *extent_root = root->fs_info->extent_root;
2239         struct extent_buffer *leaf;
2240         struct btrfs_extent_data_ref *ref;
2241         struct btrfs_extent_inline_ref *iref;
2242         struct btrfs_extent_item *ei;
2243         struct btrfs_key key;
2244         u32 item_size;
2245         int ret;
2246
2247         key.objectid = bytenr;
2248         key.offset = (u64)-1;
2249         key.type = BTRFS_EXTENT_ITEM_KEY;
2250
2251         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2252         if (ret < 0)
2253                 goto out;
2254         BUG_ON(ret == 0);
2255
2256         ret = -ENOENT;
2257         if (path->slots[0] == 0)
2258                 goto out;
2259
2260         path->slots[0]--;
2261         leaf = path->nodes[0];
2262         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2263
2264         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2265                 goto out;
2266
2267         ret = 1;
2268         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2269 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2270         if (item_size < sizeof(*ei)) {
2271                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2272                 goto out;
2273         }
2274 #endif
2275         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2276
2277         if (item_size != sizeof(*ei) +
2278             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2279                 goto out;
2280
2281         if (btrfs_extent_generation(leaf, ei) <=
2282             btrfs_root_last_snapshot(&root->root_item))
2283                 goto out;
2284
2285         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2286         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2287             BTRFS_EXTENT_DATA_REF_KEY)
2288                 goto out;
2289
2290         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2291         if (btrfs_extent_refs(leaf, ei) !=
2292             btrfs_extent_data_ref_count(leaf, ref) ||
2293             btrfs_extent_data_ref_root(leaf, ref) !=
2294             root->root_key.objectid ||
2295             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2296             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2297                 goto out;
2298
2299         ret = 0;
2300 out:
2301         return ret;
2302 }
2303
2304 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2305                           struct btrfs_root *root,
2306                           u64 objectid, u64 offset, u64 bytenr)
2307 {
2308         struct btrfs_path *path;
2309         int ret;
2310         int ret2;
2311
2312         path = btrfs_alloc_path();
2313         if (!path)
2314                 return -ENOENT;
2315
2316         do {
2317                 ret = check_committed_ref(trans, root, path, objectid,
2318                                           offset, bytenr);
2319                 if (ret && ret != -ENOENT)
2320                         goto out;
2321
2322                 ret2 = check_delayed_ref(trans, root, path, objectid,
2323                                          offset, bytenr);
2324         } while (ret2 == -EAGAIN);
2325
2326         if (ret2 && ret2 != -ENOENT) {
2327                 ret = ret2;
2328                 goto out;
2329         }
2330
2331         if (ret != -ENOENT || ret2 != -ENOENT)
2332                 ret = 0;
2333 out:
2334         btrfs_free_path(path);
2335         return ret;
2336 }
2337
2338 #if 0
2339 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2340                     struct extent_buffer *buf, u32 nr_extents)
2341 {
2342         struct btrfs_key key;
2343         struct btrfs_file_extent_item *fi;
2344         u64 root_gen;
2345         u32 nritems;
2346         int i;
2347         int level;
2348         int ret = 0;
2349         int shared = 0;
2350
2351         if (!root->ref_cows)
2352                 return 0;
2353
2354         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2355                 shared = 0;
2356                 root_gen = root->root_key.offset;
2357         } else {
2358                 shared = 1;
2359                 root_gen = trans->transid - 1;
2360         }
2361
2362         level = btrfs_header_level(buf);
2363         nritems = btrfs_header_nritems(buf);
2364
2365         if (level == 0) {
2366                 struct btrfs_leaf_ref *ref;
2367                 struct btrfs_extent_info *info;
2368
2369                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2370                 if (!ref) {
2371                         ret = -ENOMEM;
2372                         goto out;
2373                 }
2374
2375                 ref->root_gen = root_gen;
2376                 ref->bytenr = buf->start;
2377                 ref->owner = btrfs_header_owner(buf);
2378                 ref->generation = btrfs_header_generation(buf);
2379                 ref->nritems = nr_extents;
2380                 info = ref->extents;
2381
2382                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2383                         u64 disk_bytenr;
2384                         btrfs_item_key_to_cpu(buf, &key, i);
2385                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2386                                 continue;
2387                         fi = btrfs_item_ptr(buf, i,
2388                                             struct btrfs_file_extent_item);
2389                         if (btrfs_file_extent_type(buf, fi) ==
2390                             BTRFS_FILE_EXTENT_INLINE)
2391                                 continue;
2392                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2393                         if (disk_bytenr == 0)
2394                                 continue;
2395
2396                         info->bytenr = disk_bytenr;
2397                         info->num_bytes =
2398                                 btrfs_file_extent_disk_num_bytes(buf, fi);
2399                         info->objectid = key.objectid;
2400                         info->offset = key.offset;
2401                         info++;
2402                 }
2403
2404                 ret = btrfs_add_leaf_ref(root, ref, shared);
2405                 if (ret == -EEXIST && shared) {
2406                         struct btrfs_leaf_ref *old;
2407                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2408                         BUG_ON(!old);
2409                         btrfs_remove_leaf_ref(root, old);
2410                         btrfs_free_leaf_ref(root, old);
2411                         ret = btrfs_add_leaf_ref(root, ref, shared);
2412                 }
2413                 WARN_ON(ret);
2414                 btrfs_free_leaf_ref(root, ref);
2415         }
2416 out:
2417         return ret;
2418 }
2419
2420 /* when a block goes through cow, we update the reference counts of
2421  * everything that block points to.  The internal pointers of the block
2422  * can be in just about any order, and it is likely to have clusters of
2423  * things that are close together and clusters of things that are not.
2424  *
2425  * To help reduce the seeks that come with updating all of these reference
2426  * counts, sort them by byte number before actual updates are done.
2427  *
2428  * struct refsort is used to match byte number to slot in the btree block.
2429  * we sort based on the byte number and then use the slot to actually
2430  * find the item.
2431  *
2432  * struct refsort is smaller than struct btrfs_item and smaller than
2433  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2434  * for a btree block, there's no way for a kmalloc of refsorts for a
2435  * single node to be bigger than a page.
2436  */
2437 struct refsort {
2438         u64 bytenr;
2439         u32 slot;
2440 };
2441
2442 /*
2443  * for passing into sort()
2444  */
2445 static int refsort_cmp(const void *a_void, const void *b_void)
2446 {
2447         const struct refsort *a = a_void;
2448         const struct refsort *b = b_void;
2449
2450         if (a->bytenr < b->bytenr)
2451                 return -1;
2452         if (a->bytenr > b->bytenr)
2453                 return 1;
2454         return 0;
2455 }
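
/*
 * Illustrative only (disabled along with the code above): with the
 * comparator, the refs collected for one block would be put into disk
 * order before the updates are issued, e.g.:
 */
static void example_sort_refs(struct refsort *sorted, int refi)
{
	sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
}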
2456 #endif
2457
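/*
 * Walk everything the tree block 'buf' points to and add (inc == 1) or
 * drop (inc == 0) one reference on each entry: file extents for a leaf,
 * child blocks for a node.  full_backref selects refs keyed on the
 * block's own bytenr rather than on the owning root.
 */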
2458 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2459                            struct btrfs_root *root,
2460                            struct extent_buffer *buf,
2461                            int full_backref, int inc)
2462 {
2463         u64 bytenr;
2464         u64 num_bytes;
2465         u64 parent;
2466         u64 ref_root;
2467         u32 nritems;
2468         struct btrfs_key key;
2469         struct btrfs_file_extent_item *fi;
2470         int i;
2471         int level;
2472         int ret = 0;
2473         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2474                             u64, u64, u64, u64, u64, u64);
2475
2476         ref_root = btrfs_header_owner(buf);
2477         nritems = btrfs_header_nritems(buf);
2478         level = btrfs_header_level(buf);
2479
2480         if (!root->ref_cows && level == 0)
2481                 return 0;
2482
2483         if (inc)
2484                 process_func = btrfs_inc_extent_ref;
2485         else
2486                 process_func = btrfs_free_extent;
2487
2488         if (full_backref)
2489                 parent = buf->start;
2490         else
2491                 parent = 0;
2492
2493         for (i = 0; i < nritems; i++) {
2494                 if (level == 0) {
2495                         btrfs_item_key_to_cpu(buf, &key, i);
2496                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2497                                 continue;
2498                         fi = btrfs_item_ptr(buf, i,
2499                                             struct btrfs_file_extent_item);
2500                         if (btrfs_file_extent_type(buf, fi) ==
2501                             BTRFS_FILE_EXTENT_INLINE)
2502                                 continue;
2503                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2504                         if (bytenr == 0)
2505                                 continue;
2506
2507                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2508                         key.offset -= btrfs_file_extent_offset(buf, fi);
2509                         ret = process_func(trans, root, bytenr, num_bytes,
2510                                            parent, ref_root, key.objectid,
2511                                            key.offset);
2512                         if (ret)
2513                                 goto fail;
2514                 } else {
2515                         bytenr = btrfs_node_blockptr(buf, i);
2516                         num_bytes = btrfs_level_size(root, level - 1);
2517                         ret = process_func(trans, root, bytenr, num_bytes,
2518                                            parent, ref_root, level - 1, 0);
2519                         if (ret)
2520                                 goto fail;
2521                 }
2522         }
2523         return 0;
2524 fail:
2525         BUG();
2526         return ret;
2527 }
2528
2529 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2530                   struct extent_buffer *buf, int full_backref)
2531 {
2532         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2533 }
2534
2535 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2536                   struct extent_buffer *buf, int full_backref)
2537 {
2538         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2539 }
2540
2541 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2542                                  struct btrfs_root *root,
2543                                  struct btrfs_path *path,
2544                                  struct btrfs_block_group_cache *cache)
2545 {
2546         int ret;
2547         struct btrfs_root *extent_root = root->fs_info->extent_root;
2548         unsigned long bi;
2549         struct extent_buffer *leaf;
2550
2551         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2552         if (ret < 0)
2553                 goto fail;
2554         BUG_ON(ret);
2555
2556         leaf = path->nodes[0];
2557         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2558         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2559         btrfs_mark_buffer_dirty(leaf);
2560         btrfs_release_path(extent_root, path);
2561 fail:
2562         if (ret)
2563                 return ret;
2564         return 0;
2565
2566 }
2567
2568 static struct btrfs_block_group_cache *
2569 next_block_group(struct btrfs_root *root,
2570                  struct btrfs_block_group_cache *cache)
2571 {
2572         struct rb_node *node;
2573         spin_lock(&root->fs_info->block_group_cache_lock);
2574         node = rb_next(&cache->cache_node);
2575         btrfs_put_block_group(cache);
2576         if (node) {
2577                 cache = rb_entry(node, struct btrfs_block_group_cache,
2578                                  cache_node);
2579                 atomic_inc(&cache->count);
2580         } else
2581                 cache = NULL;
2582         spin_unlock(&root->fs_info->block_group_cache_lock);
2583         return cache;
2584 }
2585
2586 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2587                                    struct btrfs_root *root)
2588 {
2589         struct btrfs_block_group_cache *cache;
2590         int err = 0;
2591         struct btrfs_path *path;
2592         u64 last = 0;
2593
2594         path = btrfs_alloc_path();
2595         if (!path)
2596                 return -ENOMEM;
2597
2598         while (1) {
2599                 if (last == 0) {
2600                         err = btrfs_run_delayed_refs(trans, root,
2601                                                      (unsigned long)-1);
2602                         BUG_ON(err);
2603                 }
2604
2605                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2606                 while (cache) {
2607                         if (cache->dirty)
2608                                 break;
2609                         cache = next_block_group(root, cache);
2610                 }
2611                 if (!cache) {
2612                         if (last == 0)
2613                                 break;
2614                         last = 0;
2615                         continue;
2616                 }
2617
2618                 cache->dirty = 0;
2619                 last = cache->key.objectid + cache->key.offset;
2620
2621                 err = write_one_cache_group(trans, root, path, cache);
2622                 BUG_ON(err);
2623                 btrfs_put_block_group(cache);
2624         }
2625
2626         btrfs_free_path(path);
2627         return 0;
2628 }
2629
2630 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2631 {
2632         struct btrfs_block_group_cache *block_group;
2633         int readonly = 0;
2634
2635         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2636         if (!block_group || block_group->ro)
2637                 readonly = 1;
2638         if (block_group)
2639                 btrfs_put_block_group(block_group);
2640         return readonly;
2641 }
2642
2643 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2644                              u64 total_bytes, u64 bytes_used,
2645                              struct btrfs_space_info **space_info)
2646 {
2647         struct btrfs_space_info *found;
2648
2649         found = __find_space_info(info, flags);
2650         if (found) {
2651                 spin_lock(&found->lock);
2652                 found->total_bytes += total_bytes;
2653                 found->bytes_used += bytes_used;
2654                 found->full = 0;
2655                 spin_unlock(&found->lock);
2656                 *space_info = found;
2657                 return 0;
2658         }
2659         found = kzalloc(sizeof(*found), GFP_NOFS);
2660         if (!found)
2661                 return -ENOMEM;
2662
2663         INIT_LIST_HEAD(&found->block_groups);
2664         init_rwsem(&found->groups_sem);
2665         spin_lock_init(&found->lock);
2666         found->flags = flags;
2667         found->total_bytes = total_bytes;
2668         found->bytes_used = bytes_used;
2669         found->bytes_pinned = 0;
2670         found->bytes_reserved = 0;
2671         found->bytes_readonly = 0;
2672         found->bytes_delalloc = 0;
2673         found->full = 0;
2674         found->force_alloc = 0;
2675         *space_info = found;
2676         list_add_rcu(&found->list, &info->space_info);
2677         atomic_set(&found->caching_threads, 0);
2678         return 0;
2679 }
2680
2681 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2682 {
2683         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2684                                    BTRFS_BLOCK_GROUP_RAID1 |
2685                                    BTRFS_BLOCK_GROUP_RAID10 |
2686                                    BTRFS_BLOCK_GROUP_DUP);
2687         if (extra_flags) {
2688                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2689                         fs_info->avail_data_alloc_bits |= extra_flags;
2690                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2691                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2692                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2693                         fs_info->avail_system_alloc_bits |= extra_flags;
2694         }
2695 }
2696
2697 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2698 {
2699         spin_lock(&cache->space_info->lock);
2700         spin_lock(&cache->lock);
2701         if (!cache->ro) {
2702                 cache->space_info->bytes_readonly += cache->key.offset -
2703                                         btrfs_block_group_used(&cache->item);
2704                 cache->ro = 1;
2705         }
2706         spin_unlock(&cache->lock);
2707         spin_unlock(&cache->space_info->lock);
2708 }
2709
2710 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2711 {
2712         u64 num_devices = root->fs_info->fs_devices->rw_devices;
2713
2714         if (num_devices == 1)
2715                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2716         if (num_devices < 4)
2717                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2718
2719         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2720             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2721                       BTRFS_BLOCK_GROUP_RAID10))) {
2722                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2723         }
2724
2725         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2726             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2727                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2728         }
2729
2730         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2731             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2732              (flags & BTRFS_BLOCK_GROUP_RAID10) |
2733              (flags & BTRFS_BLOCK_GROUP_DUP)))
2734                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2735         return flags;
2736 }
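
/*
 * Example of the reduction above (illustrative, not from the original
 * source): on a filesystem with two rw devices, a requested profile of
 * RAID10|RAID1|DUP reduces to plain RAID1 -- RAID10 is cleared because
 * num_devices < 4, and DUP is then cleared because RAID1 is still set.
 */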
2737
2738 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2739 {
2740         struct btrfs_fs_info *info = root->fs_info;
2741         u64 alloc_profile;
2742
2743         if (data) {
2744                 alloc_profile = info->avail_data_alloc_bits &
2745                         info->data_alloc_profile;
2746                 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2747         } else if (root == root->fs_info->chunk_root) {
2748                 alloc_profile = info->avail_system_alloc_bits &
2749                         info->system_alloc_profile;
2750                 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2751         } else {
2752                 alloc_profile = info->avail_metadata_alloc_bits &
2753                         info->metadata_alloc_profile;
2754                 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2755         }
2756
2757         return btrfs_reduce_alloc_profile(root, data);
2758 }
2759
2760 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2761 {
2762         u64 alloc_target;
2763
2764         alloc_target = btrfs_get_alloc_profile(root, 1);
2765         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2766                                                        alloc_target);
2767 }
2768
2769 static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
2770 {
2771         u64 num_bytes;
2772         int level;
2773
2774         level = BTRFS_MAX_LEVEL - 2;
2775         /*
2776          * NOTE: these calculations are absolutely the worst possible case.
2777          * This assumes that _every_ item we insert will require a new leaf, and
2778          * that the tree has grown to its maximum level size.
2779          */
2780
2781         /*
2782          * for every item we insert we could insert both an extent item and an
2783          * extent ref item.  Then for every item we insert, we will need to cow
2784          * both the original leaf, plus the leaf to the left and right of it.
2785          *
2786          * Unless we are talking about the extent root, in which case we just
2787          * want the number of items * 2: the extent item plus its ref.
2788          */
2789         if (root == root->fs_info->extent_root)
2790                 num_bytes = num_items * 2;
2791         else
2792                 num_bytes = (num_items + (2 * num_items)) * 3;
2793
2794         /*
2795          * num_bytes is total number of leaves we could need times the leaf
2796          * size, and then for every leaf we could end up cow'ing 2 nodes per
2797          * level, down to the leaf level.
2798          */
2799         num_bytes = (num_bytes * root->leafsize) +
2800                 (num_bytes * (level * 2)) * root->nodesize;
2801
2802         return num_bytes;
2803 }
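
/*
 * Worked example of the calculation above (assuming 4k leaves and
 * nodes, and BTRFS_MAX_LEVEL of 8, so level == 6): reserving 1 item on
 * a non-extent root budgets (1 + 2 * 1) * 3 = 9 leaves, giving
 * 9 * 4096 bytes of leaves plus 9 * (6 * 2) * 4096 bytes of cow'd
 * nodes, roughly 468k in total.
 */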
2804
2805 /*
2806  * Unreserve metadata space for delalloc.  If we have fewer reserved credits than
2807  * we have extents, this function does nothing.
2808  */
2809 int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2810                                           struct inode *inode, int num_items)
2811 {
2812         struct btrfs_fs_info *info = root->fs_info;
2813         struct btrfs_space_info *meta_sinfo;
2814         u64 num_bytes;
2815         u64 alloc_target;
2816         bool bug = false;
2817
2818         /* get the space info for where the metadata will live */
2819         alloc_target = btrfs_get_alloc_profile(root, 0);
2820         meta_sinfo = __find_space_info(info, alloc_target);
2821
2822         num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2823                                            num_items);
2824
2825         spin_lock(&meta_sinfo->lock);
2826         spin_lock(&BTRFS_I(inode)->accounting_lock);
2827         if (BTRFS_I(inode)->reserved_extents <=
2828             BTRFS_I(inode)->outstanding_extents) {
2829                 spin_unlock(&BTRFS_I(inode)->accounting_lock);
2830                 spin_unlock(&meta_sinfo->lock);
2831                 return 0;
2832         }
2833         spin_unlock(&BTRFS_I(inode)->accounting_lock);
2834
2835         BTRFS_I(inode)->reserved_extents--;
2836         BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
2837
2838         if (meta_sinfo->bytes_delalloc < num_bytes) {
2839                 bug = true;
2840                 meta_sinfo->bytes_delalloc = 0;
2841         } else {
2842                 meta_sinfo->bytes_delalloc -= num_bytes;
2843         }
2844         spin_unlock(&meta_sinfo->lock);
2845
2846         BUG_ON(bug);
2847
2848         return 0;
2849 }
2850
2851 static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
2852 {
2853         u64 thresh;
2854
2855         thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2856                 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2857                 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2858                 meta_sinfo->bytes_may_use;
2859
2860         thresh = meta_sinfo->total_bytes - thresh;
2861         thresh *= 80;
2862         do_div(thresh, 100);
2863         if (thresh <= meta_sinfo->bytes_delalloc)
2864                 meta_sinfo->force_delalloc = 1;
2865         else
2866                 meta_sinfo->force_delalloc = 0;
2867 }
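
/*
 * Example: if 2GB of a 10GB metadata space_info is already accounted
 * for by the sum above, force_delalloc is set once bytes_delalloc
 * reaches 80% of the remaining 8GB, i.e. 6.4GB.
 */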
2868
2869 static void flush_delalloc(struct btrfs_root *root,
2870                                  struct btrfs_space_info *info)
2871 {
2872         bool wait = false;
2873
2874         spin_lock(&info->lock);
2875
2876         if (!info->flushing) {
2877                 info->flushing = 1;
2878                 init_waitqueue_head(&info->flush_wait);
2879         } else {
2880                 wait = true;
2881         }
2882
2883         spin_unlock(&info->lock);
2884
2885         if (wait) {
2886                 wait_event(info->flush_wait,
2887                            !info->flushing);
2888                 return;
2889         }
2890
2891         btrfs_start_delalloc_inodes(root);
2892         btrfs_wait_ordered_extents(root, 0);
2893
2894         spin_lock(&info->lock);
2895         info->flushing = 0;
2896         spin_unlock(&info->lock);
2897         wake_up(&info->flush_wait);
2898 }
2899
2900 static int maybe_allocate_chunk(struct btrfs_root *root,
2901                                  struct btrfs_space_info *info)
2902 {
2903         struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
2904         struct btrfs_trans_handle *trans;
2905         bool wait = false;
2906         int ret = 0;
2907         u64 min_metadata;
2908         u64 free_space;
2909
2910         free_space = btrfs_super_total_bytes(disk_super);
2911         /*
2912          * we allow the metadata to grow to a max of either 5GB or 5% of the
2913          * space in the volume.
2914          */
2915         min_metadata = min((u64)5 * 1024 * 1024 * 1024,
2916                              div64_u64(free_space * 5, 100));
2917         if (info->total_bytes >= min_metadata) {
2918                 spin_unlock(&info->lock);
2919                 return 0;
2920         }
2921
2922         if (info->full) {
2923                 spin_unlock(&info->lock);
2924                 return 0;
2925         }
2926
2927         if (!info->allocating_chunk) {
2928                 info->force_alloc = 1;
2929                 info->allocating_chunk = 1;
2930                 init_waitqueue_head(&info->wait);
2931         } else {
2932                 wait = true;
2933         }
2934
2935         spin_unlock(&info->lock);
2936
2937         if (wait) {
2938                 wait_event(info->wait,
2939                            !info->allocating_chunk);
2940                 return 1;
2941         }
2942
2943         trans = btrfs_start_transaction(root, 1);
2944         if (!trans) {
2945                 ret = -ENOMEM;
2946                 goto out;
2947         }
2948
2949         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
2950                              4096 + 2 * 1024 * 1024,
2951                              info->flags, 0);
2952         btrfs_end_transaction(trans, root);
2953         if (ret)
2954                 goto out;
2955 out:
2956         spin_lock(&info->lock);
2957         info->allocating_chunk = 0;
2958         spin_unlock(&info->lock);
2959         wake_up(&info->wait);
2960
2961         if (ret)
2962                 return 0;
2963         return 1;
2964 }
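
/*
 * Example of the cap in maybe_allocate_chunk(): on a 200GB volume, 5%
 * is 10GB, so the 5GB ceiling applies; on a 40GB volume, 5% is 2GB, so
 * the percentage is the limit instead.
 */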
2965
2966 /*
2967  * Reserve metadata space for delalloc.
2968  */
2969 int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
2970                                         struct inode *inode, int num_items)
2971 {
2972         struct btrfs_fs_info *info = root->fs_info;
2973         struct btrfs_space_info *meta_sinfo;
2974         u64 num_bytes;
2975         u64 used;
2976         u64 alloc_target;
2977         int flushed = 0;
2978         int force_delalloc;
2979
2980         /* get the space info for where the metadata will live */
2981         alloc_target = btrfs_get_alloc_profile(root, 0);
2982         meta_sinfo = __find_space_info(info, alloc_target);
2983
2984         num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2985                                            num_items);
2986 again:
2987         spin_lock(&meta_sinfo->lock);
2988
2989         force_delalloc = meta_sinfo->force_delalloc;
2990
2991         if (unlikely(!meta_sinfo->bytes_root))
2992                 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
2993
2994         if (!flushed)
2995                 meta_sinfo->bytes_delalloc += num_bytes;
2996
2997         used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2998                 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2999                 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3000                 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3001
3002         if (used > meta_sinfo->total_bytes) {
3003                 flushed++;
3004
3005                 if (flushed == 1) {
3006                         if (maybe_allocate_chunk(root, meta_sinfo))
3007                                 goto again;
3008                         flushed++;
3009                 } else {
3010                         spin_unlock(&meta_sinfo->lock);
3011                 }
3012
3013                 if (flushed == 2) {
3014                         filemap_flush(inode->i_mapping);
3015                         goto again;
3016                 } else if (flushed == 3) {
3017                         flush_delalloc(root, meta_sinfo);
3018                         goto again;
3019                 }
3020                 spin_lock(&meta_sinfo->lock);
3021                 meta_sinfo->bytes_delalloc -= num_bytes;
3022                 spin_unlock(&meta_sinfo->lock);
3023                 printk(KERN_ERR "enospc, has %d, reserved %d\n",
3024                        BTRFS_I(inode)->outstanding_extents,
3025                        BTRFS_I(inode)->reserved_extents);
3026                 dump_space_info(meta_sinfo, 0, 0);
3027                 return -ENOSPC;
3028         }
3029
3030         BTRFS_I(inode)->reserved_extents++;
3031         check_force_delalloc(meta_sinfo);
3032         spin_unlock(&meta_sinfo->lock);
3033
3034         if (!flushed && force_delalloc)
3035                 filemap_flush(inode->i_mapping);
3036
3037         return 0;
3038 }
3039
3040 /*
3041  * unreserve num_items number of items worth of metadata space.  This needs to
3042  * be paired with btrfs_reserve_metadata_space.
3043  *
3044  * NOTE: if you have the option, run this _AFTER_ you do a
3045  * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
3046  * operations which will result in more used metadata, so we want to make sure we
3047  * can do that without issue.
3048  */
3049 int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
3050 {
3051         struct btrfs_fs_info *info = root->fs_info;
3052         struct btrfs_space_info *meta_sinfo;
3053         u64 num_bytes;
3054         u64 alloc_target;
3055         bool bug = false;
3056
3057         /* get the space info for where the metadata will live */
3058         alloc_target = btrfs_get_alloc_profile(root, 0);
3059         meta_sinfo = __find_space_info(info, alloc_target);
3060
3061         num_bytes = calculate_bytes_needed(root, num_items);
3062
3063         spin_lock(&meta_sinfo->lock);
3064         if (meta_sinfo->bytes_may_use < num_bytes) {
3065                 bug = true;
3066                 meta_sinfo->bytes_may_use = 0;
3067         } else {
3068                 meta_sinfo->bytes_may_use -= num_bytes;
3069         }
3070         spin_unlock(&meta_sinfo->lock);
3071
3072         BUG_ON(bug);
3073
3074         return 0;
3075 }
3076
3077 /*
3078  * Reserve some metadata space for use.  We'll calculate the worst case number
3079  * of bytes that would be needed to modify num_items number of items.  If we
3080  * have space, fantastic, if not, you get -ENOSPC.  Please call
3081  * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
3082  * items you reserved, since whatever metadata you needed should have already
3083  * been allocated.
3084  *
3085  * This will commit the transaction to make more space if we don't have enough
3086  * metadata space.  The only time we don't do this is if we're reserving space
3087  * inside of a transaction, in which case we will just return -ENOSPC and it is
3088  * the caller's responsibility to handle it properly.
3089  */
3090 int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
3091 {
3092         struct btrfs_fs_info *info = root->fs_info;
3093         struct btrfs_space_info *meta_sinfo;
3094         u64 num_bytes;
3095         u64 used;
3096         u64 alloc_target;
3097         int retries = 0;
3098
3099         /* get the space info for where the metadata will live */
3100         alloc_target = btrfs_get_alloc_profile(root, 0);
3101         meta_sinfo = __find_space_info(info, alloc_target);
3102
3103         num_bytes = calculate_bytes_needed(root, num_items);
3104 again:
3105         spin_lock(&meta_sinfo->lock);
3106
3107         if (unlikely(!meta_sinfo->bytes_root))
3108                 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3109
3110         if (!retries)
3111                 meta_sinfo->bytes_may_use += num_bytes;
3112
3113         used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3114                 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3115                 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3116                 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3117
3118         if (used > meta_sinfo->total_bytes) {
3119                 retries++;
3120                 if (retries == 1) {
3121                         if (maybe_allocate_chunk(root, meta_sinfo))
3122                                 goto again;
3123                         retries++;
3124                 } else {
3125                         spin_unlock(&meta_sinfo->lock);
3126                 }
3127
3128                 if (retries == 2) {
3129                         flush_delalloc(root, meta_sinfo);
3130                         goto again;
3131                 }
3132                 spin_lock(&meta_sinfo->lock);
3133                 meta_sinfo->bytes_may_use -= num_bytes;
3134                 spin_unlock(&meta_sinfo->lock);
3135
3136                 dump_space_info(meta_sinfo, 0, 0);
3137                 return -ENOSPC;
3138         }
3139
3140         check_force_delalloc(meta_sinfo);
3141         spin_unlock(&meta_sinfo->lock);
3142
3143         return 0;
3144 }
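
/*
 * Sketch of the intended pairing (illustrative only, not a caller in
 * this file):
 *
 *      ret = btrfs_reserve_metadata_space(root, 1);
 *      if (ret)
 *              return ret;
 *      trans = btrfs_start_transaction(root, 1);
 *      ...modify one item...
 *      btrfs_end_transaction(trans, root);
 *      btrfs_unreserve_metadata_space(root, 1);
 */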
3145
3146 /*
3147  * This will check the space that the inode allocates from to make sure we have
3148  * enough space for bytes.
3149  */
3150 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
3151                                 u64 bytes)
3152 {
3153         struct btrfs_space_info *data_sinfo;
3154         int ret = 0, committed = 0;
3155
3156         /* make sure bytes are sectorsize aligned */
3157         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3158
3159         data_sinfo = BTRFS_I(inode)->space_info;
3160         if (!data_sinfo)
3161                 goto alloc;
3162
3163 again:
3164         /* make sure we have enough space to handle the data first */
3165         spin_lock(&data_sinfo->lock);
3166         if (data_sinfo->total_bytes - data_sinfo->bytes_used -
3167             data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
3168             data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
3169             data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
3170                 struct btrfs_trans_handle *trans;
3171
3172                 /*
3173                  * if we don't have enough free bytes in this space then we need
3174                  * to alloc a new chunk.
3175                  */
3176                 if (!data_sinfo->full) {
3177                         u64 alloc_target;
3178
3179                         data_sinfo->force_alloc = 1;
3180                         spin_unlock(&data_sinfo->lock);
3181 alloc:
3182                         alloc_target = btrfs_get_alloc_profile(root, 1);
3183                         trans = btrfs_start_transaction(root, 1);
3184                         if (!trans)
3185                                 return -ENOMEM;
3186
3187                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3188                                              bytes + 2 * 1024 * 1024,
3189                                              alloc_target, 0);
3190                         btrfs_end_transaction(trans, root);
3191                         if (ret)
3192                                 return ret;
3193
3194                         if (!data_sinfo) {
3195                                 btrfs_set_inode_space_info(root, inode);
3196                                 data_sinfo = BTRFS_I(inode)->space_info;
3197                         }
3198                         goto again;
3199                 }
3200                 spin_unlock(&data_sinfo->lock);
3201
3202                 /* commit the current transaction and try again */
3203                 if (!committed && !root->fs_info->open_ioctl_trans) {
3204                         committed = 1;
3205                         trans = btrfs_join_transaction(root, 1);
3206                         if (!trans)
3207                                 return -ENOMEM;
3208                         ret = btrfs_commit_transaction(trans, root);
3209                         if (ret)
3210                                 return ret;
3211                         goto again;
3212                 }
3213
3214                 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
3215                        ", %llu bytes_used, %llu bytes_reserved, "
3216                        "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
3217                        "%llu total\n", (unsigned long long)bytes,
3218                        (unsigned long long)data_sinfo->bytes_delalloc,
3219                        (unsigned long long)data_sinfo->bytes_used,
3220                        (unsigned long long)data_sinfo->bytes_reserved,
3221                        (unsigned long long)data_sinfo->bytes_pinned,
3222                        (unsigned long long)data_sinfo->bytes_readonly,
3223                        (unsigned long long)data_sinfo->bytes_may_use,
3224                        (unsigned long long)data_sinfo->total_bytes);
3225                 return -ENOSPC;
3226         }
3227         data_sinfo->bytes_may_use += bytes;
3228         BTRFS_I(inode)->reserved_bytes += bytes;
3229         spin_unlock(&data_sinfo->lock);
3230
3231         return 0;
3232 }
3233
3234 /*
3235  * if there was an error for whatever reason after calling
3236  * btrfs_check_data_free_space, call this so we can cleanup the counters.
3237  */
3238 void btrfs_free_reserved_data_space(struct btrfs_root *root,
3239                                     struct inode *inode, u64 bytes)
3240 {
3241         struct btrfs_space_info *data_sinfo;
3242
3243         /* make sure bytes are sectorsize aligned */
3244         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3245
3246         data_sinfo = BTRFS_I(inode)->space_info;
3247         spin_lock(&data_sinfo->lock);
3248         data_sinfo->bytes_may_use -= bytes;
3249         BTRFS_I(inode)->reserved_bytes -= bytes;
3250         spin_unlock(&data_sinfo->lock);
3251 }
3252
3253 /* called when we are adding a delalloc extent to the inode's io_tree */
3254 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
3255                                   u64 bytes)
3256 {
3257         struct btrfs_space_info *data_sinfo;
3258
3259         /* get the space info for where this inode will be storing its data */
3260         data_sinfo = BTRFS_I(inode)->space_info;
3261
3262         /* make sure we have enough space to handle the data first */
3263         spin_lock(&data_sinfo->lock);
3264         data_sinfo->bytes_delalloc += bytes;
3265
3266         /*
3267          * we are adding a delalloc extent without calling
3268          * btrfs_check_data_free_space first.  This happens on a weird
3269          * writepage condition, but shouldn't hurt our accounting
3270          */
3271         if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
3272                 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
3273                 BTRFS_I(inode)->reserved_bytes = 0;
3274         } else {
3275                 data_sinfo->bytes_may_use -= bytes;
3276                 BTRFS_I(inode)->reserved_bytes -= bytes;
3277         }
3278
3279         spin_unlock(&data_sinfo->lock);
3280 }
3281
3282 /* called when we are clearing a delalloc extent from the inode's io_tree */
3283 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
3284                               u64 bytes)
3285 {
3286         struct btrfs_space_info *info;
3287
3288         info = BTRFS_I(inode)->space_info;
3289
3290         spin_lock(&info->lock);
3291         info->bytes_delalloc -= bytes;
3292         spin_unlock(&info->lock);
3293 }
3294
3295 static void force_metadata_allocation(struct btrfs_fs_info *info)
3296 {
3297         struct list_head *head = &info->space_info;
3298         struct btrfs_space_info *found;
3299
3300         rcu_read_lock();
3301         list_for_each_entry_rcu(found, head, list) {
3302                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3303                         found->force_alloc = 1;
3304         }
3305         rcu_read_unlock();
3306 }
3307
3308 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3309                           struct btrfs_root *extent_root, u64 alloc_bytes,
3310                           u64 flags, int force)
3311 {
3312         struct btrfs_space_info *space_info;
3313         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3314         u64 thresh;
3315         int ret = 0;
3316
3317         mutex_lock(&fs_info->chunk_mutex);
3318
3319         flags = btrfs_reduce_alloc_profile(extent_root, flags);
3320
3321         space_info = __find_space_info(extent_root->fs_info, flags);
3322         if (!space_info) {
3323                 ret = update_space_info(extent_root->fs_info, flags,
3324                                         0, 0, &space_info);
3325                 BUG_ON(ret);
3326         }
3327         BUG_ON(!space_info);
3328
3329         spin_lock(&space_info->lock);
3330         if (space_info->force_alloc)
3331                 force = 1;
3332         if (space_info->full) {
3333                 spin_unlock(&space_info->lock);
3334                 goto out;
3335         }
3336
3337         thresh = space_info->total_bytes - space_info->bytes_readonly;
3338         thresh = div_factor(thresh, 8);
3339         if (!force &&
3340            (space_info->bytes_used + space_info->bytes_pinned +
3341             space_info->bytes_reserved + alloc_bytes) < thresh) {
3342                 spin_unlock(&space_info->lock);
3343                 goto out;
3344         }
3345         spin_unlock(&space_info->lock);
3346
3347         /*
3348          * if we're doing a data chunk, go ahead and make sure that
3349          * we keep a reasonable number of metadata chunks allocated in the
3350          * FS as well.
3351          */
3352         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3353                 fs_info->data_chunk_allocations++;
3354                 if (!(fs_info->data_chunk_allocations %
3355                       fs_info->metadata_ratio))
3356                         force_metadata_allocation(fs_info);
3357         }
3358
3359         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3360         spin_lock(&space_info->lock);
3361         if (ret)
3362                 space_info->full = 1;
3363         space_info->force_alloc = 0;
3364         spin_unlock(&space_info->lock);
3365 out:
3366         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3367         return ret;
3368 }
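
/*
 * Note: div_factor(thresh, 8) above works out to 80%, so a new chunk
 * is only allocated once used + pinned + reserved space (plus the
 * requested bytes) crosses 80% of the writable space in this
 * space_info, unless force is set.
 */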
3369
3370 static int update_block_group(struct btrfs_trans_handle *trans,
3371                               struct btrfs_root *root,
3372                               u64 bytenr, u64 num_bytes, int alloc,
3373                               int mark_free)
3374 {
3375         struct btrfs_block_group_cache *cache;
3376         struct btrfs_fs_info *info = root->fs_info;
3377         u64 total = num_bytes;
3378         u64 old_val;
3379         u64 byte_in_group;
3380
3381         /* block accounting for super block */
3382         spin_lock(&info->delalloc_lock);
3383         old_val = btrfs_super_bytes_used(&info->super_copy);
3384         if (alloc)
3385                 old_val += num_bytes;
3386         else
3387                 old_val -= num_bytes;
3388         btrfs_set_super_bytes_used(&info->super_copy, old_val);
3389
3390         /* block accounting for root item */
3391         old_val = btrfs_root_used(&root->root_item);
3392         if (alloc)
3393                 old_val += num_bytes;
3394         else
3395                 old_val -= num_bytes;
3396         btrfs_set_root_used(&root->root_item, old_val);
3397         spin_unlock(&info->delalloc_lock);
3398
3399         while (total) {
3400                 cache = btrfs_lookup_block_group(info, bytenr);
3401                 if (!cache)
3402                         return -1;
3403                 byte_in_group = bytenr - cache->key.objectid;
3404                 WARN_ON(byte_in_group > cache->key.offset);
3405
3406                 spin_lock(&cache->space_info->lock);
3407                 spin_lock(&cache->lock);
3408                 cache->dirty = 1;
3409                 old_val = btrfs_block_group_used(&cache->item);
3410                 num_bytes = min(total, cache->key.offset - byte_in_group);
3411                 if (alloc) {
3412                         old_val += num_bytes;
3413                         btrfs_set_block_group_used(&cache->item, old_val);
3414                         cache->reserved -= num_bytes;
3415                         cache->space_info->bytes_used += num_bytes;
3416                         cache->space_info->bytes_reserved -= num_bytes;
3417                         if (cache->ro)
3418                                 cache->space_info->bytes_readonly -= num_bytes;
3419                         spin_unlock(&cache->lock);
3420                         spin_unlock(&cache->space_info->lock);
3421                 } else {
3422                         old_val -= num_bytes;
3423                         cache->space_info->bytes_used -= num_bytes;
3424                         if (cache->ro)
3425                                 cache->space_info->bytes_readonly += num_bytes;
3426                         btrfs_set_block_group_used(&cache->item, old_val);
3427                         spin_unlock(&cache->lock);
3428                         spin_unlock(&cache->space_info->lock);
3429                         if (mark_free) {
3430                                 int ret;
3431
3432                                 ret = btrfs_discard_extent(root, bytenr,
3433                                                            num_bytes);
3434                                 WARN_ON(ret);
3435
3436                                 ret = btrfs_add_free_space(cache, bytenr,
3437                                                            num_bytes);
3438                                 WARN_ON(ret);
3439                         }
3440                 }
3441                 btrfs_put_block_group(cache);
3442                 total -= num_bytes;
3443                 bytenr += num_bytes;
3444         }
3445         return 0;
3446 }
3447
3448 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3449 {
3450         struct btrfs_block_group_cache *cache;
3451         u64 bytenr;
3452
3453         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3454         if (!cache)
3455                 return 0;
3456
3457         bytenr = cache->key.objectid;
3458         btrfs_put_block_group(cache);
3459
3460         return bytenr;
3461 }
3462
3463 /*
3464  * this function must be called within transaction
3465  */
3466 int btrfs_pin_extent(struct btrfs_root *root,
3467                      u64 bytenr, u64 num_bytes, int reserved)
3468 {
3469         struct btrfs_fs_info *fs_info = root->fs_info;
3470         struct btrfs_block_group_cache *cache;
3471
3472         cache = btrfs_lookup_block_group(fs_info, bytenr);
3473         BUG_ON(!cache);
3474
3475         spin_lock(&cache->space_info->lock);
3476         spin_lock(&cache->lock);
3477         cache->pinned += num_bytes;
3478         cache->space_info->bytes_pinned += num_bytes;
3479         if (reserved) {
3480                 cache->reserved -= num_bytes;
3481                 cache->space_info->bytes_reserved -= num_bytes;
3482         }
3483         spin_unlock(&cache->lock);
3484         spin_unlock(&cache->space_info->lock);
3485
3486         btrfs_put_block_group(cache);
3487
3488         set_extent_dirty(fs_info->pinned_extents,
3489                          bytenr, bytenr + num_bytes - 1, GFP_NOFS);
3490         return 0;
3491 }
3492
3493 static int update_reserved_extents(struct btrfs_block_group_cache *cache,
3494                                    u64 num_bytes, int reserve)
3495 {
3496         spin_lock(&cache->space_info->lock);
3497         spin_lock(&cache->lock);
3498         if (reserve) {
3499                 cache->reserved += num_bytes;
3500                 cache->space_info->bytes_reserved += num_bytes;
3501         } else {
3502                 cache->reserved -= num_bytes;
3503                 cache->space_info->bytes_reserved -= num_bytes;
3504         }
3505         spin_unlock(&cache->lock);
3506         spin_unlock(&cache->space_info->lock);
3507         return 0;
3508 }
3509
3510 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
3511                                 struct btrfs_root *root)
3512 {
3513         struct btrfs_fs_info *fs_info = root->fs_info;
3514         struct btrfs_caching_control *next;
3515         struct btrfs_caching_control *caching_ctl;
3516         struct btrfs_block_group_cache *cache;
3517
3518         down_write(&fs_info->extent_commit_sem);
3519
3520         list_for_each_entry_safe(caching_ctl, next,
3521                                  &fs_info->caching_block_groups, list) {
3522                 cache = caching_ctl->block_group;
3523                 if (block_group_cache_done(cache)) {
3524                         cache->last_byte_to_unpin = (u64)-1;
3525                         list_del_init(&caching_ctl->list);
3526                         put_caching_control(caching_ctl);
3527                 } else {
3528                         cache->last_byte_to_unpin = caching_ctl->progress;
3529                 }
3530         }
3531
3532         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3533                 fs_info->pinned_extents = &fs_info->freed_extents[1];
3534         else
3535                 fs_info->pinned_extents = &fs_info->freed_extents[0];
3536
3537         up_write(&fs_info->extent_commit_sem);
3538         return 0;
3539 }
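
/*
 * Note the double buffering above: pinned_extents flips between
 * freed_extents[0] and freed_extents[1] at each commit, so extents
 * pinned by the running transaction accumulate in one tree while
 * btrfs_finish_extent_commit() unpins the previous transaction's
 * extents from the other.
 */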
3540
3541 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
3542 {
3543         struct btrfs_fs_info *fs_info = root->fs_info;
3544         struct btrfs_block_group_cache *cache = NULL;
3545         u64 len;
3546
3547         while (start <= end) {
3548                 if (!cache ||
3549                     start >= cache->key.objectid + cache->key.offset) {
3550                         if (cache)
3551                                 btrfs_put_block_group(cache);
3552                         cache = btrfs_lookup_block_group(fs_info, start);
3553                         BUG_ON(!cache);
3554                 }
3555
3556                 len = cache->key.objectid + cache->key.offset - start;
3557                 len = min(len, end + 1 - start);
3558
3559                 if (start < cache->last_byte_to_unpin) {
3560                         len = min(len, cache->last_byte_to_unpin - start);
3561                         btrfs_add_free_space(cache, start, len);
3562                 }
3563
3564                 spin_lock(&cache->space_info->lock);
3565                 spin_lock(&cache->lock);
3566                 cache->pinned -= len;
3567                 cache->space_info->bytes_pinned -= len;
3568                 spin_unlock(&cache->lock);
3569                 spin_unlock(&cache->space_info->lock);
3570
3571                 start += len;
3572         }
3573
3574         if (cache)
3575                 btrfs_put_block_group(cache);
3576         return 0;
3577 }
3578
3579 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3580                                struct btrfs_root *root)
3581 {
3582         struct btrfs_fs_info *fs_info = root->fs_info;
3583         struct extent_io_tree *unpin;
3584         u64 start;
3585         u64 end;
3586         int ret;
3587
3588         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3589                 unpin = &fs_info->freed_extents[1];
3590         else
3591                 unpin = &fs_info->freed_extents[0];
3592
3593         while (1) {
3594                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3595                                             EXTENT_DIRTY);
3596                 if (ret)
3597                         break;
3598
3599                 ret = btrfs_discard_extent(root, start, end + 1 - start);
3600
3601                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3602                 unpin_extent_range(root, start, end);
3603                 cond_resched();
3604         }
3605
3606         return ret;
3607 }
3608
3609 static int pin_down_bytes(struct btrfs_trans_handle *trans,
3610                           struct btrfs_root *root,
3611                           struct btrfs_path *path,
3612                           u64 bytenr, u64 num_bytes,
3613                           int is_data, int reserved,
3614                           struct extent_buffer **must_clean)
3615 {
3616         int err = 0;
3617         struct extent_buffer *buf;
3618
3619         if (is_data)
3620                 goto pinit;
3621
3622         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3623         if (!buf)
3624                 goto pinit;
3625
3626         /* we can reuse a block if it hasn't been written
3627          * and it is from this transaction.  We can't
3628          * reuse anything from the tree log root because
3629          * it has tiny sub-transactions.
3630          */
3631         if (btrfs_buffer_uptodate(buf, 0) &&
3632             btrfs_try_tree_lock(buf)) {
3633                 u64 header_owner = btrfs_header_owner(buf);
3634                 u64 header_transid = btrfs_header_generation(buf);
3635                 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3636                     header_transid == trans->transid &&
3637                     !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3638                         *must_clean = buf;
3639                         return 1;
3640                 }
3641                 btrfs_tree_unlock(buf);
3642         }
3643         free_extent_buffer(buf);
3644 pinit:
3645         if (path)
3646                 btrfs_set_path_blocking(path);
3647         /* unlocks the pinned mutex */
3648         btrfs_pin_extent(root, bytenr, num_bytes, reserved);
3649
3650         BUG_ON(err < 0);
3651         return 0;
3652 }
3653
3654 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3655                                 struct btrfs_root *root,
3656                                 u64 bytenr, u64 num_bytes, u64 parent,
3657                                 u64 root_objectid, u64 owner_objectid,
3658                                 u64 owner_offset, int refs_to_drop,
3659                                 struct btrfs_delayed_extent_op *extent_op)
3660 {
3661         struct btrfs_key key;
3662         struct btrfs_path *path;
3663         struct btrfs_fs_info *info = root->fs_info;
3664         struct btrfs_root *extent_root = info->extent_root;
3665         struct extent_buffer *leaf;
3666         struct btrfs_extent_item *ei;
3667         struct btrfs_extent_inline_ref *iref;
3668         int ret;
3669         int is_data;
3670         int extent_slot = 0;
3671         int found_extent = 0;
3672         int num_to_del = 1;
3673         u32 item_size;
3674         u64 refs;
3675
3676         path = btrfs_alloc_path();
3677         if (!path)
3678                 return -ENOMEM;
3679
3680         path->reada = 1;
3681         path->leave_spinning = 1;
3682
3683         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3684         BUG_ON(!is_data && refs_to_drop != 1);
3685
3686         ret = lookup_extent_backref(trans, extent_root, path, &iref,
3687                                     bytenr, num_bytes, parent,
3688                                     root_objectid, owner_objectid,
3689                                     owner_offset);
3690         if (ret == 0) {
3691                 extent_slot = path->slots[0];
3692                 while (extent_slot >= 0) {
3693                         btrfs_item_key_to_cpu(path->nodes[0], &key,
3694                                               extent_slot);
3695                         if (key.objectid != bytenr)
3696                                 break;
3697                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3698                             key.offset == num_bytes) {
3699                                 found_extent = 1;
3700                                 break;
3701                         }
3702                         if (path->slots[0] - extent_slot > 5)
3703                                 break;
3704                         extent_slot--;
3705                 }
3706 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3707                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3708                 if (found_extent && item_size < sizeof(*ei))
3709                         found_extent = 0;
3710 #endif
3711                 if (!found_extent) {
3712                         BUG_ON(iref);
3713                         ret = remove_extent_backref(trans, extent_root, path,
3714                                                     NULL, refs_to_drop,
3715                                                     is_data);
3716                         BUG_ON(ret);
3717                         btrfs_release_path(extent_root, path);
3718                         path->leave_spinning = 1;
3719
3720                         key.objectid = bytenr;
3721                         key.type = BTRFS_EXTENT_ITEM_KEY;
3722                         key.offset = num_bytes;
3723
3724                         ret = btrfs_search_slot(trans, extent_root,
3725                                                 &key, path, -1, 1);
3726                         if (ret) {
3727                                 printk(KERN_ERR "umm, got %d back from search"
3728                                        ", was looking for %llu\n", ret,
3729                                        (unsigned long long)bytenr);
3730                                 btrfs_print_leaf(extent_root, path->nodes[0]);
3731                         }
3732                         BUG_ON(ret);
3733                         extent_slot = path->slots[0];
3734                 }
3735         } else {
3736                 btrfs_print_leaf(extent_root, path->nodes[0]);
3737                 WARN_ON(1);
3738                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
3739                        "parent %llu root %llu  owner %llu offset %llu\n",
3740                        (unsigned long long)bytenr,
3741                        (unsigned long long)parent,
3742                        (unsigned long long)root_objectid,
3743                        (unsigned long long)owner_objectid,
3744                        (unsigned long long)owner_offset);
3745         }
3746
3747         leaf = path->nodes[0];
3748         item_size = btrfs_item_size_nr(leaf, extent_slot);
3749 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3750         if (item_size < sizeof(*ei)) {
3751                 BUG_ON(found_extent || extent_slot != path->slots[0]);
3752                 ret = convert_extent_item_v0(trans, extent_root, path,
3753                                              owner_objectid, 0);
3754                 BUG_ON(ret < 0);
3755
3756                 btrfs_release_path(extent_root, path);
3757                 path->leave_spinning = 1;
3758
3759                 key.objectid = bytenr;
3760                 key.type = BTRFS_EXTENT_ITEM_KEY;
3761                 key.offset = num_bytes;
3762
3763                 ret = btrfs_search_slot(trans, extent_root, &key, path,
3764                                         -1, 1);
3765                 if (ret) {
3766                         printk(KERN_ERR "umm, got %d back from search"
3767                                ", was looking for %llu\n", ret,
3768                                (unsigned long long)bytenr);
3769                         btrfs_print_leaf(extent_root, path->nodes[0]);
3770                 }
3771                 BUG_ON(ret);
3772                 extent_slot = path->slots[0];
3773                 leaf = path->nodes[0];
3774                 item_size = btrfs_item_size_nr(leaf, extent_slot);
3775         }
3776 #endif
3777         BUG_ON(item_size < sizeof(*ei));
3778         ei = btrfs_item_ptr(leaf, extent_slot,
3779                             struct btrfs_extent_item);
3780         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3781                 struct btrfs_tree_block_info *bi;
3782                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3783                 bi = (struct btrfs_tree_block_info *)(ei + 1);
3784                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3785         }
3786
3787         refs = btrfs_extent_refs(leaf, ei);
3788         BUG_ON(refs < refs_to_drop);
3789         refs -= refs_to_drop;
3790
3791         if (refs > 0) {
3792                 if (extent_op)
3793                         __run_delayed_extent_op(extent_op, leaf, ei);
3794                 /*
3795                  * In the case of inline back ref, reference count will
3796                  * be updated by remove_extent_backref
3797                  */
3798                 if (iref) {
3799                         BUG_ON(!found_extent);
3800                 } else {
3801                         btrfs_set_extent_refs(leaf, ei, refs);
3802                         btrfs_mark_buffer_dirty(leaf);
3803                 }
3804                 if (found_extent) {
3805                         ret = remove_extent_backref(trans, extent_root, path,
3806                                                     iref, refs_to_drop,
3807                                                     is_data);
3808                         BUG_ON(ret);
3809                 }
3810         } else {
3811                 int mark_free = 0;
3812                 struct extent_buffer *must_clean = NULL;
3813
3814                 if (found_extent) {
3815                         BUG_ON(is_data && refs_to_drop !=
3816                                extent_data_ref_count(root, path, iref));
3817                         if (iref) {
3818                                 BUG_ON(path->slots[0] != extent_slot);
3819                         } else {
3820                                 BUG_ON(path->slots[0] != extent_slot + 1);
3821                                 path->slots[0] = extent_slot;
3822                                 num_to_del = 2;
3823                         }
3824                 }
3825
3826                 ret = pin_down_bytes(trans, root, path, bytenr,
3827                                      num_bytes, is_data, 0, &must_clean);
3828                 if (ret > 0)
3829                         mark_free = 1;
3830                 BUG_ON(ret < 0);
3831                 /*
3832                  * it is going to be very rare for someone to be waiting
3833                  * on the block we're freeing.  del_items might need to
3834                  * schedule, so rather than get fancy, just force it
3835                  * to blocking here
3836                  */
3837                 if (must_clean)
3838                         btrfs_set_lock_blocking(must_clean);
3839
3840                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3841                                       num_to_del);
3842                 BUG_ON(ret);
3843                 btrfs_release_path(extent_root, path);
3844
3845                 if (must_clean) {
3846                         clean_tree_block(NULL, root, must_clean);
3847                         btrfs_tree_unlock(must_clean);
3848                         free_extent_buffer(must_clean);
3849                 }
3850
3851                 if (is_data) {
3852                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3853                         BUG_ON(ret);
3854                 } else {
3855                         invalidate_mapping_pages(info->btree_inode->i_mapping,
3856                              bytenr >> PAGE_CACHE_SHIFT,
3857                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
3858                 }
3859
3860                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3861                                          mark_free);
3862                 BUG_ON(ret);
3863         }
3864         btrfs_free_path(path);
3865         return ret;
3866 }
3867
3868 /*
3869  * when we free an extent, it is possible (and likely) that we free the last
3870  * delayed ref for that extent as well.  This searches the delayed ref tree for
3871  * a given extent, and if there are no other delayed refs to be processed, it
3872  * removes it from the tree.
3873  */
3874 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3875                                       struct btrfs_root *root, u64 bytenr)
3876 {
3877         struct btrfs_delayed_ref_head *head;
3878         struct btrfs_delayed_ref_root *delayed_refs;
3879         struct btrfs_delayed_ref_node *ref;
3880         struct rb_node *node;
3881         int ret;
3882
3883         delayed_refs = &trans->transaction->delayed_refs;
3884         spin_lock(&delayed_refs->lock);
3885         head = btrfs_find_delayed_ref_head(trans, bytenr);
3886         if (!head)
3887                 goto out;
3888
3889         node = rb_prev(&head->node.rb_node);
3890         if (!node)
3891                 goto out;
3892
3893         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3894
3895         /* there are still entries for this ref, we can't drop it */
3896         if (ref->bytenr == bytenr)
3897                 goto out;
3898
3899         if (head->extent_op) {
3900                 if (!head->must_insert_reserved)
3901                         goto out;
3902                 kfree(head->extent_op);
3903                 head->extent_op = NULL;
3904         }
3905
3906         /*
3907          * waiting for the lock here would deadlock.  If someone else has it
3908          * locked, they are already in the process of dropping it anyway.
3909          */
3910         if (!mutex_trylock(&head->mutex))
3911                 goto out;
3912
3913         /*
3914          * at this point we have a head with no other entries.  Go
3915          * ahead and process it.
3916          */
3917         head->node.in_tree = 0;
3918         rb_erase(&head->node.rb_node, &delayed_refs->root);
3919
3920         delayed_refs->num_entries--;
3921
3922         /*
3923          * we don't take a ref on the node because we're removing it from the
3924          * tree, so we just steal the ref the tree was holding.
3925          */
3926         delayed_refs->num_heads--;
3927         if (list_empty(&head->cluster))
3928                 delayed_refs->num_heads_ready--;
3929
3930         list_del_init(&head->cluster);
3931         spin_unlock(&delayed_refs->lock);
3932
3933         ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
3934                                   &head->node, head->extent_op,
3935                                   head->must_insert_reserved);
3936         BUG_ON(ret);
3937         btrfs_put_delayed_ref(&head->node);
3938         return 0;
3939 out:
3940         spin_unlock(&delayed_refs->lock);
3941         return 0;
3942 }
3943
3944 int btrfs_free_extent(struct btrfs_trans_handle *trans,
3945                       struct btrfs_root *root,
3946                       u64 bytenr, u64 num_bytes, u64 parent,
3947                       u64 root_objectid, u64 owner, u64 offset)
3948 {
3949         int ret;
3950
3951         /*
3952          * tree log blocks never actually go into the extent allocation
3953          * tree, just update pinning info and exit early.
3954          */
3955         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
3956                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
3957                 /* unlocks the pinned mutex */
3958                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
3959                 ret = 0;
3960         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
3961                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
3962                                         parent, root_objectid, (int)owner,
3963                                         BTRFS_DROP_DELAYED_REF, NULL);
3964                 BUG_ON(ret);
3965                 ret = check_ref_cleanup(trans, root, bytenr);
3966                 BUG_ON(ret);
3967         } else {
3968                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
3969                                         parent, root_objectid, owner,
3970                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
3971                 BUG_ON(ret);
3972         }
3973         return ret;
3974 }
3975
3976 static u64 stripe_align(struct btrfs_root *root, u64 val)
3977 {
3978         u64 mask = ((u64)root->stripesize - 1);
3979         u64 ret = (val + mask) & ~mask;
3980         return ret;
3981 }
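
/*
 * e.g. with a 64k stripesize, stripe_align() rounds val = 100000 up to
 * 131072: (100000 + 65535) & ~65535.
 */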
3982
3983 /*
3984  * when we wait for progress in the block group caching, it's because
3985  * our allocation attempt failed at least once.  So, we must sleep
3986  * and let some progress happen before we try again.
3987  *
3988  * This function will sleep at least once waiting for new free space to
3989  * show up, and then it will check the block group free space numbers
3990  * for our min num_bytes.  Another option is to have it go ahead
3991  * and look in the rbtree for a free extent of a given size, but this
3992  * is a good start.
3993  */
3994 static noinline int
3995 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
3996                                 u64 num_bytes)
3997 {
3998         struct btrfs_caching_control *caching_ctl;
3999         DEFINE_WAIT(wait);
4000
4001         caching_ctl = get_caching_control(cache);
4002         if (!caching_ctl)
4003                 return 0;
4004
4005         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4006                    (cache->free_space >= num_bytes));
4007
4008         put_caching_control(caching_ctl);
4009         return 0;
4010 }
4011
4012 static noinline int
4013 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4014 {
4015         struct btrfs_caching_control *caching_ctl;
4016         DEFINE_WAIT(wait);
4017
4018         caching_ctl = get_caching_control(cache);
4019         if (!caching_ctl)
4020                 return 0;
4021
4022         wait_event(caching_ctl->wait, block_group_cache_done(cache));
4023
4024         put_caching_control(caching_ctl);
4025         return 0;
4026 }
4027
4028 enum btrfs_loop_type {
4029         LOOP_CACHED_ONLY = 0,
4030         LOOP_CACHING_NOWAIT = 1,
4031         LOOP_CACHING_WAIT = 2,
4032         LOOP_ALLOC_CHUNK = 3,
4033         LOOP_NO_EMPTY_SIZE = 4,
4034 };
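
/*
 * find_free_extent() escalates through these stages when an allocation
 * attempt fails: first only fully cached block groups are tried, then
 * groups that are still caching (without waiting, then waiting for
 * progress), then a chunk allocation is attempted, and finally the
 * empty_size/empty_cluster padding is dropped as a last resort.
 */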
4035
4036 /*
4037  * walks the btree of allocated extents and finds a hole of a given size.
4038  * The key ins is changed to record the hole:
4039  * ins->objectid == block start
4040  * ins->flags = BTRFS_EXTENT_ITEM_KEY
4041  * ins->offset == number of blocks
4042  * Any available blocks before search_start are skipped.
4043  */
4044 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4045                                      struct btrfs_root *orig_root,
4046                                      u64 num_bytes, u64 empty_size,
4047                                      u64 search_start, u64 search_end,
4048                                      u64 hint_byte, struct btrfs_key *ins,
4049                                      u64 exclude_start, u64 exclude_nr,
4050                                      int data)
4051 {
4052         int ret = 0;
4053         struct btrfs_root *root = orig_root->fs_info->extent_root;
4054         struct btrfs_free_cluster *last_ptr = NULL;
4055         struct btrfs_block_group_cache *block_group = NULL;
4056         int empty_cluster = 2 * 1024 * 1024;
4057         int allowed_chunk_alloc = 0;
4058         struct btrfs_space_info *space_info;
4059         int last_ptr_loop = 0;
4060         int loop = 0;
4061         bool found_uncached_bg = false;
4062         bool failed_cluster_refill = false;
4063         bool failed_alloc = false;
4064
4065         WARN_ON(num_bytes < root->sectorsize);
4066         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4067         ins->objectid = 0;
4068         ins->offset = 0;
4069
4070         space_info = __find_space_info(root->fs_info, data);
4071
4072         if (orig_root->ref_cows || empty_size)
4073                 allowed_chunk_alloc = 1;
4074
4075         if (data & BTRFS_BLOCK_GROUP_METADATA) {
4076                 last_ptr = &root->fs_info->meta_alloc_cluster;
4077                 if (!btrfs_test_opt(root, SSD))
4078                         empty_cluster = 64 * 1024;
4079         }
4080
4081         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
4082                 last_ptr = &root->fs_info->data_alloc_cluster;
4083         }
4084
4085         if (last_ptr) {
4086                 spin_lock(&last_ptr->lock);
4087                 if (last_ptr->block_group)
4088                         hint_byte = last_ptr->window_start;
4089                 spin_unlock(&last_ptr->lock);
4090         }
4091
4092         search_start = max(search_start, first_logical_byte(root, 0));
4093         search_start = max(search_start, hint_byte);
4094
4095         if (!last_ptr)
4096                 empty_cluster = 0;
4097
4098         if (search_start == hint_byte) {
4099                 block_group = btrfs_lookup_block_group(root->fs_info,
4100                                                        search_start);
4101                 /*
4102                  * we don't want to use the block group if it doesn't match our
4103                  * allocation bits, or if it's not cached.
4104                  */
4105                 if (block_group && block_group_bits(block_group, data) &&
4106                     block_group_cache_done(block_group)) {
4107                         down_read(&space_info->groups_sem);
4108                         if (list_empty(&block_group->list) ||
4109                             block_group->ro) {
4110                                 /*
4111                                  * someone is removing this block group,
4112                                  * we can't jump into the have_block_group
4113                                  * target because our list pointers are not
4114                                  * valid
4115                                  */
4116                                 btrfs_put_block_group(block_group);
4117                                 up_read(&space_info->groups_sem);
4118                         } else
4119                                 goto have_block_group;
4120                 } else if (block_group) {
4121                         btrfs_put_block_group(block_group);
4122                 }
4123         }
4124
4125 search:
4126         down_read(&space_info->groups_sem);
4127         list_for_each_entry(block_group, &space_info->block_groups, list) {
4128                 u64 offset;
4129                 int cached;
4130
4131                 atomic_inc(&block_group->count);
4132                 search_start = block_group->key.objectid;
4133
4134 have_block_group:
4135                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4136                         /*
4137                          * we want to start caching kthreads, but not too many
4138                          * right off the bat so we don't overwhelm the system,
4139                          * so only start them if there are fewer than 2 and we're
4140                          * in the initial allocation phase.
4141                          */
4142                         if (loop > LOOP_CACHING_NOWAIT ||
4143                             atomic_read(&space_info->caching_threads) < 2) {
4144                                 ret = cache_block_group(block_group);
4145                                 BUG_ON(ret);
4146                         }
4147                 }
4148
4149                 cached = block_group_cache_done(block_group);
4150                 if (unlikely(!cached)) {
4151                         found_uncached_bg = true;
4152
4153                         /* if we only want cached bgs, loop */
4154                         if (loop == LOOP_CACHED_ONLY)
4155                                 goto loop;
4156                 }
4157
4158                 if (unlikely(block_group->ro))
4159                         goto loop;
4160
4161                 /*
4162                  * Ok, we want to try the cluster allocator, so look
4163                  * there first, unless we are on LOOP_NO_EMPTY_SIZE.
4164                  * By that point we have tried the cluster allocator
4165                  * plenty of times and found nothing, so the free space
4166                  * is probably too fragmented for the clustering code
4167                  * to find anything; just skip it and let the allocator
4168                  * find whatever block it can.
4169                  */
4170                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
4171                         /*
4172                          * the refill lock keeps out other
4173                          * people trying to start a new cluster
4174                          */
4175                         spin_lock(&last_ptr->refill_lock);
4176                         if (last_ptr->block_group &&
4177                             (last_ptr->block_group->ro ||
4178                             !block_group_bits(last_ptr->block_group, data))) {
4179                                 offset = 0;
4180                                 goto refill_cluster;
4181                         }
4182
4183                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
4184                                                  num_bytes, search_start);
4185                         if (offset) {
4186                                 /* we have a block, we're done */
4187                                 spin_unlock(&last_ptr->refill_lock);
4188                                 goto checks;
4189                         }
4190
4191                         spin_lock(&last_ptr->lock);
4192                         /*
4193                          * whoops, this cluster doesn't actually point to
4194                          * this block group.  Get a ref on the block
4195                          * group it does point to and try again
4196                          */
4197                         if (!last_ptr_loop && last_ptr->block_group &&
4198                             last_ptr->block_group != block_group) {
4199
4200                                 btrfs_put_block_group(block_group);
4201                                 block_group = last_ptr->block_group;
4202                                 atomic_inc(&block_group->count);
4203                                 spin_unlock(&last_ptr->lock);
4204                                 spin_unlock(&last_ptr->refill_lock);
4205
4206                                 last_ptr_loop = 1;
4207                                 search_start = block_group->key.objectid;
4208                                 /*
4209                                  * we know this block group is properly
4210                                  * in the list because
4211                                  * btrfs_remove_block_group drops the
4212                                  * cluster before it removes the block
4213                                  * group from the list
4214                                  */
4215                                 goto have_block_group;
4216                         }
4217                         spin_unlock(&last_ptr->lock);
4218 refill_cluster:
4219                         /*
4220                          * this cluster didn't work out, free it and
4221                          * start over
4222                          */
4223                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
4224
4225                         last_ptr_loop = 0;
4226
4227                         /* allocate a cluster in this block group */
4228                         ret = btrfs_find_space_cluster(trans, root,
4229                                                block_group, last_ptr,
4230                                                offset, num_bytes,
4231                                                empty_cluster + empty_size);
4232                         if (ret == 0) {
4233                                 /*
4234                                  * now pull our allocation out of this
4235                                  * cluster
4236                                  */
4237                                 offset = btrfs_alloc_from_cluster(block_group,
4238                                                   last_ptr, num_bytes,
4239                                                   search_start);
4240                                 if (offset) {
4241                                         /* we found one, proceed */
4242                                         spin_unlock(&last_ptr->refill_lock);
4243                                         goto checks;
4244                                 }
4245                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
4246                                    && !failed_cluster_refill) {
4247                                 spin_unlock(&last_ptr->refill_lock);
4248
4249                                 failed_cluster_refill = true;
4250                                 wait_block_group_cache_progress(block_group,
4251                                        num_bytes + empty_cluster + empty_size);
4252                                 goto have_block_group;
4253                         }
4254
4255                         /*
4256                          * at this point we either didn't find a cluster
4257                          * or we weren't able to allocate a block from our
4258                          * cluster.  Free the cluster we've been trying
4259                          * to use, and go to the next block group
4260                          */
4261                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
4262                         spin_unlock(&last_ptr->refill_lock);
4263                         goto loop;
4264                 }
4265
4266                 offset = btrfs_find_space_for_alloc(block_group, search_start,
4267                                                     num_bytes, empty_size);
4268                 /*
4269                  * If we didn't find a chunk, and we haven't failed on this
4270                  * block group before, and this block group is in the middle of
4271                  * caching and we are ok with waiting, then go ahead and wait
4272                  * for progress to be made, and set failed_alloc to true.
4273                  *
4274                  * If failed_alloc is true then we've already waited on this
4275                  * block group once and should move on to the next block group.
4276                  */
4277                 if (!offset && !failed_alloc && !cached &&
4278                     loop > LOOP_CACHING_NOWAIT) {
4279                         wait_block_group_cache_progress(block_group,
4280                                                 num_bytes + empty_size);
4281                         failed_alloc = true;
4282                         goto have_block_group;
4283                 } else if (!offset) {
4284                         goto loop;
4285                 }
4286 checks:
4287                 search_start = stripe_align(root, offset);
4288                 /* past our search end, move on to the next group */
4289                 if (search_start + num_bytes >= search_end) {
4290                         btrfs_add_free_space(block_group, offset, num_bytes);
4291                         goto loop;
4292                 }
4293
4294                 /* move on to the next group */
4295                 if (search_start + num_bytes >
4296                     block_group->key.objectid + block_group->key.offset) {
4297                         btrfs_add_free_space(block_group, offset, num_bytes);
4298                         goto loop;
4299                 }
4300
4301                 if (exclude_nr > 0 &&
4302                     (search_start + num_bytes > exclude_start &&
4303                      search_start < exclude_start + exclude_nr)) {
4304                         search_start = exclude_start + exclude_nr;
4305
4306                         btrfs_add_free_space(block_group, offset, num_bytes);
4307                         /*
4308                          * if search_start is still in this block group
4309                          * then we just re-search this block group
4310                          */
4311                         if (search_start >= block_group->key.objectid &&
4312                             search_start < (block_group->key.objectid +
4313                                             block_group->key.offset))
4314                                 goto have_block_group;
4315                         goto loop;
4316                 }
4317
4318                 ins->objectid = search_start;
4319                 ins->offset = num_bytes;
4320
4321                 if (offset < search_start)
4322                         btrfs_add_free_space(block_group, offset,
4323                                              search_start - offset);
4324                 BUG_ON(offset > search_start);
4325
4326                 update_reserved_extents(block_group, num_bytes, 1);
4327
4328                 /* we are all good, let's return */
4329                 break;
4330 loop:
4331                 failed_cluster_refill = false;
4332                 failed_alloc = false;
4333                 btrfs_put_block_group(block_group);
4334         }
4335         up_read(&space_info->groups_sem);
4336
4337         /* LOOP_CACHED_ONLY, only search fully cached block groups
4338          * LOOP_CACHING_NOWAIT, search partially cached block groups, but
4339          *                      don't wait for them to finish caching
4340          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
4341          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4342          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
4343          *                      again
4344          */
4345         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4346             (found_uncached_bg || empty_size || empty_cluster ||
4347              allowed_chunk_alloc)) {
4348                 if (found_uncached_bg) {
4349                         found_uncached_bg = false;
4350                         if (loop < LOOP_CACHING_WAIT) {
4351                                 loop++;
4352                                 goto search;
4353                         }
4354                 }
4355
4356                 if (loop == LOOP_ALLOC_CHUNK) {
4357                         empty_size = 0;
4358                         empty_cluster = 0;
4359                 }
4360
4361                 if (allowed_chunk_alloc) {
4362                         ret = do_chunk_alloc(trans, root, num_bytes +
4363                                              2 * 1024 * 1024, data, 1);
4364                         allowed_chunk_alloc = 0;
4365                 } else {
4366                         space_info->force_alloc = 1;
4367                 }
4368
4369                 if (loop < LOOP_NO_EMPTY_SIZE) {
4370                         loop++;
4371                         goto search;
4372                 }
4373                 ret = -ENOSPC;
4374         } else if (!ins->objectid) {
4375                 ret = -ENOSPC;
4376         }
4377
4378         /* we found what we needed */
4379         if (ins->objectid) {
4380                 if (!(data & BTRFS_BLOCK_GROUP_DATA))
4381                         trans->block_group = block_group->key.objectid;
4382
4383                 btrfs_put_block_group(block_group);
4384                 ret = 0;
4385         }
4386
4387         return ret;
4388 }
4389
4390 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4391                             int dump_block_groups)
4392 {
4393         struct btrfs_block_group_cache *cache;
4394
4395         spin_lock(&info->lock);
4396         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4397                (unsigned long long)(info->total_bytes - info->bytes_used -
4398                                     info->bytes_pinned - info->bytes_reserved -
4399                                     info->bytes_super),
4400                (info->full) ? "" : "not ");
4401         printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
4402                " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
4403                "\n",
4404                (unsigned long long)info->total_bytes,
4405                (unsigned long long)info->bytes_pinned,
4406                (unsigned long long)info->bytes_delalloc,
4407                (unsigned long long)info->bytes_may_use,
4408                (unsigned long long)info->bytes_used,
4409                (unsigned long long)info->bytes_root,
4410                (unsigned long long)info->bytes_super,
4411                (unsigned long long)info->bytes_reserved);
4412         spin_unlock(&info->lock);
4413
4414         if (!dump_block_groups)
4415                 return;
4416
4417         down_read(&info->groups_sem);
4418         list_for_each_entry(cache, &info->block_groups, list) {
4419                 spin_lock(&cache->lock);
4420                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
4421                        "%llu pinned %llu reserved\n",
4422                        (unsigned long long)cache->key.objectid,
4423                        (unsigned long long)cache->key.offset,
4424                        (unsigned long long)btrfs_block_group_used(&cache->item),
4425                        (unsigned long long)cache->pinned,
4426                        (unsigned long long)cache->reserved);
4427                 btrfs_dump_free_space(cache, bytes);
4428                 spin_unlock(&cache->lock);
4429         }
4430         up_read(&info->groups_sem);
4431 }
4432
4433 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4434                          struct btrfs_root *root,
4435                          u64 num_bytes, u64 min_alloc_size,
4436                          u64 empty_size, u64 hint_byte,
4437                          u64 search_end, struct btrfs_key *ins,
4438                          u64 data)
4439 {
4440         int ret;
4441         u64 search_start = 0;
4442         struct btrfs_fs_info *info = root->fs_info;
4443
4444         data = btrfs_get_alloc_profile(root, data);
4445 again:
4446         /*
4447          * the only place that sets empty_size is btrfs_realloc_node, which
4448          * is not called recursively on allocations
4449          */
4450         if (empty_size || root->ref_cows) {
4451                 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
4452                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4453                                      2 * 1024 * 1024,
4454                                      BTRFS_BLOCK_GROUP_METADATA |
4455                                      (info->metadata_alloc_profile &
4456                                       info->avail_metadata_alloc_bits), 0);
4457                 }
4458                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4459                                      num_bytes + 2 * 1024 * 1024, data, 0);
4460         }
4461
4462         WARN_ON(num_bytes < root->sectorsize);
4463         ret = find_free_extent(trans, root, num_bytes, empty_size,
4464                                search_start, search_end, hint_byte, ins,
4465                                trans->alloc_exclude_start,
4466                                trans->alloc_exclude_nr, data);
4467
4468         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
4469                 num_bytes = num_bytes >> 1;
4470                 num_bytes = num_bytes & ~(root->sectorsize - 1);
4471                 num_bytes = max(num_bytes, min_alloc_size);
4472                 do_chunk_alloc(trans, root->fs_info->extent_root,
4473                                num_bytes, data, 1);
4474                 goto again;
4475         }
4476         if (ret == -ENOSPC) {
4477                 struct btrfs_space_info *sinfo;
4478
4479                 sinfo = __find_space_info(root->fs_info, data);
4480                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4481                        "wanted %llu\n", (unsigned long long)data,
4482                        (unsigned long long)num_bytes);
4483                 dump_space_info(sinfo, num_bytes, 1);
4484         }
4485
4486         return ret;
4487 }
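
/*
 * a sketch of a hypothetical caller (not taken from this file): reserve
 * a 4K data extent with no fallback below 4K:
 *
 *	struct btrfs_key ins;
 *	int ret;
 *
 *	ret = btrfs_reserve_extent(trans, root, 4096, 4096, 0, 0,
 *				   (u64)-1, &ins, BTRFS_BLOCK_GROUP_DATA);
 *	if (ret == 0) {
 *		... ins.objectid is the extent start, ins.offset its size;
 *		the space stays reserved until it is freed with
 *		btrfs_free_reserved_extent() or consumed ...
 *	}
 */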
4488
4489 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4490 {
4491         struct btrfs_block_group_cache *cache;
4492         int ret = 0;
4493
4494         cache = btrfs_lookup_block_group(root->fs_info, start);
4495         if (!cache) {
4496                 printk(KERN_ERR "Unable to find block group for %llu\n",
4497                        (unsigned long long)start);
4498                 return -ENOSPC;
4499         }
4500
4501         ret = btrfs_discard_extent(root, start, len);
4502
4503         btrfs_add_free_space(cache, start, len);
4504         update_reserved_extents(cache, len, 0);
4505         btrfs_put_block_group(cache);
4506
4507         return ret;
4508 }
4509
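/*
 * layout of the item built below (a sketch; the ref body depends on
 * whether the extent is shared):
 *
 *	[ btrfs_extent_item | btrfs_extent_inline_ref | ref body ]
 *
 * for a shared ref the btrfs_shared_data_ref follows the inline ref;
 * otherwise the btrfs_extent_data_ref overlays the inline ref's offset
 * field.
 */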
4510 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4511                                       struct btrfs_root *root,
4512                                       u64 parent, u64 root_objectid,
4513                                       u64 flags, u64 owner, u64 offset,
4514                                       struct btrfs_key *ins, int ref_mod)
4515 {
4516         int ret;
4517         struct btrfs_fs_info *fs_info = root->fs_info;
4518         struct btrfs_extent_item *extent_item;
4519         struct btrfs_extent_inline_ref *iref;
4520         struct btrfs_path *path;
4521         struct extent_buffer *leaf;
4522         int type;
4523         u32 size;
4524
4525         if (parent > 0)
4526                 type = BTRFS_SHARED_DATA_REF_KEY;
4527         else
4528                 type = BTRFS_EXTENT_DATA_REF_KEY;
4529
4530         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4531
4532         path = btrfs_alloc_path();
4533         BUG_ON(!path);
4534
4535         path->leave_spinning = 1;
4536         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4537                                       ins, size);
4538         BUG_ON(ret);
4539
4540         leaf = path->nodes[0];
4541         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4542                                      struct btrfs_extent_item);
4543         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4544         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4545         btrfs_set_extent_flags(leaf, extent_item,
4546                                flags | BTRFS_EXTENT_FLAG_DATA);
4547
4548         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4549         btrfs_set_extent_inline_ref_type(leaf, iref, type);
4550         if (parent > 0) {
4551                 struct btrfs_shared_data_ref *ref;
4552                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4553                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4554                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4555         } else {
4556                 struct btrfs_extent_data_ref *ref;
4557                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4558                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4559                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4560                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4561                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4562         }
4563
4564         btrfs_mark_buffer_dirty(path->nodes[0]);
4565         btrfs_free_path(path);
4566
4567         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4568                                  1, 0);
4569         if (ret) {
4570                 printk(KERN_ERR "btrfs update block group failed for %llu "
4571                        "%llu\n", (unsigned long long)ins->objectid,
4572                        (unsigned long long)ins->offset);
4573                 BUG();
4574         }
4575         return ret;
4576 }
4577
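/*
 * tree blocks use a fixed-size item; a sketch of the layout built below:
 *
 *	[ btrfs_extent_item | btrfs_tree_block_info | btrfs_extent_inline_ref ]
 *
 * the inline ref holds either the parent block (shared backref) or the
 * owning root's objectid (normal backref).
 */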
4578 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4579                                      struct btrfs_root *root,
4580                                      u64 parent, u64 root_objectid,
4581                                      u64 flags, struct btrfs_disk_key *key,
4582                                      int level, struct btrfs_key *ins)
4583 {
4584         int ret;
4585         struct btrfs_fs_info *fs_info = root->fs_info;
4586         struct btrfs_extent_item *extent_item;
4587         struct btrfs_tree_block_info *block_info;
4588         struct btrfs_extent_inline_ref *iref;
4589         struct btrfs_path *path;
4590         struct extent_buffer *leaf;
4591         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
4592
4593         path = btrfs_alloc_path();
4594         BUG_ON(!path);
4595
4596         path->leave_spinning = 1;
4597         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4598                                       ins, size);
4599         BUG_ON(ret);
4600
4601         leaf = path->nodes[0];
4602         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4603                                      struct btrfs_extent_item);
4604         btrfs_set_extent_refs(leaf, extent_item, 1);
4605         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4606         btrfs_set_extent_flags(leaf, extent_item,
4607                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4608         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4609
4610         btrfs_set_tree_block_key(leaf, block_info, key);
4611         btrfs_set_tree_block_level(leaf, block_info, level);
4612
4613         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4614         if (parent > 0) {
4615                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4616                 btrfs_set_extent_inline_ref_type(leaf, iref,
4617                                                  BTRFS_SHARED_BLOCK_REF_KEY);
4618                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4619         } else {
4620                 btrfs_set_extent_inline_ref_type(leaf, iref,
4621                                                  BTRFS_TREE_BLOCK_REF_KEY);
4622                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4623         }
4624
4625         btrfs_mark_buffer_dirty(leaf);
4626         btrfs_free_path(path);
4627
4628         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4629                                  1, 0);
4630         if (ret) {
4631                 printk(KERN_ERR "btrfs update block group failed for %llu "
4632                        "%llu\n", (unsigned long long)ins->objectid,
4633                        (unsigned long long)ins->offset);
4634                 BUG();
4635         }
4636         return ret;
4637 }
4638
4639 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4640                                      struct btrfs_root *root,
4641                                      u64 root_objectid, u64 owner,
4642                                      u64 offset, struct btrfs_key *ins)
4643 {
4644         int ret;
4645
4646         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4647
4648         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4649                                          0, root_objectid, owner, offset,
4650                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
4651         return ret;
4652 }
4653
4654 /*
4655  * this is used by the tree logging recovery code.  It records that
4656  * an extent has been allocated and makes sure to clear the free
4657  * space cache bits as well
4658  */
4659 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4660                                    struct btrfs_root *root,
4661                                    u64 root_objectid, u64 owner, u64 offset,
4662                                    struct btrfs_key *ins)
4663 {
4664         int ret;
4665         struct btrfs_block_group_cache *block_group;
4666         struct btrfs_caching_control *caching_ctl;
4667         u64 start = ins->objectid;
4668         u64 num_bytes = ins->offset;
4669
4670         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4671         cache_block_group(block_group);
4672         caching_ctl = get_caching_control(block_group);
4673
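        /*
         * if the block group is still being cached, keep the free space
         * cache consistent: space the caching thread has already scanned
         * is removed from the cache directly, while space it hasn't
         * reached yet is marked excluded so the thread will skip it.
         */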
4674         if (!caching_ctl) {
4675                 BUG_ON(!block_group_cache_done(block_group));
4676                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
4677                 BUG_ON(ret);
4678         } else {
4679                 mutex_lock(&caching_ctl->mutex);
4680
4681                 if (start >= caching_ctl->progress) {
4682                         ret = add_excluded_extent(root, start, num_bytes);
4683                         BUG_ON(ret);
4684                 } else if (start + num_bytes <= caching_ctl->progress) {
4685                         ret = btrfs_remove_free_space(block_group,
4686                                                       start, num_bytes);
4687                         BUG_ON(ret);
4688                 } else {
4689                         num_bytes = caching_ctl->progress - start;
4690                         ret = btrfs_remove_free_space(block_group,
4691                                                       start, num_bytes);
4692                         BUG_ON(ret);
4693
4694                         start = caching_ctl->progress;
4695                         num_bytes = ins->objectid + ins->offset -
4696                                     caching_ctl->progress;
4697                         ret = add_excluded_extent(root, start, num_bytes);
4698                         BUG_ON(ret);
4699                 }
4700
4701                 mutex_unlock(&caching_ctl->mutex);
4702                 put_caching_control(caching_ctl);
4703         }
4704
4705         update_reserved_extents(block_group, ins->offset, 1);
4706         btrfs_put_block_group(block_group);
4707         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4708                                          0, owner, offset, ins, 1);
4709         return ret;
4710 }
4711
4712 /*
4713  * finds a free extent and does all the dirty work required for allocation.
4714  * the key for the extent is returned through ins; the tree buffer itself
4715  * is created separately (see btrfs_init_new_buffer).
4716  *
4717  * returns 0 if everything worked, non-zero otherwise.
4718  */
4719 static int alloc_tree_block(struct btrfs_trans_handle *trans,
4720                             struct btrfs_root *root,
4721                             u64 num_bytes, u64 parent, u64 root_objectid,
4722                             struct btrfs_disk_key *key, int level,
4723                             u64 empty_size, u64 hint_byte, u64 search_end,
4724                             struct btrfs_key *ins)
4725 {
4726         int ret;
4727         u64 flags = 0;
4728
4729         ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4730                                    empty_size, hint_byte, search_end,
4731                                    ins, 0);
4732         if (ret)
4733                 return ret;
4734
4735         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4736                 if (parent == 0)
4737                         parent = ins->objectid;
4738                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4739         } else
4740                 BUG_ON(parent > 0);
4741
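        /*
         * for anything but the log tree the extent item is inserted
         * later, when the delayed ref is run; the extent_op records the
         * key and flags the delayed-ref code should write into it.
         */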
4742         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4743                 struct btrfs_delayed_extent_op *extent_op;
4744                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4745                 BUG_ON(!extent_op);
4746                 if (key)
4747                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
4748                 else
4749                         memset(&extent_op->key, 0, sizeof(extent_op->key));
4750                 extent_op->flags_to_set = flags;
4751                 extent_op->update_key = 1;
4752                 extent_op->update_flags = 1;
4753                 extent_op->is_data = 0;
4754
4755                 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4756                                         ins->offset, parent, root_objectid,
4757                                         level, BTRFS_ADD_DELAYED_EXTENT,
4758                                         extent_op);
4759                 BUG_ON(ret);
4760         }
4761         return ret;
4762 }
4763
4764 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4765                                             struct btrfs_root *root,
4766                                             u64 bytenr, u32 blocksize,
4767                                             int level)
4768 {
4769         struct extent_buffer *buf;
4770
4771         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4772         if (!buf)
4773                 return ERR_PTR(-ENOMEM);
4774         btrfs_set_header_generation(buf, trans->transid);
4775         btrfs_set_buffer_lockdep_class(buf, level);
4776         btrfs_tree_lock(buf);
4777         clean_tree_block(trans, root, buf);
4778
4779         btrfs_set_lock_blocking(buf);
4780         btrfs_set_buffer_uptodate(buf);
4781
4782         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4783                 set_extent_dirty(&root->dirty_log_pages, buf->start,
4784                          buf->start + buf->len - 1, GFP_NOFS);
4785         } else {
4786                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4787                          buf->start + buf->len - 1, GFP_NOFS);
4788         }
4789         trans->blocks_used++;
4790         /* this returns a buffer locked for blocking */
4791         return buf;
4792 }
4793
4794 /*
4795  * helper function to allocate a block for a given tree
4796  * returns the tree buffer or an ERR_PTR on failure.
4797  */
4798 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4799                                         struct btrfs_root *root, u32 blocksize,
4800                                         u64 parent, u64 root_objectid,
4801                                         struct btrfs_disk_key *key, int level,
4802                                         u64 hint, u64 empty_size)
4803 {
4804         struct btrfs_key ins;
4805         int ret;
4806         struct extent_buffer *buf;
4807
4808         ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4809                                key, level, empty_size, hint, (u64)-1, &ins);
4810         if (ret) {
4811                 BUG_ON(ret > 0);
4812                 return ERR_PTR(ret);
4813         }
4814
4815         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4816                                     blocksize, level);
4817         return buf;
4818 }
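
/*
 * a hypothetical sketch of a caller (blocksize, disk_key, level and hint
 * are assumed to come from the surrounding COW code, as in
 * __btrfs_cow_block):
 *
 *	buf = btrfs_alloc_free_block(trans, root, blocksize, 0,
 *				     root->root_key.objectid, &disk_key,
 *				     level, hint, 0);
 *	if (IS_ERR(buf))
 *		return PTR_ERR(buf);
 */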
4819
4820 struct walk_control {
4821         u64 refs[BTRFS_MAX_LEVEL];
4822         u64 flags[BTRFS_MAX_LEVEL];
4823         struct btrfs_key update_progress;
4824         int stage;
4825         int level;
4826         int shared_level;
4827         int update_ref;
4828         int keep_locks;
4829         int reada_slot;
4830         int reada_count;
4831 };
4832
4833 #define DROP_REFERENCE  1
4834 #define UPDATE_BACKREF  2
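
/*
 * the walk has two stages: DROP_REFERENCE walks down dropping references
 * on blocks only referenced by this tree, and UPDATE_BACKREF is entered
 * temporarily when a shared subtree needs its backrefs converted to full
 * backrefs before the drop can continue.
 */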
4835
4836 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4837                                      struct btrfs_root *root,
4838                                      struct walk_control *wc,
4839                                      struct btrfs_path *path)
4840 {
4841         u64 bytenr;
4842         u64 generation;
4843         u64 refs;
4844         u64 last = 0;
4845         u32 nritems;
4846         u32 blocksize;
4847         struct btrfs_key key;
4848         struct extent_buffer *eb;
4849         int ret;
4850         int slot;
4851         int nread = 0;
4852
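        /*
         * scale the readahead window: shrink it while the walk is still
         * behind the point the last readahead pass reached, and grow it
         * (capped at a full node) once the walk has caught up.
         */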
4853         if (path->slots[wc->level] < wc->reada_slot) {
4854                 wc->reada_count = wc->reada_count * 2 / 3;
4855                 wc->reada_count = max(wc->reada_count, 2);
4856         } else {
4857                 wc->reada_count = wc->reada_count * 3 / 2;
4858                 wc->reada_count = min_t(int, wc->reada_count,
4859                                         BTRFS_NODEPTRS_PER_BLOCK(root));
4860         }
4861
4862         eb = path->nodes[wc->level];
4863         nritems = btrfs_header_nritems(eb);
4864         blocksize = btrfs_level_size(root, wc->level - 1);
4865
4866         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
4867                 if (nread >= wc->reada_count)
4868                         break;
4869
4870                 cond_resched();
4871                 bytenr = btrfs_node_blockptr(eb, slot);
4872                 generation = btrfs_node_ptr_generation(eb, slot);
4873
4874                 if (slot == path->slots[wc->level])
4875                         goto reada;
4876
4877                 if (wc->stage == UPDATE_BACKREF &&
4878                     generation <= root->root_key.offset)
4879                         continue;
4880
4881                 if (wc->stage == DROP_REFERENCE) {
4882                         ret = btrfs_lookup_extent_info(trans, root,
4883                                                 bytenr, blocksize,
4884                                                 &refs, NULL);
4885                         BUG_ON(ret);
4886                         BUG_ON(refs == 0);
4887                         if (refs == 1)
4888                                 goto reada;
4889
4890                         if (!wc->update_ref ||
4891                             generation <= root->root_key.offset)
4892                                 continue;
4893                         btrfs_node_key_to_cpu(eb, &key, slot);
4894                         ret = btrfs_comp_cpu_keys(&key,
4895                                                   &wc->update_progress);
4896                         if (ret < 0)
4897                                 continue;
4898                 }
4899 reada:
4900                 ret = readahead_tree_block(root, bytenr, blocksize,
4901                                            generation);
4902                 if (ret)
4903                         break;
4904                 last = bytenr + blocksize;
4905                 nread++;
4906         }
4907         wc->reada_slot = slot;
4908 }
4909
4910 /*
4911  * helper to process tree block while walking down the tree.
4912  *
4913  * when wc->stage == UPDATE_BACKREF, this function updates
4914  * back refs for pointers in the block.
4915  *
4916  * NOTE: return value 1 means we should stop walking down.
4917  */
4918 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
4919                                    struct btrfs_root *root,
4920                                    struct btrfs_path *path,
4921                                    struct walk_control *wc)
4922 {
4923         int level = wc->level;
4924         struct extent_buffer *eb = path->nodes[level];
4925         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
4926         int ret;
4927
4928         if (wc->stage == UPDATE_BACKREF &&
4929             btrfs_header_owner(eb) != root->root_key.objectid)
4930                 return 1;
4931
4932         /*
4933          * when the reference count of a tree block is 1, it won't increase
4934          * again.  Once the full backref flag is set, we never clear it.
4935          */
4936         if ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
4937             (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag))) {
4938                 BUG_ON(!path->locks[level]);
4939                 ret = btrfs_lookup_extent_info(trans, root,
4940                                                eb->start, eb->len,
4941                                                &wc->refs[level],
4942                                                &wc->flags[level]);
4943                 BUG_ON(ret);
4944                 BUG_ON(wc->refs[level] == 0);
4945         }
4946
4947         if (wc->stage == DROP_REFERENCE) {
4948                 if (wc->refs[level] > 1)
4949                         return 1;
4950
4951                 if (path->locks[level] && !wc->keep_locks) {
4952                         btrfs_tree_unlock(eb);
4953                         path->locks[level] = 0;
4954                 }
4955                 return 0;
4956         }
4957
4958         /* wc->stage == UPDATE_BACKREF */
4959         if (!(wc->flags[level] & flag)) {
4960                 BUG_ON(!path->locks[level]);
4961                 ret = btrfs_inc_ref(trans, root, eb, 1);
4962                 BUG_ON(ret);
4963                 ret = btrfs_dec_ref(trans, root, eb, 0);
4964                 BUG_ON(ret);
4965                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
4966                                                   eb->len, flag, 0);
4967                 BUG_ON(ret);
4968                 wc->flags[level] |= flag;
4969         }
4970
4971         /*
4972          * the block is shared by multiple trees, so it's not good to
4973          * keep the tree lock
4974          */
4975         if (path->locks[level] && level > 0) {
4976                 btrfs_tree_unlock(eb);
4977                 path->locks[level] = 0;
4978         }
4979         return 0;
4980 }
4981
4982 /*
4983  * helper to process tree block pointer.
4984  *
4985  * when wc->stage == DROP_REFERENCE, this function checks
4986  * the reference count of the block pointed to. if the block
4987  * is shared and we need to update back refs for the subtree
4988  * rooted at the block, this function changes wc->stage to
4989  * UPDATE_BACKREF. if the block is shared and there is no
4990  * need to update back refs, this function drops the reference
4991  * to the block.
4992  *
4993  * NOTE: return value 1 means we should stop walking down.
4994  */
4995 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
4996                                  struct btrfs_root *root,
4997                                  struct btrfs_path *path,
4998                                  struct walk_control *wc)
4999 {
5000         u64 bytenr;
5001         u64 generation;
5002         u64 parent;
5003         u32 blocksize;
5004         struct btrfs_key key;
5005         struct extent_buffer *next;
5006         int level = wc->level;
5007         int reada = 0;
5008         int ret = 0;
5009
5010         generation = btrfs_node_ptr_generation(path->nodes[level],
5011                                                path->slots[level]);
5012         /*
5013          * if the lower level block was created before the snapshot
5014          * was created, we know there is no need to update back refs
5015          * for the subtree
5016          */
5017         if (wc->stage == UPDATE_BACKREF &&
5018             generation <= root->root_key.offset)
5019                 return 1;
5020
5021         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5022         blocksize = btrfs_level_size(root, level - 1);
5023
5024         next = btrfs_find_tree_block(root, bytenr, blocksize);
5025         if (!next) {
5026                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5027                 reada = 1;
5028         }
5029         btrfs_tree_lock(next);
5030         btrfs_set_lock_blocking(next);
5031
5032         if (wc->stage == DROP_REFERENCE) {
5033                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5034                                                &wc->refs[level - 1],
5035                                                &wc->flags[level - 1]);
5036                 BUG_ON(ret);
5037                 BUG_ON(wc->refs[level - 1] == 0);
5038
5039                 if (wc->refs[level - 1] > 1) {
5040                         if (!wc->update_ref ||
5041                             generation <= root->root_key.offset)
5042                                 goto skip;
5043
5044                         btrfs_node_key_to_cpu(path->nodes[level], &key,
5045                                               path->slots[level]);
5046                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5047                         if (ret < 0)
5048                                 goto skip;
5049
5050                         wc->stage = UPDATE_BACKREF;
5051                         wc->shared_level = level - 1;
5052                 }
5053         }
5054
5055         if (!btrfs_buffer_uptodate(next, generation)) {
5056                 btrfs_tree_unlock(next);
5057                 free_extent_buffer(next);
5058                 next = NULL;
5059         }
5060
5061         if (!next) {
5062                 if (reada && level == 1)
5063                         reada_walk_down(trans, root, wc, path);
5064                 next = read_tree_block(root, bytenr, blocksize, generation);
5065                 btrfs_tree_lock(next);
5066                 btrfs_set_lock_blocking(next);
5067         }
5068
5069         level--;
5070         BUG_ON(level != btrfs_header_level(next));
5071         path->nodes[level] = next;
5072         path->slots[level] = 0;
5073         path->locks[level] = 1;
5074         wc->level = level;
5075         if (wc->level == 1)
5076                 wc->reada_slot = 0;
5077         return 0;
5078 skip:
5079         wc->refs[level - 1] = 0;
5080         wc->flags[level - 1] = 0;
5081
5082         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5083                 parent = path->nodes[level]->start;
5084         } else {
5085                 BUG_ON(root->root_key.objectid !=
5086                        btrfs_header_owner(path->nodes[level]));
5087                 parent = 0;
5088         }
5089
5090         ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5091                                 root->root_key.objectid, level - 1, 0);
5092         BUG_ON(ret);
5093
5094         btrfs_tree_unlock(next);
5095         free_extent_buffer(next);
5096         return 1;
5097 }
5098
5099 /*
5100  * helper to process tree block while walking up the tree.
5101  *
5102  * when wc->stage == DROP_REFERENCE, this function drops
5103  * reference count on the block.
5104  *
5105  * when wc->stage == UPDATE_BACKREF, this function changes
5106  * wc->stage back to DROP_REFERENCE if we changed wc->stage
5107  * to UPDATE_BACKREF previously while processing the block.
5108  *
5109  * NOTE: return value 1 means we should stop walking up.
5110  */
5111 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5112                                  struct btrfs_root *root,
5113                                  struct btrfs_path *path,
5114                                  struct walk_control *wc)
5115 {
5116         int ret = 0;
5117         int level = wc->level;
5118         struct extent_buffer *eb = path->nodes[level];
5119         u64 parent = 0;
5120
5121         if (wc->stage == UPDATE_BACKREF) {
5122                 BUG_ON(wc->shared_level < level);
5123                 if (level < wc->shared_level)
5124                         goto out;
5125
5126                 ret = find_next_key(path, level + 1, &wc->update_progress);
5127                 if (ret > 0)
5128                         wc->update_ref = 0;
5129
5130                 wc->stage = DROP_REFERENCE;
5131                 wc->shared_level = -1;
5132                 path->slots[level] = 0;
5133
5134                 /*
5135                  * check reference count again if the block isn't locked.
5136                  * we should start walking down the tree again if reference
5137                  * count is one.
5138                  */
5139                 if (!path->locks[level]) {
5140                         BUG_ON(level == 0);
5141                         btrfs_tree_lock(eb);
5142                         btrfs_set_lock_blocking(eb);
5143                         path->locks[level] = 1;
5144
5145                         ret = btrfs_lookup_extent_info(trans, root,
5146                                                        eb->start, eb->len,
5147                                                        &wc->refs[level],
5148                                                        &wc->flags[level]);
5149                         BUG_ON(ret);
5150                         BUG_ON(wc->refs[level] == 0);
5151                         if (wc->refs[level] == 1) {
5152                                 btrfs_tree_unlock(eb);
5153                                 path->locks[level] = 0;
5154                                 return 1;
5155                         }
5156                 }
5157         }
5158
5159         /* wc->stage == DROP_REFERENCE */
5160         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5161
5162         if (wc->refs[level] == 1) {
5163                 if (level == 0) {
5164                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5165                                 ret = btrfs_dec_ref(trans, root, eb, 1);
5166                         else
5167                                 ret = btrfs_dec_ref(trans, root, eb, 0);
5168                         BUG_ON(ret);
5169                 }
5170                 /* make block locked assertion in clean_tree_block happy */
5171                 if (!path->locks[level] &&
5172                     btrfs_header_generation(eb) == trans->transid) {
5173                         btrfs_tree_lock(eb);
5174                         btrfs_set_lock_blocking(eb);
5175                         path->locks[level] = 1;
5176                 }
5177                 clean_tree_block(trans, root, eb);
5178         }
5179
5180         if (eb == root->node) {
5181                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5182                         parent = eb->start;
5183                 else
5184                         BUG_ON(root->root_key.objectid !=
5185                                btrfs_header_owner(eb));
5186         } else {
5187                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5188                         parent = path->nodes[level + 1]->start;
5189                 else
5190                         BUG_ON(root->root_key.objectid !=
5191                                btrfs_header_owner(path->nodes[level + 1]));
5192         }
5193
5194         ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
5195                                 root->root_key.objectid, level, 0);
5196         BUG_ON(ret);
5197 out:
5198         wc->refs[level] = 0;
5199         wc->flags[level] = 0;
5200         return ret;
5201 }
5202
5203 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5204                                    struct btrfs_root *root,
5205                                    struct btrfs_path *path,
5206                                    struct walk_control *wc)
5207 {
5208         int level = wc->level;
5209         int ret;
5210
5211         while (level >= 0) {
5212                 if (path->slots[level] >=
5213                     btrfs_header_nritems(path->nodes[level]))
5214                         break;
5215
5216                 ret = walk_down_proc(trans, root, path, wc);
5217                 if (ret > 0)
5218                         break;
5219
5220                 if (level == 0)
5221                         break;
5222
5223                 ret = do_walk_down(trans, root, path, wc);
5224                 if (ret > 0) {
5225                         path->slots[level]++;
5226                         continue;
5227                 }
5228                 level = wc->level;
5229         }
5230         return 0;
5231 }
5232
5233 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5234                                  struct btrfs_root *root,
5235                                  struct btrfs_path *path,
5236                                  struct walk_control *wc, int max_level)
5237 {
5238         int level = wc->level;
5239         int ret;
5240
5241         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5242         while (level < max_level && path->nodes[level]) {
5243                 wc->level = level;
5244                 if (path->slots[level] + 1 <
5245                     btrfs_header_nritems(path->nodes[level])) {
5246                         path->slots[level]++;
5247                         return 0;
5248                 } else {
5249                         ret = walk_up_proc(trans, root, path, wc);
5250                         if (ret > 0)
5251                                 return 0;
5252
5253                         if (path->locks[level]) {
5254                                 btrfs_tree_unlock(path->nodes[level]);
5255                                 path->locks[level] = 0;
5256                         }
5257                         free_extent_buffer(path->nodes[level]);
5258                         path->nodes[level] = NULL;
5259                         level++;
5260                 }
5261         }
5262         return 1;
5263 }
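
/*
 * btrfs_drop_snapshot() below drives walk_down_tree() and walk_up_tree()
 * in a loop, saving drop_progress and drop_level in the root item each
 * time it has to end the transaction, so a partially finished drop can
 * be resumed (see the drop_progress handling at the start of the
 * function).
 */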
5264
5265 /*
5266  * drop a subvolume tree.
5267  *
5268  * this function traverses the tree freeing any blocks that are only
5269  * referenced by the tree.
5270  *
5271  * when a shared tree block is found, this function decreases its
5272  * reference count by one. if update_ref is true, this function
5273  * also makes sure backrefs for the shared block and all lower level
5274  * blocks are properly updated.
5275  */
5276 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
5277 {
5278         struct btrfs_path *path;
5279         struct btrfs_trans_handle *trans;
5280         struct btrfs_root *tree_root = root->fs_info->tree_root;
5281         struct btrfs_root_item *root_item = &root->root_item;
5282         struct walk_control *wc;
5283         struct btrfs_key key;
5284         int err = 0;
5285         int ret;
5286         int level;
5287
5288         path = btrfs_alloc_path();
5289         BUG_ON(!path);
5290
5291         wc = kzalloc(sizeof(*wc), GFP_NOFS);
5292         BUG_ON(!wc);
5293
5294         trans = btrfs_start_transaction(tree_root, 1);
5295
5296         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5297                 level = btrfs_header_level(root->node);
5298                 path->nodes[level] = btrfs_lock_root_node(root);
5299                 btrfs_set_lock_blocking(path->nodes[level]);
5300                 path->slots[level] = 0;
5301                 path->locks[level] = 1;
5302                 memset(&wc->update_progress, 0,
5303                        sizeof(wc->update_progress));
5304         } else {
5305                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5306                 memcpy(&wc->update_progress, &key,
5307                        sizeof(wc->update_progress));
5308
5309                 level = root_item->drop_level;
5310                 BUG_ON(level == 0);
5311                 path->lowest_level = level;
5312                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5313                 path->lowest_level = 0;
5314                 if (ret < 0) {
5315                         err = ret;
5316                         goto out;
5317                 }
5318                 WARN_ON(ret > 0);
5319
5320                 /*
5321                  * unlock our path, this is safe because only this
5322                  * function is allowed to delete this snapshot
5323                  */
5324                 btrfs_unlock_up_safe(path, 0);
5325
5326                 level = btrfs_header_level(root->node);
5327                 while (1) {
5328                         btrfs_tree_lock(path->nodes[level]);
5329                         btrfs_set_lock_blocking(path->nodes[level]);
5330
5331                         ret = btrfs_lookup_extent_info(trans, root,
5332                                                 path->nodes[level]->start,
5333                                                 path->nodes[level]->len,
5334                                                 &wc->refs[level],
5335                                                 &wc->flags[level]);
5336                         BUG_ON(ret);
5337                         BUG_ON(wc->refs[level] == 0);
5338
5339                         if (level == root_item->drop_level)
5340                                 break;
5341
5342                         btrfs_tree_unlock(path->nodes[level]);
5343                         WARN_ON(wc->refs[level] != 1);
5344                         level--;
5345                 }
5346         }
5347
5348         wc->level = level;
5349         wc->shared_level = -1;
5350         wc->stage = DROP_REFERENCE;
5351         wc->update_ref = update_ref;
5352         wc->keep_locks = 0;
5353         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
5354
5355         while (1) {
5356                 ret = walk_down_tree(trans, root, path, wc);
5357                 if (ret < 0) {
5358                         err = ret;
5359                         break;
5360                 }
5361
5362                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5363                 if (ret < 0) {
5364                         err = ret;
5365                         break;
5366                 }
5367
5368                 if (ret > 0) {
5369                         BUG_ON(wc->stage != DROP_REFERENCE);
5370                         break;
5371                 }
5372
5373                 if (wc->stage == DROP_REFERENCE) {
5374                         level = wc->level;
5375                         btrfs_node_key(path->nodes[level],
5376                                        &root_item->drop_progress,
5377                                        path->slots[level]);
5378                         root_item->drop_level = level;
5379                 }
5380
5381                 BUG_ON(wc->level == 0);
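                /*
                 * The drop_progress key recorded above lets the drop
                 * resume from this point after a restart.  If a
                 * transaction commit is pending, write the progress
                 * into the root item and start a fresh transaction so
                 * the commit isn't held up; otherwise just drain the
                 * delayed ref updates that have accumulated.
                 */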
5382                 if (trans->transaction->in_commit ||
5383                     trans->transaction->delayed_refs.flushing) {
5384                         ret = btrfs_update_root(trans, tree_root,
5385                                                 &root->root_key,
5386                                                 root_item);
5387                         BUG_ON(ret);
5388
5389                         btrfs_end_transaction(trans, tree_root);
5390                         trans = btrfs_start_transaction(tree_root, 1);
5391                 } else {
5392                         unsigned long update;
5393                         update = trans->delayed_ref_updates;
5394                         trans->delayed_ref_updates = 0;
5395                         if (update)
5396                                 btrfs_run_delayed_refs(trans, tree_root,
5397                                                        update);
5398                 }
5399         }
5400         btrfs_release_path(root, path);
5401         BUG_ON(err);
5402
5403         ret = btrfs_del_root(trans, tree_root, &root->root_key);
5404         BUG_ON(ret);
5405
5406         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5407                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
5408                                            NULL, NULL);
5409                 BUG_ON(ret < 0);
5410                 if (ret > 0) {
5411                         ret = btrfs_del_orphan_item(trans, tree_root,
5412                                                     root->root_key.objectid);
5413                         BUG_ON(ret);
5414                 }
5415         }
5416
5417         if (root->in_radix) {
5418                 btrfs_free_fs_root(tree_root->fs_info, root);
5419         } else {
5420                 free_extent_buffer(root->node);
5421                 free_extent_buffer(root->commit_root);
5422                 kfree(root);
5423         }
5424 out:
5425         btrfs_end_transaction(trans, tree_root);
5426         kfree(wc);
5427         btrfs_free_path(path);
5428         return err;
5429 }
5430
5431 /*
5432  * drop subtree rooted at tree block 'node'.
5433  *
5434  * NOTE: this function will unlock and release tree block 'node'
5435  */
5436 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5437                         struct btrfs_root *root,
5438                         struct extent_buffer *node,
5439                         struct extent_buffer *parent)
5440 {
5441         struct btrfs_path *path;
5442         struct walk_control *wc;
5443         int level;
5444         int parent_level;
5445         int ret = 0;
5446         int wret;
5447
5448         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5449
5450         path = btrfs_alloc_path();
5451         BUG_ON(!path);
5452
5453         wc = kzalloc(sizeof(*wc), GFP_NOFS);
5454         BUG_ON(!wc);
5455
5456         btrfs_assert_tree_locked(parent);
5457         parent_level = btrfs_header_level(parent);
5458         extent_buffer_get(parent);
5459         path->nodes[parent_level] = parent;
5460         path->slots[parent_level] = btrfs_header_nritems(parent);
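        /*
         * Point the parent's slot past its last item so the walk treats
         * 'parent' as fully processed and only the subtree rooted at
         * 'node' is dropped.
         */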
5461
5462         btrfs_assert_tree_locked(node);
5463         level = btrfs_header_level(node);
5464         path->nodes[level] = node;
5465         path->slots[level] = 0;
5466         path->locks[level] = 1;
5467
5468         wc->refs[parent_level] = 1;
5469         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5470         wc->level = level;
5471         wc->shared_level = -1;
5472         wc->stage = DROP_REFERENCE;
5473         wc->update_ref = 0;
5474         wc->keep_locks = 1;
5475         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
5476
5477         while (1) {
5478                 wret = walk_down_tree(trans, root, path, wc);
5479                 if (wret < 0) {
5480                         ret = wret;
5481                         break;
5482                 }
5483
5484                 wret = walk_up_tree(trans, root, path, wc, parent_level);
5485                 if (wret < 0)
5486                         ret = wret;
5487                 if (wret != 0)
5488                         break;
5489         }
5490
5491         kfree(wc);
5492         btrfs_free_path(path);
5493         return ret;
5494 }
5495
5496 #if 0
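/*
 * Everything from here to the matching #endif is compiled out: it is
 * the old extent relocation implementation, presumably kept only for
 * reference while the rewritten relocation code settles.
 */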
5497 static unsigned long calc_ra(unsigned long start, unsigned long last,
5498                              unsigned long nr)
5499 {
5500         return min(last, start + nr - 1);
5501 }
5502
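/*
 * Read in and redirty every page backing the byte range [start,
 * start + len) of the relocation inode.  Marking the pages delalloc
 * pushes the data back through the normal write path, which allocates
 * space at the new location.
 */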
5503 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
5504                                          u64 len)
5505 {
5506         u64 page_start;
5507         u64 page_end;
5508         unsigned long first_index;
5509         unsigned long last_index;
5510         unsigned long i;
5511         struct page *page;
5512         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5513         struct file_ra_state *ra;
5514         struct btrfs_ordered_extent *ordered;
5515         unsigned int total_read = 0;
5516         unsigned int total_dirty = 0;
5517         int ret = 0;
5518
5519         ra = kzalloc(sizeof(*ra), GFP_NOFS);
        if (!ra)
                return -ENOMEM;
5520
5521         mutex_lock(&inode->i_mutex);
5522         first_index = start >> PAGE_CACHE_SHIFT;
5523         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5524
5525         /* make sure the dirty trick played by the caller works */
5526         ret = invalidate_inode_pages2_range(inode->i_mapping,
5527                                             first_index, last_index);
5528         if (ret)
5529                 goto out_unlock;
5530
5531         file_ra_state_init(ra, inode->i_mapping);
5532
5533         for (i = first_index; i <= last_index; i++) {
5534                 if (total_read % ra->ra_pages == 0) {
5535                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
5536                                        calc_ra(i, last_index, ra->ra_pages));
5537                 }
5538                 total_read++;
5539 again:
5540                 BUG_ON(((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode));
5542                 page = grab_cache_page(inode->i_mapping, i);
5543                 if (!page) {
5544                         ret = -ENOMEM;
5545                         goto out_unlock;
5546                 }
5547                 if (!PageUptodate(page)) {
5548                         btrfs_readpage(NULL, page);
5549                         lock_page(page);
5550                         if (!PageUptodate(page)) {
5551                                 unlock_page(page);
5552                                 page_cache_release(page);
5553                                 ret = -EIO;
5554                                 goto out_unlock;
5555                         }
5556                 }
5557                 wait_on_page_writeback(page);
5558
5559                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5560                 page_end = page_start + PAGE_CACHE_SIZE - 1;
5561                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5562
5563                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
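                /*
                 * An ordered extent here means a write to this range is
                 * still in flight; wait for it to finish and retry so a
                 * page is never redirtied mid-writeback.
                 */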
5564                 if (ordered) {
5565                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5566                         unlock_page(page);
5567                         page_cache_release(page);
5568                         btrfs_start_ordered_extent(inode, ordered, 1);
5569                         btrfs_put_ordered_extent(ordered);
5570                         goto again;
5571                 }
5572                 set_page_extent_mapped(page);
5573
5574                 if (i == first_index)
5575                         set_extent_bits(io_tree, page_start, page_end,
5576                                         EXTENT_BOUNDARY, GFP_NOFS);
5577                 btrfs_set_extent_delalloc(inode, page_start, page_end);
5578
5579                 set_page_dirty(page);
5580                 total_dirty++;
5581
5582                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5583                 unlock_page(page);
5584                 page_cache_release(page);
5585         }
5586
5587 out_unlock:
5588         kfree(ra);
5589         mutex_unlock(&inode->i_mutex);
5590         balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
5591         return ret;
5592 }
5593
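/*
 * Insert a pinned extent mapping that points the relocation inode's
 * pages straight at the extent being relocated, then pull those pages
 * in through relocate_inode_pages().
 */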
5594 static noinline int relocate_data_extent(struct inode *reloc_inode,
5595                                          struct btrfs_key *extent_key,
5596                                          u64 offset)
5597 {
5598         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5599         struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5600         struct extent_map *em;
5601         u64 start = extent_key->objectid - offset;
5602         u64 end = start + extent_key->offset - 1;
5603
5604         em = alloc_extent_map(GFP_NOFS);
5605         BUG_ON(!em || IS_ERR(em));
5606
5607         em->start = start;
5608         em->len = extent_key->offset;
5609         em->block_len = extent_key->offset;
5610         em->block_start = extent_key->objectid;
5611         em->bdev = root->fs_info->fs_devices->latest_bdev;
5612         set_bit(EXTENT_FLAG_PINNED, &em->flags);
5613
5614         /* set up an extent map to cheat btrfs_readpage */
5615         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5616         while (1) {
5617                 int ret;
5618                 write_lock(&em_tree->lock);
5619                 ret = add_extent_mapping(em_tree, em);
5620                 write_unlock(&em_tree->lock);
5621                 if (ret != -EEXIST) {
5622                         free_extent_map(em);
5623                         break;
5624                 }
5625                 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
5626         }
5627         unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5628
5629         return relocate_inode_pages(reloc_inode, start, extent_key->offset);
5630 }
5631
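/*
 * btrfs_ref_path describes one chain of back references from an extent
 * up to a tree root: nodes[] holds the bytenr of the referencing block
 * at each level, while lowest_level, current_level and shared_level
 * track the progress of the reference walk.
 */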
5632 struct btrfs_ref_path {
5633         u64 extent_start;
5634         u64 nodes[BTRFS_MAX_LEVEL];
5635         u64 root_objectid;
5636         u64 root_generation;
5637         u64 owner_objectid;
5638         u32 num_refs;
5639         int lowest_level;
5640         int current_level;
5641         int shared_level;
5642
5643         struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5644         u64 new_nodes[BTRFS_MAX_LEVEL];
5645 };
5646
5647 struct disk_extent {
5648         u64 ram_bytes;
5649         u64 disk_bytenr;
5650         u64 disk_num_bytes;
5651         u64 offset;
5652         u64 num_bytes;
5653         u8 compression;
5654         u8 encryption;
5655         u16 other_encoding;
5656 };
5657
5658 static int is_cowonly_root(u64 root_objectid)
5659 {
5660         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5661             root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5662             root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5663             root_objectid == BTRFS_DEV_TREE_OBJECTID ||
5664             root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5665             root_objectid == BTRFS_CSUM_TREE_OBJECTID)
5666                 return 1;
5667         return 0;
5668 }
5669
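/*
 * Advance to the next reference path for ref_path->extent_start.  The
 * walk climbs from the current position until it reaches a tree root
 * (or a reference created in the running transaction), and backtracks
 * downward to find sibling references once a branch is exhausted.
 * Returns 0 when a path is found, 1 when all paths have been visited,
 * and a negative errno on failure.
 */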
5670 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
5671                                     struct btrfs_root *extent_root,
5672                                     struct btrfs_ref_path *ref_path,
5673                                     int first_time)
5674 {
5675         struct extent_buffer *leaf;
5676         struct btrfs_path *path;
5677         struct btrfs_extent_ref *ref;
5678         struct btrfs_key key;
5679         struct btrfs_key found_key;
5680         u64 bytenr;
5681         u32 nritems;
5682         int level;
5683         int ret = 1;
5684
5685         path = btrfs_alloc_path();
5686         if (!path)
5687                 return -ENOMEM;
5688
5689         if (first_time) {
5690                 ref_path->lowest_level = -1;
5691                 ref_path->current_level = -1;
5692                 ref_path->shared_level = -1;
5693                 goto walk_up;
5694         }
5695 walk_down:
5696         level = ref_path->current_level - 1;
5697         while (level >= -1) {
5698                 u64 parent;
5699                 if (level < ref_path->lowest_level)
5700                         break;
5701
5702                 if (level >= 0)
5703                         bytenr = ref_path->nodes[level];
5704                 else
5705                         bytenr = ref_path->extent_start;
5706                 BUG_ON(bytenr == 0);
5707
5708                 parent = ref_path->nodes[level + 1];
5709                 ref_path->nodes[level + 1] = 0;
5710                 ref_path->current_level = level;
5711                 BUG_ON(parent == 0);
5712
5713                 key.objectid = bytenr;
5714                 key.offset = parent + 1;
5715                 key.type = BTRFS_EXTENT_REF_KEY;
5716
5717                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5718                 if (ret < 0)
5719                         goto out;
5720                 BUG_ON(ret == 0);
5721
5722                 leaf = path->nodes[0];
5723                 nritems = btrfs_header_nritems(leaf);
5724                 if (path->slots[0] >= nritems) {
5725                         ret = btrfs_next_leaf(extent_root, path);
5726                         if (ret < 0)
5727                                 goto out;
5728                         if (ret > 0)
5729                                 goto next;
5730                         leaf = path->nodes[0];
5731                 }
5732
5733                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5734                 if (found_key.objectid == bytenr &&
5735                     found_key.type == BTRFS_EXTENT_REF_KEY) {
5736                         if (level < ref_path->shared_level)
5737                                 ref_path->shared_level = level;
5738                         goto found;
5739                 }
5740 next:
5741                 level--;
5742                 btrfs_release_path(extent_root, path);
5743                 cond_resched();
5744         }
5745         /* reached lowest level */
5746         ret = 1;
5747         goto out;
5748 walk_up:
5749         level = ref_path->current_level;
5750         while (level < BTRFS_MAX_LEVEL - 1) {
5751                 u64 ref_objectid;
5752
5753                 if (level >= 0)
5754                         bytenr = ref_path->nodes[level];
5755                 else
5756                         bytenr = ref_path->extent_start;
5757
5758                 BUG_ON(bytenr == 0);
5759
5760                 key.objectid = bytenr;
5761                 key.offset = 0;
5762                 key.type = BTRFS_EXTENT_REF_KEY;
5763
5764                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5765                 if (ret < 0)
5766                         goto out;
5767
5768                 leaf = path->nodes[0];
5769                 nritems = btrfs_header_nritems(leaf);
5770                 if (path->slots[0] >= nritems) {
5771                         ret = btrfs_next_leaf(extent_root, path);
5772                         if (ret < 0)
5773                                 goto out;
5774                         if (ret > 0) {
5775                                 /* the extent was freed by someone */
5776                                 if (ref_path->lowest_level == level)
5777                                         goto out;
5778                                 btrfs_release_path(extent_root, path);
5779                                 goto walk_down;
5780                         }
5781                         leaf = path->nodes[0];
5782                 }
5783
5784                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5785                 if (found_key.objectid != bytenr ||
5786                                 found_key.type != BTRFS_EXTENT_REF_KEY) {
5787                         /* the extent was freed by someone */
5788                         if (ref_path->lowest_level == level) {
5789                                 ret = 1;
5790                                 goto out;
5791                         }
5792                         btrfs_release_path(extent_root, path);
5793                         goto walk_down;
5794                 }
5795 found:
5796                 ref = btrfs_item_ptr(leaf, path->slots[0],
5797                                 struct btrfs_extent_ref);
5798                 ref_objectid = btrfs_ref_objectid(leaf, ref);
5799                 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5800                         if (first_time) {
5801                                 level = (int)ref_objectid;
5802                                 BUG_ON(level >= BTRFS_MAX_LEVEL);
5803                                 ref_path->lowest_level = level;
5804                                 ref_path->current_level = level;
5805                                 ref_path->nodes[level] = bytenr;
5806                         } else {
5807                                 WARN_ON(ref_objectid != level);
5808                         }
5809                 } else {
5810                         WARN_ON(level != -1);
5811                 }
5812                 first_time = 0;
5813
5814                 if (ref_path->lowest_level == level) {
5815                         ref_path->owner_objectid = ref_objectid;
5816                         ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
5817                 }
5818
5819                 /*
5820                  * the block is a tree root, or the block isn't in a
5821                  * reference counted tree.
5822                  */
5823                 if (found_key.objectid == found_key.offset ||
5824                     is_cowonly_root(btrfs_ref_root(leaf, ref))) {
5825                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5826                         ref_path->root_generation =
5827                                 btrfs_ref_generation(leaf, ref);
5828                         if (level < 0) {
5829                                 /* special reference from the tree log */
5830                                 ref_path->nodes[0] = found_key.offset;
5831                                 ref_path->current_level = 0;
5832                         }
5833                         ret = 0;
5834                         goto out;
5835                 }
5836
5837                 level++;
5838                 BUG_ON(ref_path->nodes[level] != 0);
5839                 ref_path->nodes[level] = found_key.offset;
5840                 ref_path->current_level = level;
5841
5842                 /*
5843                  * the reference was created in the running transaction,
5844                  * no need to continue walking up.
5845                  */
5846                 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
5847                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
5848                         ref_path->root_generation =
5849                                 btrfs_ref_generation(leaf, ref);
5850                         ret = 0;
5851                         goto out;
5852                 }
5853
5854                 btrfs_release_path(extent_root, path);
5855                 cond_resched();
5856         }
5857         /* reached max tree level, but no tree root found. */
5858         BUG();
5859 out:
5860         btrfs_free_path(path);
5861         return ret;
5862 }
5863
5864 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
5865                                 struct btrfs_root *extent_root,
5866                                 struct btrfs_ref_path *ref_path,
5867                                 u64 extent_start)
5868 {
5869         memset(ref_path, 0, sizeof(*ref_path));
5870         ref_path->extent_start = extent_start;
5871
5872         return __next_ref_path(trans, extent_root, ref_path, 1);
5873 }
5874
5875 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
5876                                struct btrfs_root *extent_root,
5877                                struct btrfs_ref_path *ref_path)
5878 {
5879         return __next_ref_path(trans, extent_root, ref_path, 0);
5880 }
5881
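/*
 * Collect the file extents in the relocation inode that now hold the
 * data of 'extent_key'.  When no_fragment is set the caller needs a
 * single replacement extent, so 1 is returned if the data turned out
 * to be split across several extents.
 */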
5882 static noinline int get_new_locations(struct inode *reloc_inode,
5883                                       struct btrfs_key *extent_key,
5884                                       u64 offset, int no_fragment,
5885                                       struct disk_extent **extents,
5886                                       int *nr_extents)
5887 {
5888         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5889         struct btrfs_path *path;
5890         struct btrfs_file_extent_item *fi;
5891         struct extent_buffer *leaf;
5892         struct disk_extent *exts = *extents;
5893         struct btrfs_key found_key;
5894         u64 cur_pos;
5895         u64 last_byte;
5896         u32 nritems;
5897         int nr = 0;
5898         int max = *nr_extents;
5899         int ret;
5900
5901         WARN_ON(!no_fragment && *extents);
5902         if (!exts) {
5903                 max = 1;
5904                 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
5905                 if (!exts)
5906                         return -ENOMEM;
5907         }
5908
5909         path = btrfs_alloc_path();
5910         BUG_ON(!path);
5911
5912         cur_pos = extent_key->objectid - offset;
5913         last_byte = extent_key->objectid + extent_key->offset;
5914         ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
5915                                        cur_pos, 0);
5916         if (ret < 0)
5917                 goto out;
5918         if (ret > 0) {
5919                 ret = -ENOENT;
5920                 goto out;
5921         }
5922
5923         while (1) {
5924                 leaf = path->nodes[0];
5925                 nritems = btrfs_header_nritems(leaf);
5926                 if (path->slots[0] >= nritems) {
5927                         ret = btrfs_next_leaf(root, path);
5928                         if (ret < 0)
5929                                 goto out;
5930                         if (ret > 0)
5931                                 break;
5932                         leaf = path->nodes[0];
5933                 }
5934
5935                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5936                 if (found_key.offset != cur_pos ||
5937                     found_key.type != BTRFS_EXTENT_DATA_KEY ||
5938                     found_key.objectid != reloc_inode->i_ino)
5939                         break;
5940
5941                 fi = btrfs_item_ptr(leaf, path->slots[0],
5942                                     struct btrfs_file_extent_item);
5943                 if (btrfs_file_extent_type(leaf, fi) !=
5944                     BTRFS_FILE_EXTENT_REG ||
5945                     btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
5946                         break;
5947
5948                 if (nr == max) {
5949                         struct disk_extent *old = exts;
5950                         max *= 2;
5951                         exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
                        if (!exts) {
                                exts = old;
                                ret = -ENOMEM;
                                goto out;
                        }
5952                         memcpy(exts, old, sizeof(*exts) * nr);
5953                         if (old != *extents)
5954                                 kfree(old);
5955                 }
5956
5957                 exts[nr].disk_bytenr =
5958                         btrfs_file_extent_disk_bytenr(leaf, fi);
5959                 exts[nr].disk_num_bytes =
5960                         btrfs_file_extent_disk_num_bytes(leaf, fi);
5961                 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
5962                 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5963                 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
5964                 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
5965                 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
5966                 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
5967                                                                            fi);
5968                 BUG_ON(exts[nr].offset > 0);
5969                 BUG_ON(exts[nr].compression || exts[nr].encryption);
5970                 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
5971
5972                 cur_pos += exts[nr].num_bytes;
5973                 nr++;
5974
5975                 if (cur_pos + offset >= last_byte)
5976                         break;
5977
5978                 if (no_fragment) {
5979                         ret = 1;
5980                         goto out;
5981                 }
5982                 path->slots[0]++;
5983         }
5984
5985         BUG_ON(cur_pos + offset > last_byte);
5986         if (cur_pos + offset < last_byte) {
5987                 ret = -ENOENT;
5988                 goto out;
5989         }
5990         ret = 0;
5991 out:
5992         btrfs_free_path(path);
5993         if (ret) {
5994                 if (exts != *extents)
5995                         kfree(exts);
5996         } else {
5997                 *extents = exts;
5998                 *nr_extents = nr;
5999         }
6000         return ret;
6001 }
6002
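/*
 * Find every file extent item reachable through ref_path that still
 * points at the extent being relocated and rewrite it to use the new
 * location.  The affected file range is locked in the inode's io_tree
 * while a pointer is updated so racing writes can't observe a
 * half-switched extent.
 */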
6003 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
6004                                         struct btrfs_root *root,
6005                                         struct btrfs_path *path,
6006                                         struct btrfs_key *extent_key,
6007                                         struct btrfs_key *leaf_key,
6008                                         struct btrfs_ref_path *ref_path,
6009                                         struct disk_extent *new_extents,
6010                                         int nr_extents)
6011 {
6012         struct extent_buffer *leaf;
6013         struct btrfs_file_extent_item *fi;
6014         struct inode *inode = NULL;
6015         struct btrfs_key key;
6016         u64 lock_start = 0;
6017         u64 lock_end = 0;
6018         u64 num_bytes;
6019         u64 ext_offset;
6020         u64 search_end = (u64)-1;
6021         u32 nritems;
6022         int nr_scanned = 0;
6023         int extent_locked = 0;
6024         int extent_type;
6025         int ret;
6026
6027         memcpy(&key, leaf_key, sizeof(key));
6028         if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6029                 if (key.objectid < ref_path->owner_objectid ||
6030                     (key.objectid == ref_path->owner_objectid &&
6031                      key.type < BTRFS_EXTENT_DATA_KEY)) {
6032                         key.objectid = ref_path->owner_objectid;
6033                         key.type = BTRFS_EXTENT_DATA_KEY;
6034                         key.offset = 0;
6035                 }
6036         }
6037
6038         while (1) {
6039                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6040                 if (ret < 0)
6041                         goto out;
6042
6043                 leaf = path->nodes[0];
6044                 nritems = btrfs_header_nritems(leaf);
6045 next:
6046                 if (extent_locked && ret > 0) {
6047                         /*
6048                          * the file extent item was modified by someone
6049                          * before the extent got locked.
6050                          */
6051                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6052                                       lock_end, GFP_NOFS);
6053                         extent_locked = 0;
6054                 }
6055
6056                 if (path->slots[0] >= nritems) {
6057                         if (++nr_scanned > 2)
6058                                 break;
6059
6060                         BUG_ON(extent_locked);
6061                         ret = btrfs_next_leaf(root, path);
6062                         if (ret < 0)
6063                                 goto out;
6064                         if (ret > 0)
6065                                 break;
6066                         leaf = path->nodes[0];
6067                         nritems = btrfs_header_nritems(leaf);
6068                 }
6069
6070                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6071
6072                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6073                         if ((key.objectid > ref_path->owner_objectid) ||
6074                             (key.objectid == ref_path->owner_objectid &&
6075                              key.type > BTRFS_EXTENT_DATA_KEY) ||
6076                             key.offset >= search_end)
6077                                 break;
6078                 }
6079
6080                 if (inode && key.objectid != inode->i_ino) {
6081                         BUG_ON(extent_locked);
6082                         btrfs_release_path(root, path);
6083                         mutex_unlock(&inode->i_mutex);
6084                         iput(inode);
6085                         inode = NULL;
6086                         continue;
6087                 }
6088
6089                 if (key.type != BTRFS_EXTENT_DATA_KEY) {
6090                         path->slots[0]++;
6091                         ret = 1;
6092                         goto next;
6093                 }
6094                 fi = btrfs_item_ptr(leaf, path->slots[0],
6095                                     struct btrfs_file_extent_item);
6096                 extent_type = btrfs_file_extent_type(leaf, fi);
6097                 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6098                      extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
6099                     (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6100                      extent_key->objectid)) {
6101                         path->slots[0]++;
6102                         ret = 1;
6103                         goto next;
6104                 }
6105
6106                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6107                 ext_offset = btrfs_file_extent_offset(leaf, fi);
6108
6109                 if (search_end == (u64)-1) {
6110                         search_end = key.offset - ext_offset +
6111                                 btrfs_file_extent_ram_bytes(leaf, fi);
6112                 }
6113
6114                 if (!extent_locked) {
6115                         lock_start = key.offset;
6116                         lock_end = lock_start + num_bytes - 1;
6117                 } else {
6118                         if (lock_start > key.offset ||
6119                             lock_end + 1 < key.offset + num_bytes) {
6120                                 unlock_extent(&BTRFS_I(inode)->io_tree,
6121                                               lock_start, lock_end, GFP_NOFS);
6122                                 extent_locked = 0;
6123                         }
6124                 }
6125
6126                 if (!inode) {
6127                         btrfs_release_path(root, path);
6128
6129                         inode = btrfs_iget_locked(root->fs_info->sb,
6130                                                   key.objectid, root);
                        if (!inode) {
                                key.offset = (u64)-1;
                                goto skip;
                        }
                        if (inode->i_state & I_NEW) {
6132                                 BTRFS_I(inode)->root = root;
6133                                 BTRFS_I(inode)->location.objectid =
6134                                         key.objectid;
6135                                 BTRFS_I(inode)->location.type =
6136                                         BTRFS_INODE_ITEM_KEY;
6137                                 BTRFS_I(inode)->location.offset = 0;
6138                                 btrfs_read_locked_inode(inode);
6139                                 unlock_new_inode(inode);
6140                         }
6141                         /*
6142                          * some code calls btrfs_commit_transaction while
6143                          * holding the i_mutex, so we can't use mutex_lock
6144                          * here.
6145                          */
6146                         if (is_bad_inode(inode) ||
6147                             !mutex_trylock(&inode->i_mutex)) {
6148                                 iput(inode);
6149                                 inode = NULL;
6150                                 key.offset = (u64)-1;
6151                                 goto skip;
6152                         }
6153                 }
6154
6155                 if (!extent_locked) {
6156                         struct btrfs_ordered_extent *ordered;
6157
6158                         btrfs_release_path(root, path);
6159
6160                         lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6161                                     lock_end, GFP_NOFS);
6162                         ordered = btrfs_lookup_first_ordered_extent(inode,
6163                                                                     lock_end);
6164                         if (ordered &&
6165                             ordered->file_offset <= lock_end &&
6166                             ordered->file_offset + ordered->len > lock_start) {
6167                                 unlock_extent(&BTRFS_I(inode)->io_tree,
6168                                               lock_start, lock_end, GFP_NOFS);
6169                                 btrfs_start_ordered_extent(inode, ordered, 1);
6170                                 btrfs_put_ordered_extent(ordered);
6171                                 key.offset += num_bytes;
6172                                 goto skip;
6173                         }
6174                         if (ordered)
6175                                 btrfs_put_ordered_extent(ordered);
6176
6177                         extent_locked = 1;
6178                         continue;
6179                 }
6180
6181                 if (nr_extents == 1) {
6182                         /* update extent pointer in place */
6183                         btrfs_set_file_extent_disk_bytenr(leaf, fi,
6184                                                 new_extents[0].disk_bytenr);
6185                         btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6186                                                 new_extents[0].disk_num_bytes);
6187                         btrfs_mark_buffer_dirty(leaf);
6188
6189                         btrfs_drop_extent_cache(inode, key.offset,
6190                                                 key.offset + num_bytes - 1, 0);
6191
6192                         ret = btrfs_inc_extent_ref(trans, root,
6193                                                 new_extents[0].disk_bytenr,
6194                                                 new_extents[0].disk_num_bytes,
6195                                                 leaf->start,
6196                                                 root->root_key.objectid,
6197                                                 trans->transid,
6198                                                 key.objectid);
6199                         BUG_ON(ret);
6200
6201                         ret = btrfs_free_extent(trans, root,
6202                                                 extent_key->objectid,
6203                                                 extent_key->offset,
6204                                                 leaf->start,
6205                                                 btrfs_header_owner(leaf),
6206                                                 btrfs_header_generation(leaf),
6207                                                 key.objectid, 0);
6208                         BUG_ON(ret);
6209
6210                         btrfs_release_path(root, path);
6211                         key.offset += num_bytes;
6212                 } else {
6213                         BUG();
6214 #if 0
6215                         u64 alloc_hint;
6216                         u64 extent_len;
6217                         int i;
6218                         /*
6219                          * drop the old extent pointer first, then insert the
6220                          * new pointers one by one
6221                          */
6222                         btrfs_release_path(root, path);
6223                         ret = btrfs_drop_extents(trans, root, inode, key.offset,
6224                                                  key.offset + num_bytes,
6225                                                  key.offset, &alloc_hint);
6226                         BUG_ON(ret);
6227
6228                         for (i = 0; i < nr_extents; i++) {
6229                                 if (ext_offset >= new_extents[i].num_bytes) {
6230                                         ext_offset -= new_extents[i].num_bytes;
6231                                         continue;
6232                                 }
6233                                 extent_len = min(new_extents[i].num_bytes -
6234                                                  ext_offset, num_bytes);
6235
6236                                 ret = btrfs_insert_empty_item(trans, root,
6237                                                               path, &key,
6238                                                               sizeof(*fi));
6239                                 BUG_ON(ret);
6240
6241                                 leaf = path->nodes[0];
6242                                 fi = btrfs_item_ptr(leaf, path->slots[0],
6243                                                 struct btrfs_file_extent_item);
6244                                 btrfs_set_file_extent_generation(leaf, fi,
6245                                                         trans->transid);
6246                                 btrfs_set_file_extent_type(leaf, fi,
6247                                                         BTRFS_FILE_EXTENT_REG);
6248                                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6249                                                 new_extents[i].disk_bytenr);
6250                                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6251                                                 new_extents[i].disk_num_bytes);
6252                                 btrfs_set_file_extent_ram_bytes(leaf, fi,
6253                                                 new_extents[i].ram_bytes);
6254
6255                                 btrfs_set_file_extent_compression(leaf, fi,
6256                                                 new_extents[i].compression);
6257                                 btrfs_set_file_extent_encryption(leaf, fi,
6258                                                 new_extents[i].encryption);
6259                                 btrfs_set_file_extent_other_encoding(leaf, fi,
6260                                                 new_extents[i].other_encoding);
6261
6262                                 btrfs_set_file_extent_num_bytes(leaf, fi,
6263                                                         extent_len);
6264                                 ext_offset += new_extents[i].offset;
6265                                 btrfs_set_file_extent_offset(leaf, fi,
6266                                                         ext_offset);
6267                                 btrfs_mark_buffer_dirty(leaf);
6268
6269                                 btrfs_drop_extent_cache(inode, key.offset,
6270                                                 key.offset + extent_len - 1, 0);
6271
6272                                 ret = btrfs_inc_extent_ref(trans, root,
6273                                                 new_extents[i].disk_bytenr,
6274                                                 new_extents[i].disk_num_bytes,
6275                                                 leaf->start,
6276                                                 root->root_key.objectid,
6277                                                 trans->transid, key.objectid);
6278                                 BUG_ON(ret);
6279                                 btrfs_release_path(root, path);
6280
6281                                 inode_add_bytes(inode, extent_len);
6282
6283                                 ext_offset = 0;
6284                                 num_bytes -= extent_len;
6285                                 key.offset += extent_len;
6286
6287                                 if (num_bytes == 0)
6288                                         break;
6289                         }
6290                         BUG_ON(i >= nr_extents);
6291 #endif
6292                 }
6293
6294                 if (extent_locked) {
6295                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6296                                       lock_end, GFP_NOFS);
6297                         extent_locked = 0;
6298                 }
6299 skip:
6300                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
6301                     key.offset >= search_end)
6302                         break;
6303
6304                 cond_resched();
6305         }
6306         ret = 0;
6307 out:
6308         btrfs_release_path(root, path);
6309         if (inode) {
6310                 mutex_unlock(&inode->i_mutex);
6311                 if (extent_locked) {
6312                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6313                                       lock_end, GFP_NOFS);
6314                 }
6315                 iput(inode);
6316         }
6317         return ret;
6318 }
6319
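/*
 * After a leaf has been COWed through the reloc tree, duplicate its
 * cached leaf ref under the new bytenr so the reference cache stays
 * usable for the relocated copy.
 */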
6320 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
6321                                struct btrfs_root *root,
6322                                struct extent_buffer *buf, u64 orig_start)
6323 {
6324         int level;
6325         int ret;
6326
6327         BUG_ON(btrfs_header_generation(buf) != trans->transid);
6328         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6329
6330         level = btrfs_header_level(buf);
6331         if (level == 0) {
6332                 struct btrfs_leaf_ref *ref;
6333                 struct btrfs_leaf_ref *orig_ref;
6334
6335                 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6336                 if (!orig_ref)
6337                         return -ENOENT;
6338
6339                 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6340                 if (!ref) {
6341                         btrfs_free_leaf_ref(root, orig_ref);
6342                         return -ENOMEM;
6343                 }
6344
6345                 ref->nritems = orig_ref->nritems;
6346                 memcpy(ref->extents, orig_ref->extents,
6347                         sizeof(ref->extents[0]) * ref->nritems);
6348
6349                 btrfs_free_leaf_ref(root, orig_ref);
6350
6351                 ref->root_gen = trans->transid;
6352                 ref->bytenr = buf->start;
6353                 ref->owner = btrfs_header_owner(buf);
6354                 ref->generation = btrfs_header_generation(buf);
6355
6356                 ret = btrfs_add_leaf_ref(root, ref, 0);
6357                 WARN_ON(ret);
6358                 btrfs_free_leaf_ref(root, ref);
6359         }
6360         return 0;
6361 }
6362
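/*
 * Drop the cached extent mappings for every file extent referenced by
 * 'leaf' in the target root, forcing later reads to look up the
 * (possibly relocated) on-disk extent items again.
 */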
6363 static noinline int invalidate_extent_cache(struct btrfs_root *root,
6364                                         struct extent_buffer *leaf,
6365                                         struct btrfs_block_group_cache *group,
6366                                         struct btrfs_root *target_root)
6367 {
6368         struct btrfs_key key;
6369         struct inode *inode = NULL;
6370         struct btrfs_file_extent_item *fi;
6371         u64 num_bytes;
6372         u64 skip_objectid = 0;
6373         u32 nritems;
6374         u32 i;
6375
6376         nritems = btrfs_header_nritems(leaf);
6377         for (i = 0; i < nritems; i++) {
6378                 btrfs_item_key_to_cpu(leaf, &key, i);
6379                 if (key.objectid == skip_objectid ||
6380                     key.type != BTRFS_EXTENT_DATA_KEY)
6381                         continue;
6382                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6383                 if (btrfs_file_extent_type(leaf, fi) ==
6384                     BTRFS_FILE_EXTENT_INLINE)
6385                         continue;
6386                 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6387                         continue;
6388                 if (!inode || inode->i_ino != key.objectid) {
6389                         iput(inode);
6390                         inode = btrfs_ilookup(target_root->fs_info->sb,
6391                                               key.objectid, target_root, 1);
6392                 }
6393                 if (!inode) {
6394                         skip_objectid = key.objectid;
6395                         continue;
6396                 }
6397                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6398
6399                 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6400                             key.offset + num_bytes - 1, GFP_NOFS);
6401                 btrfs_drop_extent_cache(inode, key.offset,
6402                                         key.offset + num_bytes - 1, 1);
6403                 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6404                               key.offset + num_bytes - 1, GFP_NOFS);
6405                 cond_resched();
6406         }
6407         iput(inode);
6408         return 0;
6409 }
6410
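/*
 * Rewrite the disk_bytenr of every file extent in 'leaf' that falls
 * inside the block group being relocated to its new location in the
 * relocation inode, updating the cached leaf ref to match.
 */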
6411 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
6412                                         struct btrfs_root *root,
6413                                         struct extent_buffer *leaf,
6414                                         struct btrfs_block_group_cache *group,
6415                                         struct inode *reloc_inode)
6416 {
6417         struct btrfs_key key;
6418         struct btrfs_key extent_key;
6419         struct btrfs_file_extent_item *fi;
6420         struct btrfs_leaf_ref *ref;
6421         struct disk_extent *new_extent;
6422         u64 bytenr;
6423         u64 num_bytes;
6424         u32 nritems;
6425         u32 i;
6426         int ext_index;
6427         int nr_extent;
6428         int ret;
6429
6430         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
6431         BUG_ON(!new_extent);
6432
6433         ref = btrfs_lookup_leaf_ref(root, leaf->start);
6434         BUG_ON(!ref);
6435
6436         ext_index = -1;
6437         nritems = btrfs_header_nritems(leaf);
6438         for (i = 0; i < nritems; i++) {
6439                 btrfs_item_key_to_cpu(leaf, &key, i);
6440                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6441                         continue;
6442                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6443                 if (btrfs_file_extent_type(leaf, fi) ==
6444                     BTRFS_FILE_EXTENT_INLINE)
6445                         continue;
6446                 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6447                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6448                 if (bytenr == 0)
6449                         continue;
6450
6451                 ext_index++;
6452                 if (bytenr >= group->key.objectid + group->key.offset ||
6453                     bytenr + num_bytes <= group->key.objectid)
6454                         continue;
6455
6456                 extent_key.objectid = bytenr;
6457                 extent_key.offset = num_bytes;
6458                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6459                 nr_extent = 1;
6460                 ret = get_new_locations(reloc_inode, &extent_key,
6461                                         group->key.objectid, 1,
6462                                         &new_extent, &nr_extent);
6463                 if (ret > 0)
6464                         continue;
6465                 BUG_ON(ret < 0);
6466
6467                 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6468                 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6469                 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6470                 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6471
6472                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6473                                                 new_extent->disk_bytenr);
6474                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6475                                                 new_extent->disk_num_bytes);
6476                 btrfs_mark_buffer_dirty(leaf);
6477
6478                 ret = btrfs_inc_extent_ref(trans, root,
6479                                         new_extent->disk_bytenr,
6480                                         new_extent->disk_num_bytes,
6481                                         leaf->start,
6482                                         root->root_key.objectid,
6483                                         trans->transid, key.objectid);
6484                 BUG_ON(ret);
6485
6486                 ret = btrfs_free_extent(trans, root,
6487                                         bytenr, num_bytes, leaf->start,
6488                                         btrfs_header_owner(leaf),
6489                                         btrfs_header_generation(leaf),
6490                                         key.objectid, 0);
6491                 BUG_ON(ret);
6492                 cond_resched();
6493         }
6494         kfree(new_extent);
6495         BUG_ON(ext_index + 1 != ref->nritems);
6496         btrfs_free_leaf_ref(root, ref);
6497         return 0;
6498 }
6499
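/*
 * Detach the reloc tree from its subvol and queue it on the dead list;
 * btrfs_drop_dead_reloc_roots() frees the tree itself later.
 */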
6500 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6501                           struct btrfs_root *root)
6502 {
6503         struct btrfs_root *reloc_root;
6504         int ret;
6505
6506         if (root->reloc_root) {
6507                 reloc_root = root->reloc_root;
6508                 root->reloc_root = NULL;
6509                 list_add(&reloc_root->dead_list,
6510                          &root->fs_info->dead_reloc_roots);
6511
6512                 btrfs_set_root_bytenr(&reloc_root->root_item,
6513                                       reloc_root->node->start);
6514                 btrfs_set_root_level(&reloc_root->root_item,
6515                                      btrfs_header_level(reloc_root->node));
6516                 memset(&reloc_root->root_item.drop_progress, 0,
6517                         sizeof(struct btrfs_disk_key));
6518                 reloc_root->root_item.drop_level = 0;
6519
6520                 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6521                                         &reloc_root->root_key,
6522                                         &reloc_root->root_item);
6523                 BUG_ON(ret);
6524         }
6525         return 0;
6526 }
6527
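/*
 * Drop every reloc root on the dead list.  btrfs_drop_snapshot() is
 * retried for as long as it returns -EAGAIN, with the transaction
 * ended between passes so the drop makes progress without blocking
 * commits.
 */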
6528 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6529 {
6530         struct btrfs_trans_handle *trans;
6531         struct btrfs_root *reloc_root;
6532         struct btrfs_root *prev_root = NULL;
6533         struct list_head dead_roots;
6534         int ret;
6535         unsigned long nr;
6536
6537         INIT_LIST_HEAD(&dead_roots);
6538         list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6539
6540         while (!list_empty(&dead_roots)) {
6541                 reloc_root = list_entry(dead_roots.prev,
6542                                         struct btrfs_root, dead_list);
6543                 list_del_init(&reloc_root->dead_list);
6544
6545                 BUG_ON(reloc_root->commit_root != NULL);
6546                 while (1) {
6547                         trans = btrfs_join_transaction(root, 1);
6548                         BUG_ON(!trans);
6549
6550                         mutex_lock(&root->fs_info->drop_mutex);
6551                         ret = btrfs_drop_snapshot(trans, reloc_root);
6552                         if (ret != -EAGAIN)
6553                                 break;
6554                         mutex_unlock(&root->fs_info->drop_mutex);
6555
6556                         nr = trans->blocks_used;
6557                         ret = btrfs_end_transaction(trans, root);
6558                         BUG_ON(ret);
6559                         btrfs_btree_balance_dirty(root, nr);
6560                 }
6561
6562                 free_extent_buffer(reloc_root->node);
6563
6564                 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6565                                      &reloc_root->root_key);
6566                 BUG_ON(ret);
6567                 mutex_unlock(&root->fs_info->drop_mutex);
6568
6569                 nr = trans->blocks_used;
6570                 ret = btrfs_end_transaction(trans, root);
6571                 BUG_ON(ret);
6572                 btrfs_btree_balance_dirty(root, nr);
6573
6574                 kfree(prev_root);
6575                 prev_root = reloc_root;
6576         }
6577         if (prev_root) {
6578                 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6579                 kfree(prev_root);
6580         }
6581         return 0;
6582 }
6583
6584 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6585 {
6586         list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6587         return 0;
6588 }
6589
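/*
 * Recovery path (presumably run at mount): if any dead reloc roots are
 * found, e.g. after an unclean shutdown, commit a transaction so they
 * get cleaned up, then run orphan cleanup on the data relocation tree.
 */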
6590 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6591 {
6592         struct btrfs_root *reloc_root;
6593         struct btrfs_trans_handle *trans;
6594         struct btrfs_key location;
6595         int found;
6596         int ret;
6597
6598         mutex_lock(&root->fs_info->tree_reloc_mutex);
6599         ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6600         BUG_ON(ret);
6601         found = !list_empty(&root->fs_info->dead_reloc_roots);
6602         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6603
6604         if (found) {
6605                 trans = btrfs_start_transaction(root, 1);
6606                 BUG_ON(!trans);
6607                 ret = btrfs_commit_transaction(trans, root);
6608                 BUG_ON(ret);
6609         }
6610
6611         location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6612         location.offset = (u64)-1;
6613         location.type = BTRFS_ROOT_ITEM_KEY;
6614
6615         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6616         BUG_ON(!reloc_root);
6617         btrfs_orphan_cleanup(reloc_root);
6618         return 0;
6619 }
6620
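/*
 * Create the reloc tree for 'root' if it doesn't exist yet: a
 * zero-refcount snapshot of root->commit_root inserted under
 * BTRFS_TREE_RELOC_OBJECTID, keyed by the subvol's own objectid.
 */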
6621 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
6622                                     struct btrfs_root *root)
6623 {
6624         struct btrfs_root *reloc_root;
6625         struct extent_buffer *eb;
6626         struct btrfs_root_item *root_item;
6627         struct btrfs_key root_key;
6628         int ret;
6629
6630         BUG_ON(!root->ref_cows);
6631         if (root->reloc_root)
6632                 return 0;
6633
6634         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6635         BUG_ON(!root_item);
6636
6637         ret = btrfs_copy_root(trans, root, root->commit_root,
6638                               &eb, BTRFS_TREE_RELOC_OBJECTID);
6639         BUG_ON(ret);
6640
6641         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6642         root_key.offset = root->root_key.objectid;
6643         root_key.type = BTRFS_ROOT_ITEM_KEY;
6644
6645         memcpy(root_item, &root->root_item, sizeof(*root_item));
6646         btrfs_set_root_refs(root_item, 0);
6647         btrfs_set_root_bytenr(root_item, eb->start);
6648         btrfs_set_root_level(root_item, btrfs_header_level(eb));
6649         btrfs_set_root_generation(root_item, trans->transid);
6650
6651         btrfs_tree_unlock(eb);
6652         free_extent_buffer(eb);
6653
6654         ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6655                                 &root_key, root_item);
6656         BUG_ON(ret);
6657         kfree(root_item);
6658
6659         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6660                                                  &root_key);
6661         BUG_ON(!reloc_root);
6662         reloc_root->last_trans = trans->transid;
6663         reloc_root->commit_root = NULL;
6664         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6665
6666         root->reloc_root = reloc_root;
6667         return 0;
6668 }
6669
/*
 * Core function of space balance.
 *
 * The idea is to use reloc trees to relocate tree blocks in reference
 * counted roots. There is one reloc tree for each subvol, and all
 * reloc trees share the same root key objectid. Reloc trees are snapshots
 * of the latest committed roots of subvols (root->commit_root).
 *
 * Relocating a tree block referenced by a subvol takes two steps: COW
 * the block through the subvol's reloc tree, then update the block
 * pointer in the subvol to point to the new block. Since all reloc trees
 * share the same root key objectid, special handling for tree blocks
 * owned by them is easy. Once a tree block has been COWed in one reloc
 * tree, we can use the resulting new block directly when the same block
 * is required to COW again through other reloc trees. In this way,
 * relocated tree blocks are shared between reloc trees, so they are
 * also shared between subvols. A simplified sketch of the two steps
 * follows the function below.
 */
static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      struct btrfs_path *path,
                                      struct btrfs_key *first_key,
                                      struct btrfs_ref_path *ref_path,
                                      struct btrfs_block_group_cache *group,
                                      struct inode *reloc_inode)
{
        struct btrfs_root *reloc_root;
        struct extent_buffer *eb = NULL;
        struct btrfs_key *keys;
        u64 *nodes;
        int level;
        int shared_level;
        int lowest_level = 0;
        int ret;

        if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
                lowest_level = ref_path->owner_objectid;

        if (!root->ref_cows) {
                path->lowest_level = lowest_level;
                ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
                BUG_ON(ret < 0);
                path->lowest_level = 0;
                btrfs_release_path(root, path);
                return 0;
        }

        mutex_lock(&root->fs_info->tree_reloc_mutex);
        ret = init_reloc_tree(trans, root);
        BUG_ON(ret);
        reloc_root = root->reloc_root;

        shared_level = ref_path->shared_level;
        ref_path->shared_level = BTRFS_MAX_LEVEL - 1;

        keys = ref_path->node_keys;
        nodes = ref_path->new_nodes;
        memset(&keys[shared_level + 1], 0,
               sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
        memset(&nodes[shared_level + 1], 0,
               sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));

        if (nodes[lowest_level] == 0) {
                path->lowest_level = lowest_level;
                ret = btrfs_search_slot(trans, reloc_root, first_key, path,
                                        0, 1);
                BUG_ON(ret);
                for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
                        eb = path->nodes[level];
                        if (!eb || eb == reloc_root->node)
                                break;
                        nodes[level] = eb->start;
                        if (level == 0)
                                btrfs_item_key_to_cpu(eb, &keys[level], 0);
                        else
                                btrfs_node_key_to_cpu(eb, &keys[level], 0);
                }
                if (nodes[0] &&
                    ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
                        eb = path->nodes[0];
                        ret = replace_extents_in_leaf(trans, reloc_root, eb,
                                                      group, reloc_inode);
                        BUG_ON(ret);
                }
                btrfs_release_path(reloc_root, path);
        } else {
                ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
                                       lowest_level);
                BUG_ON(ret);
        }

        /*
         * replace tree blocks in the fs tree with tree blocks in
         * the reloc tree.
         */
        ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
        BUG_ON(ret < 0);

        if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
                ret = btrfs_search_slot(trans, reloc_root, first_key, path,
                                        0, 0);
                BUG_ON(ret);
                extent_buffer_get(path->nodes[0]);
                eb = path->nodes[0];
                btrfs_release_path(reloc_root, path);
                ret = invalidate_extent_cache(reloc_root, eb, group, root);
                BUG_ON(ret);
                free_extent_buffer(eb);
        }

        mutex_unlock(&root->fs_info->tree_reloc_mutex);
        path->lowest_level = 0;
        return 0;
}
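
/*
 * Illustrative sketch of the two relocation steps described above. This
 * is a simplified, hypothetical flow, not the literal call sequence;
 * relocate_one_path() does the real work, with the locking and ref-path
 * bookkeeping omitted here:
 *
 *	init_reloc_tree(trans, root);
 *	reloc_root = root->reloc_root;
 *
 *	step 1: COW the block through the subvol's reloc tree:
 *	btrfs_search_slot(trans, reloc_root, first_key, path, 0, 1);
 *
 *	step 2: point the subvol at the blocks COWed in the reloc tree:
 *	btrfs_merge_path(trans, root, keys, nodes, lowest_level);
 */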

static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root,
                                        struct btrfs_path *path,
                                        struct btrfs_key *first_key,
                                        struct btrfs_ref_path *ref_path)
{
        int ret;

        ret = relocate_one_path(trans, root, path, first_key,
                                ref_path, NULL, NULL);
        BUG_ON(ret);

        return 0;
}

static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *extent_root,
                                    struct btrfs_path *path,
                                    struct btrfs_key *extent_key)
{
        int ret;

        ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
        if (ret)
                goto out;
        ret = btrfs_del_item(trans, extent_root, path);
out:
        btrfs_release_path(extent_root, path);
        return ret;
}

static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
                                                struct btrfs_ref_path *ref_path)
{
        struct btrfs_key root_key;

        root_key.objectid = ref_path->root_objectid;
        root_key.type = BTRFS_ROOT_ITEM_KEY;
        if (is_cowonly_root(ref_path->root_objectid))
                root_key.offset = 0;
        else
                root_key.offset = (u64)-1;

        return btrfs_read_fs_root_no_name(fs_info, &root_key);
}

static noinline int relocate_one_extent(struct btrfs_root *extent_root,
                                        struct btrfs_path *path,
                                        struct btrfs_key *extent_key,
                                        struct btrfs_block_group_cache *group,
                                        struct inode *reloc_inode, int pass)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *found_root;
        struct btrfs_ref_path *ref_path = NULL;
        struct disk_extent *new_extents = NULL;
        int nr_extents = 0;
        int loops;
        int ret;
        int level;
        struct btrfs_key first_key;
        u64 prev_block = 0;

        trans = btrfs_start_transaction(extent_root, 1);
        BUG_ON(!trans);

        if (extent_key->objectid == 0) {
                ret = del_extent_zero(trans, extent_root, path, extent_key);
                goto out;
        }

        ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
        if (!ref_path) {
                ret = -ENOMEM;
                goto out;
        }

        for (loops = 0; ; loops++) {
                if (loops == 0) {
                        ret = btrfs_first_ref_path(trans, extent_root, ref_path,
                                                   extent_key->objectid);
                } else {
                        ret = btrfs_next_ref_path(trans, extent_root, ref_path);
                }
                if (ret < 0)
                        goto out;
                if (ret > 0)
                        break;

                if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
                    ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
                        continue;

                found_root = read_ref_root(extent_root->fs_info, ref_path);
                BUG_ON(!found_root);
                /*
                 * for reference counted trees, only process reference
                 * paths rooted at the latest committed root.
                 */
                if (found_root->ref_cows &&
                    ref_path->root_generation != found_root->root_key.offset)
                        continue;

                if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
                        if (pass == 0) {
                                /*
                                 * copy data extents to new locations
                                 */
                                u64 group_start = group->key.objectid;
                                ret = relocate_data_extent(reloc_inode,
                                                           extent_key,
                                                           group_start);
                                if (ret < 0)
                                        goto out;
                                break;
                        }
                        level = 0;
                } else {
                        level = ref_path->owner_objectid;
                }

                if (prev_block != ref_path->nodes[level]) {
                        struct extent_buffer *eb;
                        u64 block_start = ref_path->nodes[level];
                        u64 block_size = btrfs_level_size(found_root, level);

                        eb = read_tree_block(found_root, block_start,
                                             block_size, 0);
                        BUG_ON(!eb);
                        btrfs_tree_lock(eb);
                        BUG_ON(level != btrfs_header_level(eb));

                        if (level == 0)
                                btrfs_item_key_to_cpu(eb, &first_key, 0);
                        else
                                btrfs_node_key_to_cpu(eb, &first_key, 0);

                        btrfs_tree_unlock(eb);
                        free_extent_buffer(eb);
                        prev_block = block_start;
                }

                mutex_lock(&extent_root->fs_info->trans_mutex);
                btrfs_record_root_in_trans(found_root);
                mutex_unlock(&extent_root->fs_info->trans_mutex);
                if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
                        /*
                         * try to update data extent references while
                         * keeping metadata shared between snapshots.
                         */
                        if (pass == 1) {
                                ret = relocate_one_path(trans, found_root,
                                                path, &first_key, ref_path,
                                                group, reloc_inode);
                                if (ret < 0)
                                        goto out;
                                continue;
                        }
                        /*
                         * use fallback method to process the remaining
                         * references.
                         */
                        if (!new_extents) {
                                u64 group_start = group->key.objectid;
                                new_extents = kmalloc(sizeof(*new_extents),
                                                      GFP_NOFS);
                                if (!new_extents) {
                                        ret = -ENOMEM;
                                        goto out;
                                }
                                nr_extents = 1;
                                ret = get_new_locations(reloc_inode,
                                                        extent_key,
                                                        group_start, 1,
                                                        &new_extents,
                                                        &nr_extents);
                                if (ret)
                                        goto out;
                        }
                        ret = replace_one_extent(trans, found_root,
                                                path, extent_key,
                                                &first_key, ref_path,
                                                new_extents, nr_extents);
                } else {
                        ret = relocate_tree_block(trans, found_root, path,
                                                  &first_key, ref_path);
                }
                if (ret < 0)
                        goto out;
        }
        ret = 0;
out:
        btrfs_end_transaction(trans, extent_root);
        kfree(new_extents);
        kfree(ref_path);
        return ret;
}
#endif

static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
{
        u64 num_devices;
        u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;

        num_devices = root->fs_info->fs_devices->rw_devices;
        if (num_devices == 1) {
                stripped |= BTRFS_BLOCK_GROUP_DUP;
                stripped = flags & ~stripped;

                /* turn raid0 into single device chunks */
                if (flags & BTRFS_BLOCK_GROUP_RAID0)
                        return stripped;

                /* turn mirroring into duplication */
                if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
                             BTRFS_BLOCK_GROUP_RAID10))
                        return stripped | BTRFS_BLOCK_GROUP_DUP;
                return flags;
        } else {
                /* they already had raid on here, just return */
                if (flags & stripped)
                        return flags;

                stripped |= BTRFS_BLOCK_GROUP_DUP;
                stripped = flags & ~stripped;

                /* switch duplicated blocks with raid1 */
                if (flags & BTRFS_BLOCK_GROUP_DUP)
                        return stripped | BTRFS_BLOCK_GROUP_RAID1;

                /* turn single device chunks into raid0 */
                return stripped | BTRFS_BLOCK_GROUP_RAID0;
        }
        return flags;
}
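
/*
 * Summary of the conversions performed above, derived directly from the
 * branches in update_block_group_flags() and shown only as an
 * illustration:
 *
 *	one rw device:    RAID0        -> single device chunks
 *	                  RAID1/RAID10 -> DUP
 *	many rw devices:  DUP          -> RAID1
 *	                  single       -> RAID0
 *	                  existing RAID flags are returned unchanged
 */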

static int __alloc_chunk_for_shrink(struct btrfs_root *root,
                     struct btrfs_block_group_cache *shrink_block_group,
                     int force)
{
        struct btrfs_trans_handle *trans;
        u64 new_alloc_flags;
        u64 calc;

        spin_lock(&shrink_block_group->lock);
        if (btrfs_block_group_used(&shrink_block_group->item) +
            shrink_block_group->reserved > 0) {
                spin_unlock(&shrink_block_group->lock);

                trans = btrfs_start_transaction(root, 1);
                spin_lock(&shrink_block_group->lock);

                new_alloc_flags = update_block_group_flags(root,
                                                   shrink_block_group->flags);
                if (new_alloc_flags != shrink_block_group->flags) {
                        calc =
                             btrfs_block_group_used(&shrink_block_group->item);
                } else {
                        calc = shrink_block_group->key.offset;
                }
                spin_unlock(&shrink_block_group->lock);

                do_chunk_alloc(trans, root->fs_info->extent_root,
                               calc + 2 * 1024 * 1024, new_alloc_flags, force);

                btrfs_end_transaction(trans, root);
        } else
                spin_unlock(&shrink_block_group->lock);
        return 0;
}

int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
                                         struct btrfs_block_group_cache *group)
{
        __alloc_chunk_for_shrink(root, group, 1);
        set_block_group_readonly(group);
        return 0;
}

/*
 * checks to see if it's even possible to relocate this block group.
 *
 * @return -1 if it's not a good idea to relocate this block group, 0 if
 * it's ok to go ahead and try. An illustrative caller sketch follows the
 * function.
 */
int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_space_info *space_info;
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        struct btrfs_device *device;
        int full = 0;
        int ret = 0;

        block_group = btrfs_lookup_block_group(root->fs_info, bytenr);

        /* odd, couldn't find the block group, leave it alone */
        if (!block_group)
                return -1;

        /* no bytes used, we're good */
        if (!btrfs_block_group_used(&block_group->item))
                goto out;

        space_info = block_group->space_info;
        spin_lock(&space_info->lock);

        full = space_info->full;

        /*
         * if this is the last block group we have in this space, we can't
         * relocate it unless we're able to allocate a new chunk below.
         *
         * Otherwise, we need to make sure we have room in the space to handle
         * all of the extents from this block group.  If we can, we're good
         */
        if ((space_info->total_bytes != block_group->key.offset) &&
           (space_info->bytes_used + space_info->bytes_reserved +
            space_info->bytes_pinned + space_info->bytes_readonly +
            btrfs_block_group_used(&block_group->item) <
            space_info->total_bytes)) {
                spin_unlock(&space_info->lock);
                goto out;
        }
        spin_unlock(&space_info->lock);

        /*
         * ok we don't have enough space, but maybe we have free space on our
         * devices to allocate new chunks for relocation, so loop through our
         * alloc devices and guess if we have enough space.  However, if we
         * were marked as full, then we know there aren't enough chunks, and we
         * can just return.
         */
        ret = -1;
        if (full)
                goto out;

        mutex_lock(&root->fs_info->chunk_mutex);
        list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
                u64 min_free = btrfs_block_group_used(&block_group->item);
                u64 dev_offset, max_avail;

                /*
                 * check to make sure we can actually find a chunk with enough
                 * space to fit our block group in.
                 */
                if (device->total_bytes > device->bytes_used + min_free) {
                        ret = find_free_dev_extent(NULL, device, min_free,
                                                   &dev_offset, &max_avail);
                        if (!ret)
                                break;
                        ret = -1;
                }
        }
        mutex_unlock(&root->fs_info->chunk_mutex);
out:
        btrfs_put_block_group(block_group);
        return ret;
}
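
/*
 * Illustrative caller sketch (hypothetical; the real callers live in the
 * chunk relocation paths): check feasibility before doing any of the
 * expensive relocation setup work.
 *
 *	if (btrfs_can_relocate(extent_root, chunk_offset) < 0)
 *		return -ENOSPC;
 *	... go ahead and relocate the block group ...
 */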

static int find_first_block_group(struct btrfs_root *root,
                struct btrfs_path *path, struct btrfs_key *key)
{
        int ret = 0;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;
        int slot;

        ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
        if (ret < 0)
                goto out;

        while (1) {
                slot = path->slots[0];
                leaf = path->nodes[0];
                if (slot >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 0)
                                continue;
                        if (ret < 0)
                                goto out;
                        break;
                }
                btrfs_item_key_to_cpu(leaf, &found_key, slot);

                if (found_key.objectid >= key->objectid &&
                    found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        ret = 0;
                        goto out;
                }
                path->slots[0]++;
        }
        ret = -ENOENT;
out:
        return ret;
}

int btrfs_free_block_groups(struct btrfs_fs_info *info)
{
        struct btrfs_block_group_cache *block_group;
        struct btrfs_space_info *space_info;
        struct btrfs_caching_control *caching_ctl;
        struct rb_node *n;

        down_write(&info->extent_commit_sem);
        while (!list_empty(&info->caching_block_groups)) {
                caching_ctl = list_entry(info->caching_block_groups.next,
                                         struct btrfs_caching_control, list);
                list_del(&caching_ctl->list);
                put_caching_control(caching_ctl);
        }
        up_write(&info->extent_commit_sem);

        spin_lock(&info->block_group_cache_lock);
        while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
                block_group = rb_entry(n, struct btrfs_block_group_cache,
                                       cache_node);
                rb_erase(&block_group->cache_node,
                         &info->block_group_cache_tree);
                spin_unlock(&info->block_group_cache_lock);

                down_write(&block_group->space_info->groups_sem);
                list_del(&block_group->list);
                up_write(&block_group->space_info->groups_sem);

                if (block_group->cached == BTRFS_CACHE_STARTED)
                        wait_block_group_cache_done(block_group);

                btrfs_remove_free_space_cache(block_group);

                WARN_ON(atomic_read(&block_group->count) != 1);
                kfree(block_group);

                spin_lock(&info->block_group_cache_lock);
        }
        spin_unlock(&info->block_group_cache_lock);

        /* now that all the block groups are freed, go through and
         * free all the space_info structs.  This is only called during
         * the final stages of unmount, and so we know nobody is
         * using them.  We call synchronize_rcu() once before we start,
         * just to be on the safe side.
         */
        synchronize_rcu();

        while (!list_empty(&info->space_info)) {
                space_info = list_entry(info->space_info.next,
                                        struct btrfs_space_info,
                                        list);

                list_del(&space_info->list);
                kfree(space_info);
        }
        return 0;
}

int btrfs_read_block_groups(struct btrfs_root *root)
{
        struct btrfs_path *path;
        int ret;
        struct btrfs_block_group_cache *cache;
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_space_info *space_info;
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct extent_buffer *leaf;

        root = info->extent_root;
        key.objectid = 0;
        key.offset = 0;
        btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                ret = find_first_block_group(root, path, &key);
                if (ret > 0) {
                        ret = 0;
                        goto error;
                }
                if (ret != 0)
                        goto error;

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                cache = kzalloc(sizeof(*cache), GFP_NOFS);
                if (!cache) {
                        ret = -ENOMEM;
                        break;
                }

                atomic_set(&cache->count, 1);
                spin_lock_init(&cache->lock);
                spin_lock_init(&cache->tree_lock);
                cache->fs_info = info;
                INIT_LIST_HEAD(&cache->list);
                INIT_LIST_HEAD(&cache->cluster_list);

                /*
                 * we only want to use 32k of RAM per block group for keeping
                 * track of free space, and if we pass 1/2 of that we want to
                 * start converting things over to using bitmaps
                 */
                cache->extents_thresh = ((1024 * 32) / 2) /
                        sizeof(struct btrfs_free_space);
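                /*
                 * Worked example (assuming a 64-bit build where
                 * sizeof(struct btrfs_free_space) comes to 64 bytes):
                 * (32768 / 2) / 64 = 256 cached extent entries before
                 * bitmaps take over.
                 */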

                read_extent_buffer(leaf, &cache->item,
                                   btrfs_item_ptr_offset(leaf, path->slots[0]),
                                   sizeof(cache->item));
                memcpy(&cache->key, &found_key, sizeof(found_key));

                key.objectid = found_key.objectid + found_key.offset;
                btrfs_release_path(root, path);
                cache->flags = btrfs_block_group_flags(&cache->item);
                cache->sectorsize = root->sectorsize;

                /*
                 * check for two cases, either we are full, and therefore
                 * don't need to bother with the caching work since we won't
                 * find any space, or we are empty, and we can just add all
                 * the space in and be done with it.  This saves us a lot of
                 * time, particularly in the full case.
                 */
                if (found_key.offset == btrfs_block_group_used(&cache->item)) {
                        exclude_super_stripes(root, cache);
                        cache->last_byte_to_unpin = (u64)-1;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        free_excluded_extents(root, cache);
                } else if (btrfs_block_group_used(&cache->item) == 0) {
                        exclude_super_stripes(root, cache);
                        cache->last_byte_to_unpin = (u64)-1;
                        cache->cached = BTRFS_CACHE_FINISHED;
                        add_new_free_space(cache, root->fs_info,
                                           found_key.objectid,
                                           found_key.objectid +
                                           found_key.offset);
                        free_excluded_extents(root, cache);
                }

                ret = update_space_info(info, cache->flags, found_key.offset,
                                        btrfs_block_group_used(&cache->item),
                                        &space_info);
                BUG_ON(ret);
                cache->space_info = space_info;
                spin_lock(&cache->space_info->lock);
                cache->space_info->bytes_super += cache->bytes_super;
                spin_unlock(&cache->space_info->lock);

                down_write(&space_info->groups_sem);
                list_add_tail(&cache->list, &space_info->block_groups);
                up_write(&space_info->groups_sem);

                ret = btrfs_add_block_group_cache(root->fs_info, cache);
                BUG_ON(ret);

                set_avail_alloc_bits(root->fs_info, cache->flags);
                if (btrfs_chunk_readonly(root, cache->key.objectid))
                        set_block_group_readonly(cache);
        }
        ret = 0;
error:
        btrfs_free_path(path);
        return ret;
}

int btrfs_make_block_group(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root, u64 bytes_used,
                           u64 type, u64 chunk_objectid, u64 chunk_offset,
                           u64 size)
{
        int ret;
        struct btrfs_root *extent_root;
        struct btrfs_block_group_cache *cache;

        extent_root = root->fs_info->extent_root;

        root->fs_info->last_trans_log_full_commit = trans->transid;

        cache = kzalloc(sizeof(*cache), GFP_NOFS);
        if (!cache)
                return -ENOMEM;

        cache->key.objectid = chunk_offset;
        cache->key.offset = size;
        cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
        cache->sectorsize = root->sectorsize;

        /*
         * we only want to use 32k of RAM per block group for keeping track
         * of free space, and if we pass 1/2 of that we want to start
         * converting things over to using bitmaps
         */
        cache->extents_thresh = ((1024 * 32) / 2) /
                sizeof(struct btrfs_free_space);
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
        spin_lock_init(&cache->tree_lock);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);

        btrfs_set_block_group_used(&cache->item, bytes_used);
        btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
        cache->flags = type;
        btrfs_set_block_group_flags(&cache->item, type);

        cache->last_byte_to_unpin = (u64)-1;
        cache->cached = BTRFS_CACHE_FINISHED;
        exclude_super_stripes(root, cache);

        add_new_free_space(cache, root->fs_info, chunk_offset,
                           chunk_offset + size);

        free_excluded_extents(root, cache);

        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        BUG_ON(ret);

        spin_lock(&cache->space_info->lock);
        cache->space_info->bytes_super += cache->bytes_super;
        spin_unlock(&cache->space_info->lock);

        down_write(&cache->space_info->groups_sem);
        list_add_tail(&cache->list, &cache->space_info->block_groups);
        up_write(&cache->space_info->groups_sem);

        ret = btrfs_add_block_group_cache(root->fs_info, cache);
        BUG_ON(ret);

        ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
                                sizeof(cache->item));
        BUG_ON(ret);

        set_avail_alloc_bits(extent_root->fs_info, type);

        return 0;
}

int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 group_start)
{
        struct btrfs_path *path;
        struct btrfs_block_group_cache *block_group;
        struct btrfs_free_cluster *cluster;
        struct btrfs_key key;
        int ret;

        root = root->fs_info->extent_root;

        block_group = btrfs_lookup_block_group(root->fs_info, group_start);
        BUG_ON(!block_group);
        BUG_ON(!block_group->ro);

        memcpy(&key, &block_group->key, sizeof(key));

        /* make sure this block group isn't part of an allocation cluster */
        cluster = &root->fs_info->data_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        /*
         * make sure this block group isn't part of a metadata
         * allocation cluster
         */
        cluster = &root->fs_info->meta_alloc_cluster;
        spin_lock(&cluster->refill_lock);
        btrfs_return_cluster_to_free_space(block_group, cluster);
        spin_unlock(&cluster->refill_lock);

        path = btrfs_alloc_path();
        BUG_ON(!path);

        spin_lock(&root->fs_info->block_group_cache_lock);
        rb_erase(&block_group->cache_node,
                 &root->fs_info->block_group_cache_tree);
        spin_unlock(&root->fs_info->block_group_cache_lock);

        down_write(&block_group->space_info->groups_sem);
        /*
         * we must use list_del_init so people can check to see if they
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
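        /*
         * A later holder of groups_sem can then detect the removal with a
         * hypothetical check along these lines:
         *
         *	if (list_empty(&block_group->list))
         *		... the group is gone, skip it ...
         */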
        up_write(&block_group->space_info->groups_sem);

        if (block_group->cached == BTRFS_CACHE_STARTED)
                wait_block_group_cache_done(block_group);

        btrfs_remove_free_space_cache(block_group);

        spin_lock(&block_group->space_info->lock);
        block_group->space_info->total_bytes -= block_group->key.offset;
        block_group->space_info->bytes_readonly -= block_group->key.offset;
        spin_unlock(&block_group->space_info->lock);

        btrfs_clear_space_info_full(root->fs_info);

        /* once for the lookup reference we took above */
        btrfs_put_block_group(block_group);
        /* once for the reference held by the block group cache tree */
        btrfs_put_block_group(block_group);

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -EIO;
        if (ret < 0)
                goto out;

        ret = btrfs_del_item(trans, root, path);
out:
        btrfs_free_path(path);
        return ret;
}