/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              u64 bytenr, u64 num_bytes, int alloc,
                              int mark_free);
static int update_reserved_extents(struct btrfs_block_group_cache *cache,
                                   u64 num_bytes, int reserve);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 parent,
                                u64 root_objectid, u64 owner_objectid,
                                u64 owner_offset, int refs_to_drop,
                                struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
                                    struct extent_buffer *leaf,
                                    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
                                      struct btrfs_root *root,
                                      u64 parent, u64 root_objectid,
                                      u64 flags, u64 owner, u64 offset,
                                      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root,
                                     u64 parent, u64 root_objectid,
                                     u64 flags, struct btrfs_disk_key *key,
                                     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
                          struct btrfs_root *extent_root, u64 alloc_bytes,
                          u64 flags, int force);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root,
                          struct btrfs_path *path,
                          u64 bytenr, u64 num_bytes,
                          int is_data, int reserved,
                          struct extent_buffer **must_clean);
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);

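/*
 * Returns non-zero once free space caching for this block group has
 * completed.  The smp_mb() appears intended to make sure we see a
 * current value of cache->cached without taking cache->lock.
 */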
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
        smp_mb();
        return cache->cached == BTRFS_CACHE_FINISHED;
}

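/* test whether the block group has all of the given allocation bits set */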
static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
        return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
        atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
        if (atomic_dec_and_test(&cache->count))
                kfree(cache);
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
                                struct btrfs_block_group_cache *block_group)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct btrfs_block_group_cache *cache;

        spin_lock(&info->block_group_cache_lock);
        p = &info->block_group_cache_tree.rb_node;

        while (*p) {
                parent = *p;
                cache = rb_entry(parent, struct btrfs_block_group_cache,
                                 cache_node);
                if (block_group->key.objectid < cache->key.objectid) {
                        p = &(*p)->rb_left;
                } else if (block_group->key.objectid > cache->key.objectid) {
                        p = &(*p)->rb_right;
                } else {
                        spin_unlock(&info->block_group_cache_lock);
                        return -EEXIST;
                }
        }

        rb_link_node(&block_group->cache_node, parent, p);
        rb_insert_color(&block_group->cache_node,
                        &info->block_group_cache_tree);
        spin_unlock(&info->block_group_cache_lock);

        return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
                              int contains)
{
        struct btrfs_block_group_cache *cache, *ret = NULL;
        struct rb_node *n;
        u64 end, start;

        spin_lock(&info->block_group_cache_lock);
        n = info->block_group_cache_tree.rb_node;

        while (n) {
                cache = rb_entry(n, struct btrfs_block_group_cache,
                                 cache_node);
                end = cache->key.objectid + cache->key.offset - 1;
                start = cache->key.objectid;

                if (bytenr < start) {
                        if (!contains && (!ret || start < ret->key.objectid))
                                ret = cache;
                        n = n->rb_left;
                } else if (bytenr > start) {
                        if (contains && bytenr <= end) {
                                ret = cache;
                                break;
                        }
                        n = n->rb_right;
                } else {
                        ret = cache;
                        break;
                }
        }
        if (ret)
                btrfs_get_block_group(ret);
        spin_unlock(&info->block_group_cache_lock);

        return ret;
}

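/*
 * Mark a byte range as excluded from free space caching by setting
 * EXTENT_UPTODATE in both freed_extents trees; free_excluded_extents()
 * below clears those bits again across a whole block group.
 */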
static int add_excluded_extent(struct btrfs_root *root,
                               u64 start, u64 num_bytes)
{
        u64 end = start + num_bytes - 1;
        set_extent_bits(&root->fs_info->freed_extents[0],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        set_extent_bits(&root->fs_info->freed_extents[1],
                        start, end, EXTENT_UPTODATE, GFP_NOFS);
        return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
                                  struct btrfs_block_group_cache *cache)
{
        u64 start, end;

        start = cache->key.objectid;
        end = start + cache->key.offset - 1;

        clear_extent_bits(&root->fs_info->freed_extents[0],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
        clear_extent_bits(&root->fs_info->freed_extents[1],
                          start, end, EXTENT_UPTODATE, GFP_NOFS);
}

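/*
 * Superblock copies must not be handed out as free space, so account
 * their stripes in bytes_super and mark them as excluded extents.
 */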
static int exclude_super_stripes(struct btrfs_root *root,
                                 struct btrfs_block_group_cache *cache)
{
        u64 bytenr;
        u64 *logical;
        int stripe_len;
        int i, nr, ret;

        if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
                stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
                cache->bytes_super += stripe_len;
                ret = add_excluded_extent(root, cache->key.objectid,
                                          stripe_len);
                BUG_ON(ret);
        }

        for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
                bytenr = btrfs_sb_offset(i);
                ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
                                       cache->key.objectid, bytenr,
                                       0, &logical, &nr, &stripe_len);
                BUG_ON(ret);

                while (nr--) {
                        cache->bytes_super += stripe_len;
                        ret = add_excluded_extent(root, logical[nr],
                                                  stripe_len);
                        BUG_ON(ret);
                }

                kfree(logical);
        }
        return 0;
}

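/*
 * Take a reference on the caching control for this block group, or
 * return NULL if caching is not currently in progress.  Callers drop
 * the reference with put_caching_control().
 */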
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
        struct btrfs_caching_control *ctl;

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_STARTED) {
                spin_unlock(&cache->lock);
                return NULL;
        }

        ctl = cache->caching_ctl;
        atomic_inc(&ctl->count);
        spin_unlock(&cache->lock);
        return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
        if (atomic_dec_and_test(&ctl->count))
                kfree(ctl);
}

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check pinned_extents for any extents that can't be
 * used yet, because their free space will not be released until the
 * transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
                              struct btrfs_fs_info *info, u64 start, u64 end)
{
        u64 extent_start, extent_end, size, total_added = 0;
        int ret;

        while (start < end) {
                ret = find_first_extent_bit(info->pinned_extents, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY | EXTENT_UPTODATE);
                if (ret)
                        break;

                if (extent_start <= start) {
                        start = extent_end + 1;
                } else if (extent_start > start && extent_start < end) {
                        size = extent_start - start;
                        total_added += size;
                        ret = btrfs_add_free_space(block_group, start,
                                                   size);
                        BUG_ON(ret);
                        start = extent_end + 1;
                } else {
                        break;
                }
        }

        if (start < end) {
                size = end - start;
                total_added += size;
                ret = btrfs_add_free_space(block_group, start, size);
                BUG_ON(ret);
        }

        return total_added;
}

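/*
 * Worker thread started by cache_block_group().  It walks the extent
 * tree via the commit root and records the gaps between allocated
 * extents as free space for the block group being cached.
 */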
static int caching_kthread(void *data)
{
        struct btrfs_block_group_cache *block_group = data;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
        struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
        struct btrfs_root *extent_root = fs_info->extent_root;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        u64 total_found = 0;
        u64 last = 0;
        u32 nritems;
        int ret = 0;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        exclude_super_stripes(extent_root, block_group);
        spin_lock(&block_group->space_info->lock);
        block_group->space_info->bytes_super += block_group->bytes_super;
        spin_unlock(&block_group->space_info->lock);

        last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

        /*
         * We don't want to deadlock with somebody trying to allocate a new
         * extent for the extent root while also trying to search the extent
         * root to add free space.  So we skip locking and search the commit
         * root, since it's read-only.
         */
        path->skip_locking = 1;
        path->search_commit_root = 1;
        path->reada = 2;

        key.objectid = last;
        key.offset = 0;
        key.type = BTRFS_EXTENT_ITEM_KEY;
again:
        mutex_lock(&caching_ctl->mutex);
        /* need to make sure the commit_root doesn't disappear */
        down_read(&fs_info->extent_commit_sem);

        ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
        if (ret < 0)
                goto err;

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);

        while (1) {
                smp_mb();
                if (fs_info->closing > 1) {
                        last = (u64)-1;
                        break;
                }

                if (path->slots[0] < nritems) {
                        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                } else {
                        ret = find_next_key(path, 0, &key);
                        if (ret)
                                break;

                        caching_ctl->progress = last;
                        btrfs_release_path(extent_root, path);
                        up_read(&fs_info->extent_commit_sem);
                        mutex_unlock(&caching_ctl->mutex);
                        if (btrfs_transaction_in_commit(fs_info))
                                schedule_timeout(1);
                        else
                                cond_resched();
                        goto again;
                }

                if (key.objectid < block_group->key.objectid) {
                        path->slots[0]++;
                        continue;
                }

                if (key.objectid >= block_group->key.objectid +
                    block_group->key.offset)
                        break;

                if (key.type == BTRFS_EXTENT_ITEM_KEY) {
                        total_found += add_new_free_space(block_group,
                                                          fs_info, last,
                                                          key.objectid);
                        last = key.objectid + key.offset;

                        if (total_found > (1024 * 1024 * 2)) {
                                total_found = 0;
                                wake_up(&caching_ctl->wait);
                        }
                }
                path->slots[0]++;
        }
        ret = 0;

        total_found += add_new_free_space(block_group, fs_info, last,
                                          block_group->key.objectid +
                                          block_group->key.offset);
        caching_ctl->progress = (u64)-1;

        spin_lock(&block_group->lock);
        block_group->caching_ctl = NULL;
        block_group->cached = BTRFS_CACHE_FINISHED;
        spin_unlock(&block_group->lock);

err:
        btrfs_free_path(path);
        up_read(&fs_info->extent_commit_sem);

        free_excluded_extents(extent_root, block_group);

        mutex_unlock(&caching_ctl->mutex);
        wake_up(&caching_ctl->wait);

        put_caching_control(caching_ctl);
        atomic_dec(&block_group->space_info->caching_threads);
        btrfs_put_block_group(block_group);

        return 0;
}

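/*
 * Kick off background caching of a block group's free space.  Returns 0
 * if caching is already done or underway; otherwise it sets up a
 * btrfs_caching_control and launches caching_kthread().
 */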
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
        struct btrfs_fs_info *fs_info = cache->fs_info;
        struct btrfs_caching_control *caching_ctl;
        struct task_struct *tsk;
        int ret = 0;

        smp_mb();
        if (cache->cached != BTRFS_CACHE_NO)
                return 0;

        caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
        BUG_ON(!caching_ctl);

        INIT_LIST_HEAD(&caching_ctl->list);
        mutex_init(&caching_ctl->mutex);
        init_waitqueue_head(&caching_ctl->wait);
        caching_ctl->block_group = cache;
        caching_ctl->progress = cache->key.objectid;
        /* one for caching kthread, one for caching block group list */
        atomic_set(&caching_ctl->count, 2);

        spin_lock(&cache->lock);
        if (cache->cached != BTRFS_CACHE_NO) {
                spin_unlock(&cache->lock);
                kfree(caching_ctl);
                return 0;
        }
        cache->caching_ctl = caching_ctl;
        cache->cached = BTRFS_CACHE_STARTED;
        spin_unlock(&cache->lock);

        down_write(&fs_info->extent_commit_sem);
        list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
        up_write(&fs_info->extent_commit_sem);

        atomic_inc(&cache->space_info->caching_threads);
        btrfs_get_block_group(cache);

        tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu",
                          cache->key.objectid);
        if (IS_ERR(tsk)) {
                ret = PTR_ERR(tsk);
                printk(KERN_ERR "error running thread %d\n", ret);
                BUG();
        }

        return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 0);

        return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
                                                 struct btrfs_fs_info *info,
                                                 u64 bytenr)
{
        struct btrfs_block_group_cache *cache;

        cache = block_group_cache_tree_search(info, bytenr, 1);

        return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
                                                  u64 flags)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags == flags) {
                        rcu_read_unlock();
                        return found;
                }
        }
        rcu_read_unlock();
        return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;

        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list)
                found->full = 0;
        rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
        if (factor == 10)
                return num;
        num *= factor;
        do_div(num, 10);
        return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
                           u64 search_start, u64 search_hint, int owner)
{
        struct btrfs_block_group_cache *cache;
        u64 used;
        u64 last = max(search_hint, search_start);
        u64 group_start = 0;
        int full_search = 0;
        int factor = 9;
        int wrapped = 0;
again:
        while (1) {
                cache = btrfs_lookup_first_block_group(root->fs_info, last);
                if (!cache)
                        break;

                spin_lock(&cache->lock);
                last = cache->key.objectid + cache->key.offset;
                used = btrfs_block_group_used(&cache->item);

                if ((full_search || !cache->ro) &&
                    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
                        if (used + cache->pinned + cache->reserved <
                            div_factor(cache->key.offset, factor)) {
                                group_start = cache->key.objectid;
                                spin_unlock(&cache->lock);
                                btrfs_put_block_group(cache);
                                goto found;
                        }
                }
                spin_unlock(&cache->lock);
                btrfs_put_block_group(cache);
                cond_resched();
        }
        if (!wrapped) {
                last = search_start;
                wrapped = 1;
                goto again;
        }
        if (!full_search && factor < 10) {
                last = search_start;
                full_search = 1;
                factor = 10;
                goto again;
        }
found:
        return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
        int ret;
        struct btrfs_key key;
        struct btrfs_path *path;

        path = btrfs_alloc_path();
        BUG_ON(!path);
        key.objectid = start;
        key.offset = len;
        btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
        ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
                                0, 0);
        btrfs_free_path(path);
        return ret;
}

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all the cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead.  Every time a tree
 * block gets COWed, we have to update the back refs entry for all the
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent, and the
 * key type is used to differentiate between types of back refs.
 * The meaning of the key offset differs between the types of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is a hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root,
                                  struct btrfs_path *path,
                                  u64 owner, u32 extra_size)
{
        struct btrfs_extent_item *item;
        struct btrfs_extent_item_v0 *ei0;
        struct btrfs_extent_ref_v0 *ref0;
        struct btrfs_tree_block_info *bi;
        struct extent_buffer *leaf;
        struct btrfs_key key;
        struct btrfs_key found_key;
        u32 new_size = sizeof(*item);
        u64 refs;
        int ret;

        leaf = path->nodes[0];
        BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        ei0 = btrfs_item_ptr(leaf, path->slots[0],
                             struct btrfs_extent_item_v0);
        refs = btrfs_extent_refs_v0(leaf, ei0);

        if (owner == (u64)-1) {
                while (1) {
                        if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                                ret = btrfs_next_leaf(root, path);
                                if (ret < 0)
                                        return ret;
                                BUG_ON(ret > 0);
                                leaf = path->nodes[0];
                        }
                        btrfs_item_key_to_cpu(leaf, &found_key,
                                              path->slots[0]);
                        BUG_ON(key.objectid != found_key.objectid);
                        if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
                                path->slots[0]++;
                                continue;
                        }
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                              struct btrfs_extent_ref_v0);
                        owner = btrfs_ref_objectid_v0(leaf, ref0);
                        break;
                }
        }
        btrfs_release_path(root, path);

        if (owner < BTRFS_FIRST_FREE_OBJECTID)
                new_size += sizeof(*bi);

        new_size -= sizeof(*ei0);
        ret = btrfs_search_slot(trans, root, &key, path,
                                new_size + extra_size, 1);
        if (ret < 0)
                return ret;
        BUG_ON(ret);

        ret = btrfs_extend_item(trans, root, path, new_size);
        BUG_ON(ret);

        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        btrfs_set_extent_refs(leaf, item, refs);
        /* FIXME: get real generation */
        btrfs_set_extent_generation(leaf, item, 0);
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                btrfs_set_extent_flags(leaf, item,
                                       BTRFS_EXTENT_FLAG_TREE_BLOCK |
                                       BTRFS_BLOCK_FLAG_FULL_BACKREF);
                bi = (struct btrfs_tree_block_info *)(item + 1);
                /* FIXME: get first key of the block */
                memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
                btrfs_set_tree_block_level(leaf, bi, (int)owner);
        } else {
                btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
        }
        btrfs_mark_buffer_dirty(leaf);
        return 0;
}
#endif

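/*
 * Compute the key offset used for implicit data back refs: a hash built
 * from a crc32c of the root objectid combined with a crc32c of the
 * owner and offset fields.
 */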
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
        u32 high_crc = ~(u32)0;
        u32 low_crc = ~(u32)0;
        __le64 lenum;

        lenum = cpu_to_le64(root_objectid);
        high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(owner);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
        lenum = cpu_to_le64(offset);
        low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

        return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
                                     struct btrfs_extent_data_ref *ref)
{
        return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
                                    btrfs_extent_data_ref_objectid(leaf, ref),
                                    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
                                 struct btrfs_extent_data_ref *ref,
                                 u64 root_objectid, u64 owner, u64 offset)
{
        if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
            btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
            btrfs_extent_data_ref_offset(leaf, ref) != offset)
                return 0;
        return 1;
}

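/*
 * Find an existing data back ref item for the given extent.  Returns 0
 * with the path positioned on the item, or -ENOENT.  Since different
 * (root, owner, offset) tuples can hash to the same EXTENT_DATA_REF key
 * offset, collisions are resolved by scanning forward through the leaf.
 */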
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid,
                                           u64 owner, u64 offset)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref;
        struct extent_buffer *leaf;
        u32 nritems;
        int ret;
        int recow;
        int err = -ENOENT;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
        }
again:
        recow = 0;
        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret < 0) {
                err = ret;
                goto fail;
        }

        if (parent) {
                if (!ret)
                        return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                btrfs_release_path(root, path);
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret < 0) {
                        err = ret;
                        goto fail;
                }
                if (!ret)
                        return 0;
#endif
                goto fail;
        }

        leaf = path->nodes[0];
        nritems = btrfs_header_nritems(leaf);
        while (1) {
                if (path->slots[0] >= nritems) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                err = ret;
                        if (ret)
                                goto fail;

                        leaf = path->nodes[0];
                        nritems = btrfs_header_nritems(leaf);
                        recow = 1;
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != bytenr ||
                    key.type != BTRFS_EXTENT_DATA_REF_KEY)
                        goto fail;

                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);

                if (match_extent_data_ref(leaf, ref, root_objectid,
                                          owner, offset)) {
                        if (recow) {
                                btrfs_release_path(root, path);
                                goto again;
                        }
                        err = 0;
                        break;
                }
                path->slots[0]++;
        }
fail:
        return err;
}

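/*
 * Insert or update a data back ref item, bumping its count by
 * refs_to_add.  For EXTENT_DATA_REF keys, hash collisions are handled
 * by probing successive key offsets until a match or an empty slot is
 * found.
 */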
static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           u64 bytenr, u64 parent,
                                           u64 root_objectid, u64 owner,
                                           u64 offset, int refs_to_add)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        u32 size;
        u32 num_refs;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_DATA_REF_KEY;
                key.offset = parent;
                size = sizeof(struct btrfs_shared_data_ref);
        } else {
                key.type = BTRFS_EXTENT_DATA_REF_KEY;
                key.offset = hash_extent_data_ref(root_objectid,
                                                  owner, offset);
                size = sizeof(struct btrfs_extent_data_ref);
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, size);
        if (ret && ret != -EEXIST)
                goto fail;

        leaf = path->nodes[0];
        if (parent) {
                struct btrfs_shared_data_ref *ref;
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_shared_data_ref);
                if (ret == 0) {
                        btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_shared_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
                }
        } else {
                struct btrfs_extent_data_ref *ref;
                while (ret == -EEXIST) {
                        ref = btrfs_item_ptr(leaf, path->slots[0],
                                             struct btrfs_extent_data_ref);
                        if (match_extent_data_ref(leaf, ref, root_objectid,
                                                  owner, offset))
                                break;
                        btrfs_release_path(root, path);
                        key.offset++;
                        ret = btrfs_insert_empty_item(trans, root, path, &key,
                                                      size);
                        if (ret && ret != -EEXIST)
                                goto fail;

                        leaf = path->nodes[0];
                }
                ref = btrfs_item_ptr(leaf, path->slots[0],
                                     struct btrfs_extent_data_ref);
                if (ret == 0) {
                        btrfs_set_extent_data_ref_root(leaf, ref,
                                                       root_objectid);
                        btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
                        btrfs_set_extent_data_ref_offset(leaf, ref, offset);
                        btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
                } else {
                        num_refs = btrfs_extent_data_ref_count(leaf, ref);
                        num_refs += refs_to_add;
                        btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
                }
        }
        btrfs_mark_buffer_dirty(leaf);
        ret = 0;
fail:
        btrfs_release_path(root, path);
        return ret;
}

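/*
 * Drop refs_to_drop references from the data back ref item the path
 * points to, deleting the item entirely when its count reaches zero.
 */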
static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
                                           struct btrfs_root *root,
                                           struct btrfs_path *path,
                                           int refs_to_drop)
{
        struct btrfs_key key;
        struct btrfs_extent_data_ref *ref1 = NULL;
        struct btrfs_shared_data_ref *ref2 = NULL;
        struct extent_buffer *leaf;
        u32 num_refs = 0;
        int ret = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

        if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                BUG();
        }

        BUG_ON(num_refs < refs_to_drop);
        num_refs -= refs_to_drop;

        if (num_refs == 0) {
                ret = btrfs_del_item(trans, root, path);
        } else {
                if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
                        btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
                else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
                        btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
                else {
                        struct btrfs_extent_ref_v0 *ref0;
                        ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                        struct btrfs_extent_ref_v0);
                        btrfs_set_ref_count_v0(leaf, ref0, num_refs);
                }
#endif
                btrfs_mark_buffer_dirty(leaf);
        }
        return ret;
}

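/*
 * Return the reference count stored in a data back ref, whether it is
 * an inline ref (iref != NULL) or a standalone back ref item.
 */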
static noinline u32 extent_data_ref_count(struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          struct btrfs_extent_inline_ref *iref)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_data_ref *ref1;
        struct btrfs_shared_data_ref *ref2;
        u32 num_refs = 0;

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
        if (iref) {
                if (btrfs_extent_inline_ref_type(leaf, iref) ==
                    BTRFS_EXTENT_DATA_REF_KEY) {
                        ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
                        num_refs = btrfs_extent_data_ref_count(leaf, ref1);
                } else {
                        ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
                        num_refs = btrfs_shared_data_ref_count(leaf, ref2);
                }
        } else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
                ref1 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_data_ref);
                num_refs = btrfs_extent_data_ref_count(leaf, ref1);
        } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
                ref2 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_shared_data_ref);
                num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        } else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
                struct btrfs_extent_ref_v0 *ref0;
                ref0 = btrfs_item_ptr(leaf, path->slots[0],
                                      struct btrfs_extent_ref_v0);
                num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
        } else {
                WARN_ON(1);
        }
        return num_refs;
}

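/*
 * Tree blocks use keyed back refs with no payload: lookup_tree_block_ref()
 * searches for the SHARED_BLOCK_REF/TREE_BLOCK_REF item, and
 * insert_tree_block_ref() below inserts an empty item with the matching
 * key.
 */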
static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
        if (ret > 0)
                ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (ret == -ENOENT && parent) {
                btrfs_release_path(root, path);
                key.type = BTRFS_EXTENT_REF_V0_KEY;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0)
                        ret = -ENOENT;
        }
#endif
        return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
                                          struct btrfs_root *root,
                                          struct btrfs_path *path,
                                          u64 bytenr, u64 parent,
                                          u64 root_objectid)
{
        struct btrfs_key key;
        int ret;

        key.objectid = bytenr;
        if (parent) {
                key.type = BTRFS_SHARED_BLOCK_REF_KEY;
                key.offset = parent;
        } else {
                key.type = BTRFS_TREE_BLOCK_REF_KEY;
                key.offset = root_objectid;
        }

        ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
        btrfs_release_path(root, path);
        return ret;
}

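/*
 * Map (parent, owner) to the back ref key type: tree blocks get
 * SHARED_BLOCK_REF/TREE_BLOCK_REF keys, data extents get
 * SHARED_DATA_REF/EXTENT_DATA_REF keys.
 */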
static inline int extent_ref_type(u64 parent, u64 owner)
{
        int type;
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
                if (parent > 0)
                        type = BTRFS_SHARED_BLOCK_REF_KEY;
                else
                        type = BTRFS_TREE_BLOCK_REF_KEY;
        } else {
                if (parent > 0)
                        type = BTRFS_SHARED_DATA_REF_KEY;
                else
                        type = BTRFS_EXTENT_DATA_REF_KEY;
        }
        return type;
}

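/*
 * Find the key of the item that follows the current path position,
 * walking up through the tree when the current leaf is exhausted.
 * Returns 0 if a next key exists, 1 otherwise.
 */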
static int find_next_key(struct btrfs_path *path, int level,
                         struct btrfs_key *key)
{
        for (; level < BTRFS_MAX_LEVEL; level++) {
                if (!path->nodes[level])
                        break;
                if (path->slots[level] + 1 >=
                    btrfs_header_nritems(path->nodes[level]))
                        continue;
                if (level == 0)
                        btrfs_item_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                else
                        btrfs_node_key_to_cpu(path->nodes[level], key,
                                              path->slots[level] + 1);
                return 0;
        }
        return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *       items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root,
                                 struct btrfs_path *path,
                                 struct btrfs_extent_inline_ref **ref_ret,
                                 u64 bytenr, u64 num_bytes,
                                 u64 parent, u64 root_objectid,
                                 u64 owner, u64 offset, int insert)
{
        struct btrfs_key key;
        struct extent_buffer *leaf;
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
        u64 flags;
        u64 item_size;
        unsigned long ptr;
        unsigned long end;
        int extra_size;
        int type;
        int want;
        int ret;
        int err = 0;

        key.objectid = bytenr;
        key.type = BTRFS_EXTENT_ITEM_KEY;
        key.offset = num_bytes;

        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
                path->keep_locks = 1;
        } else
                extra_size = -1;
        ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
        if (ret < 0) {
                err = ret;
                goto out;
        }
        BUG_ON(ret);

        leaf = path->nodes[0];
        item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
        if (item_size < sizeof(*ei)) {
                if (!insert) {
                        err = -ENOENT;
                        goto out;
                }
                ret = convert_extent_item_v0(trans, root, path, owner,
                                             extra_size);
                if (ret < 0) {
                        err = ret;
                        goto out;
                }
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
        }
#endif
        BUG_ON(item_size < sizeof(*ei));

        ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(leaf, ei);

        ptr = (unsigned long)(ei + 1);
        end = (unsigned long)ei + item_size;

        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                ptr += sizeof(struct btrfs_tree_block_info);
                BUG_ON(ptr > end);
        } else {
                BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
        }

        err = -ENOENT;
        while (1) {
                if (ptr >= end) {
                        WARN_ON(ptr > end);
                        break;
                }
                iref = (struct btrfs_extent_inline_ref *)ptr;
                type = btrfs_extent_inline_ref_type(leaf, iref);
                if (want < type)
                        break;
                if (want > type) {
                        ptr += btrfs_extent_inline_ref_size(type);
                        continue;
                }

                if (type == BTRFS_EXTENT_DATA_REF_KEY) {
                        struct btrfs_extent_data_ref *dref;
                        dref = (struct btrfs_extent_data_ref *)(&iref->offset);
                        if (match_extent_data_ref(leaf, dref, root_objectid,
                                                  owner, offset)) {
                                err = 0;
                                break;
                        }
                        if (hash_extent_data_ref_item(leaf, dref) <
                            hash_extent_data_ref(root_objectid, owner, offset))
                                break;
                } else {
                        u64 ref_offset;
                        ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
                        if (parent > 0) {
                                if (parent == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < parent)
                                        break;
                        } else {
                                if (root_objectid == ref_offset) {
                                        err = 0;
                                        break;
                                }
                                if (ref_offset < root_objectid)
                                        break;
                        }
                }
                ptr += btrfs_extent_inline_ref_size(type);
        }
        if (err == -ENOENT && insert) {
                if (item_size + extra_size >=
                    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
                        err = -EAGAIN;
                        goto out;
                }
                /*
                 * To add new inline back ref, we have to make sure
                 * there is no corresponding back ref item.
                 * For simplicity, we just do not add new inline back
                 * ref if there is any kind of item for this block
                 */
                if (find_next_key(path, 0, &key) == 0 &&
                    key.objectid == bytenr &&
                    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
                        err = -EAGAIN;
                        goto out;
                }
        }
        *ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
        if (insert) {
                path->keep_locks = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
}

1355 /*
1356  * helper to add new inline back ref
1357  */
1358 static noinline_for_stack
1359 int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
1360                                 struct btrfs_root *root,
1361                                 struct btrfs_path *path,
1362                                 struct btrfs_extent_inline_ref *iref,
1363                                 u64 parent, u64 root_objectid,
1364                                 u64 owner, u64 offset, int refs_to_add,
1365                                 struct btrfs_delayed_extent_op *extent_op)
1366 {
1367         struct extent_buffer *leaf;
1368         struct btrfs_extent_item *ei;
1369         unsigned long ptr;
1370         unsigned long end;
1371         unsigned long item_offset;
1372         u64 refs;
1373         int size;
1374         int type;
1375         int ret;
1376
1377         leaf = path->nodes[0];
1378         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1379         item_offset = (unsigned long)iref - (unsigned long)ei;
1380
1381         type = extent_ref_type(parent, owner);
1382         size = btrfs_extent_inline_ref_size(type);
1383
1384         ret = btrfs_extend_item(trans, root, path, size);
1385         BUG_ON(ret);
1386
1387         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1388         refs = btrfs_extent_refs(leaf, ei);
1389         refs += refs_to_add;
1390         btrfs_set_extent_refs(leaf, ei, refs);
1391         if (extent_op)
1392                 __run_delayed_extent_op(extent_op, leaf, ei);
1393
1394         ptr = (unsigned long)ei + item_offset;
1395         end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
1396         if (ptr < end - size)
1397                 memmove_extent_buffer(leaf, ptr + size, ptr,
1398                                       end - size - ptr);
1399
1400         iref = (struct btrfs_extent_inline_ref *)ptr;
1401         btrfs_set_extent_inline_ref_type(leaf, iref, type);
1402         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1403                 struct btrfs_extent_data_ref *dref;
1404                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1405                 btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
1406                 btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
1407                 btrfs_set_extent_data_ref_offset(leaf, dref, offset);
1408                 btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
1409         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1410                 struct btrfs_shared_data_ref *sref;
1411                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1412                 btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
1413                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1414         } else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
1415                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
1416         } else {
1417                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
1418         }
1419         btrfs_mark_buffer_dirty(leaf);
1420         return 0;
1421 }
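
/*
 * Editor's note: what setup_inline_extent_backref does to the item, with
 * illustrative offsets only:
 *
 *   before extend:  [ ei ... | ref_a | ref_c ]
 *   after extend:   [ ei ... | ref_a | ref_c | <size spare> ]
 *   after memmove:  [ ei ... | ref_a | <hole> | ref_c ]
 *   after fill:     [ ei ... | ref_a | ref_b  | ref_c ]
 *
 * The hole is opened at the slot lookup_inline_extent_backref picked, so
 * the sort order of the inline refs is preserved.
 */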
1422
1423 static int lookup_extent_backref(struct btrfs_trans_handle *trans,
1424                                  struct btrfs_root *root,
1425                                  struct btrfs_path *path,
1426                                  struct btrfs_extent_inline_ref **ref_ret,
1427                                  u64 bytenr, u64 num_bytes, u64 parent,
1428                                  u64 root_objectid, u64 owner, u64 offset)
1429 {
1430         int ret;
1431
1432         ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
1433                                            bytenr, num_bytes, parent,
1434                                            root_objectid, owner, offset, 0);
1435         if (ret != -ENOENT)
1436                 return ret;
1437
1438         btrfs_release_path(root, path);
1439         *ref_ret = NULL;
1440
1441         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1442                 ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
1443                                             root_objectid);
1444         } else {
1445                 ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
1446                                              root_objectid, owner, offset);
1447         }
1448         return ret;
1449 }
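
/*
 * Editor's note: the owner test above is the usual btrfs convention.
 * Tree blocks use their level (always below BTRFS_FIRST_FREE_OBJECTID)
 * as the owner, while file data extents use the inode objectid, which
 * starts at BTRFS_FIRST_FREE_OBJECTID.  The same test picks between
 * tree block refs and data refs throughout this file.
 */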
1450
1451 /*
1452  * helper to update/remove an inline back ref
1453  */
1454 static noinline_for_stack
1455 int update_inline_extent_backref(struct btrfs_trans_handle *trans,
1456                                  struct btrfs_root *root,
1457                                  struct btrfs_path *path,
1458                                  struct btrfs_extent_inline_ref *iref,
1459                                  int refs_to_mod,
1460                                  struct btrfs_delayed_extent_op *extent_op)
1461 {
1462         struct extent_buffer *leaf;
1463         struct btrfs_extent_item *ei;
1464         struct btrfs_extent_data_ref *dref = NULL;
1465         struct btrfs_shared_data_ref *sref = NULL;
1466         unsigned long ptr;
1467         unsigned long end;
1468         u32 item_size;
1469         int size;
1470         int type;
1471         int ret;
1472         u64 refs;
1473
1474         leaf = path->nodes[0];
1475         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1476         refs = btrfs_extent_refs(leaf, ei);
1477         WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
1478         refs += refs_to_mod;
1479         btrfs_set_extent_refs(leaf, ei, refs);
1480         if (extent_op)
1481                 __run_delayed_extent_op(extent_op, leaf, ei);
1482
1483         type = btrfs_extent_inline_ref_type(leaf, iref);
1484
1485         if (type == BTRFS_EXTENT_DATA_REF_KEY) {
1486                 dref = (struct btrfs_extent_data_ref *)(&iref->offset);
1487                 refs = btrfs_extent_data_ref_count(leaf, dref);
1488         } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
1489                 sref = (struct btrfs_shared_data_ref *)(iref + 1);
1490                 refs = btrfs_shared_data_ref_count(leaf, sref);
1491         } else {
1492                 refs = 1;
1493                 BUG_ON(refs_to_mod != -1);
1494         }
1495
1496         BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
1497         refs += refs_to_mod;
1498
1499         if (refs > 0) {
1500                 if (type == BTRFS_EXTENT_DATA_REF_KEY)
1501                         btrfs_set_extent_data_ref_count(leaf, dref, refs);
1502                 else
1503                         btrfs_set_shared_data_ref_count(leaf, sref, refs);
1504         } else {
1505                 size = btrfs_extent_inline_ref_size(type);
1506                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1507                 ptr = (unsigned long)iref;
1508                 end = (unsigned long)ei + item_size;
1509                 if (ptr + size < end)
1510                         memmove_extent_buffer(leaf, ptr, ptr + size,
1511                                               end - ptr - size);
1512                 item_size -= size;
1513                 ret = btrfs_truncate_item(trans, root, path, item_size, 1);
1514                 BUG_ON(ret);
1515         }
1516         btrfs_mark_buffer_dirty(leaf);
1517         return 0;
1518 }
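
/*
 * Editor's note: removal is the mirror image of the insert sketch above.
 * When the last count on an inline ref is dropped, the tail of the item
 * is shifted down over the dead ref and the item is truncated:
 *
 *   before:   [ ei ... | ref_a | ref_b | ref_c ]
 *   memmove:  [ ei ... | ref_a | ref_c | ..... ]
 *   truncate: [ ei ... | ref_a | ref_c ]
 */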
1519
1520 static noinline_for_stack
1521 int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
1522                                  struct btrfs_root *root,
1523                                  struct btrfs_path *path,
1524                                  u64 bytenr, u64 num_bytes, u64 parent,
1525                                  u64 root_objectid, u64 owner,
1526                                  u64 offset, int refs_to_add,
1527                                  struct btrfs_delayed_extent_op *extent_op)
1528 {
1529         struct btrfs_extent_inline_ref *iref;
1530         int ret;
1531
1532         ret = lookup_inline_extent_backref(trans, root, path, &iref,
1533                                            bytenr, num_bytes, parent,
1534                                            root_objectid, owner, offset, 1);
1535         if (ret == 0) {
1536                 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
1537                 ret = update_inline_extent_backref(trans, root, path, iref,
1538                                                    refs_to_add, extent_op);
1539         } else if (ret == -ENOENT) {
1540                 ret = setup_inline_extent_backref(trans, root, path, iref,
1541                                                   parent, root_objectid,
1542                                                   owner, offset, refs_to_add,
1543                                                   extent_op);
1544         }
1545         return ret;
1546 }
1547
1548 static int insert_extent_backref(struct btrfs_trans_handle *trans,
1549                                  struct btrfs_root *root,
1550                                  struct btrfs_path *path,
1551                                  u64 bytenr, u64 parent, u64 root_objectid,
1552                                  u64 owner, u64 offset, int refs_to_add)
1553 {
1554         int ret;
1555         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1556                 BUG_ON(refs_to_add != 1);
1557                 ret = insert_tree_block_ref(trans, root, path, bytenr,
1558                                             parent, root_objectid);
1559         } else {
1560                 ret = insert_extent_data_ref(trans, root, path, bytenr,
1561                                              parent, root_objectid,
1562                                              owner, offset, refs_to_add);
1563         }
1564         return ret;
1565 }
1566
1567 static int remove_extent_backref(struct btrfs_trans_handle *trans,
1568                                  struct btrfs_root *root,
1569                                  struct btrfs_path *path,
1570                                  struct btrfs_extent_inline_ref *iref,
1571                                  int refs_to_drop, int is_data)
1572 {
1573         int ret;
1574
1575         BUG_ON(!is_data && refs_to_drop != 1);
1576         if (iref) {
1577                 ret = update_inline_extent_backref(trans, root, path, iref,
1578                                                    -refs_to_drop, NULL);
1579         } else if (is_data) {
1580                 ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
1581         } else {
1582                 ret = btrfs_del_item(trans, root, path);
1583         }
1584         return ret;
1585 }
1586
1587 static void btrfs_issue_discard(struct block_device *bdev,
1588                                 u64 start, u64 len)
1589 {
1590         blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
1591                              DISCARD_FL_BARRIER);
1592 }
1593
1594 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
1595                                 u64 num_bytes)
1596 {
1597         int ret;
1598         u64 map_length = num_bytes;
1599         struct btrfs_multi_bio *multi = NULL;
1600
1601         if (!btrfs_test_opt(root, DISCARD))
1602                 return 0;
1603
1604         /* Tell the block device(s) that the sectors can be discarded */
1605         ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
1606                               bytenr, &map_length, &multi, 0);
1607         if (!ret) {
1608                 struct btrfs_bio_stripe *stripe = multi->stripes;
1609                 int i;
1610
1611                 if (map_length > num_bytes)
1612                         map_length = num_bytes;
1613
1614                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
1615                         btrfs_issue_discard(stripe->dev->bdev,
1616                                             stripe->physical,
1617                                             map_length);
1618                 }
1619                 kfree(multi);
1620         }
1621
1622         return ret;
1623 }
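
/*
 * Editor's note: the ">> 9" in btrfs_issue_discard converts byte offsets
 * to 512-byte sectors, the unit blkdev_issue_discard expects.  Worked
 * example (illustrative numbers): a 1 MiB extent starting at byte
 * 134217728 (128 MiB) becomes sector 262144 with a length of 2048
 * sectors.
 */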
1624
1625 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1626                          struct btrfs_root *root,
1627                          u64 bytenr, u64 num_bytes, u64 parent,
1628                          u64 root_objectid, u64 owner, u64 offset)
1629 {
1630         int ret;
1631         BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
1632                root_objectid == BTRFS_TREE_LOG_OBJECTID);
1633
1634         if (owner < BTRFS_FIRST_FREE_OBJECTID) {
1635                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
1636                                         parent, root_objectid, (int)owner,
1637                                         BTRFS_ADD_DELAYED_REF, NULL);
1638         } else {
1639                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
1640                                         parent, root_objectid, owner, offset,
1641                                         BTRFS_ADD_DELAYED_REF, NULL);
1642         }
1643         return ret;
1644 }
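
/*
 * Editor's note: a minimal, hypothetical caller of btrfs_inc_extent_ref;
 * "example_share_file_extent" is not part of btrfs.  It only demonstrates
 * the argument convention: parent == 0 requests a normal back ref keyed
 * on (root, inode, offset) rather than a full back ref on the owning
 * tree block.
 */
static int __maybe_unused example_share_file_extent(
                                struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes,
                                u64 ino, u64 file_offset)
{
        /* queue a delayed ref; it is applied by btrfs_run_delayed_refs */
        return btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                    root->root_key.objectid, ino,
                                    file_offset);
}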
1645
1646 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1647                                   struct btrfs_root *root,
1648                                   u64 bytenr, u64 num_bytes,
1649                                   u64 parent, u64 root_objectid,
1650                                   u64 owner, u64 offset, int refs_to_add,
1651                                   struct btrfs_delayed_extent_op *extent_op)
1652 {
1653         struct btrfs_path *path;
1654         struct extent_buffer *leaf;
1655         struct btrfs_extent_item *item;
1656         u64 refs;
1657         int ret;
1658         int err = 0;
1659
1660         path = btrfs_alloc_path();
1661         if (!path)
1662                 return -ENOMEM;
1663
1664         path->reada = 1;
1665         path->leave_spinning = 1;
1666         /* this will set up the path even if it fails to insert the back ref */
1667         ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1668                                            path, bytenr, num_bytes, parent,
1669                                            root_objectid, owner, offset,
1670                                            refs_to_add, extent_op);
1671         if (ret == 0)
1672                 goto out;
1673
1674         if (ret != -EAGAIN) {
1675                 err = ret;
1676                 goto out;
1677         }
1678
1679         leaf = path->nodes[0];
1680         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1681         refs = btrfs_extent_refs(leaf, item);
1682         btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1683         if (extent_op)
1684                 __run_delayed_extent_op(extent_op, leaf, item);
1685
1686         btrfs_mark_buffer_dirty(leaf);
1687         btrfs_release_path(root->fs_info->extent_root, path);
1688
1689         path->reada = 1;
1690         path->leave_spinning = 1;
1691
1692         /* now insert the actual backref */
1693         ret = insert_extent_backref(trans, root->fs_info->extent_root,
1694                                     path, bytenr, parent, root_objectid,
1695                                     owner, offset, refs_to_add);
1696         BUG_ON(ret);
1697 out:
1698         btrfs_free_path(path);
1699         return err;
1700 }
1701
1702 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1703                                 struct btrfs_root *root,
1704                                 struct btrfs_delayed_ref_node *node,
1705                                 struct btrfs_delayed_extent_op *extent_op,
1706                                 int insert_reserved)
1707 {
1708         int ret = 0;
1709         struct btrfs_delayed_data_ref *ref;
1710         struct btrfs_key ins;
1711         u64 parent = 0;
1712         u64 ref_root = 0;
1713         u64 flags = 0;
1714
1715         ins.objectid = node->bytenr;
1716         ins.offset = node->num_bytes;
1717         ins.type = BTRFS_EXTENT_ITEM_KEY;
1718
1719         ref = btrfs_delayed_node_to_data_ref(node);
1720         if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1721                 parent = ref->parent;
1722         else
1723                 ref_root = ref->root;
1724
1725         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1726                 if (extent_op) {
1727                         BUG_ON(extent_op->update_key);
1728                         flags |= extent_op->flags_to_set;
1729                 }
1730                 ret = alloc_reserved_file_extent(trans, root,
1731                                                  parent, ref_root, flags,
1732                                                  ref->objectid, ref->offset,
1733                                                  &ins, node->ref_mod);
1734         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1735                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1736                                              node->num_bytes, parent,
1737                                              ref_root, ref->objectid,
1738                                              ref->offset, node->ref_mod,
1739                                              extent_op);
1740         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1741                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1742                                           node->num_bytes, parent,
1743                                           ref_root, ref->objectid,
1744                                           ref->offset, node->ref_mod,
1745                                           extent_op);
1746         } else {
1747                 BUG();
1748         }
1749         return ret;
1750 }
1751
1752 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1753                                     struct extent_buffer *leaf,
1754                                     struct btrfs_extent_item *ei)
1755 {
1756         u64 flags = btrfs_extent_flags(leaf, ei);
1757         if (extent_op->update_flags) {
1758                 flags |= extent_op->flags_to_set;
1759                 btrfs_set_extent_flags(leaf, ei, flags);
1760         }
1761
1762         if (extent_op->update_key) {
1763                 struct btrfs_tree_block_info *bi;
1764                 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1765                 bi = (struct btrfs_tree_block_info *)(ei + 1);
1766                 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1767         }
1768 }
1769
1770 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1771                                  struct btrfs_root *root,
1772                                  struct btrfs_delayed_ref_node *node,
1773                                  struct btrfs_delayed_extent_op *extent_op)
1774 {
1775         struct btrfs_key key;
1776         struct btrfs_path *path;
1777         struct btrfs_extent_item *ei;
1778         struct extent_buffer *leaf;
1779         u32 item_size;
1780         int ret;
1781         int err = 0;
1782
1783         path = btrfs_alloc_path();
1784         if (!path)
1785                 return -ENOMEM;
1786
1787         key.objectid = node->bytenr;
1788         key.type = BTRFS_EXTENT_ITEM_KEY;
1789         key.offset = node->num_bytes;
1790
1791         path->reada = 1;
1792         path->leave_spinning = 1;
1793         ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1794                                 path, 0, 1);
1795         if (ret < 0) {
1796                 err = ret;
1797                 goto out;
1798         }
1799         if (ret > 0) {
1800                 err = -EIO;
1801                 goto out;
1802         }
1803
1804         leaf = path->nodes[0];
1805         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1806 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1807         if (item_size < sizeof(*ei)) {
1808                 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1809                                              path, (u64)-1, 0);
1810                 if (ret < 0) {
1811                         err = ret;
1812                         goto out;
1813                 }
1814                 leaf = path->nodes[0];
1815                 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1816         }
1817 #endif
1818         BUG_ON(item_size < sizeof(*ei));
1819         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1820         __run_delayed_extent_op(extent_op, leaf, ei);
1821
1822         btrfs_mark_buffer_dirty(leaf);
1823 out:
1824         btrfs_free_path(path);
1825         return err;
1826 }
1827
1828 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1829                                 struct btrfs_root *root,
1830                                 struct btrfs_delayed_ref_node *node,
1831                                 struct btrfs_delayed_extent_op *extent_op,
1832                                 int insert_reserved)
1833 {
1834         int ret = 0;
1835         struct btrfs_delayed_tree_ref *ref;
1836         struct btrfs_key ins;
1837         u64 parent = 0;
1838         u64 ref_root = 0;
1839
1840         ins.objectid = node->bytenr;
1841         ins.offset = node->num_bytes;
1842         ins.type = BTRFS_EXTENT_ITEM_KEY;
1843
1844         ref = btrfs_delayed_node_to_tree_ref(node);
1845         if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1846                 parent = ref->parent;
1847         else
1848                 ref_root = ref->root;
1849
1850         BUG_ON(node->ref_mod != 1);
1851         if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1852                 BUG_ON(!extent_op || !extent_op->update_flags ||
1853                        !extent_op->update_key);
1854                 ret = alloc_reserved_tree_block(trans, root,
1855                                                 parent, ref_root,
1856                                                 extent_op->flags_to_set,
1857                                                 &extent_op->key,
1858                                                 ref->level, &ins);
1859         } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1860                 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1861                                              node->num_bytes, parent, ref_root,
1862                                              ref->level, 0, 1, extent_op);
1863         } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1864                 ret = __btrfs_free_extent(trans, root, node->bytenr,
1865                                           node->num_bytes, parent, ref_root,
1866                                           ref->level, 0, 1, extent_op);
1867         } else {
1868                 BUG();
1869         }
1870         return ret;
1871 }
1872
1873
1874 /* helper function to actually process a single delayed ref entry */
1875 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1876                                struct btrfs_root *root,
1877                                struct btrfs_delayed_ref_node *node,
1878                                struct btrfs_delayed_extent_op *extent_op,
1879                                int insert_reserved)
1880 {
1881         int ret;
1882         if (btrfs_delayed_ref_is_head(node)) {
1883                 struct btrfs_delayed_ref_head *head;
1884                 /*
1885                  * we've hit the end of the chain and we were supposed
1886                  * to insert this extent into the tree.  But it got
1887                  * deleted before we ever needed to insert it, so all
1888                  * we have to do is clean up the accounting.
1889                  */
1890                 BUG_ON(extent_op);
1891                 head = btrfs_delayed_node_to_head(node);
1892                 if (insert_reserved) {
1893                         int mark_free = 0;
1894                         struct extent_buffer *must_clean = NULL;
1895
1896                         ret = pin_down_bytes(trans, root, NULL,
1897                                              node->bytenr, node->num_bytes,
1898                                              head->is_data, 1, &must_clean);
1899                         if (ret > 0)
1900                                 mark_free = 1;
1901
1902                         if (must_clean) {
1903                                 clean_tree_block(NULL, root, must_clean);
1904                                 btrfs_tree_unlock(must_clean);
1905                                 free_extent_buffer(must_clean);
1906                         }
1907                         if (head->is_data) {
1908                                 ret = btrfs_del_csums(trans, root,
1909                                                       node->bytenr,
1910                                                       node->num_bytes);
1911                                 BUG_ON(ret);
1912                         }
1913                         if (mark_free) {
1914                                 ret = btrfs_free_reserved_extent(root,
1915                                                         node->bytenr,
1916                                                         node->num_bytes);
1917                                 BUG_ON(ret);
1918                         }
1919                 }
1920                 mutex_unlock(&head->mutex);
1921                 return 0;
1922         }
1923
1924         if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1925             node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1926                 ret = run_delayed_tree_ref(trans, root, node, extent_op,
1927                                            insert_reserved);
1928         else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1929                  node->type == BTRFS_SHARED_DATA_REF_KEY)
1930                 ret = run_delayed_data_ref(trans, root, node, extent_op,
1931                                            insert_reserved);
1932         else
1933                 BUG();
1934         return ret;
1935 }
1936
1937 static noinline struct btrfs_delayed_ref_node *
1938 select_delayed_ref(struct btrfs_delayed_ref_head *head)
1939 {
1940         struct rb_node *node;
1941         struct btrfs_delayed_ref_node *ref;
1942         int action = BTRFS_ADD_DELAYED_REF;
1943 again:
1944         /*
1945          * Select delayed refs of type BTRFS_ADD_DELAYED_REF first.
1946          * This prevents the ref count from going down to zero while
1947          * there are still pending delayed refs.
1948          */
1949         node = rb_prev(&head->node.rb_node);
1950         while (1) {
1951                 if (!node)
1952                         break;
1953                 ref = rb_entry(node, struct btrfs_delayed_ref_node,
1954                                 rb_node);
1955                 if (ref->bytenr != head->node.bytenr)
1956                         break;
1957                 if (ref->action == action)
1958                         return ref;
1959                 node = rb_prev(node);
1960         }
1961         if (action == BTRFS_ADD_DELAYED_REF) {
1962                 action = BTRFS_DROP_DELAYED_REF;
1963                 goto again;
1964         }
1965         return NULL;
1966 }
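
/*
 * Editor's note: a worked example of why ADD refs run before DROP refs.
 * Suppose one transaction queues DROP (ref_mod -1) and then ADD
 * (ref_mod +1) for the same extent whose on-disk count is 1.  Running
 * them in queue order would drop the count to 0 and free the extent
 * while a reference is still pending; running the ADD first takes the
 * count to 2, then back to 1, and the extent survives.
 */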
1967
1968 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
1969                                        struct btrfs_root *root,
1970                                        struct list_head *cluster)
1971 {
1972         struct btrfs_delayed_ref_root *delayed_refs;
1973         struct btrfs_delayed_ref_node *ref;
1974         struct btrfs_delayed_ref_head *locked_ref = NULL;
1975         struct btrfs_delayed_extent_op *extent_op;
1976         int ret;
1977         int count = 0;
1978         int must_insert_reserved = 0;
1979
1980         delayed_refs = &trans->transaction->delayed_refs;
1981         while (1) {
1982                 if (!locked_ref) {
1983                         /* pick a new head ref from the cluster list */
1984                         if (list_empty(cluster))
1985                                 break;
1986
1987                         locked_ref = list_entry(cluster->next,
1988                                      struct btrfs_delayed_ref_head, cluster);
1989
1990                         /* grab the lock that says we are going to process
1991                          * all the refs for this head */
1992                         ret = btrfs_delayed_ref_lock(trans, locked_ref);
1993
1994                         /*
1995                          * we may have dropped the spin lock to get the head
1996                          * mutex lock, and that might have given someone else
1997                          * time to free the head.  If that's true, it has been
1998                          * removed from our list and we can move on.
1999                          */
2000                         if (ret == -EAGAIN) {
2001                                 locked_ref = NULL;
2002                                 count++;
2003                                 continue;
2004                         }
2005                 }
2006
2007                 /*
2008                  * record the must insert reserved flag before we
2009                  * drop the spin lock.
2010                  */
2011                 must_insert_reserved = locked_ref->must_insert_reserved;
2012                 locked_ref->must_insert_reserved = 0;
2013
2014                 extent_op = locked_ref->extent_op;
2015                 locked_ref->extent_op = NULL;
2016
2017                 /*
2018                  * locked_ref is the head node, so we have to go one
2019                  * node back for any delayed ref updates
2020                  */
2021                 ref = select_delayed_ref(locked_ref);
2022                 if (!ref) {
2023                         /* All delayed refs have been processed.  Go ahead
2024                          * and send the head node to run_one_delayed_ref,
2025                          * so that any accounting fixes can happen.
2026                          */
2027                         ref = &locked_ref->node;
2028
2029                         if (extent_op && must_insert_reserved) {
2030                                 kfree(extent_op);
2031                                 extent_op = NULL;
2032                         }
2033
2034                         if (extent_op) {
2035                                 spin_unlock(&delayed_refs->lock);
2036
2037                                 ret = run_delayed_extent_op(trans, root,
2038                                                             ref, extent_op);
2039                                 BUG_ON(ret);
2040                                 kfree(extent_op);
2041
2042                                 cond_resched();
2043                                 spin_lock(&delayed_refs->lock);
2044                                 continue;
2045                         }
2046
2047                         list_del_init(&locked_ref->cluster);
2048                         locked_ref = NULL;
2049                 }
2050
2051                 ref->in_tree = 0;
2052                 rb_erase(&ref->rb_node, &delayed_refs->root);
2053                 delayed_refs->num_entries--;
2054
2055                 spin_unlock(&delayed_refs->lock);
2056
2057                 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2058                                           must_insert_reserved);
2059                 BUG_ON(ret);
2060
2061                 btrfs_put_delayed_ref(ref);
2062                 kfree(extent_op);
2063                 count++;
2064
2065                 cond_resched();
2066                 spin_lock(&delayed_refs->lock);
2067         }
2068         return count;
2069 }
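
/*
 * Editor's note on lock discipline: run_clustered_refs is entered and
 * left with delayed_refs->lock held.  The lock is dropped around
 * run_one_delayed_ref and run_delayed_extent_op, which touch the extent
 * tree and may block, and is retaken before the next iteration.
 */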
2070
2071 /*
2072  * this starts processing the delayed reference count updates and
2073  * extent insertions we have queued up so far.  count can be
2074  * 0, which means to process everything in the tree at the start
2075  * of the run (but not newly added entries), or it can be some target
2076  * number you'd like to process.
2077  */
2078 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2079                            struct btrfs_root *root, unsigned long count)
2080 {
2081         struct rb_node *node;
2082         struct btrfs_delayed_ref_root *delayed_refs;
2083         struct btrfs_delayed_ref_node *ref;
2084         struct list_head cluster;
2085         int ret;
2086         int run_all = count == (unsigned long)-1;
2087         int run_most = 0;
2088
2089         if (root == root->fs_info->extent_root)
2090                 root = root->fs_info->tree_root;
2091
2092         delayed_refs = &trans->transaction->delayed_refs;
2093         INIT_LIST_HEAD(&cluster);
2094 again:
2095         spin_lock(&delayed_refs->lock);
2096         if (count == 0) {
2097                 count = delayed_refs->num_entries * 2;
2098                 run_most = 1;
2099         }
2100         while (1) {
2101                 if (!(run_all || run_most) &&
2102                     delayed_refs->num_heads_ready < 64)
2103                         break;
2104
2105                 /*
2106                  * go find something we can process in the rbtree.  We start at
2107                  * the beginning of the tree, and then build a cluster
2108                  * of refs to process starting at the first one we are able to
2109                  * lock
2110                  */
2111                 ret = btrfs_find_ref_cluster(trans, &cluster,
2112                                              delayed_refs->run_delayed_start);
2113                 if (ret)
2114                         break;
2115
2116                 ret = run_clustered_refs(trans, root, &cluster);
2117                 BUG_ON(ret < 0);
2118
2119                 count -= min_t(unsigned long, ret, count);
2120
2121                 if (count == 0)
2122                         break;
2123         }
2124
2125         if (run_all) {
2126                 node = rb_first(&delayed_refs->root);
2127                 if (!node)
2128                         goto out;
2129                 count = (unsigned long)-1;
2130
2131                 while (node) {
2132                         ref = rb_entry(node, struct btrfs_delayed_ref_node,
2133                                        rb_node);
2134                         if (btrfs_delayed_ref_is_head(ref)) {
2135                                 struct btrfs_delayed_ref_head *head;
2136
2137                                 head = btrfs_delayed_node_to_head(ref);
2138                                 atomic_inc(&ref->refs);
2139
2140                                 spin_unlock(&delayed_refs->lock);
2141                                 mutex_lock(&head->mutex);
2142                                 mutex_unlock(&head->mutex);
2143
2144                                 btrfs_put_delayed_ref(ref);
2145                                 cond_resched();
2146                                 goto again;
2147                         }
2148                         node = rb_next(node);
2149                 }
2150                 spin_unlock(&delayed_refs->lock);
2151                 schedule_timeout(1);
2152                 goto again;
2153         }
2154 out:
2155         spin_unlock(&delayed_refs->lock);
2156         return 0;
2157 }
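
/*
 * Editor's note: a hypothetical throttling helper, not btrfs API.  The
 * commit path below (btrfs_write_dirty_block_groups) passes
 * (unsigned long)-1 to drain everything; other callers can bound the
 * work done per call:
 */
static int __maybe_unused example_throttle_delayed_refs(
                                struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        /* process at most ~64 queued ref updates, then return */
        return btrfs_run_delayed_refs(trans, root, 64);
}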
2158
2159 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2160                                 struct btrfs_root *root,
2161                                 u64 bytenr, u64 num_bytes, u64 flags,
2162                                 int is_data)
2163 {
2164         struct btrfs_delayed_extent_op *extent_op;
2165         int ret;
2166
2167         extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2168         if (!extent_op)
2169                 return -ENOMEM;
2170
2171         extent_op->flags_to_set = flags;
2172         extent_op->update_flags = 1;
2173         extent_op->update_key = 0;
2174         extent_op->is_data = is_data ? 1 : 0;
2175
2176         ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2177         if (ret)
2178                 kfree(extent_op);
2179         return ret;
2180 }
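
/*
 * Editor's note: a hypothetical user of btrfs_set_disk_extent_flags,
 * not part of btrfs.  It queues a delayed op that ORs
 * BTRFS_EXTENT_FLAG_FULL_BACKREF into a tree block's extent item flags;
 * is_data is 0 because the target is a tree block.
 */
static int __maybe_unused example_mark_full_backref(
                                struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct extent_buffer *buf)
{
        return btrfs_set_disk_extent_flags(trans, root, buf->start,
                                           buf->len,
                                           BTRFS_EXTENT_FLAG_FULL_BACKREF,
                                           0);
}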
2181
2182 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2183                                       struct btrfs_root *root,
2184                                       struct btrfs_path *path,
2185                                       u64 objectid, u64 offset, u64 bytenr)
2186 {
2187         struct btrfs_delayed_ref_head *head;
2188         struct btrfs_delayed_ref_node *ref;
2189         struct btrfs_delayed_data_ref *data_ref;
2190         struct btrfs_delayed_ref_root *delayed_refs;
2191         struct rb_node *node;
2192         int ret = 0;
2193
2194         ret = -ENOENT;
2195         delayed_refs = &trans->transaction->delayed_refs;
2196         spin_lock(&delayed_refs->lock);
2197         head = btrfs_find_delayed_ref_head(trans, bytenr);
2198         if (!head)
2199                 goto out;
2200
2201         if (!mutex_trylock(&head->mutex)) {
2202                 atomic_inc(&head->node.refs);
2203                 spin_unlock(&delayed_refs->lock);
2204
2205                 btrfs_release_path(root->fs_info->extent_root, path);
2206
2207                 mutex_lock(&head->mutex);
2208                 mutex_unlock(&head->mutex);
2209                 btrfs_put_delayed_ref(&head->node);
2210                 return -EAGAIN;
2211         }
2212
2213         node = rb_prev(&head->node.rb_node);
2214         if (!node)
2215                 goto out_unlock;
2216
2217         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2218
2219         if (ref->bytenr != bytenr)
2220                 goto out_unlock;
2221
2222         ret = 1;
2223         if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2224                 goto out_unlock;
2225
2226         data_ref = btrfs_delayed_node_to_data_ref(ref);
2227
2228         node = rb_prev(node);
2229         if (node) {
2230                 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2231                 if (ref->bytenr == bytenr)
2232                         goto out_unlock;
2233         }
2234
2235         if (data_ref->root != root->root_key.objectid ||
2236             data_ref->objectid != objectid || data_ref->offset != offset)
2237                 goto out_unlock;
2238
2239         ret = 0;
2240 out_unlock:
2241         mutex_unlock(&head->mutex);
2242 out:
2243         spin_unlock(&delayed_refs->lock);
2244         return ret;
2245 }
2246
2247 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2248                                         struct btrfs_root *root,
2249                                         struct btrfs_path *path,
2250                                         u64 objectid, u64 offset, u64 bytenr)
2251 {
2252         struct btrfs_root *extent_root = root->fs_info->extent_root;
2253         struct extent_buffer *leaf;
2254         struct btrfs_extent_data_ref *ref;
2255         struct btrfs_extent_inline_ref *iref;
2256         struct btrfs_extent_item *ei;
2257         struct btrfs_key key;
2258         u32 item_size;
2259         int ret;
2260
2261         key.objectid = bytenr;
2262         key.offset = (u64)-1;
2263         key.type = BTRFS_EXTENT_ITEM_KEY;
2264
2265         ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2266         if (ret < 0)
2267                 goto out;
2268         BUG_ON(ret == 0);
2269
2270         ret = -ENOENT;
2271         if (path->slots[0] == 0)
2272                 goto out;
2273
2274         path->slots[0]--;
2275         leaf = path->nodes[0];
2276         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2277
2278         if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2279                 goto out;
2280
2281         ret = 1;
2282         item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2283 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2284         if (item_size < sizeof(*ei)) {
2285                 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2286                 goto out;
2287         }
2288 #endif
2289         ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2290
2291         if (item_size != sizeof(*ei) +
2292             btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2293                 goto out;
2294
2295         if (btrfs_extent_generation(leaf, ei) <=
2296             btrfs_root_last_snapshot(&root->root_item))
2297                 goto out;
2298
2299         iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2300         if (btrfs_extent_inline_ref_type(leaf, iref) !=
2301             BTRFS_EXTENT_DATA_REF_KEY)
2302                 goto out;
2303
2304         ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2305         if (btrfs_extent_refs(leaf, ei) !=
2306             btrfs_extent_data_ref_count(leaf, ref) ||
2307             btrfs_extent_data_ref_root(leaf, ref) !=
2308             root->root_key.objectid ||
2309             btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2310             btrfs_extent_data_ref_offset(leaf, ref) != offset)
2311                 goto out;
2312
2313         ret = 0;
2314 out:
2315         return ret;
2316 }
2317
2318 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2319                           struct btrfs_root *root,
2320                           u64 objectid, u64 offset, u64 bytenr)
2321 {
2322         struct btrfs_path *path;
2323         int ret;
2324         int ret2;
2325
2326         path = btrfs_alloc_path();
2327         if (!path)
2328                 return -ENOMEM;
2329
2330         do {
2331                 ret = check_committed_ref(trans, root, path, objectid,
2332                                           offset, bytenr);
2333                 if (ret && ret != -ENOENT)
2334                         goto out;
2335
2336                 ret2 = check_delayed_ref(trans, root, path, objectid,
2337                                          offset, bytenr);
2338         } while (ret2 == -EAGAIN);
2339
2340         if (ret2 && ret2 != -ENOENT) {
2341                 ret = ret2;
2342                 goto out;
2343         }
2344
2345         if (ret != -ENOENT || ret2 != -ENOENT)
2346                 ret = 0;
2347 out:
2348         btrfs_free_path(path);
2349         return ret;
2350 }
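
/*
 * Editor's note: btrfs_cross_ref_exist returns 0 only when the extent is
 * provably referenced by nothing but (root, objectid, offset); any
 * nonzero value (1, -ENOENT, or an error) must be treated as "possibly
 * shared".  A hypothetical nocow-style check, not btrfs API:
 */
static int __maybe_unused example_extent_is_private(
                                struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 ino, u64 file_offset, u64 disk_bytenr)
{
        /* 1 == safe to overwrite in place, 0 == must cow */
        return btrfs_cross_ref_exist(trans, root, ino, file_offset,
                                     disk_bytenr) == 0;
}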
2351
2352 #if 0
2353 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2354                     struct extent_buffer *buf, u32 nr_extents)
2355 {
2356         struct btrfs_key key;
2357         struct btrfs_file_extent_item *fi;
2358         u64 root_gen;
2359         u32 nritems;
2360         int i;
2361         int level;
2362         int ret = 0;
2363         int shared = 0;
2364
2365         if (!root->ref_cows)
2366                 return 0;
2367
2368         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2369                 shared = 0;
2370                 root_gen = root->root_key.offset;
2371         } else {
2372                 shared = 1;
2373                 root_gen = trans->transid - 1;
2374         }
2375
2376         level = btrfs_header_level(buf);
2377         nritems = btrfs_header_nritems(buf);
2378
2379         if (level == 0) {
2380                 struct btrfs_leaf_ref *ref;
2381                 struct btrfs_extent_info *info;
2382
2383                 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2384                 if (!ref) {
2385                         ret = -ENOMEM;
2386                         goto out;
2387                 }
2388
2389                 ref->root_gen = root_gen;
2390                 ref->bytenr = buf->start;
2391                 ref->owner = btrfs_header_owner(buf);
2392                 ref->generation = btrfs_header_generation(buf);
2393                 ref->nritems = nr_extents;
2394                 info = ref->extents;
2395
2396                 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2397                         u64 disk_bytenr;
2398                         btrfs_item_key_to_cpu(buf, &key, i);
2399                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2400                                 continue;
2401                         fi = btrfs_item_ptr(buf, i,
2402                                             struct btrfs_file_extent_item);
2403                         if (btrfs_file_extent_type(buf, fi) ==
2404                             BTRFS_FILE_EXTENT_INLINE)
2405                                 continue;
2406                         disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2407                         if (disk_bytenr == 0)
2408                                 continue;
2409
2410                         info->bytenr = disk_bytenr;
2411                         info->num_bytes =
2412                                 btrfs_file_extent_disk_num_bytes(buf, fi);
2413                         info->objectid = key.objectid;
2414                         info->offset = key.offset;
2415                         info++;
2416                 }
2417
2418                 ret = btrfs_add_leaf_ref(root, ref, shared);
2419                 if (ret == -EEXIST && shared) {
2420                         struct btrfs_leaf_ref *old;
2421                         old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2422                         BUG_ON(!old);
2423                         btrfs_remove_leaf_ref(root, old);
2424                         btrfs_free_leaf_ref(root, old);
2425                         ret = btrfs_add_leaf_ref(root, ref, shared);
2426                 }
2427                 WARN_ON(ret);
2428                 btrfs_free_leaf_ref(root, ref);
2429         }
2430 out:
2431         return ret;
2432 }
2433
2434 /* when a block goes through cow, we update the reference counts of
2435  * everything that block points to.  The internal pointers of the block
2436  * can be in just about any order, and it is likely to have clusters of
2437  * things that are close together and clusters of things that are not.
2438  *
2439  * To help reduce the seeks that come with updating all of these reference
2440  * counts, sort them by byte number before actual updates are done.
2441  *
2442  * struct refsort is used to match byte number to slot in the btree block.
2443  * we sort based on the byte number and then use the slot to actually
2444  * find the item.
2445  *
2446  * struct refsort is smaller than struct btrfs_item and smaller than
2447  * struct btrfs_key_ptr.  Since we're currently limited to the page size
2448  * for a btree block, there's no way for a kmalloc of refsorts for a
2449  * single node to be bigger than a page.
2450  */
2451 struct refsort {
2452         u64 bytenr;
2453         u32 slot;
2454 };
2455
2456 /*
2457  * for passing into sort()
2458  */
2459 static int refsort_cmp(const void *a_void, const void *b_void)
2460 {
2461         const struct refsort *a = a_void;
2462         const struct refsort *b = b_void;
2463
2464         if (a->bytenr < b->bytenr)
2465                 return -1;
2466         if (a->bytenr > b->bytenr)
2467                 return 1;
2468         return 0;
2469 }
2470 #endif
2471
2472 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2473                            struct btrfs_root *root,
2474                            struct extent_buffer *buf,
2475                            int full_backref, int inc)
2476 {
2477         u64 bytenr;
2478         u64 num_bytes;
2479         u64 parent;
2480         u64 ref_root;
2481         u32 nritems;
2482         struct btrfs_key key;
2483         struct btrfs_file_extent_item *fi;
2484         int i;
2485         int level;
2486         int ret = 0;
2487         int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2488                             u64, u64, u64, u64, u64, u64);
2489
2490         ref_root = btrfs_header_owner(buf);
2491         nritems = btrfs_header_nritems(buf);
2492         level = btrfs_header_level(buf);
2493
2494         if (!root->ref_cows && level == 0)
2495                 return 0;
2496
2497         if (inc)
2498                 process_func = btrfs_inc_extent_ref;
2499         else
2500                 process_func = btrfs_free_extent;
2501
2502         if (full_backref)
2503                 parent = buf->start;
2504         else
2505                 parent = 0;
2506
2507         for (i = 0; i < nritems; i++) {
2508                 if (level == 0) {
2509                         btrfs_item_key_to_cpu(buf, &key, i);
2510                         if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2511                                 continue;
2512                         fi = btrfs_item_ptr(buf, i,
2513                                             struct btrfs_file_extent_item);
2514                         if (btrfs_file_extent_type(buf, fi) ==
2515                             BTRFS_FILE_EXTENT_INLINE)
2516                                 continue;
2517                         bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2518                         if (bytenr == 0)
2519                                 continue;
2520
2521                         num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2522                         key.offset -= btrfs_file_extent_offset(buf, fi);
2523                         ret = process_func(trans, root, bytenr, num_bytes,
2524                                            parent, ref_root, key.objectid,
2525                                            key.offset);
2526                         if (ret)
2527                                 goto fail;
2528                 } else {
2529                         bytenr = btrfs_node_blockptr(buf, i);
2530                         num_bytes = btrfs_level_size(root, level - 1);
2531                         ret = process_func(trans, root, bytenr, num_bytes,
2532                                            parent, ref_root, level - 1, 0);
2533                         if (ret)
2534                                 goto fail;
2535                 }
2536         }
2537         return 0;
2538 fail:
2539         BUG();
2540         return ret;
2541 }
2542
2543 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2544                   struct extent_buffer *buf, int full_backref)
2545 {
2546         return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2547 }
2548
2549 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2550                   struct extent_buffer *buf, int full_backref)
2551 {
2552         return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2553 }
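
/*
 * Editor's note: a grossly simplified sketch of how these wrappers are
 * used when a tree block is copied on write; the real COW path in
 * ctree.c is considerably more careful about shared blocks and the
 * FULL_BACKREF flag.  "example_cow_fixup_refs" is hypothetical.
 */
static int __maybe_unused example_cow_fixup_refs(
                                struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct extent_buffer *new_buf,
                                struct extent_buffer *old_buf)
{
        int ret;

        /* everything the new copy points at gains a reference ... */
        ret = btrfs_inc_ref(trans, root, new_buf, 0);
        if (ret)
                return ret;
        /* ... and everything the old block pointed at loses one */
        return btrfs_dec_ref(trans, root, old_buf, 0);
}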
2554
2555 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2556                                  struct btrfs_root *root,
2557                                  struct btrfs_path *path,
2558                                  struct btrfs_block_group_cache *cache)
2559 {
2560         int ret;
2561         struct btrfs_root *extent_root = root->fs_info->extent_root;
2562         unsigned long bi;
2563         struct extent_buffer *leaf;
2564
2565         ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2566         if (ret < 0)
2567                 goto fail;
2568         BUG_ON(ret);
2569
2570         leaf = path->nodes[0];
2571         bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2572         write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2573         btrfs_mark_buffer_dirty(leaf);
2574         btrfs_release_path(extent_root, path);
2575 fail:
2576         if (ret)
2577                 return ret;
2578         return 0;
2579
2580 }
2581
2582 static struct btrfs_block_group_cache *
2583 next_block_group(struct btrfs_root *root,
2584                  struct btrfs_block_group_cache *cache)
2585 {
2586         struct rb_node *node;
2587         spin_lock(&root->fs_info->block_group_cache_lock);
2588         node = rb_next(&cache->cache_node);
2589         btrfs_put_block_group(cache);
2590         if (node) {
2591                 cache = rb_entry(node, struct btrfs_block_group_cache,
2592                                  cache_node);
2593                 btrfs_get_block_group(cache);
2594         } else
2595                 cache = NULL;
2596         spin_unlock(&root->fs_info->block_group_cache_lock);
2597         return cache;
2598 }
2599
2600 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2601                                    struct btrfs_root *root)
2602 {
2603         struct btrfs_block_group_cache *cache;
2604         int err = 0;
2605         struct btrfs_path *path;
2606         u64 last = 0;
2607
2608         path = btrfs_alloc_path();
2609         if (!path)
2610                 return -ENOMEM;
2611
2612         while (1) {
2613                 if (last == 0) {
2614                         err = btrfs_run_delayed_refs(trans, root,
2615                                                      (unsigned long)-1);
2616                         BUG_ON(err);
2617                 }
2618
2619                 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2620                 while (cache) {
2621                         if (cache->dirty)
2622                                 break;
2623                         cache = next_block_group(root, cache);
2624                 }
2625                 if (!cache) {
2626                         if (last == 0)
2627                                 break;
2628                         last = 0;
2629                         continue;
2630                 }
2631
2632                 cache->dirty = 0;
2633                 last = cache->key.objectid + cache->key.offset;
2634
2635                 err = write_one_cache_group(trans, root, path, cache);
2636                 BUG_ON(err);
2637                 btrfs_put_block_group(cache);
2638         }
2639
2640         btrfs_free_path(path);
2641         return 0;
2642 }
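
/*
 * Editor's note: the "last == 0" restart above is deliberate.  Writing
 * out a dirty block group item modifies the extent tree and can queue
 * delayed refs that dirty further block groups, so the loop keeps
 * rescanning from the start (running all delayed refs first each time)
 * until a full pass finds nothing dirty.
 */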
2643
2644 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2645 {
2646         struct btrfs_block_group_cache *block_group;
2647         int readonly = 0;
2648
2649         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2650         if (!block_group || block_group->ro)
2651                 readonly = 1;
2652         if (block_group)
2653                 btrfs_put_block_group(block_group);
2654         return readonly;
2655 }
2656
2657 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2658                              u64 total_bytes, u64 bytes_used,
2659                              struct btrfs_space_info **space_info)
2660 {
2661         struct btrfs_space_info *found;
2662
2663         found = __find_space_info(info, flags);
2664         if (found) {
2665                 spin_lock(&found->lock);
2666                 found->total_bytes += total_bytes;
2667                 found->bytes_used += bytes_used;
2668                 found->full = 0;
2669                 spin_unlock(&found->lock);
2670                 *space_info = found;
2671                 return 0;
2672         }
2673         found = kzalloc(sizeof(*found), GFP_NOFS);
2674         if (!found)
2675                 return -ENOMEM;
2676
2677         INIT_LIST_HEAD(&found->block_groups);
2678         init_rwsem(&found->groups_sem);
2679         init_waitqueue_head(&found->flush_wait);
2680         init_waitqueue_head(&found->allocate_wait);
2681         spin_lock_init(&found->lock);
2682         found->flags = flags;
2683         found->total_bytes = total_bytes;
2684         found->bytes_used = bytes_used;
2685         found->bytes_pinned = 0;
2686         found->bytes_reserved = 0;
2687         found->bytes_readonly = 0;
2688         found->bytes_delalloc = 0;
2689         found->full = 0;
2690         found->force_alloc = 0;
2691         *space_info = found;
2692         list_add_rcu(&found->list, &info->space_info);
2693         atomic_set(&found->caching_threads, 0);
2694         return 0;
2695 }
2696
2697 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2698 {
2699         u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
2700                                    BTRFS_BLOCK_GROUP_RAID1 |
2701                                    BTRFS_BLOCK_GROUP_RAID10 |
2702                                    BTRFS_BLOCK_GROUP_DUP);
2703         if (extra_flags) {
2704                 if (flags & BTRFS_BLOCK_GROUP_DATA)
2705                         fs_info->avail_data_alloc_bits |= extra_flags;
2706                 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2707                         fs_info->avail_metadata_alloc_bits |= extra_flags;
2708                 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2709                         fs_info->avail_system_alloc_bits |= extra_flags;
2710         }
2711 }
2712
2713 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2714 {
2715         spin_lock(&cache->space_info->lock);
2716         spin_lock(&cache->lock);
2717         if (!cache->ro) {
2718                 cache->space_info->bytes_readonly += cache->key.offset -
2719                                         btrfs_block_group_used(&cache->item);
2720                 cache->ro = 1;
2721         }
2722         spin_unlock(&cache->lock);
2723         spin_unlock(&cache->space_info->lock);
2724 }
2725
2726 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
2727 {
2728         u64 num_devices = root->fs_info->fs_devices->rw_devices;
2729
2730         if (num_devices == 1)
2731                 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2732         if (num_devices < 4)
2733                 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2734
2735         if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2736             (flags & (BTRFS_BLOCK_GROUP_RAID1 |
2737                       BTRFS_BLOCK_GROUP_RAID10))) {
2738                 flags &= ~BTRFS_BLOCK_GROUP_DUP;
2739         }
2740
2741         if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
2742             (flags & BTRFS_BLOCK_GROUP_RAID10)) {
2743                 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
2744         }
2745
2746         if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2747             ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2748              (flags & BTRFS_BLOCK_GROUP_RAID10) |
2749              (flags & BTRFS_BLOCK_GROUP_DUP)))
2750                 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2751         return flags;
2752 }
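
/*
 * A worked example of the reduction above (illustrative, not part of the
 * original source): with two rw devices and flags == DATA | RAID1 | DUP,
 * RAID10 is stripped (fewer than four devices) and DUP yields to RAID1,
 * leaving DATA | RAID1.  On a single device RAID1 is stripped first, so
 * the same input reduces to DATA | DUP instead.
 */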
2753
2754 static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2755 {
2756         struct btrfs_fs_info *info = root->fs_info;
2757         u64 alloc_profile;
2758
2759         if (data) {
2760                 alloc_profile = info->avail_data_alloc_bits &
2761                         info->data_alloc_profile;
2762                 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2763         } else if (root == root->fs_info->chunk_root) {
2764                 alloc_profile = info->avail_system_alloc_bits &
2765                         info->system_alloc_profile;
2766                 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2767         } else {
2768                 alloc_profile = info->avail_metadata_alloc_bits &
2769                         info->metadata_alloc_profile;
2770                 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2771         }
2772
2773         return btrfs_reduce_alloc_profile(root, data);
2774 }
2775
2776 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2777 {
2778         u64 alloc_target;
2779
2780         alloc_target = btrfs_get_alloc_profile(root, 1);
2781         BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2782                                                        alloc_target);
2783 }
2784
2785 static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
2786 {
2787         u64 num_bytes;
2788         int level;
2789
2790         level = BTRFS_MAX_LEVEL - 2;
2791         /*
2792          * NOTE: these calculations are absolutely the worst possible case.
2793          * This assumes that _every_ item we insert will require a new leaf, and
2794          * that the tree has grown to its maximum level size.
2795          */
2796
2797         /*
2798          * for every item we insert we could insert both an extent item and an
2799          * extent ref item.  Then for every item we insert, we will need to cow
2800          * the original leaf, plus the leaves to the left and right of it.
2801          *
2802          * Unless we are talking about the extent root, then we just want the
2803          * number of items * 2, since we just need the extent item plus its ref.
2804          */
2805         if (root == root->fs_info->extent_root)
2806                 num_bytes = num_items * 2;
2807         else
2808                 num_bytes = (num_items + (2 * num_items)) * 3;
2809
2810         /*
2811          * num_bytes is total number of leaves we could need times the leaf
2812          * size, and then for every leaf we could end up cow'ing 2 nodes per
2813          * level, down to the leaf level.
2814          */
2815         num_bytes = (num_bytes * root->leafsize) +
2816                 (num_bytes * (level * 2)) * root->nodesize;
2817
2818         return num_bytes;
2819 }
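
/*
 * Worked example (illustrative, assuming BTRFS_MAX_LEVEL == 8 and 4K
 * leaves/nodes): for one item in a non-extent root the math above gives
 * num_bytes = (1 + 2 * 1) * 3 = 9 leaves, and then
 * 9 * 4096 + 9 * (6 * 2) * 4096 = 479232 bytes (~468K).  The same item
 * in the extent root costs only 2 * 4096 + 2 * 12 * 4096 = 106496
 * bytes (~104K).
 */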
2820
2821 /*
2822  * Unreserve metadata space for delalloc.  If we have fewer reserved credits
2823  * than we have extents, this function does nothing.
2824  */
2825 int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2826                                           struct inode *inode, int num_items)
2827 {
2828         struct btrfs_fs_info *info = root->fs_info;
2829         struct btrfs_space_info *meta_sinfo;
2830         u64 num_bytes;
2831         u64 alloc_target;
2832         bool bug = false;
2833
2834         /* get the space info for where the metadata will live */
2835         alloc_target = btrfs_get_alloc_profile(root, 0);
2836         meta_sinfo = __find_space_info(info, alloc_target);
2837
2838         num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2839                                            num_items);
2840
2841         spin_lock(&meta_sinfo->lock);
2842         spin_lock(&BTRFS_I(inode)->accounting_lock);
2843         if (BTRFS_I(inode)->reserved_extents <=
2844             BTRFS_I(inode)->outstanding_extents) {
2845                 spin_unlock(&BTRFS_I(inode)->accounting_lock);
2846                 spin_unlock(&meta_sinfo->lock);
2847                 return 0;
2848         }
2849         spin_unlock(&BTRFS_I(inode)->accounting_lock);
2850
2851         BTRFS_I(inode)->reserved_extents -= num_items;
2852         BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
2853
2854         if (meta_sinfo->bytes_delalloc < num_bytes) {
2855                 bug = true;
2856                 meta_sinfo->bytes_delalloc = 0;
2857         } else {
2858                 meta_sinfo->bytes_delalloc -= num_bytes;
2859         }
2860         spin_unlock(&meta_sinfo->lock);
2861
2862         BUG_ON(bug);
2863
2864         return 0;
2865 }
2866
2867 static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
2868 {
2869         u64 thresh;
2870
2871         thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2872                 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2873                 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2874                 meta_sinfo->bytes_may_use;
2875
2876         thresh = meta_sinfo->total_bytes - thresh;
2877         thresh *= 80;
2878         do_div(thresh, 100);
2879         if (thresh <= meta_sinfo->bytes_delalloc)
2880                 meta_sinfo->force_delalloc = 1;
2881         else
2882                 meta_sinfo->force_delalloc = 0;
2883 }
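
/*
 * Threshold example (illustrative numbers): with 10GB of metadata space
 * and 2GB consumed by everything other than delalloc, the headroom is
 * 8GB and the 80% mark is 6.4GB; once delalloc reservations reach that,
 * force_delalloc makes reservers flush their dirty pages before piling
 * on more.
 */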
2884
2885 struct async_flush {
2886         struct btrfs_root *root;
2887         struct btrfs_space_info *info;
2888         struct btrfs_work work;
2889 };
2890
2891 static noinline void flush_delalloc_async(struct btrfs_work *work)
2892 {
2893         struct async_flush *async;
2894         struct btrfs_root *root;
2895         struct btrfs_space_info *info;
2896
2897         async = container_of(work, struct async_flush, work);
2898         root = async->root;
2899         info = async->info;
2900
2901         btrfs_start_delalloc_inodes(root, 0);
2902         wake_up(&info->flush_wait);
2903         btrfs_wait_ordered_extents(root, 0, 0);
2904
2905         spin_lock(&info->lock);
2906         info->flushing = 0;
2907         spin_unlock(&info->lock);
2908         wake_up(&info->flush_wait);
2909
2910         kfree(async);
2911 }
2912
2913 static void wait_on_flush(struct btrfs_space_info *info)
2914 {
2915         DEFINE_WAIT(wait);
2916         u64 used;
2917
2918         while (1) {
2919                 prepare_to_wait(&info->flush_wait, &wait,
2920                                 TASK_UNINTERRUPTIBLE);
2921                 spin_lock(&info->lock);
2922                 if (!info->flushing) {
2923                         spin_unlock(&info->lock);
2924                         break;
2925                 }
2926
2927                 used = info->bytes_used + info->bytes_reserved +
2928                         info->bytes_pinned + info->bytes_readonly +
2929                         info->bytes_super + info->bytes_root +
2930                         info->bytes_may_use + info->bytes_delalloc;
2931                 if (used < info->total_bytes) {
2932                         spin_unlock(&info->lock);
2933                         break;
2934                 }
2935                 spin_unlock(&info->lock);
2936                 schedule();
2937         }
2938         finish_wait(&info->flush_wait, &wait);
2939 }
2940
2941 static void flush_delalloc(struct btrfs_root *root,
2942                                  struct btrfs_space_info *info)
2943 {
2944         struct async_flush *async;
2945         bool wait = false;
2946
2947         spin_lock(&info->lock);
2948
2949         if (!info->flushing)
2950                 info->flushing = 1;
2951         else
2952                 wait = true;
2953
2954         spin_unlock(&info->lock);
2955
2956         if (wait) {
2957                 wait_on_flush(info);
2958                 return;
2959         }
2960
2961         async = kzalloc(sizeof(*async), GFP_NOFS);
2962         if (!async)
2963                 goto flush;
2964
2965         async->root = root;
2966         async->info = info;
2967         async->work.func = flush_delalloc_async;
2968
2969         btrfs_queue_worker(&root->fs_info->enospc_workers,
2970                            &async->work);
2971         wait_on_flush(info);
2972         return;
2973
2974 flush:
2975         btrfs_start_delalloc_inodes(root, 0);
2976         btrfs_wait_ordered_extents(root, 0, 0);
2977
2978         spin_lock(&info->lock);
2979         info->flushing = 0;
2980         spin_unlock(&info->lock);
2981         wake_up(&info->flush_wait);
2982 }
2983
2984 static int maybe_allocate_chunk(struct btrfs_root *root,
2985                                  struct btrfs_space_info *info)
2986 {
2987         struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
2988         struct btrfs_trans_handle *trans;
2989         bool wait = false;
2990         int ret = 0;
2991         u64 min_metadata;
2992         u64 free_space;
2993
2994         free_space = btrfs_super_total_bytes(disk_super);
2995         /*
2996          * we allow the metadata to grow to a max of either 10GB or 5% of the
2997          * space in the volume.
2998          */
2999         min_metadata = min((u64)10 * 1024 * 1024 * 1024,
3000                              div64_u64(free_space * 5, 100));
3001         if (info->total_bytes >= min_metadata) {
3002                 spin_unlock(&info->lock);
3003                 return 0;
3004         }
3005
3006         if (info->full) {
3007                 spin_unlock(&info->lock);
3008                 return 0;
3009         }
3010
3011         if (!info->allocating_chunk) {
3012                 info->force_alloc = 1;
3013                 info->allocating_chunk = 1;
3014         } else {
3015                 wait = true;
3016         }
3017
3018         spin_unlock(&info->lock);
3019
3020         if (wait) {
3021                 wait_event(info->allocate_wait,
3022                            !info->allocating_chunk);
3023                 return 1;
3024         }
3025
3026         trans = btrfs_start_transaction(root, 1);
3027         if (!trans) {
3028                 ret = -ENOMEM;
3029                 goto out;
3030         }
3031
3032         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3033                              4096 + 2 * 1024 * 1024,
3034                              info->flags, 0);
3035         btrfs_end_transaction(trans, root);
3038 out:
3039         spin_lock(&info->lock);
3040         info->allocating_chunk = 0;
3041         spin_unlock(&info->lock);
3042         wake_up(&info->allocate_wait);
3043
3044         if (ret)
3045                 return 0;
3046         return 1;
3047 }
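
/*
 * Sizing example for the cap above (illustrative): on a 100GB volume
 * min_metadata is min(10GB, 5GB) = 5GB, so the 5% rule governs; on a
 * 1TB volume the flat 10GB ceiling wins instead.
 */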
3048
3049 /*
3050  * Reserve metadata space for delalloc.
3051  */
3052 int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
3053                                         struct inode *inode, int num_items)
3054 {
3055         struct btrfs_fs_info *info = root->fs_info;
3056         struct btrfs_space_info *meta_sinfo;
3057         u64 num_bytes;
3058         u64 used;
3059         u64 alloc_target;
3060         int flushed = 0;
3061         int force_delalloc;
3062
3063         /* get the space info for where the metadata will live */
3064         alloc_target = btrfs_get_alloc_profile(root, 0);
3065         meta_sinfo = __find_space_info(info, alloc_target);
3066
3067         num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
3068                                            num_items);
3069 again:
3070         spin_lock(&meta_sinfo->lock);
3071
3072         force_delalloc = meta_sinfo->force_delalloc;
3073
3074         if (unlikely(!meta_sinfo->bytes_root))
3075                 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3076
3077         if (!flushed)
3078                 meta_sinfo->bytes_delalloc += num_bytes;
3079
3080         used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3081                 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3082                 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3083                 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3084
3085         if (used > meta_sinfo->total_bytes) {
3086                 flushed++;
3087
3088                 if (flushed == 1) {
3089                         if (maybe_allocate_chunk(root, meta_sinfo))
3090                                 goto again;
3091                         flushed++;
3092                 } else {
3093                         spin_unlock(&meta_sinfo->lock);
3094                 }
3095
3096                 if (flushed == 2) {
3097                         filemap_flush(inode->i_mapping);
3098                         goto again;
3099                 } else if (flushed == 3) {
3100                         flush_delalloc(root, meta_sinfo);
3101                         goto again;
3102                 }
3103                 spin_lock(&meta_sinfo->lock);
3104                 meta_sinfo->bytes_delalloc -= num_bytes;
3105                 spin_unlock(&meta_sinfo->lock);
3106                 printk(KERN_ERR "enospc, has %d, reserved %d\n",
3107                        BTRFS_I(inode)->outstanding_extents,
3108                        BTRFS_I(inode)->reserved_extents);
3109                 dump_space_info(meta_sinfo, 0, 0);
3110                 return -ENOSPC;
3111         }
3112
3113         BTRFS_I(inode)->reserved_extents += num_items;
3114         check_force_delalloc(meta_sinfo);
3115         spin_unlock(&meta_sinfo->lock);
3116
3117         if (!flushed && force_delalloc)
3118                 filemap_flush(inode->i_mapping);
3119
3120         return 0;
3121 }
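
/*
 * Recovery ladder used above, summarized for the reader (no new
 * behavior): when the reservation doesn't fit we first try to allocate
 * a fresh metadata chunk, then flush just this inode's dirty pages,
 * then flush and wait on all delalloc; only after all of that fails is
 * -ENOSPC returned.
 */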
3122
3123 /*
3124  * unreserve num_items number of items worth of metadata space.  This needs to
3125  * be paired with btrfs_reserve_metadata_space.
3126  *
3127  * NOTE: if you have the option, run this _AFTER_ you do a
3128  * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
3129  * operations which will result in more metadata being used, so we want to make
3130  * sure we can do that without issue.
3131  */
3132 int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
3133 {
3134         struct btrfs_fs_info *info = root->fs_info;
3135         struct btrfs_space_info *meta_sinfo;
3136         u64 num_bytes;
3137         u64 alloc_target;
3138         bool bug = false;
3139
3140         /* get the space info for where the metadata will live */
3141         alloc_target = btrfs_get_alloc_profile(root, 0);
3142         meta_sinfo = __find_space_info(info, alloc_target);
3143
3144         num_bytes = calculate_bytes_needed(root, num_items);
3145
3146         spin_lock(&meta_sinfo->lock);
3147         if (meta_sinfo->bytes_may_use < num_bytes) {
3148                 bug = true;
3149                 meta_sinfo->bytes_may_use = 0;
3150         } else {
3151                 meta_sinfo->bytes_may_use -= num_bytes;
3152         }
3153         spin_unlock(&meta_sinfo->lock);
3154
3155         BUG_ON(bug);
3156
3157         return 0;
3158 }
3159
3160 /*
3161  * Reserve some metadata space for use.  We'll calculate the worst-case number
3162  * of bytes that would be needed to modify num_items number of items.  If we
3163  * have space, fantastic; if not, you get -ENOSPC.  Please call
3164  * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
3165  * items you reserved, since whatever metadata you needed should have already
3166  * been allocated.
3167  *
3168  * This will commit the transaction to make more space if we don't have enough
3169  * metadata space.  The only time we don't do this is if we're reserving space
3170  * inside of a transaction, in which case we will just return -ENOSPC and it is
3171  * the caller's responsibility to handle it properly.
3172  */
3173 int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
3174 {
3175         struct btrfs_fs_info *info = root->fs_info;
3176         struct btrfs_space_info *meta_sinfo;
3177         u64 num_bytes;
3178         u64 used;
3179         u64 alloc_target;
3180         int retries = 0;
3181
3182         /* get the space info for where the metadata will live */
3183         alloc_target = btrfs_get_alloc_profile(root, 0);
3184         meta_sinfo = __find_space_info(info, alloc_target);
3185
3186         num_bytes = calculate_bytes_needed(root, num_items);
3187 again:
3188         spin_lock(&meta_sinfo->lock);
3189
3190         if (unlikely(!meta_sinfo->bytes_root))
3191                 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3192
3193         if (!retries)
3194                 meta_sinfo->bytes_may_use += num_bytes;
3195
3196         used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3197                 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3198                 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3199                 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3200
3201         if (used > meta_sinfo->total_bytes) {
3202                 retries++;
3203                 if (retries == 1) {
3204                         if (maybe_allocate_chunk(root, meta_sinfo))
3205                                 goto again;
3206                         retries++;
3207                 } else {
3208                         spin_unlock(&meta_sinfo->lock);
3209                 }
3210
3211                 if (retries == 2) {
3212                         flush_delalloc(root, meta_sinfo);
3213                         goto again;
3214                 }
3215                 spin_lock(&meta_sinfo->lock);
3216                 meta_sinfo->bytes_may_use -= num_bytes;
3217                 spin_unlock(&meta_sinfo->lock);
3218
3219                 dump_space_info(meta_sinfo, 0, 0);
3220                 return -ENOSPC;
3221         }
3222
3223         check_force_delalloc(meta_sinfo);
3224         spin_unlock(&meta_sinfo->lock);
3225
3226         return 0;
3227 }
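
/*
 * Minimal usage sketch (hypothetical caller, not from the original
 * source) honoring the pairing and ordering rules documented above:
 *
 *	ret = btrfs_reserve_metadata_space(root, 1);
 *	if (ret)
 *		return ret;
 *	trans = btrfs_start_transaction(root, 1);
 *	... insert or modify one item ...
 *	btrfs_end_transaction(trans, root);
 *	btrfs_unreserve_metadata_space(root, 1);
 */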
3228
3229 /*
3230  * This will check the space that the inode allocates from to make sure we have
3231  * enough space for the requested number of bytes.
3232  */
3233 int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
3234                                 u64 bytes)
3235 {
3236         struct btrfs_space_info *data_sinfo;
3237         int ret = 0, committed = 0;
3238
3239         /* make sure bytes are sectorsize aligned */
3240         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3241
3242         data_sinfo = BTRFS_I(inode)->space_info;
3243         if (!data_sinfo)
3244                 goto alloc;
3245
3246 again:
3247         /* make sure we have enough space to handle the data first */
3248         spin_lock(&data_sinfo->lock);
3249         if (data_sinfo->total_bytes - data_sinfo->bytes_used -
3250             data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
3251             data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
3252             data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
3253                 struct btrfs_trans_handle *trans;
3254
3255                 /*
3256                  * if we don't have enough free bytes in this space then we need
3257                  * to alloc a new chunk.
3258                  */
3259                 if (!data_sinfo->full) {
3260                         u64 alloc_target;
3261
3262                         data_sinfo->force_alloc = 1;
3263                         spin_unlock(&data_sinfo->lock);
3264 alloc:
3265                         alloc_target = btrfs_get_alloc_profile(root, 1);
3266                         trans = btrfs_start_transaction(root, 1);
3267                         if (!trans)
3268                                 return -ENOMEM;
3269
3270                         ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3271                                              bytes + 2 * 1024 * 1024,
3272                                              alloc_target, 0);
3273                         btrfs_end_transaction(trans, root);
3274                         if (ret)
3275                                 return ret;
3276
3277                         if (!data_sinfo) {
3278                                 btrfs_set_inode_space_info(root, inode);
3279                                 data_sinfo = BTRFS_I(inode)->space_info;
3280                         }
3281                         goto again;
3282                 }
3283                 spin_unlock(&data_sinfo->lock);
3284
3285                 /* commit the current transaction and try again */
3286                 if (!committed && !root->fs_info->open_ioctl_trans) {
3287                         committed = 1;
3288                         trans = btrfs_join_transaction(root, 1);
3289                         if (!trans)
3290                                 return -ENOMEM;
3291                         ret = btrfs_commit_transaction(trans, root);
3292                         if (ret)
3293                                 return ret;
3294                         goto again;
3295                 }
3296
3297                 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
3298                        ", %llu bytes_used, %llu bytes_reserved, "
3299                        "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
3300                        "%llu total\n", (unsigned long long)bytes,
3301                        (unsigned long long)data_sinfo->bytes_delalloc,
3302                        (unsigned long long)data_sinfo->bytes_used,
3303                        (unsigned long long)data_sinfo->bytes_reserved,
3304                        (unsigned long long)data_sinfo->bytes_pinned,
3305                        (unsigned long long)data_sinfo->bytes_readonly,
3306                        (unsigned long long)data_sinfo->bytes_may_use,
3307                        (unsigned long long)data_sinfo->total_bytes);
3308                 return -ENOSPC;
3309         }
3310         data_sinfo->bytes_may_use += bytes;
3311         BTRFS_I(inode)->reserved_bytes += bytes;
3312         spin_unlock(&data_sinfo->lock);
3313
3314         return 0;
3315 }
3316
3317 /*
3318  * if there was an error for whatever reason after calling
3319  * btrfs_check_data_free_space, call this so we can clean up the counters.
3320  */
3321 void btrfs_free_reserved_data_space(struct btrfs_root *root,
3322                                     struct inode *inode, u64 bytes)
3323 {
3324         struct btrfs_space_info *data_sinfo;
3325
3326         /* make sure bytes are sectorsize aligned */
3327         bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3328
3329         data_sinfo = BTRFS_I(inode)->space_info;
3330         spin_lock(&data_sinfo->lock);
3331         data_sinfo->bytes_may_use -= bytes;
3332         BTRFS_I(inode)->reserved_bytes -= bytes;
3333         spin_unlock(&data_sinfo->lock);
3334 }
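
/*
 * Typical call pattern (hypothetical sketch; prepare_pages stands in
 * for whatever can fail after the reservation): reserve before dirtying
 * pages and hand the reservation back on failure:
 *
 *	ret = btrfs_check_data_free_space(root, inode, write_bytes);
 *	if (ret)
 *		return ret;
 *	ret = prepare_pages(...);
 *	if (ret)
 *		btrfs_free_reserved_data_space(root, inode, write_bytes);
 */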
3335
3336 /* called when we are adding a delalloc extent to the inode's io_tree */
3337 void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
3338                                   u64 bytes)
3339 {
3340         struct btrfs_space_info *data_sinfo;
3341
3342         /* get the space info for where this inode will be storing its data */
3343         data_sinfo = BTRFS_I(inode)->space_info;
3344
3345         /* make sure we have enough space to handle the data first */
3346         spin_lock(&data_sinfo->lock);
3347         data_sinfo->bytes_delalloc += bytes;
3348
3349         /*
3350          * we are adding a delalloc extent without calling
3351          * btrfs_check_data_free_space first.  This happens on a weird
3352          * writepage condition, but shouldn't hurt our accounting
3353          */
3354         if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
3355                 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
3356                 BTRFS_I(inode)->reserved_bytes = 0;
3357         } else {
3358                 data_sinfo->bytes_may_use -= bytes;
3359                 BTRFS_I(inode)->reserved_bytes -= bytes;
3360         }
3361
3362         spin_unlock(&data_sinfo->lock);
3363 }
3364
3365 /* called when we are clearing a delalloc extent from the inode's io_tree */
3366 void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
3367                               u64 bytes)
3368 {
3369         struct btrfs_space_info *info;
3370
3371         info = BTRFS_I(inode)->space_info;
3372
3373         spin_lock(&info->lock);
3374         info->bytes_delalloc -= bytes;
3375         spin_unlock(&info->lock);
3376 }
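
/*
 * Putting the three hooks above together (descriptive summary, not new
 * behavior): bytes move into bytes_may_use at reservation time
 * (btrfs_check_data_free_space), shift to bytes_delalloc when the
 * delalloc extent is set (btrfs_delalloc_reserve_space), and drop out
 * of bytes_delalloc here once writeback has allocated the real extent
 * and update_block_group has charged bytes_used.
 */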
3377
3378 static void force_metadata_allocation(struct btrfs_fs_info *info)
3379 {
3380         struct list_head *head = &info->space_info;
3381         struct btrfs_space_info *found;
3382
3383         rcu_read_lock();
3384         list_for_each_entry_rcu(found, head, list) {
3385                 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3386                         found->force_alloc = 1;
3387         }
3388         rcu_read_unlock();
3389 }
3390
3391 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3392                           struct btrfs_root *extent_root, u64 alloc_bytes,
3393                           u64 flags, int force)
3394 {
3395         struct btrfs_space_info *space_info;
3396         struct btrfs_fs_info *fs_info = extent_root->fs_info;
3397         u64 thresh;
3398         int ret = 0;
3399
3400         mutex_lock(&fs_info->chunk_mutex);
3401
3402         flags = btrfs_reduce_alloc_profile(extent_root, flags);
3403
3404         space_info = __find_space_info(extent_root->fs_info, flags);
3405         if (!space_info) {
3406                 ret = update_space_info(extent_root->fs_info, flags,
3407                                         0, 0, &space_info);
3408                 BUG_ON(ret);
3409         }
3410         BUG_ON(!space_info);
3411
3412         spin_lock(&space_info->lock);
3413         if (space_info->force_alloc)
3414                 force = 1;
3415         if (space_info->full) {
3416                 spin_unlock(&space_info->lock);
3417                 goto out;
3418         }
3419
3420         thresh = space_info->total_bytes - space_info->bytes_readonly;
3421         thresh = div_factor(thresh, 8);
3422         if (!force &&
3423            (space_info->bytes_used + space_info->bytes_pinned +
3424             space_info->bytes_reserved + alloc_bytes) < thresh) {
3425                 spin_unlock(&space_info->lock);
3426                 goto out;
3427         }
3428         spin_unlock(&space_info->lock);
3429
3430         /*
3431          * if we're doing a data chunk, go ahead and make sure that
3432          * we keep a reasonable number of metadata chunks allocated in the
3433          * FS as well.
3434          */
3435         if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3436                 fs_info->data_chunk_allocations++;
3437                 if (!(fs_info->data_chunk_allocations %
3438                       fs_info->metadata_ratio))
3439                         force_metadata_allocation(fs_info);
3440         }
3441
3442         ret = btrfs_alloc_chunk(trans, extent_root, flags);
3443         spin_lock(&space_info->lock);
3444         if (ret)
3445                 space_info->full = 1;
3446         space_info->force_alloc = 0;
3447         spin_unlock(&space_info->lock);
3448 out:
3449         mutex_unlock(&extent_root->fs_info->chunk_mutex);
3450         return ret;
3451 }
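
/*
 * The threshold above in numbers (illustrative): div_factor(x, 8) is
 * x * 8 / 10, so a non-forced allocation only proceeds once used +
 * pinned + reserved + alloc_bytes crosses 80% of the writable space in
 * this space_info; below that the existing chunks are considered roomy
 * enough.
 */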
3452
3453 static int update_block_group(struct btrfs_trans_handle *trans,
3454                               struct btrfs_root *root,
3455                               u64 bytenr, u64 num_bytes, int alloc,
3456                               int mark_free)
3457 {
3458         struct btrfs_block_group_cache *cache;
3459         struct btrfs_fs_info *info = root->fs_info;
3460         u64 total = num_bytes;
3461         u64 old_val;
3462         u64 byte_in_group;
3463
3464         /* block accounting for super block */
3465         spin_lock(&info->delalloc_lock);
3466         old_val = btrfs_super_bytes_used(&info->super_copy);
3467         if (alloc)
3468                 old_val += num_bytes;
3469         else
3470                 old_val -= num_bytes;
3471         btrfs_set_super_bytes_used(&info->super_copy, old_val);
3472         spin_unlock(&info->delalloc_lock);
3473
3474         while (total) {
3475                 cache = btrfs_lookup_block_group(info, bytenr);
3476                 if (!cache)
3477                         return -1;
3478                 byte_in_group = bytenr - cache->key.objectid;
3479                 WARN_ON(byte_in_group > cache->key.offset);
3480
3481                 spin_lock(&cache->space_info->lock);
3482                 spin_lock(&cache->lock);
3483                 cache->dirty = 1;
3484                 old_val = btrfs_block_group_used(&cache->item);
3485                 num_bytes = min(total, cache->key.offset - byte_in_group);
3486                 if (alloc) {
3487                         old_val += num_bytes;
3488                         btrfs_set_block_group_used(&cache->item, old_val);
3489                         cache->reserved -= num_bytes;
3490                         cache->space_info->bytes_used += num_bytes;
3491                         cache->space_info->bytes_reserved -= num_bytes;
3492                         if (cache->ro)
3493                                 cache->space_info->bytes_readonly -= num_bytes;
3494                         spin_unlock(&cache->lock);
3495                         spin_unlock(&cache->space_info->lock);
3496                 } else {
3497                         old_val -= num_bytes;
3498                         cache->space_info->bytes_used -= num_bytes;
3499                         if (cache->ro)
3500                                 cache->space_info->bytes_readonly += num_bytes;
3501                         btrfs_set_block_group_used(&cache->item, old_val);
3502                         spin_unlock(&cache->lock);
3503                         spin_unlock(&cache->space_info->lock);
3504                         if (mark_free) {
3505                                 int ret;
3506
3507                                 ret = btrfs_discard_extent(root, bytenr,
3508                                                            num_bytes);
3509                                 WARN_ON(ret);
3510
3511                                 ret = btrfs_add_free_space(cache, bytenr,
3512                                                            num_bytes);
3513                                 WARN_ON(ret);
3514                         }
3515                 }
3516                 btrfs_put_block_group(cache);
3517                 total -= num_bytes;
3518                 bytenr += num_bytes;
3519         }
3520         return 0;
3521 }
3522
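/*
 * Return the logical start of the block group containing search_start,
 * or of the first block group after it; returns 0 if there is none.
 */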
3523 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3524 {
3525         struct btrfs_block_group_cache *cache;
3526         u64 bytenr;
3527
3528         cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3529         if (!cache)
3530                 return 0;
3531
3532         bytenr = cache->key.objectid;
3533         btrfs_put_block_group(cache);
3534
3535         return bytenr;
3536 }
3537
3538 /*
3539  * this function must be called within a transaction
3540  */
3541 int btrfs_pin_extent(struct btrfs_root *root,
3542                      u64 bytenr, u64 num_bytes, int reserved)
3543 {
3544         struct btrfs_fs_info *fs_info = root->fs_info;
3545         struct btrfs_block_group_cache *cache;
3546
3547         cache = btrfs_lookup_block_group(fs_info, bytenr);
3548         BUG_ON(!cache);
3549
3550         spin_lock(&cache->space_info->lock);
3551         spin_lock(&cache->lock);
3552         cache->pinned += num_bytes;
3553         cache->space_info->bytes_pinned += num_bytes;
3554         if (reserved) {
3555                 cache->reserved -= num_bytes;
3556                 cache->space_info->bytes_reserved -= num_bytes;
3557         }
3558         spin_unlock(&cache->lock);
3559         spin_unlock(&cache->space_info->lock);
3560
3561         btrfs_put_block_group(cache);
3562
3563         set_extent_dirty(fs_info->pinned_extents,
3564                          bytenr, bytenr + num_bytes - 1, GFP_NOFS);
3565         return 0;
3566 }
3567
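/*
 * Adjust the reserved byte counters on both the block group and its
 * space_info under their locks; reserve is nonzero to add num_bytes to
 * the reservation and zero to give it back.
 */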
3568 static int update_reserved_extents(struct btrfs_block_group_cache *cache,
3569                                    u64 num_bytes, int reserve)
3570 {
3571         spin_lock(&cache->space_info->lock);
3572         spin_lock(&cache->lock);
3573         if (reserve) {
3574                 cache->reserved += num_bytes;
3575                 cache->space_info->bytes_reserved += num_bytes;
3576         } else {
3577                 cache->reserved -= num_bytes;
3578                 cache->space_info->bytes_reserved -= num_bytes;
3579         }
3580         spin_unlock(&cache->lock);
3581         spin_unlock(&cache->space_info->lock);
3582         return 0;
3583 }
3584
3585 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
3586                                 struct btrfs_root *root)
3587 {
3588         struct btrfs_fs_info *fs_info = root->fs_info;
3589         struct btrfs_caching_control *next;
3590         struct btrfs_caching_control *caching_ctl;
3591         struct btrfs_block_group_cache *cache;
3592
3593         down_write(&fs_info->extent_commit_sem);
3594
3595         list_for_each_entry_safe(caching_ctl, next,
3596                                  &fs_info->caching_block_groups, list) {
3597                 cache = caching_ctl->block_group;
3598                 if (block_group_cache_done(cache)) {
3599                         cache->last_byte_to_unpin = (u64)-1;
3600                         list_del_init(&caching_ctl->list);
3601                         put_caching_control(caching_ctl);
3602                 } else {
3603                         cache->last_byte_to_unpin = caching_ctl->progress;
3604                 }
3605         }
3606
3607         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3608                 fs_info->pinned_extents = &fs_info->freed_extents[1];
3609         else
3610                 fs_info->pinned_extents = &fs_info->freed_extents[0];
3611
3612         up_write(&fs_info->extent_commit_sem);
3613         return 0;
3614 }
3615
3616 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
3617 {
3618         struct btrfs_fs_info *fs_info = root->fs_info;
3619         struct btrfs_block_group_cache *cache = NULL;
3620         u64 len;
3621
3622         while (start <= end) {
3623                 if (!cache ||
3624                     start >= cache->key.objectid + cache->key.offset) {
3625                         if (cache)
3626                                 btrfs_put_block_group(cache);
3627                         cache = btrfs_lookup_block_group(fs_info, start);
3628                         BUG_ON(!cache);
3629                 }
3630
3631                 len = cache->key.objectid + cache->key.offset - start;
3632                 len = min(len, end + 1 - start);
3633
3634                 if (start < cache->last_byte_to_unpin) {
3635                         len = min(len, cache->last_byte_to_unpin - start);
3636                         btrfs_add_free_space(cache, start, len);
3637                 }
3638
3639                 spin_lock(&cache->space_info->lock);
3640                 spin_lock(&cache->lock);
3641                 cache->pinned -= len;
3642                 cache->space_info->bytes_pinned -= len;
3643                 spin_unlock(&cache->lock);
3644                 spin_unlock(&cache->space_info->lock);
3645
3646                 start += len;
3647         }
3648
3649         if (cache)
3650                 btrfs_put_block_group(cache);
3651         return 0;
3652 }
3653
3654 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
3655                                struct btrfs_root *root)
3656 {
3657         struct btrfs_fs_info *fs_info = root->fs_info;
3658         struct extent_io_tree *unpin;
3659         u64 start;
3660         u64 end;
3661         int ret;
3662
3663         if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3664                 unpin = &fs_info->freed_extents[1];
3665         else
3666                 unpin = &fs_info->freed_extents[0];
3667
3668         while (1) {
3669                 ret = find_first_extent_bit(unpin, 0, &start, &end,
3670                                             EXTENT_DIRTY);
3671                 if (ret)
3672                         break;
3673
3674                 ret = btrfs_discard_extent(root, start, end + 1 - start);
3675
3676                 clear_extent_dirty(unpin, start, end, GFP_NOFS);
3677                 unpin_extent_range(root, start, end);
3678                 cond_resched();
3679         }
3680
3681         return ret;
3682 }
3683
3684 static int pin_down_bytes(struct btrfs_trans_handle *trans,
3685                           struct btrfs_root *root,
3686                           struct btrfs_path *path,
3687                           u64 bytenr, u64 num_bytes,
3688                           int is_data, int reserved,
3689                           struct extent_buffer **must_clean)
3690 {
3691         int err = 0;
3692         struct extent_buffer *buf;
3693
3694         if (is_data)
3695                 goto pinit;
3696
3697         /*
3698          * discard is sloooow, and so triggering discards on
3699          * individual btree blocks isn't a good plan.  Just
3700          * pin everything in discard mode.
3701          */
3702         if (btrfs_test_opt(root, DISCARD))
3703                 goto pinit;
3704
3705         buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3706         if (!buf)
3707                 goto pinit;
3708
3709         /* we can reuse a block if it hasn't been written
3710          * and it is from this transaction.  We can't
3711          * reuse anything from the tree log root because
3712          * it has tiny sub-transactions.
3713          */
3714         if (btrfs_buffer_uptodate(buf, 0) &&
3715             btrfs_try_tree_lock(buf)) {
3716                 u64 header_owner = btrfs_header_owner(buf);
3717                 u64 header_transid = btrfs_header_generation(buf);
3718                 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3719                     header_transid == trans->transid &&
3720                     !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
3721                         *must_clean = buf;
3722                         return 1;
3723                 }
3724                 btrfs_tree_unlock(buf);
3725         }
3726         free_extent_buffer(buf);
3727 pinit:
3728         if (path)
3729                 btrfs_set_path_blocking(path);
3730         /* unlocks the pinned mutex */
3731         btrfs_pin_extent(root, bytenr, num_bytes, reserved);
3732
3733         BUG_ON(err < 0);
3734         return 0;
3735 }
3736
3737 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3738                                 struct btrfs_root *root,
3739                                 u64 bytenr, u64 num_bytes, u64 parent,
3740                                 u64 root_objectid, u64 owner_objectid,
3741                                 u64 owner_offset, int refs_to_drop,
3742                                 struct btrfs_delayed_extent_op *extent_op)
3743 {
3744         struct btrfs_key key;
3745         struct btrfs_path *path;
3746         struct btrfs_fs_info *info = root->fs_info;
3747         struct btrfs_root *extent_root = info->extent_root;
3748         struct extent_buffer *leaf;
3749         struct btrfs_extent_item *ei;
3750         struct btrfs_extent_inline_ref *iref;
3751         int ret;
3752         int is_data;
3753         int extent_slot = 0;
3754         int found_extent = 0;
3755         int num_to_del = 1;
3756         u32 item_size;
3757         u64 refs;
3758
3759         path = btrfs_alloc_path();
3760         if (!path)
3761                 return -ENOMEM;
3762
3763         path->reada = 1;
3764         path->leave_spinning = 1;
3765
3766         is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3767         BUG_ON(!is_data && refs_to_drop != 1);
3768
3769         ret = lookup_extent_backref(trans, extent_root, path, &iref,
3770                                     bytenr, num_bytes, parent,
3771                                     root_objectid, owner_objectid,
3772                                     owner_offset);
3773         if (ret == 0) {
3774                 extent_slot = path->slots[0];
3775                 while (extent_slot >= 0) {
3776                         btrfs_item_key_to_cpu(path->nodes[0], &key,
3777                                               extent_slot);
3778                         if (key.objectid != bytenr)
3779                                 break;
3780                         if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3781                             key.offset == num_bytes) {
3782                                 found_extent = 1;
3783                                 break;
3784                         }
3785                         if (path->slots[0] - extent_slot > 5)
3786                                 break;
3787                         extent_slot--;
3788                 }
3789 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3790                 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3791                 if (found_extent && item_size < sizeof(*ei))
3792                         found_extent = 0;
3793 #endif
3794                 if (!found_extent) {
3795                         BUG_ON(iref);
3796                         ret = remove_extent_backref(trans, extent_root, path,
3797                                                     NULL, refs_to_drop,
3798                                                     is_data);
3799                         BUG_ON(ret);
3800                         btrfs_release_path(extent_root, path);
3801                         path->leave_spinning = 1;
3802
3803                         key.objectid = bytenr;
3804                         key.type = BTRFS_EXTENT_ITEM_KEY;
3805                         key.offset = num_bytes;
3806
3807                         ret = btrfs_search_slot(trans, extent_root,
3808                                                 &key, path, -1, 1);
3809                         if (ret) {
3810                                 printk(KERN_ERR "umm, got %d back from search"
3811                                        ", was looking for %llu\n", ret,
3812                                        (unsigned long long)bytenr);
3813                                 btrfs_print_leaf(extent_root, path->nodes[0]);
3814                         }
3815                         BUG_ON(ret);
3816                         extent_slot = path->slots[0];
3817                 }
3818         } else {
3819                 btrfs_print_leaf(extent_root, path->nodes[0]);
3820                 WARN_ON(1);
3821                 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
3822                        "parent %llu root %llu  owner %llu offset %llu\n",
3823                        (unsigned long long)bytenr,
3824                        (unsigned long long)parent,
3825                        (unsigned long long)root_objectid,
3826                        (unsigned long long)owner_objectid,
3827                        (unsigned long long)owner_offset);
3828         }
3829
3830         leaf = path->nodes[0];
3831         item_size = btrfs_item_size_nr(leaf, extent_slot);
3832 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3833         if (item_size < sizeof(*ei)) {
3834                 BUG_ON(found_extent || extent_slot != path->slots[0]);
3835                 ret = convert_extent_item_v0(trans, extent_root, path,
3836                                              owner_objectid, 0);
3837                 BUG_ON(ret < 0);
3838
3839                 btrfs_release_path(extent_root, path);
3840                 path->leave_spinning = 1;
3841
3842                 key.objectid = bytenr;
3843                 key.type = BTRFS_EXTENT_ITEM_KEY;
3844                 key.offset = num_bytes;
3845
3846                 ret = btrfs_search_slot(trans, extent_root, &key, path,
3847                                         -1, 1);
3848                 if (ret) {
3849                         printk(KERN_ERR "umm, got %d back from search"
3850                                ", was looking for %llu\n", ret,
3851                                (unsigned long long)bytenr);
3852                         btrfs_print_leaf(extent_root, path->nodes[0]);
3853                 }
3854                 BUG_ON(ret);
3855                 extent_slot = path->slots[0];
3856                 leaf = path->nodes[0];
3857                 item_size = btrfs_item_size_nr(leaf, extent_slot);
3858         }
3859 #endif
3860         BUG_ON(item_size < sizeof(*ei));
3861         ei = btrfs_item_ptr(leaf, extent_slot,
3862                             struct btrfs_extent_item);
3863         if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3864                 struct btrfs_tree_block_info *bi;
3865                 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3866                 bi = (struct btrfs_tree_block_info *)(ei + 1);
3867                 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3868         }
3869
3870         refs = btrfs_extent_refs(leaf, ei);
3871         BUG_ON(refs < refs_to_drop);
3872         refs -= refs_to_drop;
3873
3874         if (refs > 0) {
3875                 if (extent_op)
3876                         __run_delayed_extent_op(extent_op, leaf, ei);
3877                 /*
3878                  * In the case of inline back ref, reference count will
3879                  * be updated by remove_extent_backref
3880                  */
3881                 if (iref) {
3882                         BUG_ON(!found_extent);
3883                 } else {
3884                         btrfs_set_extent_refs(leaf, ei, refs);
3885                         btrfs_mark_buffer_dirty(leaf);
3886                 }
3887                 if (found_extent) {
3888                         ret = remove_extent_backref(trans, extent_root, path,
3889                                                     iref, refs_to_drop,
3890                                                     is_data);
3891                         BUG_ON(ret);
3892                 }
3893         } else {
3894                 int mark_free = 0;
3895                 struct extent_buffer *must_clean = NULL;
3896
3897                 if (found_extent) {
3898                         BUG_ON(is_data && refs_to_drop !=
3899                                extent_data_ref_count(root, path, iref));
3900                         if (iref) {
3901                                 BUG_ON(path->slots[0] != extent_slot);
3902                         } else {
3903                                 BUG_ON(path->slots[0] != extent_slot + 1);
3904                                 path->slots[0] = extent_slot;
3905                                 num_to_del = 2;
3906                         }
3907                 }
3908
3909                 ret = pin_down_bytes(trans, root, path, bytenr,
3910                                      num_bytes, is_data, 0, &must_clean);
3911                 if (ret > 0)
3912                         mark_free = 1;
3913                 BUG_ON(ret < 0);
3914                 /*
3915                  * it is going to be very rare for someone to be waiting
3916                  * on the block we're freeing.  del_items might need to
3917                  * schedule, so rather than get fancy, just force it
3918                  * to blocking here
3919                  */
3920                 if (must_clean)
3921                         btrfs_set_lock_blocking(must_clean);
3922
3923                 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3924                                       num_to_del);
3925                 BUG_ON(ret);
3926                 btrfs_release_path(extent_root, path);
3927
3928                 if (must_clean) {
3929                         clean_tree_block(NULL, root, must_clean);
3930                         btrfs_tree_unlock(must_clean);
3931                         free_extent_buffer(must_clean);
3932                 }
3933
3934                 if (is_data) {
3935                         ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3936                         BUG_ON(ret);
3937                 } else {
3938                         invalidate_mapping_pages(info->btree_inode->i_mapping,
3939                              bytenr >> PAGE_CACHE_SHIFT,
3940                              (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
3941                 }
3942
3943                 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3944                                          mark_free);
3945                 BUG_ON(ret);
3946         }
3947         btrfs_free_path(path);
3948         return ret;
3949 }
3950
3951 /*
3952  * when we free an extent, it is possible (and likely) that we free the last
3953  * delayed ref for that extent as well.  This searches the delayed ref tree for
3954  * a given extent, and if there are no other delayed refs to be processed, it
3955  * removes it from the tree.
3956  */
3957 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3958                                       struct btrfs_root *root, u64 bytenr)
3959 {
3960         struct btrfs_delayed_ref_head *head;
3961         struct btrfs_delayed_ref_root *delayed_refs;
3962         struct btrfs_delayed_ref_node *ref;
3963         struct rb_node *node;
3964         int ret;
3965
3966         delayed_refs = &trans->transaction->delayed_refs;
3967         spin_lock(&delayed_refs->lock);
3968         head = btrfs_find_delayed_ref_head(trans, bytenr);
3969         if (!head)
3970                 goto out;
3971
3972         node = rb_prev(&head->node.rb_node);
3973         if (!node)
3974                 goto out;
3975
3976         ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3977
3978         /* there are still entries for this ref, we can't drop it */
3979         if (ref->bytenr == bytenr)
3980                 goto out;
3981
3982         if (head->extent_op) {
3983                 if (!head->must_insert_reserved)
3984                         goto out;
3985                 kfree(head->extent_op);
3986                 head->extent_op = NULL;
3987         }
3988
3989         /*
3990          * waiting for the lock here would deadlock.  If someone else has it
3991          * locked they are already in the process of dropping it anyway
3992          */
3993         if (!mutex_trylock(&head->mutex))
3994                 goto out;
3995
3996         /*
3997          * at this point we have a head with no other entries.  Go
3998          * ahead and process it.
3999          */
4000         head->node.in_tree = 0;
4001         rb_erase(&head->node.rb_node, &delayed_refs->root);
4002
4003         delayed_refs->num_entries--;
4004
4005         /*
4006          * we don't take a ref on the node because we're removing it from the
4007          * tree, so we just steal the ref the tree was holding.
4008          */
4009         delayed_refs->num_heads--;
4010         if (list_empty(&head->cluster))
4011                 delayed_refs->num_heads_ready--;
4012
4013         list_del_init(&head->cluster);
4014         spin_unlock(&delayed_refs->lock);
4015
4016         ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
4017                                   &head->node, head->extent_op,
4018                                   head->must_insert_reserved);
4019         BUG_ON(ret);
4020         btrfs_put_delayed_ref(&head->node);
4021         return 0;
4022 out:
4023         spin_unlock(&delayed_refs->lock);
4024         return 0;
4025 }
4026
4027 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4028                       struct btrfs_root *root,
4029                       u64 bytenr, u64 num_bytes, u64 parent,
4030                       u64 root_objectid, u64 owner, u64 offset)
4031 {
4032         int ret;
4033
4034         /*
4035          * tree log blocks never actually go into the extent allocation
4036          * tree, just update pinning info and exit early.
4037          */
4038         if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4039                 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4040                 /* unlocks the pinned mutex */
4041                 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4042                 ret = 0;
4043         } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4044                 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4045                                         parent, root_objectid, (int)owner,
4046                                         BTRFS_DROP_DELAYED_REF, NULL);
4047                 BUG_ON(ret);
4048                 ret = check_ref_cleanup(trans, root, bytenr);
4049                 BUG_ON(ret);
4050         } else {
4051                 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4052                                         parent, root_objectid, owner,
4053                                         offset, BTRFS_DROP_DELAYED_REF, NULL);
4054                 BUG_ON(ret);
4055         }
4056         return ret;
4057 }
4058
4059 int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4060                           struct btrfs_root *root,
4061                           u64 bytenr, u32 blocksize,
4062                           u64 parent, u64 root_objectid, int level)
4063 {
4064         u64 used;
4065         spin_lock(&root->node_lock);
4066         used = btrfs_root_used(&root->root_item) - blocksize;
4067         btrfs_set_root_used(&root->root_item, used);
4068         spin_unlock(&root->node_lock);
4069
4070         return btrfs_free_extent(trans, root, bytenr, blocksize,
4071                                  parent, root_objectid, level, 0);
4072 }
4073
4074 static u64 stripe_align(struct btrfs_root *root, u64 val)
4075 {
4076         u64 mask = ((u64)root->stripesize - 1);
4077         u64 ret = (val + mask) & ~mask;
4078         return ret;
4079 }
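
/*
 * Example (illustrative): with a 64K stripesize, stripe_align rounds
 * val = 70000 up to 131072 (2 * 65536); values already on a stripe
 * boundary come back unchanged.
 */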
4080
4081 /*
4082  * when we wait for progress in the block group caching, it's because
4083  * our allocation attempt failed at least once.  So, we must sleep
4084  * and let some progress happen before we try again.
4085  *
4086  * This function will sleep at least once waiting for new free space to
4087  * show up, and then it will check the block group free space numbers
4088  * for our min num_bytes.  Another option is to have it go ahead
4089  * and look in the rbtree for a free extent of a given size, but this
4090  * is a good start.
4091  */
4092 static noinline int
4093 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4094                                 u64 num_bytes)
4095 {
4096         struct btrfs_caching_control *caching_ctl;
4097         DEFINE_WAIT(wait);
4098
4099         caching_ctl = get_caching_control(cache);
4100         if (!caching_ctl)
4101                 return 0;
4102
4103         wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4104                    (cache->free_space >= num_bytes));
4105
4106         put_caching_control(caching_ctl);
4107         return 0;
4108 }
4109
4110 static noinline int
4111 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4112 {
4113         struct btrfs_caching_control *caching_ctl;
4114         DEFINE_WAIT(wait);
4115
4116         caching_ctl = get_caching_control(cache);
4117         if (!caching_ctl)
4118                 return 0;
4119
4120         wait_event(caching_ctl->wait, block_group_cache_done(cache));
4121
4122         put_caching_control(caching_ctl);
4123         return 0;
4124 }
4125
4126 enum btrfs_loop_type {
4127         LOOP_FIND_IDEAL = 0,
4128         LOOP_CACHING_NOWAIT = 1,
4129         LOOP_CACHING_WAIT = 2,
4130         LOOP_ALLOC_CHUNK = 3,
4131         LOOP_NO_EMPTY_SIZE = 4,
4132 };
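
/*
 * Rough sketch of how find_free_extent() below escalates through these
 * stages (my summary; the hypothetical helper try_all_block_groups()
 * stands in for the big list_for_each_entry loop):
 */
#if 0 /* example only */
for (loop = LOOP_FIND_IDEAL; loop <= LOOP_NO_EMPTY_SIZE; loop++) {
        if (try_all_block_groups())     /* hypothetical helper */
                return 0;               /* got an extent */
        /* LOOP_ALLOC_CHUNK additionally calls do_chunk_alloc();
         * LOOP_NO_EMPTY_SIZE retries with empty_size and
         * empty_cluster forced to 0 */
}
return -ENOSPC;
#endif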
4133
4134 /*
4135  * walks the btree of allocated extents and finds a hole of a given size.
4136  * The key ins is changed to record the hole:
4137  * ins->objectid == block start
4138  * ins->flags == BTRFS_EXTENT_ITEM_KEY
4139  * ins->offset == size of the hole, in bytes
4140  * Any available blocks before search_start are skipped.
4141  */
4142 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4143                                      struct btrfs_root *orig_root,
4144                                      u64 num_bytes, u64 empty_size,
4145                                      u64 search_start, u64 search_end,
4146                                      u64 hint_byte, struct btrfs_key *ins,
4147                                      u64 exclude_start, u64 exclude_nr,
4148                                      int data)
4149 {
4150         int ret = 0;
4151         struct btrfs_root *root = orig_root->fs_info->extent_root;
4152         struct btrfs_free_cluster *last_ptr = NULL;
4153         struct btrfs_block_group_cache *block_group = NULL;
4154         int empty_cluster = 2 * 1024 * 1024;
4155         int allowed_chunk_alloc = 0;
4156         int done_chunk_alloc = 0;
4157         struct btrfs_space_info *space_info;
4158         int last_ptr_loop = 0;
4159         int loop = 0;
4160         bool found_uncached_bg = false;
4161         bool failed_cluster_refill = false;
4162         bool failed_alloc = false;
4163         u64 ideal_cache_percent = 0;
4164         u64 ideal_cache_offset = 0;
4165
4166         WARN_ON(num_bytes < root->sectorsize);
4167         btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4168         ins->objectid = 0;
4169         ins->offset = 0;
4170
4171         space_info = __find_space_info(root->fs_info, data);
4172         if (!space_info) {
4173                 printk(KERN_ERR "No space info for %d\n", data);
4174                 return -ENOSPC;
4175         }
4176
4177         if (orig_root->ref_cows || empty_size)
4178                 allowed_chunk_alloc = 1;
4179
4180         if (data & BTRFS_BLOCK_GROUP_METADATA) {
4181                 last_ptr = &root->fs_info->meta_alloc_cluster;
4182                 if (!btrfs_test_opt(root, SSD))
4183                         empty_cluster = 64 * 1024;
4184         }
4185
4186         if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
4187                 last_ptr = &root->fs_info->data_alloc_cluster;
4188         }
4189
4190         if (last_ptr) {
4191                 spin_lock(&last_ptr->lock);
4192                 if (last_ptr->block_group)
4193                         hint_byte = last_ptr->window_start;
4194                 spin_unlock(&last_ptr->lock);
4195         }
4196
4197         search_start = max(search_start, first_logical_byte(root, 0));
4198         search_start = max(search_start, hint_byte);
4199
4200         if (!last_ptr)
4201                 empty_cluster = 0;
4202
4203         if (search_start == hint_byte) {
4204 ideal_cache:
4205                 block_group = btrfs_lookup_block_group(root->fs_info,
4206                                                        search_start);
4207                 /*
4208                  * we don't want to use the block group if it doesn't match our
4209                  * allocation bits, or if it's not cached.
4210                  *
4211                  * However if we are re-searching with an ideal block group
4212                  * picked out then we don't care that the block group is cached.
4213                  */
4214                 if (block_group && block_group_bits(block_group, data) &&
4215                     (block_group->cached != BTRFS_CACHE_NO ||
4216                      search_start == ideal_cache_offset)) {
4217                         down_read(&space_info->groups_sem);
4218                         if (list_empty(&block_group->list) ||
4219                             block_group->ro) {
4220                                 /*
4221                                  * someone is removing this block group,
4222                                  * we can't jump into the have_block_group
4223                                  * target because our list pointers are not
4224                                  * valid
4225                                  */
4226                                 btrfs_put_block_group(block_group);
4227                                 up_read(&space_info->groups_sem);
4228                         } else {
4229                                 goto have_block_group;
4230                         }
4231                 } else if (block_group) {
4232                         btrfs_put_block_group(block_group);
4233                 }
4234         }
4235 search:
4236         down_read(&space_info->groups_sem);
4237         list_for_each_entry(block_group, &space_info->block_groups, list) {
4238                 u64 offset;
4239                 int cached;
4240
4241                 btrfs_get_block_group(block_group);
4242                 search_start = block_group->key.objectid;
4243
4244 have_block_group:
4245                 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4246                         u64 free_percent;
4247
4248                         free_percent = btrfs_block_group_used(&block_group->item);
4249                         free_percent *= 100;
4250                         free_percent = div64_u64(free_percent,
4251                                                  block_group->key.offset);
4252                         free_percent = 100 - free_percent;
4253                         if (free_percent > ideal_cache_percent &&
4254                             likely(!block_group->ro)) {
4255                                 ideal_cache_offset = block_group->key.objectid;
4256                                 ideal_cache_percent = free_percent;
4257                         }
4258
4259                         /*
4260                          * We only want to start kthread caching if we are at
4261                          * the point where we will wait for caching to make
4262                          * progress, or if our ideal search is over and we've
4263                          * found somebody to start caching.
4264                          */
4265                         if (loop > LOOP_CACHING_NOWAIT ||
4266                             (loop > LOOP_FIND_IDEAL &&
4267                              atomic_read(&space_info->caching_threads) < 2)) {
4268                                 ret = cache_block_group(block_group);
4269                                 BUG_ON(ret);
4270                         }
4271                         found_uncached_bg = true;
4272
4273                         /*
4274                          * If loop is set for cached only, try the next block
4275                          * group.
4276                          */
4277                         if (loop == LOOP_FIND_IDEAL)
4278                                 goto loop;
4279                 }
4280
4281                 cached = block_group_cache_done(block_group);
4282                 if (unlikely(!cached))
4283                         found_uncached_bg = true;
4284
4285                 if (unlikely(block_group->ro))
4286                         goto loop;
4287
4288                 /*
4289                  * Ok, we want to try and use the cluster allocator, so let's look
4290                  * there, unless we are on LOOP_NO_EMPTY_SIZE.  By that point we
4291                  * will have tried the cluster allocator plenty of times and not
4292                  * have found anything, so we are likely way too fragmented for
4293                  * the clustering code to find anything; let's just skip it and
4294                  * let the allocator find whatever block it can
4295                  * find.
4296                  */
4297                 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
4298                         /*
4299                          * the refill lock keeps out other
4300                          * people trying to start a new cluster
4301                          */
4302                         spin_lock(&last_ptr->refill_lock);
4303                         if (last_ptr->block_group &&
4304                             (last_ptr->block_group->ro ||
4305                             !block_group_bits(last_ptr->block_group, data))) {
4306                                 offset = 0;
4307                                 goto refill_cluster;
4308                         }
4309
4310                         offset = btrfs_alloc_from_cluster(block_group, last_ptr,
4311                                                  num_bytes, search_start);
4312                         if (offset) {
4313                                 /* we have a block, we're done */
4314                                 spin_unlock(&last_ptr->refill_lock);
4315                                 goto checks;
4316                         }
4317
4318                         spin_lock(&last_ptr->lock);
4319                         /*
4320                          * whoops, this cluster doesn't actually point to
4321                          * this block group.  Get a ref on the block
4322                          * group it does point to and try again
4323                          */
4324                         if (!last_ptr_loop && last_ptr->block_group &&
4325                             last_ptr->block_group != block_group) {
4326
4327                                 btrfs_put_block_group(block_group);
4328                                 block_group = last_ptr->block_group;
4329                                 btrfs_get_block_group(block_group);
4330                                 spin_unlock(&last_ptr->lock);
4331                                 spin_unlock(&last_ptr->refill_lock);
4332
4333                                 last_ptr_loop = 1;
4334                                 search_start = block_group->key.objectid;
4335                                 /*
4336                                  * we know this block group is properly
4337                                  * in the list because
4338                          * btrfs_remove_block_group drops the
4339                                  * cluster before it removes the block
4340                                  * group from the list
4341                                  */
4342                                 goto have_block_group;
4343                         }
4344                         spin_unlock(&last_ptr->lock);
4345 refill_cluster:
4346                         /*
4347                          * this cluster didn't work out, free it and
4348                          * start over
4349                          */
4350                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
4351
4352                         last_ptr_loop = 0;
4353
4354                         /* allocate a cluster in this block group */
4355                         ret = btrfs_find_space_cluster(trans, root,
4356                                                block_group, last_ptr,
4357                                                offset, num_bytes,
4358                                                empty_cluster + empty_size);
4359                         if (ret == 0) {
4360                                 /*
4361                                  * now pull our allocation out of this
4362                                  * cluster
4363                                  */
4364                                 offset = btrfs_alloc_from_cluster(block_group,
4365                                                   last_ptr, num_bytes,
4366                                                   search_start);
4367                                 if (offset) {
4368                                         /* we found one, proceed */
4369                                         spin_unlock(&last_ptr->refill_lock);
4370                                         goto checks;
4371                                 }
4372                         } else if (!cached && loop > LOOP_CACHING_NOWAIT
4373                                    && !failed_cluster_refill) {
4374                                 spin_unlock(&last_ptr->refill_lock);
4375
4376                                 failed_cluster_refill = true;
4377                                 wait_block_group_cache_progress(block_group,
4378                                        num_bytes + empty_cluster + empty_size);
4379                                 goto have_block_group;
4380                         }
4381
4382                         /*
4383                          * at this point we either didn't find a cluster
4384                          * or we weren't able to allocate a block from our
4385                          * cluster.  Free the cluster we've been trying
4386                          * to use, and go to the next block group
4387                          */
4388                         btrfs_return_cluster_to_free_space(NULL, last_ptr);
4389                         spin_unlock(&last_ptr->refill_lock);
4390                         goto loop;
4391                 }
4392
4393                 offset = btrfs_find_space_for_alloc(block_group, search_start,
4394                                                     num_bytes, empty_size);
4395                 /*
4396                  * If we didn't find a chunk, and we haven't failed on this
4397                  * block group before, and this block group is in the middle of
4398                  * caching and we are ok with waiting, then go ahead and wait
4399                  * for progress to be made, and set failed_alloc to true.
4400                  *
4401                  * If failed_alloc is true then we've already waited on this
4402                  * block group once and should move on to the next block group.
4403                  */
4404                 if (!offset && !failed_alloc && !cached &&
4405                     loop > LOOP_CACHING_NOWAIT) {
4406                         wait_block_group_cache_progress(block_group,
4407                                                 num_bytes + empty_size);
4408                         failed_alloc = true;
4409                         goto have_block_group;
4410                 } else if (!offset) {
4411                         goto loop;
4412                 }
4413 checks:
4414                 search_start = stripe_align(root, offset);
4415                 /* move on to the next group */
4416                 if (search_start + num_bytes >= search_end) {
4417                         btrfs_add_free_space(block_group, offset, num_bytes);
4418                         goto loop;
4419                 }
4420
4421                 /* move on to the next group */
4422                 if (search_start + num_bytes >
4423                     block_group->key.objectid + block_group->key.offset) {
4424                         btrfs_add_free_space(block_group, offset, num_bytes);
4425                         goto loop;
4426                 }
4427
4428                 if (exclude_nr > 0 &&
4429                     (search_start + num_bytes > exclude_start &&
4430                      search_start < exclude_start + exclude_nr)) {
4431                         search_start = exclude_start + exclude_nr;
4432
4433                         btrfs_add_free_space(block_group, offset, num_bytes);
4434                         /*
4435                          * if search_start is still in this block group
4436                          * then we just re-search this block group
4437                          */
4438                         if (search_start >= block_group->key.objectid &&
4439                             search_start < (block_group->key.objectid +
4440                                             block_group->key.offset))
4441                                 goto have_block_group;
4442                         goto loop;
4443                 }
4444
4445                 ins->objectid = search_start;
4446                 ins->offset = num_bytes;
4447
4448                 if (offset < search_start)
4449                         btrfs_add_free_space(block_group, offset,
4450                                              search_start - offset);
4451                 BUG_ON(offset > search_start);
4452
4453                 update_reserved_extents(block_group, num_bytes, 1);
4454
4455                 /* we are all good, lets return */
4456                 break;
4457 loop:
4458                 failed_cluster_refill = false;
4459                 failed_alloc = false;
4460                 btrfs_put_block_group(block_group);
4461         }
4462         up_read(&space_info->groups_sem);
4463
4464         /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
4465          *                      for them to make caching progress.  Also
4466          *                      determine the best possible bg to cache
4467          * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
4468          *                      caching kthreads as we move along
4469          * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
4470          * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4471          * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
4472          *                      again
4473          */
4474         if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4475             (found_uncached_bg || empty_size || empty_cluster ||
4476              allowed_chunk_alloc)) {
4477                 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
4478                         found_uncached_bg = false;
4479                         loop++;
4480                         if (!ideal_cache_percent &&
4481                             atomic_read(&space_info->caching_threads))
4482                                 goto search;
4483
4484                         /*
4485                          * 1 of the following 2 things have happened so far
4486                          * One of the following two things has happened so far
4487                          * 1) We found an ideal block group for caching that
4488                          * is mostly full and will cache quickly, so we might
4489                          * as well wait for it.
4490                          *
4491                          * 2) We searched for cached only and we didn't find
4492                          * anything, and we didn't start any caching kthreads
4493                          * either, so chances are we will loop through and
4494                          * start a couple caching kthreads, and then come back
4495                          * around and just wait for them.  This will be slower
4496                          * because we will have 2 caching kthreads reading at
4497                          * the same time when we could have just started one
4498                          * and waited for it to get far enough to give us an
4499                          * allocation, so go ahead and go to the wait caching
4500                          * loop.
4501                          */
4502                         loop = LOOP_CACHING_WAIT;
4503                         search_start = ideal_cache_offset;
4504                         ideal_cache_percent = 0;
4505                         goto ideal_cache;
4506                 } else if (loop == LOOP_FIND_IDEAL) {
4507                         /*
4508                          * Didn't find an uncached bg, wait on anything we find
4509                          * next.
4510                          */
4511                         loop = LOOP_CACHING_WAIT;
4512                         goto search;
4513                 }
4514
4515                 if (loop < LOOP_CACHING_WAIT) {
4516                         loop++;
4517                         goto search;
4518                 }
4519
4520                 if (loop == LOOP_ALLOC_CHUNK) {
4521                         empty_size = 0;
4522                         empty_cluster = 0;
4523                 }
4524
4525                 if (allowed_chunk_alloc) {
4526                         ret = do_chunk_alloc(trans, root, num_bytes +
4527                                              2 * 1024 * 1024, data, 1);
4528                         allowed_chunk_alloc = 0;
4529                         done_chunk_alloc = 1;
4530                 } else if (!done_chunk_alloc) {
4531                         space_info->force_alloc = 1;
4532                 }
4533
4534                 if (loop < LOOP_NO_EMPTY_SIZE) {
4535                         loop++;
4536                         goto search;
4537                 }
4538                 ret = -ENOSPC;
4539         } else if (!ins->objectid) {
4540                 ret = -ENOSPC;
4541         }
4542
4543         /* we found what we needed */
4544         if (ins->objectid) {
4545                 if (!(data & BTRFS_BLOCK_GROUP_DATA))
4546                         trans->block_group = block_group->key.objectid;
4547
4548                 btrfs_put_block_group(block_group);
4549                 ret = 0;
4550         }
4551
4552         return ret;
4553 }
4554
4555 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4556                             int dump_block_groups)
4557 {
4558         struct btrfs_block_group_cache *cache;
4559
4560         spin_lock(&info->lock);
4561         printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4562                (unsigned long long)(info->total_bytes - info->bytes_used -
4563                                     info->bytes_pinned - info->bytes_reserved -
4564                                     info->bytes_super),
4565                (info->full) ? "" : "not ");
4566         printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
4567                " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
4568                "\n",
4569                (unsigned long long)info->total_bytes,
4570                (unsigned long long)info->bytes_pinned,
4571                (unsigned long long)info->bytes_delalloc,
4572                (unsigned long long)info->bytes_may_use,
4573                (unsigned long long)info->bytes_used,
4574                (unsigned long long)info->bytes_root,
4575                (unsigned long long)info->bytes_super,
4576                (unsigned long long)info->bytes_reserved);
4577         spin_unlock(&info->lock);
4578
4579         if (!dump_block_groups)
4580                 return;
4581
4582         down_read(&info->groups_sem);
4583         list_for_each_entry(cache, &info->block_groups, list) {
4584                 spin_lock(&cache->lock);
4585                 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
4586                        "%llu pinned %llu reserved\n",
4587                        (unsigned long long)cache->key.objectid,
4588                        (unsigned long long)cache->key.offset,
4589                        (unsigned long long)btrfs_block_group_used(&cache->item),
4590                        (unsigned long long)cache->pinned,
4591                        (unsigned long long)cache->reserved);
4592                 btrfs_dump_free_space(cache, bytes);
4593                 spin_unlock(&cache->lock);
4594         }
4595         up_read(&info->groups_sem);
4596 }
4597
4598 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4599                          struct btrfs_root *root,
4600                          u64 num_bytes, u64 min_alloc_size,
4601                          u64 empty_size, u64 hint_byte,
4602                          u64 search_end, struct btrfs_key *ins,
4603                          u64 data)
4604 {
4605         int ret;
4606         u64 search_start = 0;
4607
4608         data = btrfs_get_alloc_profile(root, data);
4609 again:
4610         /*
4611          * the only place that sets empty_size is btrfs_realloc_node, which
4612          * is not called recursively on allocations
4613          */
4614         if (empty_size || root->ref_cows)
4615                 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4616                                      num_bytes + 2 * 1024 * 1024, data, 0);
4617
4618         WARN_ON(num_bytes < root->sectorsize);
4619         ret = find_free_extent(trans, root, num_bytes, empty_size,
4620                                search_start, search_end, hint_byte, ins,
4621                                trans->alloc_exclude_start,
4622                                trans->alloc_exclude_nr, data);
4623
4624         if (ret == -ENOSPC && num_bytes > min_alloc_size) {
4625                 num_bytes = num_bytes >> 1;
4626                 num_bytes = num_bytes & ~(root->sectorsize - 1);
4627                 num_bytes = max(num_bytes, min_alloc_size);
4628                 do_chunk_alloc(trans, root->fs_info->extent_root,
4629                                num_bytes, data, 1);
4630                 goto again;
4631         }
4632         if (ret == -ENOSPC) {
4633                 struct btrfs_space_info *sinfo;
4634
4635                 sinfo = __find_space_info(root->fs_info, data);
4636                 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4637                        "wanted %llu\n", (unsigned long long)data,
4638                        (unsigned long long)num_bytes);
4639                 dump_space_info(sinfo, num_bytes, 1);
4640         }
4641
4642         return ret;
4643 }
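
/*
 * Illustrative note on the -ENOSPC retry above: the request is halved,
 * aligned down to the sector size and clamped to min_alloc_size before
 * trying again.  A standalone model of one retry step (names are mine):
 */
#if 0 /* example only */
static u64 next_alloc_size(u64 num_bytes, u64 sectorsize, u64 min_alloc_size)
{
        num_bytes >>= 1;                        /* halve the request */
        num_bytes &= ~(sectorsize - 1);         /* keep sector alignment */
        return max(num_bytes, min_alloc_size);  /* never below the floor */
}
/* with 4K sectors and an 8K floor: 40960 -> 20480 -> 10240 -> 8192 */
#endif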
4644
4645 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4646 {
4647         struct btrfs_block_group_cache *cache;
4648         int ret = 0;
4649
4650         cache = btrfs_lookup_block_group(root->fs_info, start);
4651         if (!cache) {
4652                 printk(KERN_ERR "Unable to find block group for %llu\n",
4653                        (unsigned long long)start);
4654                 return -ENOSPC;
4655         }
4656
4657         ret = btrfs_discard_extent(root, start, len);
4658
4659         btrfs_add_free_space(cache, start, len);
4660         update_reserved_extents(cache, len, 0);
4661         btrfs_put_block_group(cache);
4662
4663         return ret;
4664 }
4665
4666 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4667                                       struct btrfs_root *root,
4668                                       u64 parent, u64 root_objectid,
4669                                       u64 flags, u64 owner, u64 offset,
4670                                       struct btrfs_key *ins, int ref_mod)
4671 {
4672         int ret;
4673         struct btrfs_fs_info *fs_info = root->fs_info;
4674         struct btrfs_extent_item *extent_item;
4675         struct btrfs_extent_inline_ref *iref;
4676         struct btrfs_path *path;
4677         struct extent_buffer *leaf;
4678         int type;
4679         u32 size;
4680
4681         if (parent > 0)
4682                 type = BTRFS_SHARED_DATA_REF_KEY;
4683         else
4684                 type = BTRFS_EXTENT_DATA_REF_KEY;
4685
4686         size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
4687
4688         path = btrfs_alloc_path();
4689         BUG_ON(!path);
4690
4691         path->leave_spinning = 1;
4692         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4693                                       ins, size);
4694         BUG_ON(ret);
4695
4696         leaf = path->nodes[0];
4697         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4698                                      struct btrfs_extent_item);
4699         btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4700         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4701         btrfs_set_extent_flags(leaf, extent_item,
4702                                flags | BTRFS_EXTENT_FLAG_DATA);
4703
4704         iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4705         btrfs_set_extent_inline_ref_type(leaf, iref, type);
4706         if (parent > 0) {
4707                 struct btrfs_shared_data_ref *ref;
4708                 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4709                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4710                 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4711         } else {
4712                 struct btrfs_extent_data_ref *ref;
4713                 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4714                 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4715                 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4716                 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4717                 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4718         }
4719
4720         btrfs_mark_buffer_dirty(path->nodes[0]);
4721         btrfs_free_path(path);
4722
4723         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4724                                  1, 0);
4725         if (ret) {
4726                 printk(KERN_ERR "btrfs update block group failed for %llu "
4727                        "%llu\n", (unsigned long long)ins->objectid,
4728                        (unsigned long long)ins->offset);
4729                 BUG();
4730         }
4731         return ret;
4732 }
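
/*
 * Layout sketch (my reading of the function above, illustrative):
 * the extent item is followed by a single inline reference whose
 * payload depends on whether the extent is shared:
 *
 *   parent > 0:  [btrfs_extent_item][iref: SHARED_DATA_REF][ref count]
 *   parent == 0: [btrfs_extent_item][iref: EXTENT_DATA_REF][root/objectid/offset/count]
 *
 * which is why size = sizeof(*extent_item) +
 * btrfs_extent_inline_ref_size(type).
 */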
4733
4734 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4735                                      struct btrfs_root *root,
4736                                      u64 parent, u64 root_objectid,
4737                                      u64 flags, struct btrfs_disk_key *key,
4738                                      int level, struct btrfs_key *ins)
4739 {
4740         int ret;
4741         struct btrfs_fs_info *fs_info = root->fs_info;
4742         struct btrfs_extent_item *extent_item;
4743         struct btrfs_tree_block_info *block_info;
4744         struct btrfs_extent_inline_ref *iref;
4745         struct btrfs_path *path;
4746         struct extent_buffer *leaf;
4747         u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
4748
4749         path = btrfs_alloc_path();
4750         BUG_ON(!path);
4751
4752         path->leave_spinning = 1;
4753         ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4754                                       ins, size);
4755         BUG_ON(ret);
4756
4757         leaf = path->nodes[0];
4758         extent_item = btrfs_item_ptr(leaf, path->slots[0],
4759                                      struct btrfs_extent_item);
4760         btrfs_set_extent_refs(leaf, extent_item, 1);
4761         btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4762         btrfs_set_extent_flags(leaf, extent_item,
4763                                flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4764         block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4765
4766         btrfs_set_tree_block_key(leaf, block_info, key);
4767         btrfs_set_tree_block_level(leaf, block_info, level);
4768
4769         iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4770         if (parent > 0) {
4771                 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4772                 btrfs_set_extent_inline_ref_type(leaf, iref,
4773                                                  BTRFS_SHARED_BLOCK_REF_KEY);
4774                 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4775         } else {
4776                 btrfs_set_extent_inline_ref_type(leaf, iref,
4777                                                  BTRFS_TREE_BLOCK_REF_KEY);
4778                 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4779         }
4780
4781         btrfs_mark_buffer_dirty(leaf);
4782         btrfs_free_path(path);
4783
4784         ret = update_block_group(trans, root, ins->objectid, ins->offset,
4785                                  1, 0);
4786         if (ret) {
4787                 printk(KERN_ERR "btrfs update block group failed for %llu "
4788                        "%llu\n", (unsigned long long)ins->objectid,
4789                        (unsigned long long)ins->offset);
4790                 BUG();
4791         }
4792         return ret;
4793 }
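
/*
 * Layout sketch (illustrative): tree block extent items additionally
 * carry a btrfs_tree_block_info between the item and the inline ref:
 *
 *   [btrfs_extent_item][tree_block_info: key + level][inline ref]
 *
 * with the ref being SHARED_BLOCK_REF (offset = parent) for full
 * backref blocks, or TREE_BLOCK_REF (offset = root_objectid).
 */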
4794
4795 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4796                                      struct btrfs_root *root,
4797                                      u64 root_objectid, u64 owner,
4798                                      u64 offset, struct btrfs_key *ins)
4799 {
4800         int ret;
4801
4802         BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4803
4804         ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4805                                          0, root_objectid, owner, offset,
4806                                          BTRFS_ADD_DELAYED_EXTENT, NULL);
4807         return ret;
4808 }
4809
4810 /*
4811  * this is used by the tree logging recovery code.  It records that
4812  * an extent has been allocated and makes sure to clear the free
4813  * space cache bits as well
4814  */
4815 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4816                                    struct btrfs_root *root,
4817                                    u64 root_objectid, u64 owner, u64 offset,
4818                                    struct btrfs_key *ins)
4819 {
4820         int ret;
4821         struct btrfs_block_group_cache *block_group;
4822         struct btrfs_caching_control *caching_ctl;
4823         u64 start = ins->objectid;
4824         u64 num_bytes = ins->offset;
4825
4826         block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
4827         cache_block_group(block_group);
4828         caching_ctl = get_caching_control(block_group);
4829
4830         if (!caching_ctl) {
4831                 BUG_ON(!block_group_cache_done(block_group));
4832                 ret = btrfs_remove_free_space(block_group, start, num_bytes);
4833                 BUG_ON(ret);
4834         } else {
4835                 mutex_lock(&caching_ctl->mutex);
4836
4837                 if (start >= caching_ctl->progress) {
4838                         ret = add_excluded_extent(root, start, num_bytes);
4839                         BUG_ON(ret);
4840                 } else if (start + num_bytes <= caching_ctl->progress) {
4841                         ret = btrfs_remove_free_space(block_group,
4842                                                       start, num_bytes);
4843                         BUG_ON(ret);
4844                 } else {
4845                         num_bytes = caching_ctl->progress - start;
4846                         ret = btrfs_remove_free_space(block_group,
4847                                                       start, num_bytes);
4848                         BUG_ON(ret);
4849
4850                         start = caching_ctl->progress;
4851                         num_bytes = ins->objectid + ins->offset -
4852                                     caching_ctl->progress;
4853                         ret = add_excluded_extent(root, start, num_bytes);
4854                         BUG_ON(ret);
4855                 }
4856
4857                 mutex_unlock(&caching_ctl->mutex);
4858                 put_caching_control(caching_ctl);
4859         }
4860
4861         update_reserved_extents(block_group, ins->offset, 1);
4862         btrfs_put_block_group(block_group);
4863         ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4864                                          0, owner, offset, ins, 1);
4865         return ret;
4866 }
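
/*
 * Worked example of the caching_ctl->progress cases above (numbers are
 * mine): suppose progress = 1M and the logged extent is [768K, 1.25M).
 * The [768K, 1M) part has already been scanned into the free space
 * cache, so it is removed with btrfs_remove_free_space(); the
 * [1M, 1.25M) part has not been scanned yet, so it is excluded via
 * add_excluded_extent() and the caching thread will skip it.
 */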
4867
4868 /*
4869  * finds a free extent and does all the dirty work required for allocation.
4870  * It returns the key for the extent through ins, and a tree buffer for
4871  * the first block of the extent through buf.
4872  *
4873  * returns 0 if everything worked, non-zero otherwise.
4874  */
4875 static int alloc_tree_block(struct btrfs_trans_handle *trans,
4876                             struct btrfs_root *root,
4877                             u64 num_bytes, u64 parent, u64 root_objectid,
4878                             struct btrfs_disk_key *key, int level,
4879                             u64 empty_size, u64 hint_byte, u64 search_end,
4880                             struct btrfs_key *ins)
4881 {
4882         int ret;
4883         u64 flags = 0;
4884
4885         ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4886                                    empty_size, hint_byte, search_end,
4887                                    ins, 0);
4888         if (ret)
4889                 return ret;
4890
4891         if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4892                 if (parent == 0)
4893                         parent = ins->objectid;
4894                 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4895         } else
4896                 BUG_ON(parent > 0);
4897
4898         if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
4899                 struct btrfs_delayed_extent_op *extent_op;
4900                 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4901                 BUG_ON(!extent_op);
4902                 if (key)
4903                         memcpy(&extent_op->key, key, sizeof(extent_op->key));
4904                 else
4905                         memset(&extent_op->key, 0, sizeof(extent_op->key));
4906                 extent_op->flags_to_set = flags;
4907                 extent_op->update_key = 1;
4908                 extent_op->update_flags = 1;
4909                 extent_op->is_data = 0;
4910
4911                 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4912                                         ins->offset, parent, root_objectid,
4913                                         level, BTRFS_ADD_DELAYED_EXTENT,
4914                                         extent_op);
4915                 BUG_ON(ret);
4916         }
4917
4918         if (root_objectid == root->root_key.objectid) {
4919                 u64 used;
4920                 spin_lock(&root->node_lock);
4921                 used = btrfs_root_used(&root->root_item) + num_bytes;
4922                 btrfs_set_root_used(&root->root_item, used);
4923                 spin_unlock(&root->node_lock);
4924         }
4925         return ret;
4926 }
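
/*
 * Note (my summary, not authoritative): the btrfs_delayed_extent_op
 * allocated above rides along with the delayed ref; when the ref is
 * eventually run, __run_delayed_extent_op() applies flags_to_set and
 * the saved key to the extent item, so the key/flags update is batched
 * with the ref update instead of touching the extent tree twice.
 */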
4927
4928 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4929                                             struct btrfs_root *root,
4930                                             u64 bytenr, u32 blocksize,
4931                                             int level)
4932 {
4933         struct extent_buffer *buf;
4934
4935         buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4936         if (!buf)
4937                 return ERR_PTR(-ENOMEM);
4938         btrfs_set_header_generation(buf, trans->transid);
4939         btrfs_set_buffer_lockdep_class(buf, level);
4940         btrfs_tree_lock(buf);
4941         clean_tree_block(trans, root, buf);
4942
4943         btrfs_set_lock_blocking(buf);
4944         btrfs_set_buffer_uptodate(buf);
4945
4946         if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
4947                 /*
4948                  * we allow two log transactions at a time, use different
4949                  * EXTENT bits to differentiate dirty pages.
4950                  */
4951                 if (root->log_transid % 2 == 0)
4952                         set_extent_dirty(&root->dirty_log_pages, buf->start,
4953                                         buf->start + buf->len - 1, GFP_NOFS);
4954                 else
4955                         set_extent_new(&root->dirty_log_pages, buf->start,
4956                                         buf->start + buf->len - 1, GFP_NOFS);
4957         } else {
4958                 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
4959                          buf->start + buf->len - 1, GFP_NOFS);
4960         }
4961         trans->blocks_used++;
4962         /* this returns a buffer locked for blocking */
4963         return buf;
4964 }
4965
4966 /*
4967  * helper function to allocate a block for a given tree
4968  * returns the tree buffer or NULL.
4969  */
4970 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
4971                                         struct btrfs_root *root, u32 blocksize,
4972                                         u64 parent, u64 root_objectid,
4973                                         struct btrfs_disk_key *key, int level,
4974                                         u64 hint, u64 empty_size)
4975 {
4976         struct btrfs_key ins;
4977         int ret;
4978         struct extent_buffer *buf;
4979
4980         ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4981                                key, level, empty_size, hint, (u64)-1, &ins);
4982         if (ret) {
4983                 BUG_ON(ret > 0);
4984                 return ERR_PTR(ret);
4985         }
4986
4987         buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4988                                     blocksize, level);
4989         return buf;
4990 }
4991
4992 struct walk_control {
4993         u64 refs[BTRFS_MAX_LEVEL];
4994         u64 flags[BTRFS_MAX_LEVEL];
4995         struct btrfs_key update_progress;
4996         int stage;
4997         int level;
4998         int shared_level;
4999         int update_ref;
5000         int keep_locks;
5001         int reada_slot;
5002         int reada_count;
5003 };
5004
5005 #define DROP_REFERENCE  1
5006 #define UPDATE_BACKREF  2
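
/*
 * Rough state machine (my summary): snapshot deletion normally walks
 * in DROP_REFERENCE.  When do_walk_down() finds a shared block whose
 * subtree may still need back ref updates, it switches the walk to
 * UPDATE_BACKREF for that subtree, and walk_up_proc() switches back to
 * DROP_REFERENCE once the shared level has been processed.
 */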
5007
5008 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5009                                      struct btrfs_root *root,
5010                                      struct walk_control *wc,
5011                                      struct btrfs_path *path)
5012 {
5013         u64 bytenr;
5014         u64 generation;
5015         u64 refs;
5016         u64 flags;
5017         u64 last = 0;
5018         u32 nritems;
5019         u32 blocksize;
5020         struct btrfs_key key;
5021         struct extent_buffer *eb;
5022         int ret;
5023         int slot;
5024         int nread = 0;
5025
5026         if (path->slots[wc->level] < wc->reada_slot) {
5027                 wc->reada_count = wc->reada_count * 2 / 3;
5028                 wc->reada_count = max(wc->reada_count, 2);
5029         } else {
5030                 wc->reada_count = wc->reada_count * 3 / 2;
5031                 wc->reada_count = min_t(int, wc->reada_count,
5032                                         BTRFS_NODEPTRS_PER_BLOCK(root));
5033         }
5034
5035         eb = path->nodes[wc->level];
5036         nritems = btrfs_header_nritems(eb);
5037         blocksize = btrfs_level_size(root, wc->level - 1);
5038
5039         for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5040                 if (nread >= wc->reada_count)
5041                         break;
5042
5043                 cond_resched();
5044                 bytenr = btrfs_node_blockptr(eb, slot);
5045                 generation = btrfs_node_ptr_generation(eb, slot);
5046
5047                 if (slot == path->slots[wc->level])
5048                         goto reada;
5049
5050                 if (wc->stage == UPDATE_BACKREF &&
5051                     generation <= root->root_key.offset)
5052                         continue;
5053
5054                 /* We don't lock the tree block, it's OK to be racy here */
5055                 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5056                                                &refs, &flags);
5057                 BUG_ON(ret);
5058                 BUG_ON(refs == 0);
5059
5060                 if (wc->stage == DROP_REFERENCE) {
5061                         if (refs == 1)
5062                                 goto reada;
5063
5064                         if (wc->level == 1 &&
5065                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5066                                 continue;
5067                         if (!wc->update_ref ||
5068                             generation <= root->root_key.offset)
5069                                 continue;
5070                         btrfs_node_key_to_cpu(eb, &key, slot);
5071                         ret = btrfs_comp_cpu_keys(&key,
5072                                                   &wc->update_progress);
5073                         if (ret < 0)
5074                                 continue;
5075                 } else {
5076                         if (wc->level == 1 &&
5077                             (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5078                                 continue;
5079                 }
5080 reada:
5081                 ret = readahead_tree_block(root, bytenr, blocksize,
5082                                            generation);
5083                 if (ret)
5084                         break;
5085                 last = bytenr + blocksize;
5086                 nread++;
5087         }
5088         wc->reada_slot = slot;
5089 }
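
/*
 * Illustrative: wc->reada_count behaves like a congestion window.  If
 * the walk is still behind the last readahead position the window
 * shrinks to 2/3 (e.g. 32 -> 21 -> 14, floored at 2); otherwise it
 * grows to 3/2 (32 -> 48 -> 72), capped at
 * BTRFS_NODEPTRS_PER_BLOCK(root).
 */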
5090
5091 /*
5092  * helper to process tree block while walking down the tree.
5093  *
5094  * when wc->stage == UPDATE_BACKREF, this function updates
5095  * back refs for pointers in the block.
5096  *
5097  * NOTE: return value 1 means we should stop walking down.
5098  */
5099 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5100                                    struct btrfs_root *root,
5101                                    struct btrfs_path *path,
5102                                    struct walk_control *wc, int lookup_info)
5103 {
5104         int level = wc->level;
5105         struct extent_buffer *eb = path->nodes[level];
5106         u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5107         int ret;
5108
5109         if (wc->stage == UPDATE_BACKREF &&
5110             btrfs_header_owner(eb) != root->root_key.objectid)
5111                 return 1;
5112
5113         /*
5114          * when the reference count of a tree block is 1, it won't increase
5115          * again.  Once the full backref flag is set, we never clear it.
5116          */
5117         if (lookup_info &&
5118             ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5119              (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5120                 BUG_ON(!path->locks[level]);
5121                 ret = btrfs_lookup_extent_info(trans, root,
5122                                                eb->start, eb->len,
5123                                                &wc->refs[level],
5124                                                &wc->flags[level]);
5125                 BUG_ON(ret);
5126                 BUG_ON(wc->refs[level] == 0);
5127         }
5128
5129         if (wc->stage == DROP_REFERENCE) {
5130                 if (wc->refs[level] > 1)
5131                         return 1;
5132
5133                 if (path->locks[level] && !wc->keep_locks) {
5134                         btrfs_tree_unlock(eb);
5135                         path->locks[level] = 0;
5136                 }
5137                 return 0;
5138         }
5139
5140         /* wc->stage == UPDATE_BACKREF */
5141         if (!(wc->flags[level] & flag)) {
5142                 BUG_ON(!path->locks[level]);
5143                 ret = btrfs_inc_ref(trans, root, eb, 1);
5144                 BUG_ON(ret);
5145                 ret = btrfs_dec_ref(trans, root, eb, 0);
5146                 BUG_ON(ret);
5147                 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5148                                                   eb->len, flag, 0);
5149                 BUG_ON(ret);
5150                 wc->flags[level] |= flag;
5151         }
5152
5153         /*
5154          * the block is shared by multiple trees, so it's not good to
5155          * keep the tree lock
5156          */
5157         if (path->locks[level] && level > 0) {
5158                 btrfs_tree_unlock(eb);
5159                 path->locks[level] = 0;
5160         }
5161         return 0;
5162 }
5163
5164 /*
5165  * helper to process a tree block pointer.
5166  *
5167  * when wc->stage == DROP_REFERENCE, this function checks the
5168  * reference count of the block pointed to.  If the block
5169  * is shared and we need to update back refs for the subtree
5170  * rooted at the block, this function changes wc->stage to
5171  * UPDATE_BACKREF.  If the block is shared and there is no
5172  * need to update back refs, this function drops the reference
5173  * to the block.
5174  *
5175  * NOTE: return value 1 means we should stop walking down.
5176  */
5177 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5178                                  struct btrfs_root *root,
5179                                  struct btrfs_path *path,
5180                                  struct walk_control *wc, int *lookup_info)
5181 {
5182         u64 bytenr;
5183         u64 generation;
5184         u64 parent;
5185         u32 blocksize;
5186         struct btrfs_key key;
5187         struct extent_buffer *next;
5188         int level = wc->level;
5189         int reada = 0;
5190         int ret = 0;
5191
5192         generation = btrfs_node_ptr_generation(path->nodes[level],
5193                                                path->slots[level]);
5194         /*
5195          * if the lower level block was created before the snapshot
5196          * was created, we know there is no need to update back refs
5197          * for the subtree
5198          */
5199         if (wc->stage == UPDATE_BACKREF &&
5200             generation <= root->root_key.offset) {
5201                 *lookup_info = 1;
5202                 return 1;
5203         }
5204
5205         bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5206         blocksize = btrfs_level_size(root, level - 1);
5207
5208         next = btrfs_find_tree_block(root, bytenr, blocksize);
5209         if (!next) {
5210                 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5211                 if (!next)
5212                         return -ENOMEM;
5213                 reada = 1;
5214         }
5215         btrfs_tree_lock(next);
5216         btrfs_set_lock_blocking(next);
5217
5218         ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5219                                        &wc->refs[level - 1],
5220                                        &wc->flags[level - 1]);
5221         BUG_ON(ret);
5222         BUG_ON(wc->refs[level - 1] == 0);
5223         *lookup_info = 0;
5224
5225         if (wc->stage == DROP_REFERENCE) {
5226                 if (wc->refs[level - 1] > 1) {
5227                         if (level == 1 &&
5228                             (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5229                                 goto skip;
5230
5231                         if (!wc->update_ref ||
5232                             generation <= root->root_key.offset)
5233                                 goto skip;
5234
5235                         btrfs_node_key_to_cpu(path->nodes[level], &key,
5236                                               path->slots[level]);
5237                         ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5238                         if (ret < 0)
5239                                 goto skip;
5240
5241                         wc->stage = UPDATE_BACKREF;
5242                         wc->shared_level = level - 1;
5243                 }
5244         } else {
5245                 if (level == 1 &&
5246                     (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5247                         goto skip;
5248         }
5249
5250         if (!btrfs_buffer_uptodate(next, generation)) {
5251                 btrfs_tree_unlock(next);
5252                 free_extent_buffer(next);
5253                 next = NULL;
5254                 *lookup_info = 1;
5255         }
5256
5257         if (!next) {
5258                 if (reada && level == 1)
5259                         reada_walk_down(trans, root, wc, path);
5260                 next = read_tree_block(root, bytenr, blocksize, generation);
5261                 btrfs_tree_lock(next);
5262                 btrfs_set_lock_blocking(next);
5263         }
5264
5265         level--;
5266         BUG_ON(level != btrfs_header_level(next));
5267         path->nodes[level] = next;
5268         path->slots[level] = 0;
5269         path->locks[level] = 1;
5270         wc->level = level;
5271         if (wc->level == 1)
5272                 wc->reada_slot = 0;
5273         return 0;
5274 skip:
5275         wc->refs[level - 1] = 0;
5276         wc->flags[level - 1] = 0;
5277         if (wc->stage == DROP_REFERENCE) {
5278                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5279                         parent = path->nodes[level]->start;
5280                 } else {
5281                         BUG_ON(root->root_key.objectid !=
5282                                btrfs_header_owner(path->nodes[level]));
5283                         parent = 0;
5284                 }
5285
5286                 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5287                                         root->root_key.objectid, level - 1, 0);
5288                 BUG_ON(ret);
5289         }
5290         btrfs_tree_unlock(next);
5291         free_extent_buffer(next);
5292         *lookup_info = 1;
5293         return 1;
5294 }
5295
5296 /*
5297  * helper to process tree block while walking up the tree.
5298  *
5299  * when wc->stage == DROP_REFERENCE, this function drops
5300  * reference count on the block.
5301  *
5302  * when wc->stage == UPDATE_BACKREF, this function changes
5303  * wc->stage back to DROP_REFERENCE if we changed wc->stage
5304  * to UPDATE_BACKREF previously while processing the block.
5305  *
5306  * NOTE: return value 1 means we should stop walking up.
5307  */
5308 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5309                                  struct btrfs_root *root,
5310                                  struct btrfs_path *path,
5311                                  struct walk_control *wc)
5312 {
5313         int ret = 0;
5314         int level = wc->level;
5315         struct extent_buffer *eb = path->nodes[level];
5316         u64 parent = 0;
5317
5318         if (wc->stage == UPDATE_BACKREF) {
5319                 BUG_ON(wc->shared_level < level);
5320                 if (level < wc->shared_level)
5321                         goto out;
5322
5323                 ret = find_next_key(path, level + 1, &wc->update_progress);
5324                 if (ret > 0)
5325                         wc->update_ref = 0;
5326
5327                 wc->stage = DROP_REFERENCE;
5328                 wc->shared_level = -1;
5329                 path->slots[level] = 0;
5330
5331                 /*
5332                  * check reference count again if the block isn't locked.
5333                  * we should start walking down the tree again if reference
5334                  * count is one.
5335                  */
5336                 if (!path->locks[level]) {
5337                         BUG_ON(level == 0);
5338                         btrfs_tree_lock(eb);
5339                         btrfs_set_lock_blocking(eb);
5340                         path->locks[level] = 1;
5341
5342                         ret = btrfs_lookup_extent_info(trans, root,
5343                                                        eb->start, eb->len,
5344                                                        &wc->refs[level],
5345                                                        &wc->flags[level]);
5346                         BUG_ON(ret);
5347                         BUG_ON(wc->refs[level] == 0);
5348                         if (wc->refs[level] == 1) {
5349                                 btrfs_tree_unlock(eb);
5350                                 path->locks[level] = 0;
5351                                 return 1;
5352                         }
5353                 }
5354         }
5355
5356         /* wc->stage == DROP_REFERENCE */
5357         BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5358
5359         if (wc->refs[level] == 1) {
5360                 if (level == 0) {
5361                         if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5362                                 ret = btrfs_dec_ref(trans, root, eb, 1);
5363                         else
5364                                 ret = btrfs_dec_ref(trans, root, eb, 0);
5365                         BUG_ON(ret);
5366                 }
5367                 /* make block locked assertion in clean_tree_block happy */
5368                 if (!path->locks[level] &&
5369                     btrfs_header_generation(eb) == trans->transid) {
5370                         btrfs_tree_lock(eb);
5371                         btrfs_set_lock_blocking(eb);
5372                         path->locks[level] = 1;
5373                 }
5374                 clean_tree_block(trans, root, eb);
5375         }
5376
5377         if (eb == root->node) {
5378                 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5379                         parent = eb->start;
5380                 else
5381                         BUG_ON(root->root_key.objectid !=
5382                                btrfs_header_owner(eb));
5383         } else {
5384                 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5385                         parent = path->nodes[level + 1]->start;
5386                 else
5387                         BUG_ON(root->root_key.objectid !=
5388                                btrfs_header_owner(path->nodes[level + 1]));
5389         }
5390
5391         ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
5392                                 root->root_key.objectid, level, 0);
5393         BUG_ON(ret);
5394 out:
5395         wc->refs[level] = 0;
5396         wc->flags[level] = 0;
5397         return ret;
5398 }
5399
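     /*
      * helper to walk down the tree starting at wc->level.  each block
      * is processed with walk_down_proc; do_walk_down decides whether a
      * child block needs to be entered.  the walk stops at a leaf, when
      * a node runs out of slots, or when walk_down_proc says the block
      * can be handled without visiting its children.
      */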
5400 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5401                                    struct btrfs_root *root,
5402                                    struct btrfs_path *path,
5403                                    struct walk_control *wc)
5404 {
5405         int level = wc->level;
5406         int lookup_info = 1;
5407         int ret;
5408
5409         while (level >= 0) {
5410                 ret = walk_down_proc(trans, root, path, wc, lookup_info);
5411                 if (ret > 0)
5412                         break;
5413
5414                 if (level == 0)
5415                         break;
5416
5417                 if (path->slots[level] >=
5418                     btrfs_header_nritems(path->nodes[level]))
5419                         break;
5420
5421                 ret = do_walk_down(trans, root, path, wc, &lookup_info);
5422                 if (ret > 0) {
5423                         path->slots[level]++;
5424                         continue;
5425                 } else if (ret < 0)
5426                         return ret;
5427                 level = wc->level;
5428         }
5429         return 0;
5430 }
5431
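     /*
      * helper to walk back up the tree, freeing fully processed blocks
      * via walk_up_proc.  returns 0 once a node with remaining slots is
      * found (walk_down_tree then continues from there) and 1 when the
      * walk has finished all levels below max_level.
      */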
5432 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
5433                                  struct btrfs_root *root,
5434                                  struct btrfs_path *path,
5435                                  struct walk_control *wc, int max_level)
5436 {
5437         int level = wc->level;
5438         int ret;
5439
5440         path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5441         while (level < max_level && path->nodes[level]) {
5442                 wc->level = level;
5443                 if (path->slots[level] + 1 <
5444                     btrfs_header_nritems(path->nodes[level])) {
5445                         path->slots[level]++;
5446                         return 0;
5447                 } else {
5448                         ret = walk_up_proc(trans, root, path, wc);
5449                         if (ret > 0)
5450                                 return 0;
5451
5452                         if (path->locks[level]) {
5453                                 btrfs_tree_unlock(path->nodes[level]);
5454                                 path->locks[level] = 0;
5455                         }
5456                         free_extent_buffer(path->nodes[level]);
5457                         path->nodes[level] = NULL;
5458                         level++;
5459                 }
5460         }
5461         return 1;
5462 }
5463
5464 /*
5465  * drop a subvolume tree.
5466  *
5467  * this function traverses the tree, freeing any blocks that are
5468  * only referenced by the tree.
5469  *
5470  * when a shared tree block is found, this function decreases its
5471  * reference count by one. if update_ref is true, this function
5472  * also makes sure backrefs for the shared block and all lower level
5473  * blocks are properly updated.
5474  */
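     /*
      * minimal usage sketch (hypothetical caller; 'snap_root' is an
      * assumption, the real callers live in the snapshot deletion path):
      *
      *      err = btrfs_drop_snapshot(snap_root, 0);
      *      if (err)
      *              printk(KERN_ERR "btrfs: snapshot drop failed %d\n",
      *                     err);
      */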
5475 int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
5476 {
5477         struct btrfs_path *path;
5478         struct btrfs_trans_handle *trans;
5479         struct btrfs_root *tree_root = root->fs_info->tree_root;
5480         struct btrfs_root_item *root_item = &root->root_item;
5481         struct walk_control *wc;
5482         struct btrfs_key key;
5483         int err = 0;
5484         int ret;
5485         int level;
5486
5487         path = btrfs_alloc_path();
5488         BUG_ON(!path);
5489
5490         wc = kzalloc(sizeof(*wc), GFP_NOFS);
5491         BUG_ON(!wc);
5492
5493         trans = btrfs_start_transaction(tree_root, 1);
5494
5495         if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
5496                 level = btrfs_header_level(root->node);
5497                 path->nodes[level] = btrfs_lock_root_node(root);
5498                 btrfs_set_lock_blocking(path->nodes[level]);
5499                 path->slots[level] = 0;
5500                 path->locks[level] = 1;
5501                 memset(&wc->update_progress, 0,
5502                        sizeof(wc->update_progress));
5503         } else {
5504                 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
5505                 memcpy(&wc->update_progress, &key,
5506                        sizeof(wc->update_progress));
5507
5508                 level = root_item->drop_level;
5509                 BUG_ON(level == 0);
5510                 path->lowest_level = level;
5511                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5512                 path->lowest_level = 0;
5513                 if (ret < 0) {
5514                         err = ret;
5515                         goto out;
5516                 }
5517                 WARN_ON(ret > 0);
5518
5519                 /*
5520                  * unlock our path, this is safe because only this
5521                  * function is allowed to delete this snapshot
5522                  */
5523                 btrfs_unlock_up_safe(path, 0);
5524
5525                 level = btrfs_header_level(root->node);
5526                 while (1) {
5527                         btrfs_tree_lock(path->nodes[level]);
5528                         btrfs_set_lock_blocking(path->nodes[level]);
5529
5530                         ret = btrfs_lookup_extent_info(trans, root,
5531                                                 path->nodes[level]->start,
5532                                                 path->nodes[level]->len,
5533                                                 &wc->refs[level],
5534                                                 &wc->flags[level]);
5535                         BUG_ON(ret);
5536                         BUG_ON(wc->refs[level] == 0);
5537
5538                         if (level == root_item->drop_level)
5539                                 break;
5540
5541                         btrfs_tree_unlock(path->nodes[level]);
5542                         WARN_ON(wc->refs[level] != 1);
5543                         level--;
5544                 }
5545         }
5546
5547         wc->level = level;
5548         wc->shared_level = -1;
5549         wc->stage = DROP_REFERENCE;
5550         wc->update_ref = update_ref;
5551         wc->keep_locks = 0;
5552         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
5553
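             /*
              * main drop loop: walk down as far as possible, then walk
              * back up freeing blocks.  drop_progress/drop_level record
              * how far we got so the drop can resume after the
              * transaction is committed.
              */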
5554         while (1) {
5555                 ret = walk_down_tree(trans, root, path, wc);
5556                 if (ret < 0) {
5557                         err = ret;
5558                         break;
5559                 }
5560
5561                 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5562                 if (ret < 0) {
5563                         err = ret;
5564                         break;
5565                 }
5566
5567                 if (ret > 0) {
5568                         BUG_ON(wc->stage != DROP_REFERENCE);
5569                         break;
5570                 }
5571
5572                 if (wc->stage == DROP_REFERENCE) {
5573                         level = wc->level;
5574                         btrfs_node_key(path->nodes[level],
5575                                        &root_item->drop_progress,
5576                                        path->slots[level]);
5577                         root_item->drop_level = level;
5578                 }
5579
5580                 BUG_ON(wc->level == 0);
5581                 if (trans->transaction->in_commit ||
5582                     trans->transaction->delayed_refs.flushing) {
5583                         ret = btrfs_update_root(trans, tree_root,
5584                                                 &root->root_key,
5585                                                 root_item);
5586                         BUG_ON(ret);
5587
5588                         btrfs_end_transaction(trans, tree_root);
5589                         trans = btrfs_start_transaction(tree_root, 1);
5590                 } else {
5591                         unsigned long update;
5592                         update = trans->delayed_ref_updates;
5593                         trans->delayed_ref_updates = 0;
5594                         if (update)
5595                                 btrfs_run_delayed_refs(trans, tree_root,
5596                                                        update);
5597                 }
5598         }
5599         btrfs_release_path(root, path);
5600         BUG_ON(err);
5601
5602         ret = btrfs_del_root(trans, tree_root, &root->root_key);
5603         BUG_ON(ret);
5604
5605         if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5606                 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
5607                                            NULL, NULL);
5608                 BUG_ON(ret < 0);
5609                 if (ret > 0) {
5610                         ret = btrfs_del_orphan_item(trans, tree_root,
5611                                                     root->root_key.objectid);
5612                         BUG_ON(ret);
5613                 }
5614         }
5615
5616         if (root->in_radix) {
5617                 btrfs_free_fs_root(tree_root->fs_info, root);
5618         } else {
5619                 free_extent_buffer(root->node);
5620                 free_extent_buffer(root->commit_root);
5621                 kfree(root);
5622         }
5623 out:
5624         btrfs_end_transaction(trans, tree_root);
5625         kfree(wc);
5626         btrfs_free_path(path);
5627         return err;
5628 }
5629
5630 /*
5631  * drop subtree rooted at tree block 'node'.
5632  *
5633  * NOTE: this function will unlock and release tree block 'node'
5634  */
5635 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5636                         struct btrfs_root *root,
5637                         struct extent_buffer *node,
5638                         struct extent_buffer *parent)
5639 {
5640         struct btrfs_path *path;
5641         struct walk_control *wc;
5642         int level;
5643         int parent_level;
5644         int ret = 0;
5645         int wret;
5646
5647         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5648
5649         path = btrfs_alloc_path();
5650         BUG_ON(!path);
5651
5652         wc = kzalloc(sizeof(*wc), GFP_NOFS);
5653         BUG_ON(!wc);
5654
5655         btrfs_assert_tree_locked(parent);
5656         parent_level = btrfs_header_level(parent);
5657         extent_buffer_get(parent);
5658         path->nodes[parent_level] = parent;
5659         path->slots[parent_level] = btrfs_header_nritems(parent);
5660
5661         btrfs_assert_tree_locked(node);
5662         level = btrfs_header_level(node);
5663         path->nodes[level] = node;
5664         path->slots[level] = 0;
5665         path->locks[level] = 1;
5666
5667         wc->refs[parent_level] = 1;
5668         wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5669         wc->level = level;
5670         wc->shared_level = -1;
5671         wc->stage = DROP_REFERENCE;
5672         wc->update_ref = 0;
5673         wc->keep_locks = 1;
5674         wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
5675
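             /*
              * parent is recorded with a full backref flag so blocks in
              * the subtree are freed with parent based backrefs.  since
              * walk_up_tree stops at parent_level, 'parent' itself is
              * never freed here.
              */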
5676         while (1) {
5677                 wret = walk_down_tree(trans, root, path, wc);
5678                 if (wret < 0) {
5679                         ret = wret;
5680                         break;
5681                 }
5682
5683                 wret = walk_up_tree(trans, root, path, wc, parent_level);
5684                 if (wret < 0)
5685                         ret = wret;
5686                 if (wret != 0)
5687                         break;
5688         }
5689
5690         kfree(wc);
5691         btrfs_free_path(path);
5692         return ret;
5693 }
5694
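     /*
      * NOTE: the code from here down to the matching #endif is compiled
      * out; it appears to be the old extent relocation implementation,
      * kept for reference.
      */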
5695 #if 0
5696 static unsigned long calc_ra(unsigned long start, unsigned long last,
5697                              unsigned long nr)
5698 {
5699         return min(last, start + nr - 1);
5700 }
5701
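     /*
      * read in and dirty every page backing [start, start + len) of the
      * relocation inode.  marking the range delalloc and the pages dirty
      * forces writeback to rewrite the data, placing it at the new
      * location.
      */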
5702 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
5703                                          u64 len)
5704 {
5705         u64 page_start;
5706         u64 page_end;
5707         unsigned long first_index;
5708         unsigned long last_index;
5709         unsigned long i;
5710         struct page *page;
5711         struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
5712         struct file_ra_state *ra;
5713         struct btrfs_ordered_extent *ordered;
5714         unsigned int total_read = 0;
5715         unsigned int total_dirty = 0;
5716         int ret = 0;
5717
5718         ra = kzalloc(sizeof(*ra), GFP_NOFS);
             if (!ra)
                     return -ENOMEM;
5719
5720         mutex_lock(&inode->i_mutex);
5721         first_index = start >> PAGE_CACHE_SHIFT;
5722         last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5723
5724         /* make sure the dirty trick played by the caller works */
5725         ret = invalidate_inode_pages2_range(inode->i_mapping,
5726                                             first_index, last_index);
5727         if (ret)
5728                 goto out_unlock;
5729
5730         file_ra_state_init(ra, inode->i_mapping);
5731
5732         for (i = first_index ; i <= last_index; i++) {
5733                 if (total_read % ra->ra_pages == 0) {
5734                         btrfs_force_ra(inode->i_mapping, ra, NULL, i,
5735                                        calc_ra(i, last_index, ra->ra_pages));
5736                 }
5737                 total_read++;
5738 again:
5739                 BUG_ON(((u64)i << PAGE_CACHE_SHIFT) >
5740                        i_size_read(inode));
5741                 page = grab_cache_page(inode->i_mapping, i);
5742                 if (!page) {
5743                         ret = -ENOMEM;
5744                         goto out_unlock;
5745                 }
5746                 if (!PageUptodate(page)) {
5747                         btrfs_readpage(NULL, page);
5748                         lock_page(page);
5749                         if (!PageUptodate(page)) {
5750                                 unlock_page(page);
5751                                 page_cache_release(page);
5752                                 ret = -EIO;
5753                                 goto out_unlock;
5754                         }
5755                 }
5756                 wait_on_page_writeback(page);
5757
5758                 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5759                 page_end = page_start + PAGE_CACHE_SIZE - 1;
5760                 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
5761
5762                 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5763                 if (ordered) {
5764                         unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5765                         unlock_page(page);
5766                         page_cache_release(page);
5767                         btrfs_start_ordered_extent(inode, ordered, 1);
5768                         btrfs_put_ordered_extent(ordered);
5769                         goto again;
5770                 }
5771                 set_page_extent_mapped(page);
5772
5773                 if (i == first_index)
5774                         set_extent_bits(io_tree, page_start, page_end,
5775                                         EXTENT_BOUNDARY, GFP_NOFS);
5776                 btrfs_set_extent_delalloc(inode, page_start, page_end);
5777
5778                 set_page_dirty(page);
5779                 total_dirty++;
5780
5781                 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5782                 unlock_page(page);
5783                 page_cache_release(page);
5784         }
5785
5786 out_unlock:
5787         kfree(ra);
5788         mutex_unlock(&inode->i_mutex);
5789         balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
5790         return ret;
5791 }
5792
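     /*
      * insert a pinned extent mapping that points the file range at the
      * extent's current disk location, then run relocate_inode_pages so
      * the data is read in and written back out at its new home.
      */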
5793 static noinline int relocate_data_extent(struct inode *reloc_inode,
5794                                          struct btrfs_key *extent_key,
5795                                          u64 offset)
5796 {
5797         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5798         struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5799         struct extent_map *em;
5800         u64 start = extent_key->objectid - offset;
5801         u64 end = start + extent_key->offset - 1;
5802
5803         em = alloc_extent_map(GFP_NOFS);
5804         BUG_ON(!em || IS_ERR(em));
5805
5806         em->start = start;
5807         em->len = extent_key->offset;
5808         em->block_len = extent_key->offset;
5809         em->block_start = extent_key->objectid;
5810         em->bdev = root->fs_info->fs_devices->latest_bdev;
5811         set_bit(EXTENT_FLAG_PINNED, &em->flags);
5812
5813         /* set up an extent map to cheat btrfs_readpage */
5814         lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5815         while (1) {
5816                 int ret;
5817                 write_lock(&em_tree->lock);
5818                 ret = add_extent_mapping(em_tree, em);
5819                 write_unlock(&em_tree->lock);
5820                 if (ret != -EEXIST) {
5821                         free_extent_map(em);
5822                         break;
5823                 }
5824                 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
5825         }
5826         unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
5827
5828         return relocate_inode_pages(reloc_inode, start, extent_key->offset);
5829 }
5830
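     /*
      * btrfs_ref_path describes one chain of backrefs from an extent up
      * to a tree root: nodes[] holds the bytenr of the referencing tree
      * block at each level.  node_keys[] and new_nodes[] carry per-level
      * state for the relocation code that consumes these paths.
      */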
5831 struct btrfs_ref_path {
5832         u64 extent_start;
5833         u64 nodes[BTRFS_MAX_LEVEL];
5834         u64 root_objectid;
5835         u64 root_generation;
5836         u64 owner_objectid;
5837         u32 num_refs;
5838         int lowest_level;
5839         int current_level;
5840         int shared_level;
5841
5842         struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5843         u64 new_nodes[BTRFS_MAX_LEVEL];
5844 };
5845
5846 struct disk_extent {
5847         u64 ram_bytes;
5848         u64 disk_bytenr;
5849         u64 disk_num_bytes;
5850         u64 offset;
5851         u64 num_bytes;
5852         u8 compression;
5853         u8 encryption;
5854         u16 other_encoding;
5855 };
5856
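     /*
      * return 1 if the tree is one of the global trees that are never
      * snapshotted; their blocks are not reference counted across roots.
      */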
5857 static int is_cowonly_root(u64 root_objectid)
5858 {
5859         if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5860             root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5861             root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5862             root_objectid == BTRFS_DEV_TREE_OBJECTID ||
5863             root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5864             root_objectid == BTRFS_CSUM_TREE_OBJECTID)
5865                 return 1;
5866         return 0;
5867 }
5868
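     /*
      * advance ref_path to the next chain of backrefs for
      * ref_path->extent_start.  on the first call the path is built
      * bottom-up; later calls back down to the deepest level that still
      * has unvisited backrefs and climb up again along the next one.
      * returns 0 when a path reaching a tree root was found, 1 when all
      * paths are exhausted and < 0 on error.
      */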
5869 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
5870                                     struct btrfs_root *extent_root,
5871                                     struct btrfs_ref_path *ref_path,
5872                                     int first_time)
5873 {
5874         struct extent_buffer *leaf;
5875         struct btrfs_path *path;
5876         struct btrfs_extent_ref *ref;
5877         struct btrfs_key key;
5878         struct btrfs_key found_key;
5879         u64 bytenr;
5880         u32 nritems;
5881         int level;
5882         int ret = 1;
5883
5884         path = btrfs_alloc_path();
5885         if (!path)
5886                 return -ENOMEM;
5887
5888         if (first_time) {
5889                 ref_path->lowest_level = -1;
5890                 ref_path->current_level = -1;
5891                 ref_path->shared_level = -1;
5892                 goto walk_up;
5893         }
5894 walk_down:
5895         level = ref_path->current_level - 1;
5896         while (level >= -1) {
5897                 u64 parent;
5898                 if (level < ref_path->lowest_level)
5899                         break;
5900
5901                 if (level >= 0)
5902                         bytenr = ref_path->nodes[level];
5903                 else
5904                         bytenr = ref_path->extent_start;
5905                 BUG_ON(bytenr == 0);
5906
5907                 parent = ref_path->nodes[level + 1];
5908                 ref_path->nodes[level + 1] = 0;
5909                 ref_path->current_level = level;
5910                 BUG_ON(parent == 0);
5911
5912                 key.objectid = bytenr;
5913                 key.offset = parent + 1;
5914                 key.type = BTRFS_EXTENT_REF_KEY;
5915
5916                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5917                 if (ret < 0)
5918                         goto out;
5919                 BUG_ON(ret == 0);
5920
5921                 leaf = path->nodes[0];
5922                 nritems = btrfs_header_nritems(leaf);
5923                 if (path->slots[0] >= nritems) {
5924                         ret = btrfs_next_leaf(extent_root, path);
5925                         if (ret < 0)
5926                                 goto out;
5927                         if (ret > 0)
5928                                 goto next;
5929                         leaf = path->nodes[0];
5930                 }
5931
5932                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5933                 if (found_key.objectid == bytenr &&
5934                     found_key.type == BTRFS_EXTENT_REF_KEY) {
5935                         if (level < ref_path->shared_level)
5936                                 ref_path->shared_level = level;
5937                         goto found;
5938                 }
5939 next:
5940                 level--;
5941                 btrfs_release_path(extent_root, path);
5942                 cond_resched();
5943         }
5944         /* reached lowest level */
5945         ret = 1;
5946         goto out;
5947 walk_up:
5948         level = ref_path->current_level;
5949         while (level < BTRFS_MAX_LEVEL - 1) {
5950                 u64 ref_objectid;
5951
5952                 if (level >= 0)
5953                         bytenr = ref_path->nodes[level];
5954                 else
5955                         bytenr = ref_path->extent_start;
5956
5957                 BUG_ON(bytenr == 0);
5958
5959                 key.objectid = bytenr;
5960                 key.offset = 0;
5961                 key.type = BTRFS_EXTENT_REF_KEY;
5962
5963                 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5964                 if (ret < 0)
5965                         goto out;
5966
5967                 leaf = path->nodes[0];
5968                 nritems = btrfs_header_nritems(leaf);
5969                 if (path->slots[0] >= nritems) {
5970                         ret = btrfs_next_leaf(extent_root, path);
5971                         if (ret < 0)
5972                                 goto out;
5973                         if (ret > 0) {
5974                                 /* the extent was freed by someone */
5975                                 if (ref_path->lowest_level == level)
5976                                         goto out;
5977                                 btrfs_release_path(extent_root, path);
5978                                 goto walk_down;
5979                         }
5980                         leaf = path->nodes[0];
5981                 }
5982
5983                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5984                 if (found_key.objectid != bytenr ||
5985                                 found_key.type != BTRFS_EXTENT_REF_KEY) {
5986                         /* the extent was freed by someone */
5987                         if (ref_path->lowest_level == level) {
5988                                 ret = 1;
5989                                 goto out;
5990                         }
5991                         btrfs_release_path(extent_root, path);
5992                         goto walk_down;
5993                 }
5994 found:
5995                 ref = btrfs_item_ptr(leaf, path->slots[0],
5996                                 struct btrfs_extent_ref);
5997                 ref_objectid = btrfs_ref_objectid(leaf, ref);
5998                 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5999                         if (first_time) {
6000                                 level = (int)ref_objectid;
6001                                 BUG_ON(level >= BTRFS_MAX_LEVEL);
6002                                 ref_path->lowest_level = level;
6003                                 ref_path->current_level = level;
6004                                 ref_path->nodes[level] = bytenr;
6005                         } else {
6006                                 WARN_ON(ref_objectid != level);
6007                         }
6008                 } else {
6009                         WARN_ON(level != -1);
6010                 }
6011                 first_time = 0;
6012
6013                 if (ref_path->lowest_level == level) {
6014                         ref_path->owner_objectid = ref_objectid;
6015                         ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6016                 }
6017
6018                 /*
6019                  * the block is a tree root or the block isn't in a
6020                  * reference counted tree.
6021                  */
6022                 if (found_key.objectid == found_key.offset ||
6023                     is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6024                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6025                         ref_path->root_generation =
6026                                 btrfs_ref_generation(leaf, ref);
6027                         if (level < 0) {
6028                                 /* special reference from the tree log */
6029                                 ref_path->nodes[0] = found_key.offset;
6030                                 ref_path->current_level = 0;
6031                         }
6032                         ret = 0;
6033                         goto out;
6034                 }
6035
6036                 level++;
6037                 BUG_ON(ref_path->nodes[level] != 0);
6038                 ref_path->nodes[level] = found_key.offset;
6039                 ref_path->current_level = level;
6040
6041                 /*
6042                  * the reference was created in the running transaction,
6043                  * no need to continue walking up.
6044                  */
6045                 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6046                         ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6047                         ref_path->root_generation =
6048                                 btrfs_ref_generation(leaf, ref);
6049                         ret = 0;
6050                         goto out;
6051                 }
6052
6053                 btrfs_release_path(extent_root, path);
6054                 cond_resched();
6055         }
6056         /* reached max tree level, but no tree root found. */
6057         BUG();
6058 out:
6059         btrfs_free_path(path);
6060         return ret;
6061 }
6062
6063 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6064                                 struct btrfs_root *extent_root,
6065                                 struct btrfs_ref_path *ref_path,
6066                                 u64 extent_start)
6067 {
6068         memset(ref_path, 0, sizeof(*ref_path));
6069         ref_path->extent_start = extent_start;
6070
6071         return __next_ref_path(trans, extent_root, ref_path, 1);
6072 }
6073
6074 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6075                                struct btrfs_root *extent_root,
6076                                struct btrfs_ref_path *ref_path)
6077 {
6078         return __next_ref_path(trans, extent_root, ref_path, 0);
6079 }
6080
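     /*
      * collect the file extents of the relocation inode that cover the
      * extent being relocated and return them in *extents.  if
      * no_fragment is set and the data no longer fits in one extent,
      * return 1 so the caller can fall back to fragmented relocation.
      */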
6081 static noinline int get_new_locations(struct inode *reloc_inode,
6082                                       struct btrfs_key *extent_key,
6083                                       u64 offset, int no_fragment,
6084                                       struct disk_extent **extents,
6085                                       int *nr_extents)
6086 {
6087         struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6088         struct btrfs_path *path;
6089         struct btrfs_file_extent_item *fi;
6090         struct extent_buffer *leaf;
6091         struct disk_extent *exts = *extents;
6092         struct btrfs_key found_key;
6093         u64 cur_pos;
6094         u64 last_byte;
6095         u32 nritems;
6096         int nr = 0;
6097         int max = *nr_extents;
6098         int ret;
6099
6100         WARN_ON(!no_fragment && *extents);
6101         if (!exts) {
6102                 max = 1;
6103                 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6104                 if (!exts)
6105                         return -ENOMEM;
6106         }
6107
6108         path = btrfs_alloc_path();
6109         BUG_ON(!path);
6110
6111         cur_pos = extent_key->objectid - offset;
6112         last_byte = extent_key->objectid + extent_key->offset;
6113         ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6114                                        cur_pos, 0);
6115         if (ret < 0)
6116                 goto out;
6117         if (ret > 0) {
6118                 ret = -ENOENT;
6119                 goto out;
6120         }
6121
6122         while (1) {
6123                 leaf = path->nodes[0];
6124                 nritems = btrfs_header_nritems(leaf);
6125                 if (path->slots[0] >= nritems) {
6126                         ret = btrfs_next_leaf(root, path);
6127                         if (ret < 0)
6128                                 goto out;
6129                         if (ret > 0)
6130                                 break;
6131                         leaf = path->nodes[0];
6132                 }
6133
6134                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6135                 if (found_key.offset != cur_pos ||
6136                     found_key.type != BTRFS_EXTENT_DATA_KEY ||
6137                     found_key.objectid != reloc_inode->i_ino)
6138                         break;
6139
6140                 fi = btrfs_item_ptr(leaf, path->slots[0],
6141                                     struct btrfs_file_extent_item);
6142                 if (btrfs_file_extent_type(leaf, fi) !=
6143                     BTRFS_FILE_EXTENT_REG ||
6144                     btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6145                         break;
6146
6147                 if (nr == max) {
6148                         struct disk_extent *old = exts;
6149                         max *= 2;
6150                         exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
                             if (!exts) {
                                     exts = old;
                                     ret = -ENOMEM;
                                     goto out;
                             }
6151                         memcpy(exts, old, sizeof(*exts) * nr);
6152                         if (old != *extents)
6153                                 kfree(old);
6154                 }
6155
6156                 exts[nr].disk_bytenr =
6157                         btrfs_file_extent_disk_bytenr(leaf, fi);
6158                 exts[nr].disk_num_bytes =
6159                         btrfs_file_extent_disk_num_bytes(leaf, fi);
6160                 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6161                 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6162                 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6163                 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6164                 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6165                 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6166                                                                            fi);
6167                 BUG_ON(exts[nr].offset > 0);
6168                 BUG_ON(exts[nr].compression || exts[nr].encryption);
6169                 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
6170
6171                 cur_pos += exts[nr].num_bytes;
6172                 nr++;
6173
6174                 if (cur_pos + offset >= last_byte)
6175                         break;
6176
6177                 if (no_fragment) {
6178                         ret = 1;
6179                         goto out;
6180                 }
6181                 path->slots[0]++;
6182         }
6183
6184         BUG_ON(cur_pos + offset > last_byte);
6185         if (cur_pos + offset < last_byte) {
6186                 ret = -ENOENT;
6187                 goto out;
6188         }
6189         ret = 0;
6190 out:
6191         btrfs_free_path(path);
6192         if (ret) {
6193                 if (exts != *extents)
6194                         kfree(exts);
6195         } else {
6196                 *extents = exts;
6197                 *nr_extents = nr;
6198         }
6199         return ret;
6200 }
6201
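     /*
      * find every file extent item in the tree described by ref_path
      * that points at extent_key and switch it over to new_extents.
      * the affected file range is locked in the inode's io_tree while
      * the pointer is updated; references are added for the new extent
      * and dropped for the old one.
      */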
6202 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
6203                                         struct btrfs_root *root,
6204                                         struct btrfs_path *path,
6205                                         struct btrfs_key *extent_key,
6206                                         struct btrfs_key *leaf_key,
6207                                         struct btrfs_ref_path *ref_path,
6208                                         struct disk_extent *new_extents,
6209                                         int nr_extents)
6210 {
6211         struct extent_buffer *leaf;
6212         struct btrfs_file_extent_item *fi;
6213         struct inode *inode = NULL;
6214         struct btrfs_key key;
6215         u64 lock_start = 0;
6216         u64 lock_end = 0;
6217         u64 num_bytes;
6218         u64 ext_offset;
6219         u64 search_end = (u64)-1;
6220         u32 nritems;
6221         int nr_scanned = 0;
6222         int extent_locked = 0;
6223         int extent_type;
6224         int ret;
6225
6226         memcpy(&key, leaf_key, sizeof(key));
6227         if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6228                 if (key.objectid < ref_path->owner_objectid ||
6229                     (key.objectid == ref_path->owner_objectid &&
6230                      key.type < BTRFS_EXTENT_DATA_KEY)) {
6231                         key.objectid = ref_path->owner_objectid;
6232                         key.type = BTRFS_EXTENT_DATA_KEY;
6233                         key.offset = 0;
6234                 }
6235         }
6236
6237         while (1) {
6238                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6239                 if (ret < 0)
6240                         goto out;
6241
6242                 leaf = path->nodes[0];
6243                 nritems = btrfs_header_nritems(leaf);
6244 next:
6245                 if (extent_locked && ret > 0) {
6246                         /*
6247                          * the file extent item was modified by someone
6248                          * before the extent got locked.
6249                          */
6250                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6251                                       lock_end, GFP_NOFS);
6252                         extent_locked = 0;
6253                 }
6254
6255                 if (path->slots[0] >= nritems) {
6256                         if (++nr_scanned > 2)
6257                                 break;
6258
6259                         BUG_ON(extent_locked);
6260                         ret = btrfs_next_leaf(root, path);
6261                         if (ret < 0)
6262                                 goto out;
6263                         if (ret > 0)
6264                                 break;
6265                         leaf = path->nodes[0];
6266                         nritems = btrfs_header_nritems(leaf);
6267                 }
6268
6269                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6270
6271                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6272                         if ((key.objectid > ref_path->owner_objectid) ||
6273                             (key.objectid == ref_path->owner_objectid &&
6274                              key.type > BTRFS_EXTENT_DATA_KEY) ||
6275                             key.offset >= search_end)
6276                                 break;
6277                 }
6278
6279                 if (inode && key.objectid != inode->i_ino) {
6280                         BUG_ON(extent_locked);
6281                         btrfs_release_path(root, path);
6282                         mutex_unlock(&inode->i_mutex);
6283                         iput(inode);
6284                         inode = NULL;
6285                         continue;
6286                 }
6287
6288                 if (key.type != BTRFS_EXTENT_DATA_KEY) {
6289                         path->slots[0]++;
6290                         ret = 1;
6291                         goto next;
6292                 }
6293                 fi = btrfs_item_ptr(leaf, path->slots[0],
6294                                     struct btrfs_file_extent_item);
6295                 extent_type = btrfs_file_extent_type(leaf, fi);
6296                 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6297                      extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
6298                     (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6299                      extent_key->objectid)) {
6300                         path->slots[0]++;
6301                         ret = 1;
6302                         goto next;
6303                 }
6304
6305                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6306                 ext_offset = btrfs_file_extent_offset(leaf, fi);
6307
6308                 if (search_end == (u64)-1) {
6309                         search_end = key.offset - ext_offset +
6310                                 btrfs_file_extent_ram_bytes(leaf, fi);
6311                 }
6312
6313                 if (!extent_locked) {
6314                         lock_start = key.offset;
6315                         lock_end = lock_start + num_bytes - 1;
6316                 } else {
6317                         if (lock_start > key.offset ||
6318                             lock_end + 1 < key.offset + num_bytes) {
6319                                 unlock_extent(&BTRFS_I(inode)->io_tree,
6320                                               lock_start, lock_end, GFP_NOFS);
6321                                 extent_locked = 0;
6322                         }
6323                 }
6324
6325                 if (!inode) {
6326                         btrfs_release_path(root, path);
6327
6328                         inode = btrfs_iget_locked(root->fs_info->sb,
6329                                                   key.objectid, root);
                             if (!inode) {
                                     key.offset = (u64)-1;
                                     goto skip;
                             }
6330                         if (inode->i_state & I_NEW) {
6331                                 BTRFS_I(inode)->root = root;
6332                                 BTRFS_I(inode)->location.objectid =
6333                                         key.objectid;
6334                                 BTRFS_I(inode)->location.type =
6335                                         BTRFS_INODE_ITEM_KEY;
6336                                 BTRFS_I(inode)->location.offset = 0;
6337                                 btrfs_read_locked_inode(inode);
6338                                 unlock_new_inode(inode);
6339                         }
6340                         /*
6341                          * some code calls btrfs_commit_transaction while
6342                          * holding the i_mutex, so we can't use mutex_lock
6343                          * here.
6344                          */
6345                         if (is_bad_inode(inode) ||
6346                             !mutex_trylock(&inode->i_mutex)) {
6347                                 iput(inode);
6348                                 inode = NULL;
6349                                 key.offset = (u64)-1;
6350                                 goto skip;
6351                         }
6352                 }
6353
6354                 if (!extent_locked) {
6355                         struct btrfs_ordered_extent *ordered;
6356
6357                         btrfs_release_path(root, path);
6358
6359                         lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6360                                     lock_end, GFP_NOFS);
6361                         ordered = btrfs_lookup_first_ordered_extent(inode,
6362                                                                     lock_end);
6363                         if (ordered &&
6364                             ordered->file_offset <= lock_end &&
6365                             ordered->file_offset + ordered->len > lock_start) {
6366                                 unlock_extent(&BTRFS_I(inode)->io_tree,
6367                                               lock_start, lock_end, GFP_NOFS);
6368                                 btrfs_start_ordered_extent(inode, ordered, 1);
6369                                 btrfs_put_ordered_extent(ordered);
6370                                 key.offset += num_bytes;
6371                                 goto skip;
6372                         }
6373                         if (ordered)
6374                                 btrfs_put_ordered_extent(ordered);
6375
6376                         extent_locked = 1;
6377                         continue;
6378                 }
6379
6380                 if (nr_extents == 1) {
6381                         /* update extent pointer in place */
6382                         btrfs_set_file_extent_disk_bytenr(leaf, fi,
6383                                                 new_extents[0].disk_bytenr);
6384                         btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6385                                                 new_extents[0].disk_num_bytes);
6386                         btrfs_mark_buffer_dirty(leaf);
6387
6388                         btrfs_drop_extent_cache(inode, key.offset,
6389                                                 key.offset + num_bytes - 1, 0);
6390
6391                         ret = btrfs_inc_extent_ref(trans, root,
6392                                                 new_extents[0].disk_bytenr,
6393                                                 new_extents[0].disk_num_bytes,
6394                                                 leaf->start,
6395                                                 root->root_key.objectid,
6396                                                 trans->transid,
6397                                                 key.objectid);
6398                         BUG_ON(ret);
6399
6400                         ret = btrfs_free_extent(trans, root,
6401                                                 extent_key->objectid,
6402                                                 extent_key->offset,
6403                                                 leaf->start,
6404                                                 btrfs_header_owner(leaf),
6405                                                 btrfs_header_generation(leaf),
6406                                                 key.objectid, 0);
6407                         BUG_ON(ret);
6408
6409                         btrfs_release_path(root, path);
6410                         key.offset += num_bytes;
6411                 } else {
6412                         BUG_ON(1);
6413 #if 0
6414                         u64 alloc_hint;
6415                         u64 extent_len;
6416                         int i;
6417                         /*
6418                          * drop the old extent pointer first, then insert the
6419                          * new pointers one by one
6420                          */
6421                         btrfs_release_path(root, path);
6422                         ret = btrfs_drop_extents(trans, root, inode, key.offset,
6423                                                  key.offset + num_bytes,
6424                                                  key.offset, &alloc_hint);
6425                         BUG_ON(ret);
6426
6427                         for (i = 0; i < nr_extents; i++) {
6428                                 if (ext_offset >= new_extents[i].num_bytes) {
6429                                         ext_offset -= new_extents[i].num_bytes;
6430                                         continue;
6431                                 }
6432                                 extent_len = min(new_extents[i].num_bytes -
6433                                                  ext_offset, num_bytes);
6434
6435                                 ret = btrfs_insert_empty_item(trans, root,
6436                                                               path, &key,
6437                                                               sizeof(*fi));
6438                                 BUG_ON(ret);
6439
6440                                 leaf = path->nodes[0];
6441                                 fi = btrfs_item_ptr(leaf, path->slots[0],
6442                                                 struct btrfs_file_extent_item);
6443                                 btrfs_set_file_extent_generation(leaf, fi,
6444                                                         trans->transid);
6445                                 btrfs_set_file_extent_type(leaf, fi,
6446                                                         BTRFS_FILE_EXTENT_REG);
6447                                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6448                                                 new_extents[i].disk_bytenr);
6449                                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6450                                                 new_extents[i].disk_num_bytes);
6451                                 btrfs_set_file_extent_ram_bytes(leaf, fi,
6452                                                 new_extents[i].ram_bytes);
6453
6454                                 btrfs_set_file_extent_compression(leaf, fi,
6455                                                 new_extents[i].compression);
6456                                 btrfs_set_file_extent_encryption(leaf, fi,
6457                                                 new_extents[i].encryption);
6458                                 btrfs_set_file_extent_other_encoding(leaf, fi,
6459                                                 new_extents[i].other_encoding);
6460
6461                                 btrfs_set_file_extent_num_bytes(leaf, fi,
6462                                                         extent_len);
6463                                 ext_offset += new_extents[i].offset;
6464                                 btrfs_set_file_extent_offset(leaf, fi,
6465                                                         ext_offset);
6466                                 btrfs_mark_buffer_dirty(leaf);
6467
6468                                 btrfs_drop_extent_cache(inode, key.offset,
6469                                                 key.offset + extent_len - 1, 0);
6470
6471                                 ret = btrfs_inc_extent_ref(trans, root,
6472                                                 new_extents[i].disk_bytenr,
6473                                                 new_extents[i].disk_num_bytes,
6474                                                 leaf->start,
6475                                                 root->root_key.objectid,
6476                                                 trans->transid, key.objectid);
6477                                 BUG_ON(ret);
6478                                 btrfs_release_path(root, path);
6479
6480                                 inode_add_bytes(inode, extent_len);
6481
6482                                 ext_offset = 0;
6483                                 num_bytes -= extent_len;
6484                                 key.offset += extent_len;
6485
6486                                 if (num_bytes == 0)
6487                                         break;
6488                         }
6489                         BUG_ON(i >= nr_extents);
6490 #endif
6491                 }
6492
6493                 if (extent_locked) {
6494                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6495                                       lock_end, GFP_NOFS);
6496                         extent_locked = 0;
6497                 }
6498 skip:
6499                 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
6500                     key.offset >= search_end)
6501                         break;
6502
6503                 cond_resched();
6504         }
6505         ret = 0;
6506 out:
6507         btrfs_release_path(root, path);
6508         if (inode) {
6509                 mutex_unlock(&inode->i_mutex);
6510                 if (extent_locked) {
6511                         unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6512                                       lock_end, GFP_NOFS);
6513                 }
6514                 iput(inode);
6515         }
6516         return ret;
6517 }
6518
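     /*
      * after a leaf of a reloc tree was cowed, copy the cached leaf ref
      * from the original block to the new block so the reference cache
      * stays valid.
      */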
6519 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
6520                                struct btrfs_root *root,
6521                                struct extent_buffer *buf, u64 orig_start)
6522 {
6523         int level;
6524         int ret;
6525
6526         BUG_ON(btrfs_header_generation(buf) != trans->transid);
6527         BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6528
6529         level = btrfs_header_level(buf);
6530         if (level == 0) {
6531                 struct btrfs_leaf_ref *ref;
6532                 struct btrfs_leaf_ref *orig_ref;
6533
6534                 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6535                 if (!orig_ref)
6536                         return -ENOENT;
6537
6538                 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6539                 if (!ref) {
6540                         btrfs_free_leaf_ref(root, orig_ref);
6541                         return -ENOMEM;
6542                 }
6543
6544                 ref->nritems = orig_ref->nritems;
6545                 memcpy(ref->extents, orig_ref->extents,
6546                         sizeof(ref->extents[0]) * ref->nritems);
6547
6548                 btrfs_free_leaf_ref(root, orig_ref);
6549
6550                 ref->root_gen = trans->transid;
6551                 ref->bytenr = buf->start;
6552                 ref->owner = btrfs_header_owner(buf);
6553                 ref->generation = btrfs_header_generation(buf);
6554
6555                 ret = btrfs_add_leaf_ref(root, ref, 0);
6556                 WARN_ON(ret);
6557                 btrfs_free_leaf_ref(root, ref);
6558         }
6559         return 0;
6560 }
6561
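     /*
      * drop the cached extent mappings for every file extent referenced
      * by 'leaf' so that subsequent reads look the extents up on disk
      * again after relocation.
      */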
6562 static noinline int invalidate_extent_cache(struct btrfs_root *root,
6563                                         struct extent_buffer *leaf,
6564                                         struct btrfs_block_group_cache *group,
6565                                         struct btrfs_root *target_root)
6566 {
6567         struct btrfs_key key;
6568         struct inode *inode = NULL;
6569         struct btrfs_file_extent_item *fi;
6570         struct extent_state *cached_state = NULL;
6571         u64 num_bytes;
6572         u64 skip_objectid = 0;
6573         u32 nritems;
6574         u32 i;
6575
6576         nritems = btrfs_header_nritems(leaf);
6577         for (i = 0; i < nritems; i++) {
6578                 btrfs_item_key_to_cpu(leaf, &key, i);
6579                 if (key.objectid == skip_objectid ||
6580                     key.type != BTRFS_EXTENT_DATA_KEY)
6581                         continue;
6582                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6583                 if (btrfs_file_extent_type(leaf, fi) ==
6584                     BTRFS_FILE_EXTENT_INLINE)
6585                         continue;
6586                 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6587                         continue;
6588                 if (!inode || inode->i_ino != key.objectid) {
6589                         iput(inode);
6590                         inode = btrfs_ilookup(target_root->fs_info->sb,
6591                                               key.objectid, target_root, 1);
6592                 }
6593                 if (!inode) {
6594                         skip_objectid = key.objectid;
6595                         continue;
6596                 }
6597                 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6598
6599                 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
6600                                  key.offset + num_bytes - 1, 0, &cached_state,
6601                                  GFP_NOFS);
6602                 btrfs_drop_extent_cache(inode, key.offset,
6603                                         key.offset + num_bytes - 1, 1);
6604                 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
6605                                      key.offset + num_bytes - 1, &cached_state,
6606                                      GFP_NOFS);
6607                 cond_resched();
6608         }
6609         iput(inode);
6610         return 0;
6611 }
6612
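     /*
      * rewrite every file extent pointer in 'leaf' that falls inside the
      * block group being relocated: get_new_locations finds the new copy
      * of the data, and the pointers and the cached leaf ref are updated
      * to match.
      */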
6613 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
6614                                         struct btrfs_root *root,
6615                                         struct extent_buffer *leaf,
6616                                         struct btrfs_block_group_cache *group,
6617                                         struct inode *reloc_inode)
6618 {
6619         struct btrfs_key key;
6620         struct btrfs_key extent_key;
6621         struct btrfs_file_extent_item *fi;
6622         struct btrfs_leaf_ref *ref;
6623         struct disk_extent *new_extent;
6624         u64 bytenr;
6625         u64 num_bytes;
6626         u32 nritems;
6627         u32 i;
6628         int ext_index;
6629         int nr_extent;
6630         int ret;
6631
6632         new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
6633         BUG_ON(!new_extent);
6634
6635         ref = btrfs_lookup_leaf_ref(root, leaf->start);
6636         BUG_ON(!ref);
6637
6638         ext_index = -1;
6639         nritems = btrfs_header_nritems(leaf);
6640         for (i = 0; i < nritems; i++) {
6641                 btrfs_item_key_to_cpu(leaf, &key, i);
6642                 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6643                         continue;
6644                 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6645                 if (btrfs_file_extent_type(leaf, fi) ==
6646                     BTRFS_FILE_EXTENT_INLINE)
6647                         continue;
6648                 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6649                 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6650                 if (bytenr == 0)
6651                         continue;
6652
6653                 ext_index++;
6654                 if (bytenr >= group->key.objectid + group->key.offset ||
6655                     bytenr + num_bytes <= group->key.objectid)
6656                         continue;
6657
6658                 extent_key.objectid = bytenr;
6659                 extent_key.offset = num_bytes;
6660                 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6661                 nr_extent = 1;
6662                 ret = get_new_locations(reloc_inode, &extent_key,
6663                                         group->key.objectid, 1,
6664                                         &new_extent, &nr_extent);
6665                 if (ret > 0)
6666                         continue;
6667                 BUG_ON(ret < 0);
6668
6669                 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6670                 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6671                 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6672                 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6673
6674                 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6675                                                 new_extent->disk_bytenr);
6676                 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6677                                                 new_extent->disk_num_bytes);
6678                 btrfs_mark_buffer_dirty(leaf);
6679
6680                 ret = btrfs_inc_extent_ref(trans, root,
6681                                         new_extent->disk_bytenr,
6682                                         new_extent->disk_num_bytes,
6683                                         leaf->start,
6684                                         root->root_key.objectid,
6685                                         trans->transid, key.objectid);
6686                 BUG_ON(ret);
6687
6688                 ret = btrfs_free_extent(trans, root,
6689                                         bytenr, num_bytes, leaf->start,
6690                                         btrfs_header_owner(leaf),
6691                                         btrfs_header_generation(leaf),
6692                                         key.objectid, 0);
6693                 BUG_ON(ret);
6694                 cond_resched();
6695         }
6696         kfree(new_extent);
6697         BUG_ON(ext_index + 1 != ref->nritems);
6698         btrfs_free_leaf_ref(root, ref);
6699         return 0;
6700 }
6701
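     /*
      * detach the reloc tree from a subvol root and queue it on
      * fs_info->dead_reloc_roots.  the reloc root's root item is synced to
      * its current node and reset so the tree can be dropped later.
      */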
6702 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6703                           struct btrfs_root *root)
6704 {
6705         struct btrfs_root *reloc_root;
6706         int ret;
6707
6708         if (root->reloc_root) {
6709                 reloc_root = root->reloc_root;
6710                 root->reloc_root = NULL;
6711                 list_add(&reloc_root->dead_list,
6712                          &root->fs_info->dead_reloc_roots);
6713
6714                 btrfs_set_root_bytenr(&reloc_root->root_item,
6715                                       reloc_root->node->start);
6716                 btrfs_set_root_level(&reloc_root->root_item,
6717                                      btrfs_header_level(reloc_root->node));
6718                 memset(&reloc_root->root_item.drop_progress, 0,
6719                         sizeof(struct btrfs_disk_key));
6720                 reloc_root->root_item.drop_level = 0;
6721
6722                 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6723                                         &reloc_root->root_key,
6724                                         &reloc_root->root_item);
6725                 BUG_ON(ret);
6726         }
6727         return 0;
6728 }
6729
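     /*
      * drop every reloc tree queued on fs_info->dead_reloc_roots.  each
      * tree is torn down with btrfs_drop_snapshot(), restarting the
      * transaction whenever it returns -EAGAIN, and its root item is then
      * deleted from the tree of tree roots.
      */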
6730 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6731 {
6732         struct btrfs_trans_handle *trans;
6733         struct btrfs_root *reloc_root;
6734         struct btrfs_root *prev_root = NULL;
6735         struct list_head dead_roots;
6736         int ret;
6737         unsigned long nr;
6738
6739         INIT_LIST_HEAD(&dead_roots);
6740         list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6741
6742         while (!list_empty(&dead_roots)) {
6743                 reloc_root = list_entry(dead_roots.prev,
6744                                         struct btrfs_root, dead_list);
6745                 list_del_init(&reloc_root->dead_list);
6746
6747                 BUG_ON(reloc_root->commit_root != NULL);
6748                 while (1) {
6749                         trans = btrfs_join_transaction(root, 1);
6750                         BUG_ON(!trans);
6751
6752                         mutex_lock(&root->fs_info->drop_mutex);
6753                         ret = btrfs_drop_snapshot(trans, reloc_root);
6754                         if (ret != -EAGAIN)
6755                                 break;
6756                         mutex_unlock(&root->fs_info->drop_mutex);
6757
6758                         nr = trans->blocks_used;
6759                         ret = btrfs_end_transaction(trans, root);
6760                         BUG_ON(ret);
6761                         btrfs_btree_balance_dirty(root, nr);
6762                 }
6763
6764                 free_extent_buffer(reloc_root->node);
6765
6766                 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6767                                      &reloc_root->root_key);
6768                 BUG_ON(ret);
6769                 mutex_unlock(&root->fs_info->drop_mutex);
6770
6771                 nr = trans->blocks_used;
6772                 ret = btrfs_end_transaction(trans, root);
6773                 BUG_ON(ret);
6774                 btrfs_btree_balance_dirty(root, nr);
6775
6776                 kfree(prev_root);
6777                 prev_root = reloc_root;
6778         }
6779         if (prev_root) {
6780                 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6781                 kfree(prev_root);
6782         }
6783         return 0;
6784 }
6785
6786 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6787 {
6788         list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6789         return 0;
6790 }
6791
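     /*
      * mount-time cleanup: if dead reloc roots were left behind by an
      * interrupted balance, commit a transaction so they get dropped, then
      * run orphan cleanup on the data relocation tree.
      */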
6792 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6793 {
6794         struct btrfs_root *reloc_root;
6795         struct btrfs_trans_handle *trans;
6796         struct btrfs_key location;
6797         int found;
6798         int ret;
6799
6800         mutex_lock(&root->fs_info->tree_reloc_mutex);
6801         ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6802         BUG_ON(ret);
6803         found = !list_empty(&root->fs_info->dead_reloc_roots);
6804         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6805
6806         if (found) {
6807                 trans = btrfs_start_transaction(root, 1);
6808                 BUG_ON(!trans);
6809                 ret = btrfs_commit_transaction(trans, root);
6810                 BUG_ON(ret);
6811         }
6812
6813         location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6814         location.offset = (u64)-1;
6815         location.type = BTRFS_ROOT_ITEM_KEY;
6816
6817         reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6818         BUG_ON(!reloc_root);
6819         btrfs_orphan_cleanup(reloc_root);
6820         return 0;
6821 }
6822
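     /*
      * create the reloc tree for a subvol if it doesn't exist yet.  the
      * tree is a snapshot of root->commit_root taken with
      * btrfs_copy_root(), inserted into the tree of tree roots under
      * BTRFS_TREE_RELOC_OBJECTID with zero refs, then read back and hooked
      * up to root->reloc_root.
      */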
6823 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
6824                                     struct btrfs_root *root)
6825 {
6826         struct btrfs_root *reloc_root;
6827         struct extent_buffer *eb;
6828         struct btrfs_root_item *root_item;
6829         struct btrfs_key root_key;
6830         int ret;
6831
6832         BUG_ON(!root->ref_cows);
6833         if (root->reloc_root)
6834                 return 0;
6835
6836         root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6837         BUG_ON(!root_item);
6838
6839         ret = btrfs_copy_root(trans, root, root->commit_root,
6840                               &eb, BTRFS_TREE_RELOC_OBJECTID);
6841         BUG_ON(ret);
6842
6843         root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6844         root_key.offset = root->root_key.objectid;
6845         root_key.type = BTRFS_ROOT_ITEM_KEY;
6846
6847         memcpy(root_item, &root->root_item, sizeof(*root_item));
6848         btrfs_set_root_refs(root_item, 0);
6849         btrfs_set_root_bytenr(root_item, eb->start);
6850         btrfs_set_root_level(root_item, btrfs_header_level(eb));
6851         btrfs_set_root_generation(root_item, trans->transid);
6852
6853         btrfs_tree_unlock(eb);
6854         free_extent_buffer(eb);
6855
6856         ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6857                                 &root_key, root_item);
6858         BUG_ON(ret);
6859         kfree(root_item);
6860
6861         reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6862                                                  &root_key);
6863         BUG_ON(!reloc_root);
6864         reloc_root->last_trans = trans->transid;
6865         reloc_root->commit_root = NULL;
6866         reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6867
6868         root->reloc_root = reloc_root;
6869         return 0;
6870 }
6871
6872 /*
6873  * Core function of space balance.
6874  *
6875  * The idea is to use reloc trees to relocate tree blocks in reference
6876  * counted roots. There is one reloc tree for each subvol, and all
6877  * reloc trees share the same root key objectid. Reloc trees are
6878  * snapshots of the latest committed roots of subvols (root->commit_root).
6879  *
6880  * To relocate a tree block referenced by a subvol, there are two steps.
6881  * COW the block through the subvol's reloc tree, then update the block
6882  * pointer in the subvol to point to the new block. Since all reloc trees
6883  * share the same root key objectid, special handling for tree blocks
6884  * owned by them is easy. Once a tree block has been COWed in one reloc
6885  * tree, we can use the resulting new block directly when the same block
6886  * needs to be COWed again through another reloc tree. In this way,
6887  * relocated tree blocks are shared between reloc trees, and so they are
6888  * also shared between subvols.
6889  */
6890 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
6891                                       struct btrfs_root *root,
6892                                       struct btrfs_path *path,
6893                                       struct btrfs_key *first_key,
6894                                       struct btrfs_ref_path *ref_path,
6895                                       struct btrfs_block_group_cache *group,
6896                                       struct inode *reloc_inode)
6897 {
6898         struct btrfs_root *reloc_root;
6899         struct extent_buffer *eb = NULL;
6900         struct btrfs_key *keys;
6901         u64 *nodes;
6902         int level;
6903         int shared_level;
6904         int lowest_level = 0;
6905         int ret;
6906
6907         if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
6908                 lowest_level = ref_path->owner_objectid;
6909
6910         if (!root->ref_cows) {
6911                 path->lowest_level = lowest_level;
6912                 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
6913                 BUG_ON(ret < 0);
6914                 path->lowest_level = 0;
6915                 btrfs_release_path(root, path);
6916                 return 0;
6917         }
6918
6919         mutex_lock(&root->fs_info->tree_reloc_mutex);
6920         ret = init_reloc_tree(trans, root);
6921         BUG_ON(ret);
6922         reloc_root = root->reloc_root;
6923
6924         shared_level = ref_path->shared_level;
6925         ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
6926
6927         keys = ref_path->node_keys;
6928         nodes = ref_path->new_nodes;
6929         memset(&keys[shared_level + 1], 0,
6930                sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
6931         memset(&nodes[shared_level + 1], 0,
6932                sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
6933
6934         if (nodes[lowest_level] == 0) {
6935                 path->lowest_level = lowest_level;
6936                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6937                                         0, 1);
6938                 BUG_ON(ret);
6939                 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
6940                         eb = path->nodes[level];
6941                         if (!eb || eb == reloc_root->node)
6942                                 break;
6943                         nodes[level] = eb->start;
6944                         if (level == 0)
6945                                 btrfs_item_key_to_cpu(eb, &keys[level], 0);
6946                         else
6947                                 btrfs_node_key_to_cpu(eb, &keys[level], 0);
6948                 }
6949                 if (nodes[0] &&
6950                     ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6951                         eb = path->nodes[0];
6952                         ret = replace_extents_in_leaf(trans, reloc_root, eb,
6953                                                       group, reloc_inode);
6954                         BUG_ON(ret);
6955                 }
6956                 btrfs_release_path(reloc_root, path);
6957         } else {
6958                 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
6959                                        lowest_level);
6960                 BUG_ON(ret);
6961         }
6962
6963         /*
6964          * replace tree blocks in the fs tree with tree blocks in
6965          * the reloc tree.
6966          */
6967         ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
6968         BUG_ON(ret < 0);
6969
6970         if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
6971                 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6972                                         0, 0);
6973                 BUG_ON(ret);
6974                 extent_buffer_get(path->nodes[0]);
6975                 eb = path->nodes[0];
6976                 btrfs_release_path(reloc_root, path);
6977                 ret = invalidate_extent_cache(reloc_root, eb, group, root);
6978                 BUG_ON(ret);
6979                 free_extent_buffer(eb);
6980         }
6981
6982         mutex_unlock(&root->fs_info->tree_reloc_mutex);
6983         path->lowest_level = 0;
6984         return 0;
6985 }
6986
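     /*
      * relocate a single tree block.  thin wrapper around
      * relocate_one_path() for metadata references, where no block group
      * or reloc inode is involved.
      */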
6987 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
6988                                         struct btrfs_root *root,
6989                                         struct btrfs_path *path,
6990                                         struct btrfs_key *first_key,
6991                                         struct btrfs_ref_path *ref_path)
6992 {
6993         int ret;
6994
6995         ret = relocate_one_path(trans, root, path, first_key,
6996                                 ref_path, NULL, NULL);
6997         BUG_ON(ret);
6998
6999         return 0;
7000 }
7001
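     /*
      * delete an extent item keyed at bytenr zero.  relocate_one_extent()
      * can't do anything useful with such an extent, so it is removed
      * instead of relocated.
      */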
7002 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
7003                                     struct btrfs_root *extent_root,
7004                                     struct btrfs_path *path,
7005                                     struct btrfs_key *extent_key)
7006 {
7007         int ret;
7008
7009         ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7010         if (ret)
7011                 goto out;
7012         ret = btrfs_del_item(trans, extent_root, path);
7013 out:
7014         btrfs_release_path(extent_root, path);
7015         return ret;
7016 }
7017
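     /*
      * read the root a reference path starts from.  cow-only roots are
      * keyed with offset 0, reference counted (subvol) roots with offset
      * (u64)-1 to get the latest version.
      */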
7018 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
7019                                                 struct btrfs_ref_path *ref_path)
7020 {
7021         struct btrfs_key root_key;
7022
7023         root_key.objectid = ref_path->root_objectid;
7024         root_key.type = BTRFS_ROOT_ITEM_KEY;
7025         if (is_cowonly_root(ref_path->root_objectid))
7026                 root_key.offset = 0;
7027         else
7028                 root_key.offset = (u64)-1;
7029
7030         return btrfs_read_fs_root_no_name(fs_info, &root_key);
7031 }
7032
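     /*
      * relocate all references to the extent described by extent_key.  on
      * pass 0, data extents are copied to their new location through the
      * reloc inode; on pass 1, data extent references are updated through
      * the owning subvol's reloc tree; on later passes any remaining
      * references are rewritten in place via get_new_locations() and
      * replace_one_extent().  tree block references are handled by
      * relocate_tree_block() on every pass.  callers typically sweep the
      * whole block group once per pass.
      */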
7033 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7034                                         struct btrfs_path *path,
7035                                         struct btrfs_key *extent_key,
7036                                         struct btrfs_block_group_cache *group,
7037                                         struct inode *reloc_inode, int pass)
7038 {
7039         struct btrfs_trans_handle *trans;
7040         struct btrfs_root *found_root;
7041         struct btrfs_ref_path *ref_path = NULL;
7042         struct disk_extent *new_extents = NULL;
7043         int nr_extents = 0;
7044         int loops;
7045         int ret;
7046         int level;
7047         struct btrfs_key first_key;
7048         u64 prev_block = 0;
7049
7051         trans = btrfs_start_transaction(extent_root, 1);
7052         BUG_ON(!trans);
7053
7054         if (extent_key->objectid == 0) {
7055                 ret = del_extent_zero(trans, extent_root, path, extent_key);
7056                 goto out;
7057         }
7058
7059         ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7060         if (!ref_path) {
7061                 ret = -ENOMEM;
7062                 goto out;
7063         }
7064
7065         for (loops = 0; ; loops++) {
7066                 if (loops == 0) {
7067                         ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7068                                                    extent_key->objectid);
7069                 } else {
7070                         ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7071                 }
7072                 if (ret < 0)
7073                         goto out;
7074                 if (ret > 0)
7075                         break;
7076
7077                 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7078                     ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7079                         continue;
7080
7081                 found_root = read_ref_root(extent_root->fs_info, ref_path);
7082                 BUG_ON(!found_root);
7083                 /*
7084                  * for reference counted trees, only process reference paths
7085                  * rooted at the latest committed root.
7086                  */
7087                 if (found_root->ref_cows &&
7088                     ref_path->root_generation != found_root->root_key.offset)
7089                         continue;
7090
7091                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7092                         if (pass == 0) {
7093                                 /*
7094                                  * copy data extents to new locations
7095                                  */
7096                                 u64 group_start = group->key.objectid;
7097                                 ret = relocate_data_extent(reloc_inode,
7098                                                            extent_key,
7099                                                            group_start);
7100                                 if (ret < 0)
7101                                         goto out;
7102                                 break;
7103                         }
7104                         level = 0;
7105                 } else {
7106                         level = ref_path->owner_objectid;
7107                 }
7108
7109                 if (prev_block != ref_path->nodes[level]) {
7110                         struct extent_buffer *eb;
7111                         u64 block_start = ref_path->nodes[level];
7112                         u64 block_size = btrfs_level_size(found_root, level);
7113
7114                         eb = read_tree_block(found_root, block_start,
7115                                              block_size, 0);
                             BUG_ON(!eb);
7116                         btrfs_tree_lock(eb);
7117                         BUG_ON(level != btrfs_header_level(eb));
7118
7119                         if (level == 0)
7120                                 btrfs_item_key_to_cpu(eb, &first_key, 0);
7121                         else
7122                                 btrfs_node_key_to_cpu(eb, &first_key, 0);
7123
7124                         btrfs_tree_unlock(eb);
7125                         free_extent_buffer(eb);
7126                         prev_block = block_start;
7127                 }
7128
7129                 mutex_lock(&extent_root->fs_info->trans_mutex);
7130                 btrfs_record_root_in_trans(found_root);
7131                 mutex_unlock(&extent_root->fs_info->trans_mutex);
7132                 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7133                         /*
7134                          * try to update data extent references while
7135                          * keeping metadata shared between snapshots.
7136                          */
7137                         if (pass == 1) {
7138                                 ret = relocate_one_path(trans, found_root,
7139                                                 path, &first_key, ref_path,
7140                                                 group, reloc_inode);
7141                                 if (ret < 0)
7142                                         goto out;
7143                                 continue;
7144                         }
7145                         /*
7146                          * use fallback method to process the remaining
7147                          * references.
7148                          */
7149                         if (!new_extents) {
7150                                 u64 group_start = group->key.objectid;
7151                                 new_extents = kmalloc(sizeof(*new_extents),
7152                                                       GFP_NOFS);
                                     if (!new_extents) {
                                             ret = -ENOMEM;
                                             goto out;
                                     }
7153                                 nr_extents = 1;
7154                                 ret = get_new_locations(reloc_inode,
7155                                                         extent_key,
7156                                                         group_start, 1,
7157                                                         &new_extents,
7158                                                         &nr_extents);
7159                                 if (ret)
7160                                         goto out;
7161                         }
7162                         ret = replace_one_extent(trans, found_root,
7163                                                 path, extent_key,
7164                                                 &first_key, ref_path,
7165                                                 new_extents, nr_extents);
7166                 } else {
7167                         ret = relocate_tree_block(trans, found_root, path,
7168                                                   &first_key, ref_path);
7169                 }
7170                 if (ret < 0)
7171                         goto out;
7172         }
7173         ret = 0;
7174 out:
7175         btrfs_end_transaction(trans, extent_root);
7176         kfree(new_extents);
7177         kfree(ref_path);
7178         return ret;
7179 }
7180 #endif
7181
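     /*
      * work out the profile a block group should be rebuilt with during
      * relocation, based on the number of writeable devices.  for example,
      * on a filesystem reduced to a single rw device, RAID1/RAID10
      * mirroring degrades to DUP and RAID0 to single device chunks; with
      * multiple devices, DUP is upgraded to RAID1 and single device chunks
      * to RAID0.
      */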
7182 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7183 {
7184         u64 num_devices;
7185         u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7186                 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7187
7188         num_devices = root->fs_info->fs_devices->rw_devices;
7189         if (num_devices == 1) {
7190                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7191                 stripped = flags & ~stripped;
7192
7193                 /* turn raid0 into single device chunks */
7194                 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7195                         return stripped;
7196
7197                 /* turn mirroring into duplication */
7198                 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7199                              BTRFS_BLOCK_GROUP_RAID10))
7200                         return stripped | BTRFS_BLOCK_GROUP_DUP;
7201                 return flags;
7202         } else {
7203                 /* they already had raid on here, just return */
7204                 if (flags & stripped)
7205                         return flags;
7206
7207                 stripped |= BTRFS_BLOCK_GROUP_DUP;
7208                 stripped = flags & ~stripped;
7209
7210                 /* switch duplicated blocks with raid1 */
7211                 if (flags & BTRFS_BLOCK_GROUP_DUP)
7212                         return stripped | BTRFS_BLOCK_GROUP_RAID1;
7213
7214                 /* turn single device chunks into raid0 */
7215                 return stripped | BTRFS_BLOCK_GROUP_RAID0;
7216         }
7217         return flags;
7218 }
7219
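     /*
      * make sure replacement space exists before a block group is emptied
      * for relocation: if the group still holds used or reserved bytes,
      * allocate a new chunk (with the profile converted by
      * update_block_group_flags()) large enough to take its contents.
      */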
7220 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
7221                      struct btrfs_block_group_cache *shrink_block_group,
7222                      int force)
7223 {
7224         struct btrfs_trans_handle *trans;
7225         u64 new_alloc_flags;
7226         u64 calc;
7227
7228         spin_lock(&shrink_block_group->lock);
7229         if (btrfs_block_group_used(&shrink_block_group->item) +
7230             shrink_block_group->reserved > 0) {
7231                 spin_unlock(&shrink_block_group->lock);
7232
7233                 trans = btrfs_start_transaction(root, 1);
7234                 spin_lock(&shrink_block_group->lock);
7235
7236                 new_alloc_flags = update_block_group_flags(root,
7237                                                    shrink_block_group->flags);
7238                 if (new_alloc_flags != shrink_block_group->flags) {
7239                         calc =
7240                              btrfs_block_group_used(&shrink_block_group->item);
7241                 } else {
7242                         calc = shrink_block_group->key.offset;
7243                 }
7244                 spin_unlock(&shrink_block_group->lock);
7245
7246                 do_chunk_alloc(trans, root->fs_info->extent_root,
7247                                calc + 2 * 1024 * 1024, new_alloc_flags, force);
7248
7249                 btrfs_end_transaction(trans, root);
7250         } else
7251                 spin_unlock(&shrink_block_group->lock);
7252         return 0;
7253 }
7255
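     /*
      * get a block group ready for relocation: pre-allocate a replacement
      * chunk and mark the group read only so no new allocations land in
      * it.
      */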
7256 int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
7257                                          struct btrfs_block_group_cache *group)
7259 {
7260         __alloc_chunk_for_shrink(root, group, 1);
7261         set_block_group_readonly(group);
7262         return 0;
7263 }
7264
7265 /*
7266  * checks to see if it's even possible to relocate this block group.
7267  *
7268  * @return -1 if it's not a good idea to relocate this block group, 0 if it's
7269  * ok to go ahead and try.
7270  */
7271 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
7272 {
7273         struct btrfs_block_group_cache *block_group;
7274         struct btrfs_space_info *space_info;
7275         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7276         struct btrfs_device *device;
7277         int full = 0;
7278         int ret = 0;
7279
7280         block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
7281
7282         /* odd, couldn't find the block group, leave it alone */
7283         if (!block_group)
7284                 return -1;
7285
7286         /* no bytes used, we're good */
7287         if (!btrfs_block_group_used(&block_group->item))
7288                 goto out;
7289
7290         space_info = block_group->space_info;
7291         spin_lock(&space_info->lock);
7292
7293         full = space_info->full;
7294
7295         /*
7296          * if this is the last block group we have in this space, we can't
7297          * relocate it unless we're able to allocate a new chunk below.
7298          *
7299          * Otherwise, we need to make sure we have room in the space to handle
7300          * all of the extents from this block group.  If we can, we're good.
7301          */
7302         if ((space_info->total_bytes != block_group->key.offset) &&
7303            (space_info->bytes_used + space_info->bytes_reserved +
7304             space_info->bytes_pinned + space_info->bytes_readonly +
7305             btrfs_block_group_used(&block_group->item) <
7306             space_info->total_bytes)) {
7307                 spin_unlock(&space_info->lock);
7308                 goto out;
7309         }
7310         spin_unlock(&space_info->lock);
7311
7312         /*
7313          * ok we don't have enough space, but maybe we have free space on our
7314          * devices to allocate new chunks for relocation, so loop through our
7315          * alloc devices and guess if we have enough space.  However, if we
7316          * were marked as full, then we know there aren't enough chunks, and we
7317          * can just return.
7318          */
7319         ret = -1;
7320         if (full)
7321                 goto out;
7322
7323         mutex_lock(&root->fs_info->chunk_mutex);
7324         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7325                 u64 min_free = btrfs_block_group_used(&block_group->item);
7326                 u64 dev_offset, max_avail;
7327
7328                 /*
7329                  * check to make sure we can actually find a chunk with enough
7330                  * space to fit our block group in.
7331                  */
7332                 if (device->total_bytes > device->bytes_used + min_free) {
7333                         ret = find_free_dev_extent(NULL, device, min_free,
7334                                                    &dev_offset, &max_avail);
7335                         if (!ret)
7336                                 break;
7337                         ret = -1;
7338                 }
7339         }
7340         mutex_unlock(&root->fs_info->chunk_mutex);
7341 out:
7342         btrfs_put_block_group(block_group);
7343         return ret;
7344 }
7345
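     /*
      * find the first block group item with objectid >= key->objectid.
      * returns 0 with the path pointing at the item when found, > 0 when
      * there is no such item, < 0 on error.
      */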
7346 static int find_first_block_group(struct btrfs_root *root,
7347                 struct btrfs_path *path, struct btrfs_key *key)
7348 {
7349         int ret = 0;
7350         struct btrfs_key found_key;
7351         struct extent_buffer *leaf;
7352         int slot;
7353
7354         ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7355         if (ret < 0)
7356                 goto out;
7357
7358         while (1) {
7359                 slot = path->slots[0];
7360                 leaf = path->nodes[0];
7361                 if (slot >= btrfs_header_nritems(leaf)) {
7362                         ret = btrfs_next_leaf(root, path);
7363                         if (ret == 0)
7364                                 continue;
7365                         if (ret < 0)
7366                                 goto out;
7367                         break;
7368                 }
7369                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7370
7371                 if (found_key.objectid >= key->objectid &&
7372                     found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7373                         ret = 0;
7374                         goto out;
7375                 }
7376                 path->slots[0]++;
7377         }
7378 out:
7379         return ret;
7380 }
7381
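     /*
      * tear down all block group and space_info structures at unmount
      * time.  leftover caching controls are dropped first, and any caching
      * thread still running is waited on before the group's free space
      * cache is freed.
      */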
7382 int btrfs_free_block_groups(struct btrfs_fs_info *info)
7383 {
7384         struct btrfs_block_group_cache *block_group;
7385         struct btrfs_space_info *space_info;
7386         struct btrfs_caching_control *caching_ctl;
7387         struct rb_node *n;
7388
7389         down_write(&info->extent_commit_sem);
7390         while (!list_empty(&info->caching_block_groups)) {
7391                 caching_ctl = list_entry(info->caching_block_groups.next,
7392                                          struct btrfs_caching_control, list);
7393                 list_del(&caching_ctl->list);
7394                 put_caching_control(caching_ctl);
7395         }
7396         up_write(&info->extent_commit_sem);
7397
7398         spin_lock(&info->block_group_cache_lock);
7399         while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7400                 block_group = rb_entry(n, struct btrfs_block_group_cache,
7401                                        cache_node);
7402                 rb_erase(&block_group->cache_node,
7403                          &info->block_group_cache_tree);
7404                 spin_unlock(&info->block_group_cache_lock);
7405
7406                 down_write(&block_group->space_info->groups_sem);
7407                 list_del(&block_group->list);
7408                 up_write(&block_group->space_info->groups_sem);
7409
7410                 if (block_group->cached == BTRFS_CACHE_STARTED)
7411                         wait_block_group_cache_done(block_group);
7412
7413                 btrfs_remove_free_space_cache(block_group);
7414                 btrfs_put_block_group(block_group);
7415
7416                 spin_lock(&info->block_group_cache_lock);
7417         }
7418         spin_unlock(&info->block_group_cache_lock);
7419
7420         /* now that all the block groups are freed, go through and
7421          * free all the space_info structs.  This is only called during
7422          * the final stages of unmount, and so we know nobody is
7423          * using them.  We call synchronize_rcu() once before we start,
7424          * just to be on the safe side.
7425          */
7426         synchronize_rcu();
7427
7428         while (!list_empty(&info->space_info)) {
7429                 space_info = list_entry(info->space_info.next,
7430                                         struct btrfs_space_info,
7431                                         list);
7432
7433                 list_del(&space_info->list);
7434                 kfree(space_info);
7435         }
7436         return 0;
7437 }
7438
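     /*
      * load every block group item from the extent tree at mount time and
      * build the in-memory block group cache.  groups that are completely
      * full or completely empty are marked cached immediately so the
      * caching worker never has to scan them.
      */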
7439 int btrfs_read_block_groups(struct btrfs_root *root)
7440 {
7441         struct btrfs_path *path;
7442         int ret;
7443         struct btrfs_block_group_cache *cache;
7444         struct btrfs_fs_info *info = root->fs_info;
7445         struct btrfs_space_info *space_info;
7446         struct btrfs_key key;
7447         struct btrfs_key found_key;
7448         struct extent_buffer *leaf;
7449
7450         root = info->extent_root;
7451         key.objectid = 0;
7452         key.offset = 0;
7453         btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
7454         path = btrfs_alloc_path();
7455         if (!path)
7456                 return -ENOMEM;
7457
7458         while (1) {
7459                 ret = find_first_block_group(root, path, &key);
7460                 if (ret > 0) {
7461                         ret = 0;
7462                         goto error;
7463                 }
7464                 if (ret != 0)
7465                         goto error;
7466
7467                 leaf = path->nodes[0];
7468                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
7469                 cache = kzalloc(sizeof(*cache), GFP_NOFS);
7470                 if (!cache) {
7471                         ret = -ENOMEM;
7472                         break;
7473                 }
7474
7475                 atomic_set(&cache->count, 1);
7476                 spin_lock_init(&cache->lock);
7477                 spin_lock_init(&cache->tree_lock);
7478                 cache->fs_info = info;
7479                 INIT_LIST_HEAD(&cache->list);
7480                 INIT_LIST_HEAD(&cache->cluster_list);
7481
7482                 /*
7483                  * we only want to have 32k of ram per block group for keeping
7484                  * track of free space, and if we pass 1/2 of that we want to
7485                  * start converting things over to using bitmaps
7486                  */
7487                 cache->extents_thresh = ((1024 * 32) / 2) /
7488                         sizeof(struct btrfs_free_space);
7489
7490                 read_extent_buffer(leaf, &cache->item,
7491                                    btrfs_item_ptr_offset(leaf, path->slots[0]),
7492                                    sizeof(cache->item));
7493                 memcpy(&cache->key, &found_key, sizeof(found_key));
7494
7495                 key.objectid = found_key.objectid + found_key.offset;
7496                 btrfs_release_path(root, path);
7497                 cache->flags = btrfs_block_group_flags(&cache->item);
7498                 cache->sectorsize = root->sectorsize;
7499
7500                 /*
7501                  * check for two cases, either we are full, and therefore
7502                  * don't need to bother with the caching work since we won't
7503                  * find any space, or we are empty, and we can just add all
7504          * the space in and be done with it.  This saves us a lot of
7505                  * time, particularly in the full case.
7506                  */
7507                 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
7508                         exclude_super_stripes(root, cache);
7509                         cache->last_byte_to_unpin = (u64)-1;
7510                         cache->cached = BTRFS_CACHE_FINISHED;
7511                         free_excluded_extents(root, cache);
7512                 } else if (btrfs_block_group_used(&cache->item) == 0) {
7513                         exclude_super_stripes(root, cache);
7514                         cache->last_byte_to_unpin = (u64)-1;
7515                         cache->cached = BTRFS_CACHE_FINISHED;
7516                         add_new_free_space(cache, root->fs_info,
7517                                            found_key.objectid,
7518                                            found_key.objectid +
7519                                            found_key.offset);
7520                         free_excluded_extents(root, cache);
7521                 }
7522
7523                 ret = update_space_info(info, cache->flags, found_key.offset,
7524                                         btrfs_block_group_used(&cache->item),
7525                                         &space_info);
7526                 BUG_ON(ret);
7527                 cache->space_info = space_info;
7528                 spin_lock(&cache->space_info->lock);
7529                 cache->space_info->bytes_super += cache->bytes_super;
7530                 spin_unlock(&cache->space_info->lock);
7531
7532                 down_write(&space_info->groups_sem);
7533                 list_add_tail(&cache->list, &space_info->block_groups);
7534                 up_write(&space_info->groups_sem);
7535
7536                 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7537                 BUG_ON(ret);
7538
7539                 set_avail_alloc_bits(root->fs_info, cache->flags);
7540                 if (btrfs_chunk_readonly(root, cache->key.objectid))
7541                         set_block_group_readonly(cache);
7542         }
7543         ret = 0;
7544 error:
7545         btrfs_free_path(path);
7546         return ret;
7547 }
7548
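     /*
      * create a new block group for a freshly allocated chunk and insert
      * its item into the extent tree.  the new group starts out fully
      * cached since its free space is known up front.
      */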
7549 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7550                            struct btrfs_root *root, u64 bytes_used,
7551                            u64 type, u64 chunk_objectid, u64 chunk_offset,
7552                            u64 size)
7553 {
7554         int ret;
7555         struct btrfs_root *extent_root;
7556         struct btrfs_block_group_cache *cache;
7557
7558         extent_root = root->fs_info->extent_root;
7559
7560         root->fs_info->last_trans_log_full_commit = trans->transid;
7561
7562         cache = kzalloc(sizeof(*cache), GFP_NOFS);
7563         if (!cache)
7564                 return -ENOMEM;
7565
7566         cache->key.objectid = chunk_offset;
7567         cache->key.offset = size;
7568         cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
7569         cache->sectorsize = root->sectorsize;
7570
7571         /*
7572          * we only want to have 32k of ram per block group for keeping track
7573          * of free space, and if we pass 1/2 of that we want to start
7574          * converting things over to using bitmaps
7575          */
7576         cache->extents_thresh = ((1024 * 32) / 2) /
7577                 sizeof(struct btrfs_free_space);
7578         atomic_set(&cache->count, 1);
7579         spin_lock_init(&cache->lock);
7580         spin_lock_init(&cache->tree_lock);
7581         INIT_LIST_HEAD(&cache->list);
7582         INIT_LIST_HEAD(&cache->cluster_list);
7583
7584         btrfs_set_block_group_used(&cache->item, bytes_used);
7585         btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7586         cache->flags = type;
7587         btrfs_set_block_group_flags(&cache->item, type);
7588
7589         cache->last_byte_to_unpin = (u64)-1;
7590         cache->cached = BTRFS_CACHE_FINISHED;
7591         exclude_super_stripes(root, cache);
7592
7593         add_new_free_space(cache, root->fs_info, chunk_offset,
7594                            chunk_offset + size);
7595
7596         free_excluded_extents(root, cache);
7597
7598         ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7599                                 &cache->space_info);
7600         BUG_ON(ret);
7601
7602         spin_lock(&cache->space_info->lock);
7603         cache->space_info->bytes_super += cache->bytes_super;
7604         spin_unlock(&cache->space_info->lock);
7605
7606         down_write(&cache->space_info->groups_sem);
7607         list_add_tail(&cache->list, &cache->space_info->block_groups);
7608         up_write(&cache->space_info->groups_sem);
7609
7610         ret = btrfs_add_block_group_cache(root->fs_info, cache);
7611         BUG_ON(ret);
7612
7613         ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7614                                 sizeof(cache->item));
7615         BUG_ON(ret);
7616
7617         set_avail_alloc_bits(extent_root->fs_info, type);
7618
7619         return 0;
7620 }
7621
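     /*
      * remove an empty, read only block group: detach it from any
      * allocation clusters and the caches, adjust the space_info counters
      * and delete its item from the extent tree.
      */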
7622 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7623                              struct btrfs_root *root, u64 group_start)
7624 {
7625         struct btrfs_path *path;
7626         struct btrfs_block_group_cache *block_group;
7627         struct btrfs_free_cluster *cluster;
7628         struct btrfs_key key;
7629         int ret;
7630
7631         root = root->fs_info->extent_root;
7632
7633         block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7634         BUG_ON(!block_group);
7635         BUG_ON(!block_group->ro);
7636
7637         memcpy(&key, &block_group->key, sizeof(key));
7638
7639         /* make sure this block group isn't part of an allocation cluster */
7640         cluster = &root->fs_info->data_alloc_cluster;
7641         spin_lock(&cluster->refill_lock);
7642         btrfs_return_cluster_to_free_space(block_group, cluster);
7643         spin_unlock(&cluster->refill_lock);
7644
7645         /*
7646          * make sure this block group isn't part of a metadata
7647          * allocation cluster
7648          */
7649         cluster = &root->fs_info->meta_alloc_cluster;
7650         spin_lock(&cluster->refill_lock);
7651         btrfs_return_cluster_to_free_space(block_group, cluster);
7652         spin_unlock(&cluster->refill_lock);
7653
7654         path = btrfs_alloc_path();
7655         BUG_ON(!path);
7656
7657         spin_lock(&root->fs_info->block_group_cache_lock);
7658         rb_erase(&block_group->cache_node,
7659                  &root->fs_info->block_group_cache_tree);
7660         spin_unlock(&root->fs_info->block_group_cache_lock);
7661
7662         down_write(&block_group->space_info->groups_sem);
7663         /*
7664          * we must use list_del_init so people can check to see if they
7665          * are still on the list after taking the semaphore
7666          */
7667         list_del_init(&block_group->list);
7668         up_write(&block_group->space_info->groups_sem);
7669
7670         if (block_group->cached == BTRFS_CACHE_STARTED)
7671                 wait_block_group_cache_done(block_group);
7672
7673         btrfs_remove_free_space_cache(block_group);
7674
7675         spin_lock(&block_group->space_info->lock);
7676         block_group->space_info->total_bytes -= block_group->key.offset;
7677         block_group->space_info->bytes_readonly -= block_group->key.offset;
7678         spin_unlock(&block_group->space_info->lock);
7679
7680         btrfs_clear_space_info_full(root->fs_info);
7681
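             /*
              * drop two references: one for the lookup at the top of this
              * function and one for the rbtree reference removed by the
              * rb_erase() above.
              */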
7682         btrfs_put_block_group(block_group);
7683         btrfs_put_block_group(block_group);
7684
7685         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7686         if (ret > 0)
7687                 ret = -EIO;
7688         if (ret < 0)
7689                 goto out;
7690
7691         ret = btrfs_del_item(trans, root, path);
7692 out:
7693         btrfs_free_path(path);
7694         return ret;
7695 }