fs/btrfs/extent-tree.c (net-next-2.6.git), at "Btrfs: Fix per root used space accounting"
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc,
			      int mark_free);
static int update_reserved_extents(struct btrfs_block_group_cache *cache,
				   u64 num_bytes, int reserve);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int pin_down_bytes(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root,
			  struct btrfs_path *path,
			  u64 bytenr, u64 num_bytes,
			  int is_data, int reserved,
			  struct extent_buffer **must_clean);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);

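/*
 * Returns non-zero once the caching kthread has marked this block group
 * BTRFS_CACHE_FINISHED, i.e. its free space has been fully populated.
 */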
static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		atomic_inc(&ret->count);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}

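/*
 * The ranges excluded here (the super block mirrors, see
 * exclude_super_stripes below) are marked EXTENT_UPTODATE in both
 * freed_extents trees so they are never added to a block group's free
 * space cache.
 */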
static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

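/*
 * The caching_ctl is shared between the block group, the global list of
 * in-progress caching operations and the caching kthread itself, so it is
 * reference counted; the last put frees it.
 */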
static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * this is only called by cache_block_group, since we could have freed extents
 * we need to check the pinned_extents for any extents that can't be used yet
 * since their free space will be released as soon as the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start == start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}

static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	exclude_super_stripes(extent_root, block_group);
	spin_lock(&block_group->space_info->lock);
	block_group->space_info->bytes_super += block_group->bytes_super;
	spin_unlock(&block_group->space_info->lock);

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space. So we skip locking and search the commit
	 * root, since its read-only
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	return 0;
}

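/*
 * Kick off a kthread that fills in the block group's free space cache in
 * the background.  Returns 0 if caching is already in progress or done.
 */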
static int cache_block_group(struct btrfs_block_group_cache *cache)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count))
		kfree(cache);
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags == flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * Back reference rules. Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance. This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs. The implicit back refs is optimized
 * for pointers in non-shared tree blocks. For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key. This information allows us to find the block by
 * b-tree searching. The full back refs is for pointers in tree blocks not
 * referenced by their owner trees. The location of tree block is recorded
 * in the back refs. Actually the full back refs is generic, and can be
 * used in all cases the implicit back refs is used. The major shortcoming
 * of the full back refs is its overhead. Every time a tree block gets
 * COWed, we have to update back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it. This means most tree related operations only involve
 * implicit back refs. For a tree block created in old transaction, the
 * only way to drop a reference to it is COW it. So we can detect the
 * event that tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COW'd through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree. Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree. In this case, full back refs is used for pointers
 * in the block. Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree. In this case, implicit back refs is used for
 * pointers in the block. Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts. The original
 * implicit back refs are entailed to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree. Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference count.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf
 *
 * When a file extent is allocated, the implicit back refs is used,
 * and the fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of key. The key offset for the implicit back refs is
 * objectid of block's owner tree. The key offset for the full back refs
 * is the first byte of parent block.
 *
 * When implicit back refs is used, information about the lowest key and
 * level of the tree block are required. This information is stored in
 * the tree block info structure.
 */

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

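/*
 * The offset of an EXTENT_DATA_REF key is a 64-bit hash of the referencing
 * root, inode objectid and file offset, built from two crc32c sums.
 */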
static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

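/*
 * Look up the backref item for a data extent.  Shared refs are keyed by the
 * parent block, other refs by the hash above; since the hash can collide,
 * the non-shared case walks forward over items with the same hash until a
 * full match is found or the candidates run out.
 */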
static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

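/*
 * Pick the backref key type: tree blocks (owner below
 * BTRFS_FIRST_FREE_OBJECTID) get tree block refs, data extents get data
 * refs; a non-zero parent selects the shared (full backref) variant.
 */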
static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}

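/*
 * Starting at @level, return in @key the key that follows the current path
 * position.  Returns 1 if the path already points at the last key of the
 * tree.
 */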
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

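/*
 * Add an inline backref: bump the existing inline ref if one is already
 * there, otherwise make room inside the extent item and write a new one.
 * Returns -EAGAIN from the lookup when the ref has to go into a separate
 * item instead.
 */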
static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

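/*
 * Drop refs_to_drop references: from an inline backref if @iref is set,
 * from a separate data ref item, or by deleting a tree block ref item.
 */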
static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL,
			     DISCARD_FL_BARRIER);
}

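/*
 * When the "discard" mount option is enabled, map the logical range to its
 * physical stripes and issue a discard to each underlying device.
 */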
static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will setup the path even if it fails to insert the back ref */
	ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
					   path, bytenr, num_bytes, parent,
					   root_objectid, owner, offset,
					   refs_to_add, extent_op);
	if (ret == 0)
		goto out;

	if (ret != -EAGAIN) {
		err = ret;
		goto out;
	}

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, item);
	btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, item);

	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(root->fs_info->extent_root, path);

	path->reada = 1;
	path->leave_spinning = 1;

	/* now insert the actual backref */
	ret = insert_extent_backref(trans, root->fs_info->extent_root,
				    path, bytenr, parent, root_objectid,
				    owner, offset, refs_to_add);
	BUG_ON(ret);
out:
	btrfs_free_path(path);
	return err;
}

YZ
1686static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1687 struct btrfs_root *root,
1688 struct btrfs_delayed_ref_node *node,
1689 struct btrfs_delayed_extent_op *extent_op,
1690 int insert_reserved)
56bec294 1691{
5d4f98a2
YZ
1692 int ret = 0;
1693 struct btrfs_delayed_data_ref *ref;
1694 struct btrfs_key ins;
1695 u64 parent = 0;
1696 u64 ref_root = 0;
1697 u64 flags = 0;
1698
1699 ins.objectid = node->bytenr;
1700 ins.offset = node->num_bytes;
1701 ins.type = BTRFS_EXTENT_ITEM_KEY;
1702
1703 ref = btrfs_delayed_node_to_data_ref(node);
1704 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1705 parent = ref->parent;
1706 else
1707 ref_root = ref->root;
1708
1709 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1710 if (extent_op) {
1711 BUG_ON(extent_op->update_key);
1712 flags |= extent_op->flags_to_set;
1713 }
1714 ret = alloc_reserved_file_extent(trans, root,
1715 parent, ref_root, flags,
1716 ref->objectid, ref->offset,
1717 &ins, node->ref_mod);
5d4f98a2
YZ
1718 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1719 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1720 node->num_bytes, parent,
1721 ref_root, ref->objectid,
1722 ref->offset, node->ref_mod,
1723 extent_op);
1724 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1725 ret = __btrfs_free_extent(trans, root, node->bytenr,
1726 node->num_bytes, parent,
1727 ref_root, ref->objectid,
1728 ref->offset, node->ref_mod,
1729 extent_op);
1730 } else {
1731 BUG();
1732 }
1733 return ret;
1734}
1735
1736static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1737 struct extent_buffer *leaf,
1738 struct btrfs_extent_item *ei)
1739{
1740 u64 flags = btrfs_extent_flags(leaf, ei);
1741 if (extent_op->update_flags) {
1742 flags |= extent_op->flags_to_set;
1743 btrfs_set_extent_flags(leaf, ei, flags);
1744 }
1745
1746 if (extent_op->update_key) {
1747 struct btrfs_tree_block_info *bi;
1748 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1749 bi = (struct btrfs_tree_block_info *)(ei + 1);
1750 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1751 }
1752}
1753
1754static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1755 struct btrfs_root *root,
1756 struct btrfs_delayed_ref_node *node,
1757 struct btrfs_delayed_extent_op *extent_op)
1758{
1759 struct btrfs_key key;
1760 struct btrfs_path *path;
1761 struct btrfs_extent_item *ei;
1762 struct extent_buffer *leaf;
1763 u32 item_size;
56bec294 1764 int ret;
5d4f98a2
YZ
1765 int err = 0;
1766
1767 path = btrfs_alloc_path();
1768 if (!path)
1769 return -ENOMEM;
1770
1771 key.objectid = node->bytenr;
1772 key.type = BTRFS_EXTENT_ITEM_KEY;
1773 key.offset = node->num_bytes;
1774
1775 path->reada = 1;
1776 path->leave_spinning = 1;
1777 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1778 path, 0, 1);
1779 if (ret < 0) {
1780 err = ret;
1781 goto out;
1782 }
1783 if (ret > 0) {
1784 err = -EIO;
1785 goto out;
1786 }
1787
1788 leaf = path->nodes[0];
1789 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1790#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1791 if (item_size < sizeof(*ei)) {
1792 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1793 path, (u64)-1, 0);
1794 if (ret < 0) {
1795 err = ret;
1796 goto out;
1797 }
1798 leaf = path->nodes[0];
1799 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1800 }
1801#endif
1802 BUG_ON(item_size < sizeof(*ei));
1803 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1804 __run_delayed_extent_op(extent_op, leaf, ei);
56bec294 1805
5d4f98a2
YZ
1806 btrfs_mark_buffer_dirty(leaf);
1807out:
1808 btrfs_free_path(path);
1809 return err;
56bec294
CM
1810}
1811
5d4f98a2
YZ
1812static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1813 struct btrfs_root *root,
1814 struct btrfs_delayed_ref_node *node,
1815 struct btrfs_delayed_extent_op *extent_op,
1816 int insert_reserved)
56bec294
CM
1817{
1818 int ret = 0;
5d4f98a2
YZ
1819 struct btrfs_delayed_tree_ref *ref;
1820 struct btrfs_key ins;
1821 u64 parent = 0;
1822 u64 ref_root = 0;
56bec294 1823
5d4f98a2
YZ
1824 ins.objectid = node->bytenr;
1825 ins.offset = node->num_bytes;
1826 ins.type = BTRFS_EXTENT_ITEM_KEY;
56bec294 1827
5d4f98a2
YZ
1828 ref = btrfs_delayed_node_to_tree_ref(node);
1829 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1830 parent = ref->parent;
1831 else
1832 ref_root = ref->root;
1833
1834 BUG_ON(node->ref_mod != 1);
1835 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1836 BUG_ON(!extent_op || !extent_op->update_flags ||
1837 !extent_op->update_key);
1838 ret = alloc_reserved_tree_block(trans, root,
1839 parent, ref_root,
1840 extent_op->flags_to_set,
1841 &extent_op->key,
1842 ref->level, &ins);
5d4f98a2
YZ
1843 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1844 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1845 node->num_bytes, parent, ref_root,
1846 ref->level, 0, 1, extent_op);
1847 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1848 ret = __btrfs_free_extent(trans, root, node->bytenr,
1849 node->num_bytes, parent, ref_root,
1850 ref->level, 0, 1, extent_op);
1851 } else {
1852 BUG();
1853 }
56bec294
CM
1854 return ret;
1855}
1856
5d4f98a2 1857
56bec294 1858/* helper function to actually process a single delayed ref entry */
5d4f98a2
YZ
1859static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
1860 struct btrfs_root *root,
1861 struct btrfs_delayed_ref_node *node,
1862 struct btrfs_delayed_extent_op *extent_op,
1863 int insert_reserved)
56bec294
CM
1864{
1865 int ret;
5d4f98a2 1866 if (btrfs_delayed_ref_is_head(node)) {
56bec294
CM
1867 struct btrfs_delayed_ref_head *head;
1868 /*
1869 * we've hit the end of the chain and we were supposed
1870 * to insert this extent into the tree. But, it got
1871 * deleted before we ever needed to insert it, so all
1872 * we have to do is clean up the accounting
1873 */
5d4f98a2
YZ
1874 BUG_ON(extent_op);
1875 head = btrfs_delayed_node_to_head(node);
56bec294 1876 if (insert_reserved) {
11833d66
YZ
1877 int mark_free = 0;
1878 struct extent_buffer *must_clean = NULL;
1879
1880 ret = pin_down_bytes(trans, root, NULL,
1881 node->bytenr, node->num_bytes,
1882 head->is_data, 1, &must_clean);
1883 if (ret > 0)
1884 mark_free = 1;
1885
1886 if (must_clean) {
1887 clean_tree_block(NULL, root, must_clean);
1888 btrfs_tree_unlock(must_clean);
1889 free_extent_buffer(must_clean);
1890 }
5d4f98a2
YZ
1891 if (head->is_data) {
1892 ret = btrfs_del_csums(trans, root,
1893 node->bytenr,
1894 node->num_bytes);
1895 BUG_ON(ret);
1896 }
11833d66
YZ
1897 if (mark_free) {
1898 ret = btrfs_free_reserved_extent(root,
1899 node->bytenr,
1900 node->num_bytes);
1901 BUG_ON(ret);
1902 }
56bec294 1903 }
56bec294
CM
1904 mutex_unlock(&head->mutex);
1905 return 0;
1906 }
1907
5d4f98a2
YZ
1908 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
1909 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
1910 ret = run_delayed_tree_ref(trans, root, node, extent_op,
1911 insert_reserved);
1912 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
1913 node->type == BTRFS_SHARED_DATA_REF_KEY)
1914 ret = run_delayed_data_ref(trans, root, node, extent_op,
1915 insert_reserved);
1916 else
1917 BUG();
1918 return ret;
56bec294
CM
1919}
1920
1921static noinline struct btrfs_delayed_ref_node *
1922select_delayed_ref(struct btrfs_delayed_ref_head *head)
1923{
1924 struct rb_node *node;
1925 struct btrfs_delayed_ref_node *ref;
1926 int action = BTRFS_ADD_DELAYED_REF;
1927again:
1928 /*
1929 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
1930 * this prevents ref count from going down to zero when
 1931	 * there are still pending delayed refs.
1932 */
1933 node = rb_prev(&head->node.rb_node);
1934 while (1) {
1935 if (!node)
1936 break;
1937 ref = rb_entry(node, struct btrfs_delayed_ref_node,
1938 rb_node);
1939 if (ref->bytenr != head->node.bytenr)
1940 break;
5d4f98a2 1941 if (ref->action == action)
56bec294
CM
1942 return ref;
1943 node = rb_prev(node);
1944 }
1945 if (action == BTRFS_ADD_DELAYED_REF) {
1946 action = BTRFS_DROP_DELAYED_REF;
1947 goto again;
1948 }
1949 return NULL;
1950}
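/*
 * Stand-alone illustration (plain userspace C, not part of extent-tree.c)
 * of why select_delayed_ref() above prefers BTRFS_ADD_DELAYED_REF entries:
 * if the drops were applied first, the reference count could touch zero
 * while add operations are still pending.  The starting count and the
 * pending modifications below are made-up values for illustration only.
 */
#include <stdio.h>

int main(void)
{
	int arrival_order[3] = { -1, +1, +1 };	/* a drop queued before two adds */
	int adds_first[3] = { +1, +1, -1 };	/* same mods, adds applied first */
	int refs, i;

	for (refs = 1, i = 0; i < 3; i++) {
		refs += arrival_order[i];
		printf("arrival order: refs=%d%s\n", refs,
		       refs == 0 ? "  (transiently zero)" : "");
	}
	for (refs = 1, i = 0; i < 3; i++) {
		refs += adds_first[i];
		printf("adds first:    refs=%d\n", refs);
	}
	return 0;
}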
1951
c3e69d58
CM
1952static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
1953 struct btrfs_root *root,
1954 struct list_head *cluster)
56bec294 1955{
56bec294
CM
1956 struct btrfs_delayed_ref_root *delayed_refs;
1957 struct btrfs_delayed_ref_node *ref;
1958 struct btrfs_delayed_ref_head *locked_ref = NULL;
5d4f98a2 1959 struct btrfs_delayed_extent_op *extent_op;
56bec294 1960 int ret;
c3e69d58 1961 int count = 0;
56bec294 1962 int must_insert_reserved = 0;
56bec294
CM
1963
1964 delayed_refs = &trans->transaction->delayed_refs;
56bec294
CM
1965 while (1) {
1966 if (!locked_ref) {
c3e69d58
CM
1967 /* pick a new head ref from the cluster list */
1968 if (list_empty(cluster))
56bec294 1969 break;
56bec294 1970
c3e69d58
CM
1971 locked_ref = list_entry(cluster->next,
1972 struct btrfs_delayed_ref_head, cluster);
1973
1974 /* grab the lock that says we are going to process
1975 * all the refs for this head */
1976 ret = btrfs_delayed_ref_lock(trans, locked_ref);
1977
1978 /*
1979 * we may have dropped the spin lock to get the head
1980 * mutex lock, and that might have given someone else
1981 * time to free the head. If that's true, it has been
1982 * removed from our list and we can move on.
1983 */
1984 if (ret == -EAGAIN) {
1985 locked_ref = NULL;
1986 count++;
1987 continue;
56bec294
CM
1988 }
1989 }
a28ec197 1990
56bec294
CM
1991 /*
1992 * record the must insert reserved flag before we
1993 * drop the spin lock.
1994 */
1995 must_insert_reserved = locked_ref->must_insert_reserved;
1996 locked_ref->must_insert_reserved = 0;
7bb86316 1997
5d4f98a2
YZ
1998 extent_op = locked_ref->extent_op;
1999 locked_ref->extent_op = NULL;
2000
56bec294
CM
2001 /*
2002 * locked_ref is the head node, so we have to go one
2003 * node back for any delayed ref updates
2004 */
56bec294
CM
2005 ref = select_delayed_ref(locked_ref);
2006 if (!ref) {
 2007			/* All delayed refs have been processed, go ahead
2008 * and send the head node to run_one_delayed_ref,
2009 * so that any accounting fixes can happen
2010 */
2011 ref = &locked_ref->node;
5d4f98a2
YZ
2012
2013 if (extent_op && must_insert_reserved) {
2014 kfree(extent_op);
2015 extent_op = NULL;
2016 }
2017
2018 if (extent_op) {
2019 spin_unlock(&delayed_refs->lock);
2020
2021 ret = run_delayed_extent_op(trans, root,
2022 ref, extent_op);
2023 BUG_ON(ret);
2024 kfree(extent_op);
2025
2026 cond_resched();
2027 spin_lock(&delayed_refs->lock);
2028 continue;
2029 }
2030
c3e69d58 2031 list_del_init(&locked_ref->cluster);
56bec294
CM
2032 locked_ref = NULL;
2033 }
02217ed2 2034
56bec294
CM
2035 ref->in_tree = 0;
2036 rb_erase(&ref->rb_node, &delayed_refs->root);
2037 delayed_refs->num_entries--;
5d4f98a2 2038
56bec294 2039 spin_unlock(&delayed_refs->lock);
925baedd 2040
5d4f98a2 2041 ret = run_one_delayed_ref(trans, root, ref, extent_op,
56bec294
CM
2042 must_insert_reserved);
2043 BUG_ON(ret);
eb099670 2044
5d4f98a2
YZ
2045 btrfs_put_delayed_ref(ref);
2046 kfree(extent_op);
c3e69d58 2047 count++;
5d4f98a2 2048
c3e69d58
CM
2049 cond_resched();
2050 spin_lock(&delayed_refs->lock);
2051 }
2052 return count;
2053}
2054
2055/*
2056 * this starts processing the delayed reference count updates and
2057 * extent insertions we have queued up so far. count can be
2058 * 0, which means to process everything in the tree at the start
2059 * of the run (but not newly added entries), or it can be some target
2060 * number you'd like to process.
2061 */
2062int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2063 struct btrfs_root *root, unsigned long count)
2064{
2065 struct rb_node *node;
2066 struct btrfs_delayed_ref_root *delayed_refs;
2067 struct btrfs_delayed_ref_node *ref;
2068 struct list_head cluster;
2069 int ret;
2070 int run_all = count == (unsigned long)-1;
2071 int run_most = 0;
2072
2073 if (root == root->fs_info->extent_root)
2074 root = root->fs_info->tree_root;
2075
2076 delayed_refs = &trans->transaction->delayed_refs;
2077 INIT_LIST_HEAD(&cluster);
2078again:
2079 spin_lock(&delayed_refs->lock);
2080 if (count == 0) {
2081 count = delayed_refs->num_entries * 2;
2082 run_most = 1;
2083 }
2084 while (1) {
2085 if (!(run_all || run_most) &&
2086 delayed_refs->num_heads_ready < 64)
2087 break;
eb099670 2088
56bec294 2089 /*
c3e69d58
CM
2090 * go find something we can process in the rbtree. We start at
2091 * the beginning of the tree, and then build a cluster
2092 * of refs to process starting at the first one we are able to
2093 * lock
56bec294 2094 */
c3e69d58
CM
2095 ret = btrfs_find_ref_cluster(trans, &cluster,
2096 delayed_refs->run_delayed_start);
2097 if (ret)
56bec294
CM
2098 break;
2099
c3e69d58
CM
2100 ret = run_clustered_refs(trans, root, &cluster);
2101 BUG_ON(ret < 0);
2102
2103 count -= min_t(unsigned long, ret, count);
2104
2105 if (count == 0)
2106 break;
eb099670 2107 }
c3e69d58 2108
56bec294 2109 if (run_all) {
56bec294 2110 node = rb_first(&delayed_refs->root);
c3e69d58 2111 if (!node)
56bec294 2112 goto out;
c3e69d58 2113 count = (unsigned long)-1;
e9d0b13b 2114
56bec294
CM
2115 while (node) {
2116 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2117 rb_node);
2118 if (btrfs_delayed_ref_is_head(ref)) {
2119 struct btrfs_delayed_ref_head *head;
5caf2a00 2120
56bec294
CM
2121 head = btrfs_delayed_node_to_head(ref);
2122 atomic_inc(&ref->refs);
2123
2124 spin_unlock(&delayed_refs->lock);
2125 mutex_lock(&head->mutex);
2126 mutex_unlock(&head->mutex);
2127
2128 btrfs_put_delayed_ref(ref);
1887be66 2129 cond_resched();
56bec294
CM
2130 goto again;
2131 }
2132 node = rb_next(node);
2133 }
2134 spin_unlock(&delayed_refs->lock);
56bec294
CM
2135 schedule_timeout(1);
2136 goto again;
5f39d397 2137 }
54aa1f4d 2138out:
c3e69d58 2139 spin_unlock(&delayed_refs->lock);
a28ec197
CM
2140 return 0;
2141}
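/*
 * Illustrative fragment only (not part of extent-tree.c): the two usual
 * ways a caller might drive btrfs_run_delayed_refs(), per the comment
 * above.  The transaction handle is assumed to have been started
 * elsewhere; example_flush_delayed_refs() is a hypothetical name.
 */
static int example_flush_delayed_refs(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root)
{
	int ret;

	/* process roughly what was queued when the run started */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret)
		return ret;

	/* or keep going until the ref tree is drained, new entries included */
	return btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
}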
2142
5d4f98a2
YZ
2143int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2144 struct btrfs_root *root,
2145 u64 bytenr, u64 num_bytes, u64 flags,
2146 int is_data)
2147{
2148 struct btrfs_delayed_extent_op *extent_op;
2149 int ret;
2150
2151 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2152 if (!extent_op)
2153 return -ENOMEM;
2154
2155 extent_op->flags_to_set = flags;
2156 extent_op->update_flags = 1;
2157 extent_op->update_key = 0;
2158 extent_op->is_data = is_data ? 1 : 0;
2159
2160 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2161 if (ret)
2162 kfree(extent_op);
2163 return ret;
2164}
2165
2166static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2167 struct btrfs_root *root,
2168 struct btrfs_path *path,
2169 u64 objectid, u64 offset, u64 bytenr)
2170{
2171 struct btrfs_delayed_ref_head *head;
2172 struct btrfs_delayed_ref_node *ref;
2173 struct btrfs_delayed_data_ref *data_ref;
2174 struct btrfs_delayed_ref_root *delayed_refs;
2175 struct rb_node *node;
2176 int ret = 0;
2177
2178 ret = -ENOENT;
2179 delayed_refs = &trans->transaction->delayed_refs;
2180 spin_lock(&delayed_refs->lock);
2181 head = btrfs_find_delayed_ref_head(trans, bytenr);
2182 if (!head)
2183 goto out;
2184
2185 if (!mutex_trylock(&head->mutex)) {
2186 atomic_inc(&head->node.refs);
2187 spin_unlock(&delayed_refs->lock);
2188
2189 btrfs_release_path(root->fs_info->extent_root, path);
2190
2191 mutex_lock(&head->mutex);
2192 mutex_unlock(&head->mutex);
2193 btrfs_put_delayed_ref(&head->node);
2194 return -EAGAIN;
2195 }
2196
2197 node = rb_prev(&head->node.rb_node);
2198 if (!node)
2199 goto out_unlock;
2200
2201 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2202
2203 if (ref->bytenr != bytenr)
2204 goto out_unlock;
2205
2206 ret = 1;
2207 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2208 goto out_unlock;
2209
2210 data_ref = btrfs_delayed_node_to_data_ref(ref);
2211
2212 node = rb_prev(node);
2213 if (node) {
2214 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2215 if (ref->bytenr == bytenr)
2216 goto out_unlock;
2217 }
2218
2219 if (data_ref->root != root->root_key.objectid ||
2220 data_ref->objectid != objectid || data_ref->offset != offset)
2221 goto out_unlock;
2222
2223 ret = 0;
2224out_unlock:
2225 mutex_unlock(&head->mutex);
2226out:
2227 spin_unlock(&delayed_refs->lock);
2228 return ret;
2229}
2230
2231static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2232 struct btrfs_root *root,
2233 struct btrfs_path *path,
2234 u64 objectid, u64 offset, u64 bytenr)
be20aa9d
CM
2235{
2236 struct btrfs_root *extent_root = root->fs_info->extent_root;
f321e491 2237 struct extent_buffer *leaf;
5d4f98a2
YZ
2238 struct btrfs_extent_data_ref *ref;
2239 struct btrfs_extent_inline_ref *iref;
2240 struct btrfs_extent_item *ei;
f321e491 2241 struct btrfs_key key;
5d4f98a2 2242 u32 item_size;
be20aa9d 2243 int ret;
925baedd 2244
be20aa9d 2245 key.objectid = bytenr;
31840ae1 2246 key.offset = (u64)-1;
f321e491 2247 key.type = BTRFS_EXTENT_ITEM_KEY;
be20aa9d 2248
be20aa9d
CM
2249 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2250 if (ret < 0)
2251 goto out;
2252 BUG_ON(ret == 0);
80ff3856
YZ
2253
2254 ret = -ENOENT;
2255 if (path->slots[0] == 0)
31840ae1 2256 goto out;
be20aa9d 2257
31840ae1 2258 path->slots[0]--;
f321e491 2259 leaf = path->nodes[0];
5d4f98a2 2260 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
be20aa9d 2261
5d4f98a2 2262 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
be20aa9d 2263 goto out;
f321e491 2264
5d4f98a2
YZ
2265 ret = 1;
2266 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2267#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2268 if (item_size < sizeof(*ei)) {
2269 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2270 goto out;
2271 }
2272#endif
2273 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
bd09835d 2274
5d4f98a2
YZ
2275 if (item_size != sizeof(*ei) +
2276 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2277 goto out;
be20aa9d 2278
5d4f98a2
YZ
2279 if (btrfs_extent_generation(leaf, ei) <=
2280 btrfs_root_last_snapshot(&root->root_item))
2281 goto out;
2282
2283 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2284 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2285 BTRFS_EXTENT_DATA_REF_KEY)
2286 goto out;
2287
2288 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2289 if (btrfs_extent_refs(leaf, ei) !=
2290 btrfs_extent_data_ref_count(leaf, ref) ||
2291 btrfs_extent_data_ref_root(leaf, ref) !=
2292 root->root_key.objectid ||
2293 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2294 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2295 goto out;
2296
2297 ret = 0;
2298out:
2299 return ret;
2300}
2301
2302int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2303 struct btrfs_root *root,
2304 u64 objectid, u64 offset, u64 bytenr)
2305{
2306 struct btrfs_path *path;
2307 int ret;
2308 int ret2;
2309
2310 path = btrfs_alloc_path();
2311 if (!path)
2312 return -ENOENT;
2313
2314 do {
2315 ret = check_committed_ref(trans, root, path, objectid,
2316 offset, bytenr);
2317 if (ret && ret != -ENOENT)
f321e491 2318 goto out;
80ff3856 2319
5d4f98a2
YZ
2320 ret2 = check_delayed_ref(trans, root, path, objectid,
2321 offset, bytenr);
2322 } while (ret2 == -EAGAIN);
2323
2324 if (ret2 && ret2 != -ENOENT) {
2325 ret = ret2;
2326 goto out;
f321e491 2327 }
5d4f98a2
YZ
2328
2329 if (ret != -ENOENT || ret2 != -ENOENT)
2330 ret = 0;
be20aa9d 2331out:
80ff3856 2332 btrfs_free_path(path);
f321e491 2333 return ret;
be20aa9d 2334}
c5739bba 2335
5d4f98a2 2336#if 0
31840ae1
ZY
2337int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2338 struct extent_buffer *buf, u32 nr_extents)
02217ed2 2339{
5f39d397 2340 struct btrfs_key key;
6407bf6d 2341 struct btrfs_file_extent_item *fi;
e4657689
ZY
2342 u64 root_gen;
2343 u32 nritems;
02217ed2 2344 int i;
db94535d 2345 int level;
31840ae1 2346 int ret = 0;
e4657689 2347 int shared = 0;
a28ec197 2348
3768f368 2349 if (!root->ref_cows)
a28ec197 2350 return 0;
5f39d397 2351
e4657689
ZY
2352 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2353 shared = 0;
2354 root_gen = root->root_key.offset;
2355 } else {
2356 shared = 1;
2357 root_gen = trans->transid - 1;
2358 }
2359
db94535d 2360 level = btrfs_header_level(buf);
5f39d397 2361 nritems = btrfs_header_nritems(buf);
4a096752 2362
31840ae1 2363 if (level == 0) {
31153d81
YZ
2364 struct btrfs_leaf_ref *ref;
2365 struct btrfs_extent_info *info;
2366
31840ae1 2367 ref = btrfs_alloc_leaf_ref(root, nr_extents);
31153d81 2368 if (!ref) {
31840ae1 2369 ret = -ENOMEM;
31153d81
YZ
2370 goto out;
2371 }
2372
e4657689 2373 ref->root_gen = root_gen;
31153d81
YZ
2374 ref->bytenr = buf->start;
2375 ref->owner = btrfs_header_owner(buf);
2376 ref->generation = btrfs_header_generation(buf);
31840ae1 2377 ref->nritems = nr_extents;
31153d81 2378 info = ref->extents;
bcc63abb 2379
31840ae1 2380 for (i = 0; nr_extents > 0 && i < nritems; i++) {
31153d81
YZ
2381 u64 disk_bytenr;
2382 btrfs_item_key_to_cpu(buf, &key, i);
2383 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2384 continue;
2385 fi = btrfs_item_ptr(buf, i,
2386 struct btrfs_file_extent_item);
2387 if (btrfs_file_extent_type(buf, fi) ==
2388 BTRFS_FILE_EXTENT_INLINE)
2389 continue;
2390 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2391 if (disk_bytenr == 0)
2392 continue;
2393
2394 info->bytenr = disk_bytenr;
2395 info->num_bytes =
2396 btrfs_file_extent_disk_num_bytes(buf, fi);
2397 info->objectid = key.objectid;
2398 info->offset = key.offset;
2399 info++;
2400 }
2401
e4657689 2402 ret = btrfs_add_leaf_ref(root, ref, shared);
5b84e8d6
YZ
2403 if (ret == -EEXIST && shared) {
2404 struct btrfs_leaf_ref *old;
2405 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2406 BUG_ON(!old);
2407 btrfs_remove_leaf_ref(root, old);
2408 btrfs_free_leaf_ref(root, old);
2409 ret = btrfs_add_leaf_ref(root, ref, shared);
2410 }
31153d81 2411 WARN_ON(ret);
bcc63abb 2412 btrfs_free_leaf_ref(root, ref);
31153d81
YZ
2413 }
2414out:
31840ae1
ZY
2415 return ret;
2416}
2417
b7a9f29f
CM
2418/* when a block goes through cow, we update the reference counts of
2419 * everything that block points to. The internal pointers of the block
2420 * can be in just about any order, and it is likely to have clusters of
2421 * things that are close together and clusters of things that are not.
2422 *
2423 * To help reduce the seeks that come with updating all of these reference
2424 * counts, sort them by byte number before actual updates are done.
2425 *
2426 * struct refsort is used to match byte number to slot in the btree block.
2427 * we sort based on the byte number and then use the slot to actually
2428 * find the item.
bd56b302
CM
2429 *
 2430 * struct refsort is smaller than struct btrfs_item and smaller than
2431 * struct btrfs_key_ptr. Since we're currently limited to the page size
2432 * for a btree block, there's no way for a kmalloc of refsorts for a
2433 * single node to be bigger than a page.
b7a9f29f
CM
2434 */
2435struct refsort {
2436 u64 bytenr;
2437 u32 slot;
2438};
2439
2440/*
2441 * for passing into sort()
2442 */
2443static int refsort_cmp(const void *a_void, const void *b_void)
2444{
2445 const struct refsort *a = a_void;
2446 const struct refsort *b = b_void;
2447
2448 if (a->bytenr < b->bytenr)
2449 return -1;
2450 if (a->bytenr > b->bytenr)
2451 return 1;
2452 return 0;
2453}
5d4f98a2 2454#endif
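/*
 * Stand-alone sketch (plain userspace C, not kernel code) of the refsort
 * idea described above: record (bytenr, slot) pairs, sort by byte number,
 * then walk the slots in disk order so the reference-count updates seek
 * less.  qsort() stands in for the kernel's sort(); the byte numbers are
 * made-up values.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct refsort_demo {
	uint64_t bytenr;
	uint32_t slot;
};

static int refsort_demo_cmp(const void *a_void, const void *b_void)
{
	const struct refsort_demo *a = a_void;
	const struct refsort_demo *b = b_void;

	if (a->bytenr < b->bytenr)
		return -1;
	if (a->bytenr > b->bytenr)
		return 1;
	return 0;
}

int main(void)
{
	struct refsort_demo refs[] = {
		{ .bytenr = 8192, .slot = 0 },
		{ .bytenr = 4096, .slot = 1 },
		{ .bytenr = 12288, .slot = 2 },
	};
	size_t i;

	qsort(refs, 3, sizeof(refs[0]), refsort_demo_cmp);
	for (i = 0; i < 3; i++)
		printf("bytenr %llu -> slot %u\n",
		       (unsigned long long)refs[i].bytenr,
		       (unsigned)refs[i].slot);
	return 0;
}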
b7a9f29f 2455
5d4f98a2 2456static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
b7a9f29f 2457 struct btrfs_root *root,
5d4f98a2
YZ
2458 struct extent_buffer *buf,
2459 int full_backref, int inc)
31840ae1
ZY
2460{
2461 u64 bytenr;
5d4f98a2
YZ
2462 u64 num_bytes;
2463 u64 parent;
31840ae1 2464 u64 ref_root;
31840ae1 2465 u32 nritems;
31840ae1
ZY
2466 struct btrfs_key key;
2467 struct btrfs_file_extent_item *fi;
2468 int i;
2469 int level;
2470 int ret = 0;
31840ae1 2471 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
5d4f98a2 2472 u64, u64, u64, u64, u64, u64);
31840ae1
ZY
2473
2474 ref_root = btrfs_header_owner(buf);
31840ae1
ZY
2475 nritems = btrfs_header_nritems(buf);
2476 level = btrfs_header_level(buf);
2477
5d4f98a2
YZ
2478 if (!root->ref_cows && level == 0)
2479 return 0;
31840ae1 2480
5d4f98a2
YZ
2481 if (inc)
2482 process_func = btrfs_inc_extent_ref;
2483 else
2484 process_func = btrfs_free_extent;
31840ae1 2485
5d4f98a2
YZ
2486 if (full_backref)
2487 parent = buf->start;
2488 else
2489 parent = 0;
2490
2491 for (i = 0; i < nritems; i++) {
31840ae1 2492 if (level == 0) {
5d4f98a2 2493 btrfs_item_key_to_cpu(buf, &key, i);
31840ae1
ZY
2494 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2495 continue;
5d4f98a2 2496 fi = btrfs_item_ptr(buf, i,
31840ae1
ZY
2497 struct btrfs_file_extent_item);
2498 if (btrfs_file_extent_type(buf, fi) ==
2499 BTRFS_FILE_EXTENT_INLINE)
2500 continue;
2501 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2502 if (bytenr == 0)
2503 continue;
5d4f98a2
YZ
2504
2505 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2506 key.offset -= btrfs_file_extent_offset(buf, fi);
2507 ret = process_func(trans, root, bytenr, num_bytes,
2508 parent, ref_root, key.objectid,
2509 key.offset);
31840ae1
ZY
2510 if (ret)
2511 goto fail;
2512 } else {
5d4f98a2
YZ
2513 bytenr = btrfs_node_blockptr(buf, i);
2514 num_bytes = btrfs_level_size(root, level - 1);
2515 ret = process_func(trans, root, bytenr, num_bytes,
2516 parent, ref_root, level - 1, 0);
31840ae1
ZY
2517 if (ret)
2518 goto fail;
2519 }
2520 }
2521 return 0;
2522fail:
5d4f98a2
YZ
2523 BUG();
2524 return ret;
2525}
2526
2527int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2528 struct extent_buffer *buf, int full_backref)
2529{
2530 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2531}
2532
2533int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2534 struct extent_buffer *buf, int full_backref)
2535{
2536 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
31840ae1
ZY
2537}
2538
9078a3e1
CM
2539static int write_one_cache_group(struct btrfs_trans_handle *trans,
2540 struct btrfs_root *root,
2541 struct btrfs_path *path,
2542 struct btrfs_block_group_cache *cache)
2543{
2544 int ret;
9078a3e1 2545 struct btrfs_root *extent_root = root->fs_info->extent_root;
5f39d397
CM
2546 unsigned long bi;
2547 struct extent_buffer *leaf;
9078a3e1 2548
9078a3e1 2549 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
54aa1f4d
CM
2550 if (ret < 0)
2551 goto fail;
9078a3e1 2552 BUG_ON(ret);
5f39d397
CM
2553
2554 leaf = path->nodes[0];
2555 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2556 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2557 btrfs_mark_buffer_dirty(leaf);
9078a3e1 2558 btrfs_release_path(extent_root, path);
54aa1f4d 2559fail:
9078a3e1
CM
2560 if (ret)
2561 return ret;
9078a3e1
CM
2562 return 0;
2563
2564}
2565
4a8c9a62
YZ
2566static struct btrfs_block_group_cache *
2567next_block_group(struct btrfs_root *root,
2568 struct btrfs_block_group_cache *cache)
2569{
2570 struct rb_node *node;
2571 spin_lock(&root->fs_info->block_group_cache_lock);
2572 node = rb_next(&cache->cache_node);
2573 btrfs_put_block_group(cache);
2574 if (node) {
2575 cache = rb_entry(node, struct btrfs_block_group_cache,
2576 cache_node);
2577 atomic_inc(&cache->count);
2578 } else
2579 cache = NULL;
2580 spin_unlock(&root->fs_info->block_group_cache_lock);
2581 return cache;
2582}
2583
96b5179d
CM
2584int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2585 struct btrfs_root *root)
9078a3e1 2586{
4a8c9a62 2587 struct btrfs_block_group_cache *cache;
9078a3e1 2588 int err = 0;
9078a3e1 2589 struct btrfs_path *path;
96b5179d 2590 u64 last = 0;
9078a3e1
CM
2591
2592 path = btrfs_alloc_path();
2593 if (!path)
2594 return -ENOMEM;
2595
d397712b 2596 while (1) {
4a8c9a62
YZ
2597 if (last == 0) {
2598 err = btrfs_run_delayed_refs(trans, root,
2599 (unsigned long)-1);
2600 BUG_ON(err);
0f9dd46c 2601 }
54aa1f4d 2602
4a8c9a62
YZ
2603 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2604 while (cache) {
2605 if (cache->dirty)
2606 break;
2607 cache = next_block_group(root, cache);
2608 }
2609 if (!cache) {
2610 if (last == 0)
2611 break;
2612 last = 0;
2613 continue;
2614 }
0f9dd46c 2615
e8569813 2616 cache->dirty = 0;
4a8c9a62 2617 last = cache->key.objectid + cache->key.offset;
0f9dd46c 2618
4a8c9a62
YZ
2619 err = write_one_cache_group(trans, root, path, cache);
2620 BUG_ON(err);
2621 btrfs_put_block_group(cache);
9078a3e1 2622 }
4a8c9a62 2623
9078a3e1 2624 btrfs_free_path(path);
4a8c9a62 2625 return 0;
9078a3e1
CM
2626}
2627
d2fb3437
YZ
2628int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2629{
2630 struct btrfs_block_group_cache *block_group;
2631 int readonly = 0;
2632
2633 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2634 if (!block_group || block_group->ro)
2635 readonly = 1;
2636 if (block_group)
fa9c0d79 2637 btrfs_put_block_group(block_group);
d2fb3437
YZ
2638 return readonly;
2639}
2640
593060d7
CM
2641static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2642 u64 total_bytes, u64 bytes_used,
2643 struct btrfs_space_info **space_info)
2644{
2645 struct btrfs_space_info *found;
2646
2647 found = __find_space_info(info, flags);
2648 if (found) {
25179201 2649 spin_lock(&found->lock);
593060d7
CM
2650 found->total_bytes += total_bytes;
2651 found->bytes_used += bytes_used;
8f18cf13 2652 found->full = 0;
25179201 2653 spin_unlock(&found->lock);
593060d7
CM
2654 *space_info = found;
2655 return 0;
2656 }
c146afad 2657 found = kzalloc(sizeof(*found), GFP_NOFS);
593060d7
CM
2658 if (!found)
2659 return -ENOMEM;
2660
0f9dd46c 2661 INIT_LIST_HEAD(&found->block_groups);
80eb234a 2662 init_rwsem(&found->groups_sem);
0f9dd46c 2663 spin_lock_init(&found->lock);
593060d7
CM
2664 found->flags = flags;
2665 found->total_bytes = total_bytes;
2666 found->bytes_used = bytes_used;
2667 found->bytes_pinned = 0;
e8569813 2668 found->bytes_reserved = 0;
c146afad 2669 found->bytes_readonly = 0;
6a63209f 2670 found->bytes_delalloc = 0;
593060d7 2671 found->full = 0;
0ef3e66b 2672 found->force_alloc = 0;
593060d7 2673 *space_info = found;
4184ea7f 2674 list_add_rcu(&found->list, &info->space_info);
817d52f8 2675 atomic_set(&found->caching_threads, 0);
593060d7
CM
2676 return 0;
2677}
2678
8790d502
CM
2679static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
2680{
2681 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
611f0e00 2682 BTRFS_BLOCK_GROUP_RAID1 |
321aecc6 2683 BTRFS_BLOCK_GROUP_RAID10 |
611f0e00 2684 BTRFS_BLOCK_GROUP_DUP);
8790d502
CM
2685 if (extra_flags) {
2686 if (flags & BTRFS_BLOCK_GROUP_DATA)
2687 fs_info->avail_data_alloc_bits |= extra_flags;
2688 if (flags & BTRFS_BLOCK_GROUP_METADATA)
2689 fs_info->avail_metadata_alloc_bits |= extra_flags;
2690 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
2691 fs_info->avail_system_alloc_bits |= extra_flags;
2692 }
2693}
593060d7 2694
c146afad
YZ
2695static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
2696{
2697 spin_lock(&cache->space_info->lock);
2698 spin_lock(&cache->lock);
2699 if (!cache->ro) {
2700 cache->space_info->bytes_readonly += cache->key.offset -
2701 btrfs_block_group_used(&cache->item);
2702 cache->ro = 1;
2703 }
2704 spin_unlock(&cache->lock);
2705 spin_unlock(&cache->space_info->lock);
2706}
2707
2b82032c 2708u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
ec44a35c 2709{
2b82032c 2710 u64 num_devices = root->fs_info->fs_devices->rw_devices;
a061fc8d
CM
2711
2712 if (num_devices == 1)
2713 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
2714 if (num_devices < 4)
2715 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
2716
ec44a35c
CM
2717 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
2718 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
a061fc8d 2719 BTRFS_BLOCK_GROUP_RAID10))) {
ec44a35c 2720 flags &= ~BTRFS_BLOCK_GROUP_DUP;
a061fc8d 2721 }
ec44a35c
CM
2722
2723 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
a061fc8d 2724 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
ec44a35c 2725 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
a061fc8d 2726 }
ec44a35c
CM
2727
2728 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
2729 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
2730 (flags & BTRFS_BLOCK_GROUP_RAID10) |
2731 (flags & BTRFS_BLOCK_GROUP_DUP)))
2732 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
2733 return flags;
2734}
2735
6a63209f
JB
2736static u64 btrfs_get_alloc_profile(struct btrfs_root *root, u64 data)
2737{
2738 struct btrfs_fs_info *info = root->fs_info;
2739 u64 alloc_profile;
2740
2741 if (data) {
2742 alloc_profile = info->avail_data_alloc_bits &
2743 info->data_alloc_profile;
2744 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
2745 } else if (root == root->fs_info->chunk_root) {
2746 alloc_profile = info->avail_system_alloc_bits &
2747 info->system_alloc_profile;
2748 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
2749 } else {
2750 alloc_profile = info->avail_metadata_alloc_bits &
2751 info->metadata_alloc_profile;
2752 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
2753 }
2754
2755 return btrfs_reduce_alloc_profile(root, data);
2756}
2757
2758void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
2759{
2760 u64 alloc_target;
2761
2762 alloc_target = btrfs_get_alloc_profile(root, 1);
2763 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
2764 alloc_target);
2765}
2766
9ed74f2d
JB
2767static u64 calculate_bytes_needed(struct btrfs_root *root, int num_items)
2768{
2769 u64 num_bytes;
2770 int level;
2771
2772 level = BTRFS_MAX_LEVEL - 2;
2773 /*
2774 * NOTE: these calculations are absolutely the worst possible case.
2775 * This assumes that _every_ item we insert will require a new leaf, and
2776 * that the tree has grown to its maximum level size.
2777 */
2778
2779 /*
 2780 * for every item we insert we could insert both an extent item and an
 2781 * extent ref item. Then for every item we insert, we will need to cow
2782 * both the original leaf, plus the leaf to the left and right of it.
2783 *
 2784 * If we are talking about the extent root, then we just want the
 2785 * number of items * 2, since we just need the extent item plus its ref.
2786 */
2787 if (root == root->fs_info->extent_root)
2788 num_bytes = num_items * 2;
2789 else
2790 num_bytes = (num_items + (2 * num_items)) * 3;
2791
2792 /*
2793 * num_bytes is total number of leaves we could need times the leaf
2794 * size, and then for every leaf we could end up cow'ing 2 nodes per
2795 * level, down to the leaf level.
2796 */
2797 num_bytes = (num_bytes * root->leafsize) +
2798 (num_bytes * (level * 2)) * root->nodesize;
2799
2800 return num_bytes;
2801}
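/*
 * Worked example of the estimate above (stand-alone userspace C, not part
 * of extent-tree.c).  The 4K leaf/node sizes and the level value of 6
 * (BTRFS_MAX_LEVEL - 2 with the usual maximum of 8) are assumptions made
 * for illustration only.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t worst_case_bytes(uint64_t num_items, uint64_t leafsize,
				 uint64_t nodesize, int level)
{
	/* non-extent-root case: (num_items + 2 * num_items) * 3 leaves */
	uint64_t num_bytes = (num_items + 2 * num_items) * 3;

	return num_bytes * leafsize + (num_bytes * (level * 2)) * nodesize;
}

int main(void)
{
	/* one item: 9 leaves -> 9 * 4096 + 108 * 4096 = 479232 bytes */
	printf("%llu\n",
	       (unsigned long long)worst_case_bytes(1, 4096, 4096, 6));
	return 0;
}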
2802
6a63209f 2803/*
9ed74f2d
JB
 2804 * Unreserve metadata space for delalloc. If we have fewer reserved credits than
2805 * we have extents, this function does nothing.
6a63209f 2806 */
9ed74f2d
JB
2807int btrfs_unreserve_metadata_for_delalloc(struct btrfs_root *root,
2808 struct inode *inode, int num_items)
6a63209f
JB
2809{
2810 struct btrfs_fs_info *info = root->fs_info;
2811 struct btrfs_space_info *meta_sinfo;
9ed74f2d
JB
2812 u64 num_bytes;
2813 u64 alloc_target;
2814 bool bug = false;
6a63209f
JB
2815
2816 /* get the space info for where the metadata will live */
2817 alloc_target = btrfs_get_alloc_profile(root, 0);
2818 meta_sinfo = __find_space_info(info, alloc_target);
2819
9ed74f2d
JB
2820 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
2821 num_items);
2822
6a63209f 2823 spin_lock(&meta_sinfo->lock);
32c00aff
JB
2824 spin_lock(&BTRFS_I(inode)->accounting_lock);
2825 if (BTRFS_I(inode)->reserved_extents <=
2826 BTRFS_I(inode)->outstanding_extents) {
2827 spin_unlock(&BTRFS_I(inode)->accounting_lock);
9ed74f2d
JB
2828 spin_unlock(&meta_sinfo->lock);
2829 return 0;
2830 }
32c00aff 2831 spin_unlock(&BTRFS_I(inode)->accounting_lock);
9ed74f2d 2832
32c00aff
JB
2833 BTRFS_I(inode)->reserved_extents--;
2834 BUG_ON(BTRFS_I(inode)->reserved_extents < 0);
9ed74f2d
JB
2835
2836 if (meta_sinfo->bytes_delalloc < num_bytes) {
2837 bug = true;
2838 meta_sinfo->bytes_delalloc = 0;
2839 } else {
2840 meta_sinfo->bytes_delalloc -= num_bytes;
2841 }
2842 spin_unlock(&meta_sinfo->lock);
2843
2844 BUG_ON(bug);
2845
2846 return 0;
2847}
6a63209f 2848
9ed74f2d
JB
2849static void check_force_delalloc(struct btrfs_space_info *meta_sinfo)
2850{
2851 u64 thresh;
2852
2853 thresh = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
2854 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
2855 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
2856 meta_sinfo->bytes_may_use;
6a63209f 2857
9ed74f2d
JB
2858 thresh = meta_sinfo->total_bytes - thresh;
2859 thresh *= 80;
6a63209f 2860 do_div(thresh, 100);
9ed74f2d
JB
2861 if (thresh <= meta_sinfo->bytes_delalloc)
2862 meta_sinfo->force_delalloc = 1;
2863 else
2864 meta_sinfo->force_delalloc = 0;
2865}
6a63209f 2866
e3ccfa98
JB
2867struct async_flush {
2868 struct btrfs_root *root;
2869 struct btrfs_space_info *info;
2870 struct btrfs_work work;
2871};
2872
2873static noinline void flush_delalloc_async(struct btrfs_work *work)
2874{
2875 struct async_flush *async;
2876 struct btrfs_root *root;
2877 struct btrfs_space_info *info;
2878
2879 async = container_of(work, struct async_flush, work);
2880 root = async->root;
2881 info = async->info;
2882
24bbcf04 2883 btrfs_start_delalloc_inodes(root, 0);
e3ccfa98 2884 wake_up(&info->flush_wait);
24bbcf04 2885 btrfs_wait_ordered_extents(root, 0, 0);
e3ccfa98
JB
2886
2887 spin_lock(&info->lock);
2888 info->flushing = 0;
2889 spin_unlock(&info->lock);
2890 wake_up(&info->flush_wait);
2891
2892 kfree(async);
2893}
2894
2895static void wait_on_flush(struct btrfs_space_info *info)
2896{
2897 DEFINE_WAIT(wait);
2898 u64 used;
2899
2900 while (1) {
2901 prepare_to_wait(&info->flush_wait, &wait,
2902 TASK_UNINTERRUPTIBLE);
2903 spin_lock(&info->lock);
2904 if (!info->flushing) {
2905 spin_unlock(&info->lock);
2906 break;
2907 }
2908
2909 used = info->bytes_used + info->bytes_reserved +
2910 info->bytes_pinned + info->bytes_readonly +
2911 info->bytes_super + info->bytes_root +
2912 info->bytes_may_use + info->bytes_delalloc;
2913 if (used < info->total_bytes) {
2914 spin_unlock(&info->lock);
2915 break;
2916 }
2917 spin_unlock(&info->lock);
2918 schedule();
2919 }
2920 finish_wait(&info->flush_wait, &wait);
2921}
2922
32c00aff
JB
2923static void flush_delalloc(struct btrfs_root *root,
2924 struct btrfs_space_info *info)
2925{
e3ccfa98 2926 struct async_flush *async;
32c00aff
JB
2927 bool wait = false;
2928
2929 spin_lock(&info->lock);
2930
2931 if (!info->flushing) {
2932 info->flushing = 1;
2933 init_waitqueue_head(&info->flush_wait);
2934 } else {
2935 wait = true;
2936 }
2937
2938 spin_unlock(&info->lock);
2939
2940 if (wait) {
e3ccfa98 2941 wait_on_flush(info);
32c00aff
JB
2942 return;
2943 }
2944
e3ccfa98
JB
2945 async = kzalloc(sizeof(*async), GFP_NOFS);
2946 if (!async)
2947 goto flush;
2948
2949 async->root = root;
2950 async->info = info;
2951 async->work.func = flush_delalloc_async;
2952
2953 btrfs_queue_worker(&root->fs_info->enospc_workers,
2954 &async->work);
2955 wait_on_flush(info);
2956 return;
2957
2958flush:
24bbcf04
YZ
2959 btrfs_start_delalloc_inodes(root, 0);
2960 btrfs_wait_ordered_extents(root, 0, 0);
32c00aff
JB
2961
2962 spin_lock(&info->lock);
2963 info->flushing = 0;
2964 spin_unlock(&info->lock);
2965 wake_up(&info->flush_wait);
2966}
2967
9ed74f2d
JB
2968static int maybe_allocate_chunk(struct btrfs_root *root,
2969 struct btrfs_space_info *info)
2970{
2971 struct btrfs_super_block *disk_super = &root->fs_info->super_copy;
2972 struct btrfs_trans_handle *trans;
2973 bool wait = false;
2974 int ret = 0;
2975 u64 min_metadata;
2976 u64 free_space;
2977
2978 free_space = btrfs_super_total_bytes(disk_super);
2979 /*
33b25808 2980 * we allow the metadata to grow to a max of either 10gb or 5% of the
9ed74f2d
JB
2981 * space in the volume.
2982 */
33b25808 2983 min_metadata = min((u64)10 * 1024 * 1024 * 1024,
9ed74f2d
JB
2984 div64_u64(free_space * 5, 100));
2985 if (info->total_bytes >= min_metadata) {
2986 spin_unlock(&info->lock);
2987 return 0;
2988 }
2989
2990 if (info->full) {
2991 spin_unlock(&info->lock);
2992 return 0;
2993 }
2994
2995 if (!info->allocating_chunk) {
2996 info->force_alloc = 1;
2997 info->allocating_chunk = 1;
e3ccfa98 2998 init_waitqueue_head(&info->allocate_wait);
9ed74f2d
JB
2999 } else {
3000 wait = true;
3001 }
3002
3003 spin_unlock(&info->lock);
3004
3005 if (wait) {
e3ccfa98 3006 wait_event(info->allocate_wait,
9ed74f2d
JB
3007 !info->allocating_chunk);
3008 return 1;
3009 }
3010
3011 trans = btrfs_start_transaction(root, 1);
3012 if (!trans) {
3013 ret = -ENOMEM;
3014 goto out;
3015 }
3016
3017 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3018 4096 + 2 * 1024 * 1024,
3019 info->flags, 0);
3020 btrfs_end_transaction(trans, root);
3021 if (ret)
3022 goto out;
3023out:
3024 spin_lock(&info->lock);
3025 info->allocating_chunk = 0;
3026 spin_unlock(&info->lock);
e3ccfa98 3027 wake_up(&info->allocate_wait);
9ed74f2d
JB
3028
3029 if (ret)
3030 return 0;
3031 return 1;
3032}
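/*
 * Stand-alone sketch (plain userspace C, not kernel code) of the growth
 * cap applied above: metadata may grow to min(10GB, 5% of the volume).
 * The volume sizes used below are made-up inputs for illustration.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t metadata_cap(uint64_t volume_bytes)
{
	uint64_t ten_gb = 10ULL * 1024 * 1024 * 1024;
	uint64_t five_percent = volume_bytes * 5 / 100;

	return ten_gb < five_percent ? ten_gb : five_percent;
}

int main(void)
{
	/* 100GB volume -> 5GB cap; 1TB volume -> 10GB cap */
	printf("%llu\n", (unsigned long long)metadata_cap(100ULL << 30));
	printf("%llu\n", (unsigned long long)metadata_cap(1ULL << 40));
	return 0;
}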
3033
3034/*
3035 * Reserve metadata space for delalloc.
3036 */
3037int btrfs_reserve_metadata_for_delalloc(struct btrfs_root *root,
3038 struct inode *inode, int num_items)
3039{
3040 struct btrfs_fs_info *info = root->fs_info;
3041 struct btrfs_space_info *meta_sinfo;
3042 u64 num_bytes;
3043 u64 used;
3044 u64 alloc_target;
3045 int flushed = 0;
3046 int force_delalloc;
3047
3048 /* get the space info for where the metadata will live */
3049 alloc_target = btrfs_get_alloc_profile(root, 0);
3050 meta_sinfo = __find_space_info(info, alloc_target);
3051
3052 num_bytes = calculate_bytes_needed(root->fs_info->extent_root,
3053 num_items);
3054again:
3055 spin_lock(&meta_sinfo->lock);
3056
3057 force_delalloc = meta_sinfo->force_delalloc;
3058
3059 if (unlikely(!meta_sinfo->bytes_root))
3060 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3061
3062 if (!flushed)
3063 meta_sinfo->bytes_delalloc += num_bytes;
3064
3065 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3066 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3067 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3068 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3069
3070 if (used > meta_sinfo->total_bytes) {
3071 flushed++;
3072
3073 if (flushed == 1) {
3074 if (maybe_allocate_chunk(root, meta_sinfo))
3075 goto again;
3076 flushed++;
3077 } else {
4e06bdd6 3078 spin_unlock(&meta_sinfo->lock);
9ed74f2d 3079 }
4e06bdd6 3080
9ed74f2d
JB
3081 if (flushed == 2) {
3082 filemap_flush(inode->i_mapping);
3083 goto again;
3084 } else if (flushed == 3) {
32c00aff 3085 flush_delalloc(root, meta_sinfo);
4e06bdd6
JB
3086 goto again;
3087 }
9ed74f2d
JB
3088 spin_lock(&meta_sinfo->lock);
3089 meta_sinfo->bytes_delalloc -= num_bytes;
6a63209f 3090 spin_unlock(&meta_sinfo->lock);
9ed74f2d 3091 printk(KERN_ERR "enospc, has %d, reserved %d\n",
32c00aff
JB
3092 BTRFS_I(inode)->outstanding_extents,
3093 BTRFS_I(inode)->reserved_extents);
9ed74f2d
JB
3094 dump_space_info(meta_sinfo, 0, 0);
3095 return -ENOSPC;
3096 }
4e06bdd6 3097
32c00aff 3098 BTRFS_I(inode)->reserved_extents++;
9ed74f2d
JB
3099 check_force_delalloc(meta_sinfo);
3100 spin_unlock(&meta_sinfo->lock);
3101
3102 if (!flushed && force_delalloc)
3103 filemap_flush(inode->i_mapping);
3104
3105 return 0;
3106}
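/*
 * Illustrative pairing only (the real callers live outside this file):
 * reserve metadata for one new delalloc extent before dirtying pages, and
 * hand the credit back if the write never happens.
 * example_delalloc_write() and example_dirty_pages() are hypothetical
 * names used just for this sketch.
 */
static int example_delalloc_write(struct btrfs_root *root,
				  struct inode *inode)
{
	int ret;

	ret = btrfs_reserve_metadata_for_delalloc(root, inode, 1);
	if (ret)			/* typically -ENOSPC */
		return ret;

	ret = example_dirty_pages(inode);	/* hypothetical helper */
	if (ret)
		/* the extent never became delalloc, give the credit back */
		btrfs_unreserve_metadata_for_delalloc(root, inode, 1);
	return ret;
}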
3107
3108/*
3109 * unreserve num_items number of items worth of metadata space. This needs to
3110 * be paired with btrfs_reserve_metadata_space.
3111 *
3112 * NOTE: if you have the option, run this _AFTER_ you do a
3113 * btrfs_end_transaction, since btrfs_end_transaction will run delayed ref
 3114 * operations which will result in more used metadata, so we want to make sure we
3115 * can do that without issue.
3116 */
3117int btrfs_unreserve_metadata_space(struct btrfs_root *root, int num_items)
3118{
3119 struct btrfs_fs_info *info = root->fs_info;
3120 struct btrfs_space_info *meta_sinfo;
3121 u64 num_bytes;
3122 u64 alloc_target;
3123 bool bug = false;
3124
3125 /* get the space info for where the metadata will live */
3126 alloc_target = btrfs_get_alloc_profile(root, 0);
3127 meta_sinfo = __find_space_info(info, alloc_target);
3128
3129 num_bytes = calculate_bytes_needed(root, num_items);
3130
3131 spin_lock(&meta_sinfo->lock);
3132 if (meta_sinfo->bytes_may_use < num_bytes) {
3133 bug = true;
3134 meta_sinfo->bytes_may_use = 0;
3135 } else {
3136 meta_sinfo->bytes_may_use -= num_bytes;
3137 }
3138 spin_unlock(&meta_sinfo->lock);
3139
3140 BUG_ON(bug);
3141
3142 return 0;
3143}
3144
3145/*
 3146 * Reserve some metadata space for use. We'll calculate the worst case number
3147 * of bytes that would be needed to modify num_items number of items. If we
3148 * have space, fantastic, if not, you get -ENOSPC. Please call
3149 * btrfs_unreserve_metadata_space when you are done for the _SAME_ number of
3150 * items you reserved, since whatever metadata you needed should have already
3151 * been allocated.
3152 *
3153 * This will commit the transaction to make more space if we don't have enough
 3154 * metadata space. The only time we don't do this is if we're reserving space
 3155 * inside of a transaction; in that case we will just return -ENOSPC and it is the
 3156 * caller's responsibility to handle it properly.
3157 */
3158int btrfs_reserve_metadata_space(struct btrfs_root *root, int num_items)
3159{
3160 struct btrfs_fs_info *info = root->fs_info;
3161 struct btrfs_space_info *meta_sinfo;
3162 u64 num_bytes;
3163 u64 used;
3164 u64 alloc_target;
3165 int retries = 0;
3166
3167 /* get the space info for where the metadata will live */
3168 alloc_target = btrfs_get_alloc_profile(root, 0);
3169 meta_sinfo = __find_space_info(info, alloc_target);
3170
3171 num_bytes = calculate_bytes_needed(root, num_items);
3172again:
3173 spin_lock(&meta_sinfo->lock);
3174
3175 if (unlikely(!meta_sinfo->bytes_root))
3176 meta_sinfo->bytes_root = calculate_bytes_needed(root, 6);
3177
3178 if (!retries)
3179 meta_sinfo->bytes_may_use += num_bytes;
3180
3181 used = meta_sinfo->bytes_used + meta_sinfo->bytes_reserved +
3182 meta_sinfo->bytes_pinned + meta_sinfo->bytes_readonly +
3183 meta_sinfo->bytes_super + meta_sinfo->bytes_root +
3184 meta_sinfo->bytes_may_use + meta_sinfo->bytes_delalloc;
3185
3186 if (used > meta_sinfo->total_bytes) {
3187 retries++;
3188 if (retries == 1) {
3189 if (maybe_allocate_chunk(root, meta_sinfo))
3190 goto again;
3191 retries++;
3192 } else {
3193 spin_unlock(&meta_sinfo->lock);
3194 }
3195
3196 if (retries == 2) {
32c00aff 3197 flush_delalloc(root, meta_sinfo);
4e06bdd6
JB
3198 goto again;
3199 }
9ed74f2d
JB
3200 spin_lock(&meta_sinfo->lock);
3201 meta_sinfo->bytes_may_use -= num_bytes;
3202 spin_unlock(&meta_sinfo->lock);
3203
3204 dump_space_info(meta_sinfo, 0, 0);
6a63209f
JB
3205 return -ENOSPC;
3206 }
9ed74f2d
JB
3207
3208 check_force_delalloc(meta_sinfo);
6a63209f
JB
3209 spin_unlock(&meta_sinfo->lock);
3210
3211 return 0;
3212}
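/*
 * Illustrative ordering only, following the two comments above: reserve
 * the worst-case space before the modification, and release the same item
 * count after btrfs_end_transaction() so the delayed-ref work it triggers
 * is still covered.  example_reserved_update() is a hypothetical name and
 * the "insert/update two items" step is a placeholder, not a real call.
 */
static int example_reserved_update(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	int ret;

	/* worst-case space for touching two items */
	ret = btrfs_reserve_metadata_space(root, 2);
	if (ret)			/* -ENOSPC: nothing was reserved */
		return ret;

	trans = btrfs_start_transaction(root, 1);
	if (!trans) {
		btrfs_unreserve_metadata_space(root, 2);
		return -ENOMEM;
	}

	/* ... insert/update two items here ... */

	ret = btrfs_end_transaction(trans, root);

	/* release _after_ end_transaction, per the note above */
	btrfs_unreserve_metadata_space(root, 2);
	return ret;
}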
3213
3214/*
3215 * This will check the space that the inode allocates from to make sure we have
3216 * enough space for bytes.
3217 */
3218int btrfs_check_data_free_space(struct btrfs_root *root, struct inode *inode,
3219 u64 bytes)
3220{
3221 struct btrfs_space_info *data_sinfo;
4e06bdd6 3222 int ret = 0, committed = 0;
6a63209f
JB
3223
3224 /* make sure bytes are sectorsize aligned */
3225 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3226
3227 data_sinfo = BTRFS_I(inode)->space_info;
33b4d47f
CM
3228 if (!data_sinfo)
3229 goto alloc;
3230
6a63209f
JB
3231again:
3232 /* make sure we have enough space to handle the data first */
3233 spin_lock(&data_sinfo->lock);
3234 if (data_sinfo->total_bytes - data_sinfo->bytes_used -
3235 data_sinfo->bytes_delalloc - data_sinfo->bytes_reserved -
3236 data_sinfo->bytes_pinned - data_sinfo->bytes_readonly -
1b2da372 3237 data_sinfo->bytes_may_use - data_sinfo->bytes_super < bytes) {
4e06bdd6
JB
3238 struct btrfs_trans_handle *trans;
3239
6a63209f
JB
3240 /*
3241 * if we don't have enough free bytes in this space then we need
3242 * to alloc a new chunk.
3243 */
3244 if (!data_sinfo->full) {
3245 u64 alloc_target;
6a63209f
JB
3246
3247 data_sinfo->force_alloc = 1;
3248 spin_unlock(&data_sinfo->lock);
33b4d47f 3249alloc:
6a63209f
JB
3250 alloc_target = btrfs_get_alloc_profile(root, 1);
3251 trans = btrfs_start_transaction(root, 1);
3252 if (!trans)
3253 return -ENOMEM;
3254
3255 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3256 bytes + 2 * 1024 * 1024,
3257 alloc_target, 0);
3258 btrfs_end_transaction(trans, root);
3259 if (ret)
3260 return ret;
33b4d47f
CM
3261
3262 if (!data_sinfo) {
3263 btrfs_set_inode_space_info(root, inode);
3264 data_sinfo = BTRFS_I(inode)->space_info;
3265 }
6a63209f
JB
3266 goto again;
3267 }
3268 spin_unlock(&data_sinfo->lock);
4e06bdd6
JB
3269
3270 /* commit the current transaction and try again */
dd7e0b7b 3271 if (!committed && !root->fs_info->open_ioctl_trans) {
4e06bdd6
JB
3272 committed = 1;
3273 trans = btrfs_join_transaction(root, 1);
3274 if (!trans)
3275 return -ENOMEM;
3276 ret = btrfs_commit_transaction(trans, root);
3277 if (ret)
3278 return ret;
3279 goto again;
3280 }
3281
6a63209f
JB
3282 printk(KERN_ERR "no space left, need %llu, %llu delalloc bytes"
3283 ", %llu bytes_used, %llu bytes_reserved, "
68f5a38c 3284 "%llu bytes_pinned, %llu bytes_readonly, %llu may use "
21380931
JB
3285 "%llu total\n", (unsigned long long)bytes,
3286 (unsigned long long)data_sinfo->bytes_delalloc,
3287 (unsigned long long)data_sinfo->bytes_used,
3288 (unsigned long long)data_sinfo->bytes_reserved,
3289 (unsigned long long)data_sinfo->bytes_pinned,
3290 (unsigned long long)data_sinfo->bytes_readonly,
3291 (unsigned long long)data_sinfo->bytes_may_use,
3292 (unsigned long long)data_sinfo->total_bytes);
6a63209f
JB
3293 return -ENOSPC;
3294 }
3295 data_sinfo->bytes_may_use += bytes;
3296 BTRFS_I(inode)->reserved_bytes += bytes;
3297 spin_unlock(&data_sinfo->lock);
3298
9ed74f2d 3299 return 0;
6a63209f
JB
3300}
3301
3302/*
3303 * if there was an error for whatever reason after calling
3304 * btrfs_check_data_free_space, call this so we can cleanup the counters.
3305 */
3306void btrfs_free_reserved_data_space(struct btrfs_root *root,
3307 struct inode *inode, u64 bytes)
3308{
3309 struct btrfs_space_info *data_sinfo;
3310
3311 /* make sure bytes are sectorsize aligned */
3312 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3313
3314 data_sinfo = BTRFS_I(inode)->space_info;
3315 spin_lock(&data_sinfo->lock);
3316 data_sinfo->bytes_may_use -= bytes;
3317 BTRFS_I(inode)->reserved_bytes -= bytes;
3318 spin_unlock(&data_sinfo->lock);
3319}
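/*
 * Illustrative pairing only (the real callers are in the btrfs write
 * paths): check data space before accepting a buffered write, and undo
 * the accounting if the write cannot proceed after all.
 * example_buffered_write() and example_prepare_pages() are hypothetical
 * names used just for this sketch.
 */
static int example_buffered_write(struct btrfs_root *root,
				  struct inode *inode, u64 count)
{
	int ret;

	ret = btrfs_check_data_free_space(root, inode, count);
	if (ret)			/* typically -ENOSPC */
		return ret;

	ret = example_prepare_pages(inode, count);	/* hypothetical helper */
	if (ret)
		/* error after the check: put the counters back */
		btrfs_free_reserved_data_space(root, inode, count);
	return ret;
}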
3320
3321/* called when we are adding a delalloc extent to the inode's io_tree */
3322void btrfs_delalloc_reserve_space(struct btrfs_root *root, struct inode *inode,
3323 u64 bytes)
3324{
3325 struct btrfs_space_info *data_sinfo;
3326
3327 /* get the space info for where this inode will be storing its data */
3328 data_sinfo = BTRFS_I(inode)->space_info;
3329
3330 /* make sure we have enough space to handle the data first */
3331 spin_lock(&data_sinfo->lock);
3332 data_sinfo->bytes_delalloc += bytes;
3333
3334 /*
3335 * we are adding a delalloc extent without calling
3336 * btrfs_check_data_free_space first. This happens on a weird
3337 * writepage condition, but shouldn't hurt our accounting
3338 */
3339 if (unlikely(bytes > BTRFS_I(inode)->reserved_bytes)) {
3340 data_sinfo->bytes_may_use -= BTRFS_I(inode)->reserved_bytes;
3341 BTRFS_I(inode)->reserved_bytes = 0;
3342 } else {
3343 data_sinfo->bytes_may_use -= bytes;
3344 BTRFS_I(inode)->reserved_bytes -= bytes;
3345 }
3346
3347 spin_unlock(&data_sinfo->lock);
3348}
3349
 3350/* called when we are clearing a delalloc extent from the inode's io_tree */
3351void btrfs_delalloc_free_space(struct btrfs_root *root, struct inode *inode,
3352 u64 bytes)
3353{
3354 struct btrfs_space_info *info;
3355
3356 info = BTRFS_I(inode)->space_info;
3357
3358 spin_lock(&info->lock);
3359 info->bytes_delalloc -= bytes;
3360 spin_unlock(&info->lock);
3361}
3362
97e728d4
JB
3363static void force_metadata_allocation(struct btrfs_fs_info *info)
3364{
3365 struct list_head *head = &info->space_info;
3366 struct btrfs_space_info *found;
3367
3368 rcu_read_lock();
3369 list_for_each_entry_rcu(found, head, list) {
3370 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3371 found->force_alloc = 1;
3372 }
3373 rcu_read_unlock();
3374}
3375
6324fbf3
CM
3376static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3377 struct btrfs_root *extent_root, u64 alloc_bytes,
0ef3e66b 3378 u64 flags, int force)
6324fbf3
CM
3379{
3380 struct btrfs_space_info *space_info;
97e728d4 3381 struct btrfs_fs_info *fs_info = extent_root->fs_info;
6324fbf3 3382 u64 thresh;
c146afad
YZ
3383 int ret = 0;
3384
97e728d4 3385 mutex_lock(&fs_info->chunk_mutex);
6324fbf3 3386
2b82032c 3387 flags = btrfs_reduce_alloc_profile(extent_root, flags);
ec44a35c 3388
6324fbf3 3389 space_info = __find_space_info(extent_root->fs_info, flags);
593060d7
CM
3390 if (!space_info) {
3391 ret = update_space_info(extent_root->fs_info, flags,
3392 0, 0, &space_info);
3393 BUG_ON(ret);
3394 }
6324fbf3
CM
3395 BUG_ON(!space_info);
3396
25179201 3397 spin_lock(&space_info->lock);
9ed74f2d 3398 if (space_info->force_alloc)
0ef3e66b 3399 force = 1;
25179201
JB
3400 if (space_info->full) {
3401 spin_unlock(&space_info->lock);
925baedd 3402 goto out;
25179201 3403 }
6324fbf3 3404
c146afad 3405 thresh = space_info->total_bytes - space_info->bytes_readonly;
9ed74f2d 3406 thresh = div_factor(thresh, 8);
0ef3e66b 3407 if (!force &&
e8569813 3408 (space_info->bytes_used + space_info->bytes_pinned +
25179201
JB
3409 space_info->bytes_reserved + alloc_bytes) < thresh) {
3410 spin_unlock(&space_info->lock);
925baedd 3411 goto out;
25179201 3412 }
25179201
JB
3413 spin_unlock(&space_info->lock);
3414
97e728d4
JB
3415 /*
3416 * if we're doing a data chunk, go ahead and make sure that
3417 * we keep a reasonable number of metadata chunks allocated in the
3418 * FS as well.
3419 */
9ed74f2d 3420 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
97e728d4
JB
3421 fs_info->data_chunk_allocations++;
3422 if (!(fs_info->data_chunk_allocations %
3423 fs_info->metadata_ratio))
3424 force_metadata_allocation(fs_info);
3425 }
3426
2b82032c 3427 ret = btrfs_alloc_chunk(trans, extent_root, flags);
9ed74f2d 3428 spin_lock(&space_info->lock);
d397712b 3429 if (ret)
6324fbf3 3430 space_info->full = 1;
9ed74f2d
JB
3431 space_info->force_alloc = 0;
3432 spin_unlock(&space_info->lock);
a74a4b97 3433out:
c146afad 3434 mutex_unlock(&extent_root->fs_info->chunk_mutex);
0f9dd46c 3435 return ret;
6324fbf3
CM
3436}
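/*
 * Stand-alone sketch (plain userspace C, not kernel code) of the
 * metadata_ratio behaviour in do_chunk_alloc() above: with a ratio of N,
 * every Nth data chunk allocation also forces metadata allocation.  The
 * ratio value below is a made-up mount-option setting.
 */
#include <stdio.h>

int main(void)
{
	unsigned long data_chunk_allocations = 0;
	unsigned long metadata_ratio = 4;	/* assumed value */
	int i;

	for (i = 0; i < 10; i++) {
		data_chunk_allocations++;
		if (!(data_chunk_allocations % metadata_ratio))
			printf("allocation %lu: force metadata chunk\n",
			       data_chunk_allocations);
	}
	return 0;
}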
3437
9078a3e1
CM
3438static int update_block_group(struct btrfs_trans_handle *trans,
3439 struct btrfs_root *root,
db94535d 3440 u64 bytenr, u64 num_bytes, int alloc,
0b86a832 3441 int mark_free)
9078a3e1
CM
3442{
3443 struct btrfs_block_group_cache *cache;
3444 struct btrfs_fs_info *info = root->fs_info;
db94535d 3445 u64 total = num_bytes;
9078a3e1 3446 u64 old_val;
db94535d 3447 u64 byte_in_group;
3e1ad54f 3448
5d4f98a2
YZ
3449 /* block accounting for super block */
3450 spin_lock(&info->delalloc_lock);
3451 old_val = btrfs_super_bytes_used(&info->super_copy);
3452 if (alloc)
3453 old_val += num_bytes;
3454 else
3455 old_val -= num_bytes;
3456 btrfs_set_super_bytes_used(&info->super_copy, old_val);
5d4f98a2
YZ
3457 spin_unlock(&info->delalloc_lock);
3458
d397712b 3459 while (total) {
db94535d 3460 cache = btrfs_lookup_block_group(info, bytenr);
f3465ca4 3461 if (!cache)
9078a3e1 3462 return -1;
db94535d
CM
3463 byte_in_group = bytenr - cache->key.objectid;
3464 WARN_ON(byte_in_group > cache->key.offset);
9078a3e1 3465
25179201 3466 spin_lock(&cache->space_info->lock);
c286ac48 3467 spin_lock(&cache->lock);
0f9dd46c 3468 cache->dirty = 1;
9078a3e1 3469 old_val = btrfs_block_group_used(&cache->item);
db94535d 3470 num_bytes = min(total, cache->key.offset - byte_in_group);
cd1bc465 3471 if (alloc) {
db94535d 3472 old_val += num_bytes;
11833d66
YZ
3473 btrfs_set_block_group_used(&cache->item, old_val);
3474 cache->reserved -= num_bytes;
6324fbf3 3475 cache->space_info->bytes_used += num_bytes;
11833d66 3476 cache->space_info->bytes_reserved -= num_bytes;
a512bbf8 3477 if (cache->ro)
c146afad 3478 cache->space_info->bytes_readonly -= num_bytes;
c286ac48 3479 spin_unlock(&cache->lock);
25179201 3480 spin_unlock(&cache->space_info->lock);
cd1bc465 3481 } else {
db94535d 3482 old_val -= num_bytes;
6324fbf3 3483 cache->space_info->bytes_used -= num_bytes;
c146afad
YZ
3484 if (cache->ro)
3485 cache->space_info->bytes_readonly += num_bytes;
c286ac48
CM
3486 btrfs_set_block_group_used(&cache->item, old_val);
3487 spin_unlock(&cache->lock);
25179201 3488 spin_unlock(&cache->space_info->lock);
f510cfec 3489 if (mark_free) {
0f9dd46c 3490 int ret;
1f3c79a2
LH
3491
3492 ret = btrfs_discard_extent(root, bytenr,
3493 num_bytes);
3494 WARN_ON(ret);
3495
0f9dd46c
JB
3496 ret = btrfs_add_free_space(cache, bytenr,
3497 num_bytes);
d2fb3437 3498 WARN_ON(ret);
e37c9e69 3499 }
cd1bc465 3500 }
fa9c0d79 3501 btrfs_put_block_group(cache);
db94535d
CM
3502 total -= num_bytes;
3503 bytenr += num_bytes;
9078a3e1
CM
3504 }
3505 return 0;
3506}
6324fbf3 3507
a061fc8d
CM
3508static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
3509{
0f9dd46c 3510 struct btrfs_block_group_cache *cache;
d2fb3437 3511 u64 bytenr;
0f9dd46c
JB
3512
3513 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
3514 if (!cache)
a061fc8d 3515 return 0;
0f9dd46c 3516
d2fb3437 3517 bytenr = cache->key.objectid;
fa9c0d79 3518 btrfs_put_block_group(cache);
d2fb3437
YZ
3519
3520 return bytenr;
a061fc8d
CM
3521}
3522
11833d66
YZ
3523/*
 3524 * this function must be called within a transaction
3525 */
3526int btrfs_pin_extent(struct btrfs_root *root,
3527 u64 bytenr, u64 num_bytes, int reserved)
324ae4df 3528{
324ae4df 3529 struct btrfs_fs_info *fs_info = root->fs_info;
11833d66 3530 struct btrfs_block_group_cache *cache;
324ae4df 3531
11833d66
YZ
3532 cache = btrfs_lookup_block_group(fs_info, bytenr);
3533 BUG_ON(!cache);
68b38550 3534
11833d66
YZ
3535 spin_lock(&cache->space_info->lock);
3536 spin_lock(&cache->lock);
3537 cache->pinned += num_bytes;
3538 cache->space_info->bytes_pinned += num_bytes;
3539 if (reserved) {
3540 cache->reserved -= num_bytes;
3541 cache->space_info->bytes_reserved -= num_bytes;
3542 }
3543 spin_unlock(&cache->lock);
3544 spin_unlock(&cache->space_info->lock);
68b38550 3545
11833d66 3546 btrfs_put_block_group(cache);
68b38550 3547
11833d66
YZ
3548 set_extent_dirty(fs_info->pinned_extents,
3549 bytenr, bytenr + num_bytes - 1, GFP_NOFS);
3550 return 0;
3551}
3552
3553static int update_reserved_extents(struct btrfs_block_group_cache *cache,
3554 u64 num_bytes, int reserve)
3555{
3556 spin_lock(&cache->space_info->lock);
3557 spin_lock(&cache->lock);
3558 if (reserve) {
3559 cache->reserved += num_bytes;
3560 cache->space_info->bytes_reserved += num_bytes;
3561 } else {
3562 cache->reserved -= num_bytes;
3563 cache->space_info->bytes_reserved -= num_bytes;
324ae4df 3564 }
11833d66
YZ
3565 spin_unlock(&cache->lock);
3566 spin_unlock(&cache->space_info->lock);
324ae4df
Y
3567 return 0;
3568}
9078a3e1 3569
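/*
 * before the extents are committed: remember how far each still-caching
 * block group has progressed (last_byte_to_unpin) and switch
 * pinned_extents to the other freed_extents tree, so extents pinned
 * from now on are kept separate from the ones about to be unpinned
 */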
11833d66
YZ
3570int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
3571 struct btrfs_root *root)
e8569813 3572{
e8569813 3573 struct btrfs_fs_info *fs_info = root->fs_info;
11833d66
YZ
3574 struct btrfs_caching_control *next;
3575 struct btrfs_caching_control *caching_ctl;
3576 struct btrfs_block_group_cache *cache;
e8569813 3577
11833d66 3578 down_write(&fs_info->extent_commit_sem);
25179201 3579
11833d66
YZ
3580 list_for_each_entry_safe(caching_ctl, next,
3581 &fs_info->caching_block_groups, list) {
3582 cache = caching_ctl->block_group;
3583 if (block_group_cache_done(cache)) {
3584 cache->last_byte_to_unpin = (u64)-1;
3585 list_del_init(&caching_ctl->list);
3586 put_caching_control(caching_ctl);
e8569813 3587 } else {
11833d66 3588 cache->last_byte_to_unpin = caching_ctl->progress;
e8569813 3589 }
e8569813 3590 }
11833d66
YZ
3591
3592 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3593 fs_info->pinned_extents = &fs_info->freed_extents[1];
3594 else
3595 fs_info->pinned_extents = &fs_info->freed_extents[0];
3596
3597 up_write(&fs_info->extent_commit_sem);
e8569813
ZY
3598 return 0;
3599}
3600
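/*
 * walk [start, end], find the block group covering each part of the
 * range, return the bytes below last_byte_to_unpin to the free space
 * cache and drop the pinned counters
 */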
11833d66 3601static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
ccd467d6 3602{
11833d66
YZ
3603 struct btrfs_fs_info *fs_info = root->fs_info;
3604 struct btrfs_block_group_cache *cache = NULL;
3605 u64 len;
ccd467d6 3606
11833d66
YZ
3607 while (start <= end) {
3608 if (!cache ||
3609 start >= cache->key.objectid + cache->key.offset) {
3610 if (cache)
3611 btrfs_put_block_group(cache);
3612 cache = btrfs_lookup_block_group(fs_info, start);
3613 BUG_ON(!cache);
3614 }
3615
3616 len = cache->key.objectid + cache->key.offset - start;
3617 len = min(len, end + 1 - start);
3618
3619 if (start < cache->last_byte_to_unpin) {
3620 len = min(len, cache->last_byte_to_unpin - start);
3621 btrfs_add_free_space(cache, start, len);
3622 }
3623
3624 spin_lock(&cache->space_info->lock);
3625 spin_lock(&cache->lock);
3626 cache->pinned -= len;
3627 cache->space_info->bytes_pinned -= len;
3628 spin_unlock(&cache->lock);
3629 spin_unlock(&cache->space_info->lock);
817d52f8 3630
11833d66 3631 start += len;
ccd467d6 3632 }
11833d66
YZ
3633
3634 if (cache)
3635 btrfs_put_block_group(cache);
ccd467d6
CM
3636 return 0;
3637}
3638
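/*
 * after the switch done in btrfs_prepare_extent_commit, walk the
 * freed_extents tree filled during the previous transaction: discard
 * each range, clear its dirty bit and unpin it via unpin_extent_range
 */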
3639int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
11833d66 3640 struct btrfs_root *root)
a28ec197 3641{
11833d66
YZ
3642 struct btrfs_fs_info *fs_info = root->fs_info;
3643 struct extent_io_tree *unpin;
1a5bc167
CM
3644 u64 start;
3645 u64 end;
a28ec197 3646 int ret;
a28ec197 3647
11833d66
YZ
3648 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
3649 unpin = &fs_info->freed_extents[1];
3650 else
3651 unpin = &fs_info->freed_extents[0];
3652
d397712b 3653 while (1) {
1a5bc167
CM
3654 ret = find_first_extent_bit(unpin, 0, &start, &end,
3655 EXTENT_DIRTY);
3656 if (ret)
a28ec197 3657 break;
1f3c79a2
LH
3658
3659 ret = btrfs_discard_extent(root, start, end + 1 - start);
3660
1a5bc167 3661 clear_extent_dirty(unpin, start, end, GFP_NOFS);
11833d66 3662 unpin_extent_range(root, start, end);
b9473439 3663 cond_resched();
a28ec197 3664 }
817d52f8 3665
1f3c79a2 3666 return ret;
a28ec197
CM
3667}
3668
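/*
 * pin an extent that is being freed. Metadata blocks that were
 * allocated in the current transaction and never written (and are not
 * log tree blocks) are handed back via must_clean so they can be
 * reused directly instead of pinned.
 */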
31840ae1
ZY
3669static int pin_down_bytes(struct btrfs_trans_handle *trans,
3670 struct btrfs_root *root,
b9473439 3671 struct btrfs_path *path,
11833d66
YZ
3672 u64 bytenr, u64 num_bytes,
3673 int is_data, int reserved,
b9473439 3674 struct extent_buffer **must_clean)
e20d96d6 3675{
1a5bc167 3676 int err = 0;
31840ae1 3677 struct extent_buffer *buf;
8ef97622 3678
31840ae1
ZY
3679 if (is_data)
3680 goto pinit;
3681
444528b3
CM
3682 /*
3683 * discard is sloooow, and so triggering discards on
3684 * individual btree blocks isn't a good plan. Just
3685 * pin everything in discard mode.
3686 */
3687 if (btrfs_test_opt(root, DISCARD))
3688 goto pinit;
3689
31840ae1
ZY
3690 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
3691 if (!buf)
3692 goto pinit;
3693
3694 /* we can reuse a block if it hasn't been written
3695 * and it is from this transaction. We can't
3696 * reuse anything from the tree log root because
3697 * it has tiny sub-transactions.
3698 */
3699 if (btrfs_buffer_uptodate(buf, 0) &&
3700 btrfs_try_tree_lock(buf)) {
3701 u64 header_owner = btrfs_header_owner(buf);
3702 u64 header_transid = btrfs_header_generation(buf);
3703 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
3704 header_transid == trans->transid &&
3705 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
b9473439 3706 *must_clean = buf;
31840ae1 3707 return 1;
8ef97622 3708 }
31840ae1 3709 btrfs_tree_unlock(buf);
f4b9aa8d 3710 }
31840ae1
ZY
3711 free_extent_buffer(buf);
3712pinit:
11833d66
YZ
3713 if (path)
3714 btrfs_set_path_blocking(path);
b9473439 3715 /* unlocks the pinned mutex */
11833d66 3716 btrfs_pin_extent(root, bytenr, num_bytes, reserved);
31840ae1 3717
be744175 3718 BUG_ON(err < 0);
e20d96d6
CM
3719 return 0;
3720}
3721
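/*
 * drop refs_to_drop references from an extent. The matching backref is
 * removed and, once the reference count reaches zero, the extent item
 * itself is deleted, the bytes are pinned or marked free, and the
 * block group counters are updated.
 */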
5d4f98a2
YZ
3722static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
3723 struct btrfs_root *root,
3724 u64 bytenr, u64 num_bytes, u64 parent,
3725 u64 root_objectid, u64 owner_objectid,
3726 u64 owner_offset, int refs_to_drop,
3727 struct btrfs_delayed_extent_op *extent_op)
a28ec197 3728{
e2fa7227 3729 struct btrfs_key key;
5d4f98a2 3730 struct btrfs_path *path;
1261ec42
CM
3731 struct btrfs_fs_info *info = root->fs_info;
3732 struct btrfs_root *extent_root = info->extent_root;
5f39d397 3733 struct extent_buffer *leaf;
5d4f98a2
YZ
3734 struct btrfs_extent_item *ei;
3735 struct btrfs_extent_inline_ref *iref;
a28ec197 3736 int ret;
5d4f98a2 3737 int is_data;
952fccac
CM
3738 int extent_slot = 0;
3739 int found_extent = 0;
3740 int num_to_del = 1;
5d4f98a2
YZ
3741 u32 item_size;
3742 u64 refs;
037e6390 3743
5caf2a00 3744 path = btrfs_alloc_path();
54aa1f4d
CM
3745 if (!path)
3746 return -ENOMEM;
5f26f772 3747
3c12ac72 3748 path->reada = 1;
b9473439 3749 path->leave_spinning = 1;
5d4f98a2
YZ
3750
3751 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
3752 BUG_ON(!is_data && refs_to_drop != 1);
3753
3754 ret = lookup_extent_backref(trans, extent_root, path, &iref,
3755 bytenr, num_bytes, parent,
3756 root_objectid, owner_objectid,
3757 owner_offset);
7bb86316 3758 if (ret == 0) {
952fccac 3759 extent_slot = path->slots[0];
5d4f98a2
YZ
3760 while (extent_slot >= 0) {
3761 btrfs_item_key_to_cpu(path->nodes[0], &key,
952fccac 3762 extent_slot);
5d4f98a2 3763 if (key.objectid != bytenr)
952fccac 3764 break;
5d4f98a2
YZ
3765 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
3766 key.offset == num_bytes) {
952fccac
CM
3767 found_extent = 1;
3768 break;
3769 }
3770 if (path->slots[0] - extent_slot > 5)
3771 break;
5d4f98a2 3772 extent_slot--;
952fccac 3773 }
5d4f98a2
YZ
3774#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3775 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
3776 if (found_extent && item_size < sizeof(*ei))
3777 found_extent = 0;
3778#endif
31840ae1 3779 if (!found_extent) {
5d4f98a2 3780 BUG_ON(iref);
56bec294 3781 ret = remove_extent_backref(trans, extent_root, path,
5d4f98a2
YZ
3782 NULL, refs_to_drop,
3783 is_data);
31840ae1
ZY
3784 BUG_ON(ret);
3785 btrfs_release_path(extent_root, path);
b9473439 3786 path->leave_spinning = 1;
5d4f98a2
YZ
3787
3788 key.objectid = bytenr;
3789 key.type = BTRFS_EXTENT_ITEM_KEY;
3790 key.offset = num_bytes;
3791
31840ae1
ZY
3792 ret = btrfs_search_slot(trans, extent_root,
3793 &key, path, -1, 1);
f3465ca4
JB
3794 if (ret) {
3795 printk(KERN_ERR "umm, got %d back from search"
d397712b
CM
3796 ", was looking for %llu\n", ret,
3797 (unsigned long long)bytenr);
f3465ca4
JB
3798 btrfs_print_leaf(extent_root, path->nodes[0]);
3799 }
31840ae1
ZY
3800 BUG_ON(ret);
3801 extent_slot = path->slots[0];
3802 }
7bb86316
CM
3803 } else {
3804 btrfs_print_leaf(extent_root, path->nodes[0]);
3805 WARN_ON(1);
d397712b 3806 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
5d4f98a2 3807 "parent %llu root %llu owner %llu offset %llu\n",
d397712b 3808 (unsigned long long)bytenr,
56bec294 3809 (unsigned long long)parent,
d397712b 3810 (unsigned long long)root_objectid,
5d4f98a2
YZ
3811 (unsigned long long)owner_objectid,
3812 (unsigned long long)owner_offset);
7bb86316 3813 }
5f39d397
CM
3814
3815 leaf = path->nodes[0];
5d4f98a2
YZ
3816 item_size = btrfs_item_size_nr(leaf, extent_slot);
3817#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3818 if (item_size < sizeof(*ei)) {
3819 BUG_ON(found_extent || extent_slot != path->slots[0]);
3820 ret = convert_extent_item_v0(trans, extent_root, path,
3821 owner_objectid, 0);
3822 BUG_ON(ret < 0);
3823
3824 btrfs_release_path(extent_root, path);
3825 path->leave_spinning = 1;
3826
3827 key.objectid = bytenr;
3828 key.type = BTRFS_EXTENT_ITEM_KEY;
3829 key.offset = num_bytes;
3830
3831 ret = btrfs_search_slot(trans, extent_root, &key, path,
3832 -1, 1);
3833 if (ret) {
3834 printk(KERN_ERR "umm, got %d back from search"
3835 ", was looking for %llu\n", ret,
3836 (unsigned long long)bytenr);
3837 btrfs_print_leaf(extent_root, path->nodes[0]);
3838 }
3839 BUG_ON(ret);
3840 extent_slot = path->slots[0];
3841 leaf = path->nodes[0];
3842 item_size = btrfs_item_size_nr(leaf, extent_slot);
3843 }
3844#endif
3845 BUG_ON(item_size < sizeof(*ei));
952fccac 3846 ei = btrfs_item_ptr(leaf, extent_slot,
123abc88 3847 struct btrfs_extent_item);
5d4f98a2
YZ
3848 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
3849 struct btrfs_tree_block_info *bi;
3850 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
3851 bi = (struct btrfs_tree_block_info *)(ei + 1);
3852 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
3853 }
56bec294 3854
5d4f98a2 3855 refs = btrfs_extent_refs(leaf, ei);
56bec294
CM
3856 BUG_ON(refs < refs_to_drop);
3857 refs -= refs_to_drop;
5f39d397 3858
5d4f98a2
YZ
3859 if (refs > 0) {
3860 if (extent_op)
3861 __run_delayed_extent_op(extent_op, leaf, ei);
3862 /*
3863 * In the case of inline back ref, reference count will
3864 * be updated by remove_extent_backref
952fccac 3865 */
5d4f98a2
YZ
3866 if (iref) {
3867 BUG_ON(!found_extent);
3868 } else {
3869 btrfs_set_extent_refs(leaf, ei, refs);
3870 btrfs_mark_buffer_dirty(leaf);
3871 }
3872 if (found_extent) {
3873 ret = remove_extent_backref(trans, extent_root, path,
3874 iref, refs_to_drop,
3875 is_data);
952fccac
CM
3876 BUG_ON(ret);
3877 }
5d4f98a2
YZ
3878 } else {
3879 int mark_free = 0;
b9473439 3880 struct extent_buffer *must_clean = NULL;
78fae27e 3881
5d4f98a2
YZ
3882 if (found_extent) {
3883 BUG_ON(is_data && refs_to_drop !=
3884 extent_data_ref_count(root, path, iref));
3885 if (iref) {
3886 BUG_ON(path->slots[0] != extent_slot);
3887 } else {
3888 BUG_ON(path->slots[0] != extent_slot + 1);
3889 path->slots[0] = extent_slot;
3890 num_to_del = 2;
3891 }
78fae27e 3892 }
b9473439 3893
5d4f98a2 3894 ret = pin_down_bytes(trans, root, path, bytenr,
11833d66 3895 num_bytes, is_data, 0, &must_clean);
5d4f98a2
YZ
3896 if (ret > 0)
3897 mark_free = 1;
3898 BUG_ON(ret < 0);
b9473439
CM
3899 /*
3900 * it is going to be very rare for someone to be waiting
3901 * on the block we're freeing. del_items might need to
3902 * schedule, so rather than get fancy, just force it
3903 * to blocking here
3904 */
3905 if (must_clean)
3906 btrfs_set_lock_blocking(must_clean);
3907
952fccac
CM
3908 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
3909 num_to_del);
31840ae1 3910 BUG_ON(ret);
25179201 3911 btrfs_release_path(extent_root, path);
21af804c 3912
b9473439
CM
3913 if (must_clean) {
3914 clean_tree_block(NULL, root, must_clean);
3915 btrfs_tree_unlock(must_clean);
3916 free_extent_buffer(must_clean);
3917 }
3918
5d4f98a2 3919 if (is_data) {
459931ec
CM
3920 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
3921 BUG_ON(ret);
d57e62b8
CM
3922 } else {
3923 invalidate_mapping_pages(info->btree_inode->i_mapping,
3924 bytenr >> PAGE_CACHE_SHIFT,
3925 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
459931ec
CM
3926 }
3927
dcbdd4dc
CM
3928 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
3929 mark_free);
3930 BUG_ON(ret);
a28ec197 3931 }
5caf2a00 3932 btrfs_free_path(path);
a28ec197
CM
3933 return ret;
3934}
3935
1887be66
CM
3936/*
3937 * when we free an extent, it is possible (and likely) that we free the last
3938 * delayed ref for that extent as well. This searches the delayed ref tree for
3939 * a given extent, and if there are no other delayed refs to be processed, it
3940 * removes it from the tree.
3941 */
3942static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
3943 struct btrfs_root *root, u64 bytenr)
3944{
3945 struct btrfs_delayed_ref_head *head;
3946 struct btrfs_delayed_ref_root *delayed_refs;
3947 struct btrfs_delayed_ref_node *ref;
3948 struct rb_node *node;
3949 int ret;
3950
3951 delayed_refs = &trans->transaction->delayed_refs;
3952 spin_lock(&delayed_refs->lock);
3953 head = btrfs_find_delayed_ref_head(trans, bytenr);
3954 if (!head)
3955 goto out;
3956
3957 node = rb_prev(&head->node.rb_node);
3958 if (!node)
3959 goto out;
3960
3961 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3962
3963 /* there are still entries for this ref, we can't drop it */
3964 if (ref->bytenr == bytenr)
3965 goto out;
3966
5d4f98a2
YZ
3967 if (head->extent_op) {
3968 if (!head->must_insert_reserved)
3969 goto out;
3970 kfree(head->extent_op);
3971 head->extent_op = NULL;
3972 }
3973
1887be66
CM
3974 /*
3975 * waiting for the lock here would deadlock. If someone else has it
 3976 * locked, they are already in the process of dropping it anyway
3977 */
3978 if (!mutex_trylock(&head->mutex))
3979 goto out;
3980
3981 /*
3982 * at this point we have a head with no other entries. Go
3983 * ahead and process it.
3984 */
3985 head->node.in_tree = 0;
3986 rb_erase(&head->node.rb_node, &delayed_refs->root);
c3e69d58 3987
1887be66
CM
3988 delayed_refs->num_entries--;
3989
3990 /*
3991 * we don't take a ref on the node because we're removing it from the
3992 * tree, so we just steal the ref the tree was holding.
3993 */
c3e69d58
CM
3994 delayed_refs->num_heads--;
3995 if (list_empty(&head->cluster))
3996 delayed_refs->num_heads_ready--;
3997
3998 list_del_init(&head->cluster);
1887be66
CM
3999 spin_unlock(&delayed_refs->lock);
4000
4001 ret = run_one_delayed_ref(trans, root->fs_info->tree_root,
5d4f98a2
YZ
4002 &head->node, head->extent_op,
4003 head->must_insert_reserved);
1887be66
CM
4004 BUG_ON(ret);
4005 btrfs_put_delayed_ref(&head->node);
4006 return 0;
4007out:
4008 spin_unlock(&delayed_refs->lock);
4009 return 0;
4010}
4011
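/*
 * main entry point for freeing an extent: log tree blocks are pinned
 * directly, everything else is queued as a delayed ref and processed
 * later
 */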
925baedd 4012int btrfs_free_extent(struct btrfs_trans_handle *trans,
31840ae1
ZY
4013 struct btrfs_root *root,
4014 u64 bytenr, u64 num_bytes, u64 parent,
5d4f98a2 4015 u64 root_objectid, u64 owner, u64 offset)
925baedd
CM
4016{
4017 int ret;
4018
56bec294
CM
4019 /*
4020 * tree log blocks never actually go into the extent allocation
4021 * tree, just update pinning info and exit early.
56bec294 4022 */
5d4f98a2
YZ
4023 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4024 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
b9473439 4025 /* unlocks the pinned mutex */
11833d66 4026 btrfs_pin_extent(root, bytenr, num_bytes, 1);
56bec294 4027 ret = 0;
5d4f98a2
YZ
4028 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4029 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4030 parent, root_objectid, (int)owner,
4031 BTRFS_DROP_DELAYED_REF, NULL);
1887be66
CM
4032 BUG_ON(ret);
4033 ret = check_ref_cleanup(trans, root, bytenr);
4034 BUG_ON(ret);
5d4f98a2
YZ
4035 } else {
4036 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4037 parent, root_objectid, owner,
4038 offset, BTRFS_DROP_DELAYED_REF, NULL);
4039 BUG_ON(ret);
56bec294 4040 }
925baedd
CM
4041 return ret;
4042}
4043
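/*
 * free a tree block and subtract its size from the per-root used
 * space accounting
 */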
86b9f2ec
YZ
4044int btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4045 struct btrfs_root *root,
4046 u64 bytenr, u32 blocksize,
4047 u64 parent, u64 root_objectid, int level)
4048{
4049 u64 used;
4050 spin_lock(&root->node_lock);
4051 used = btrfs_root_used(&root->root_item) - blocksize;
4052 btrfs_set_root_used(&root->root_item, used);
4053 spin_unlock(&root->node_lock);
4054
4055 return btrfs_free_extent(trans, root, bytenr, blocksize,
4056 parent, root_objectid, level, 0);
4057}
4058
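/*
 * round val up to the next stripe boundary, e.g. with a 64K stripesize
 * a val of 65 * 1024 is rounded up to 128K
 */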
87ee04eb
CM
4059static u64 stripe_align(struct btrfs_root *root, u64 val)
4060{
4061 u64 mask = ((u64)root->stripesize - 1);
4062 u64 ret = (val + mask) & ~mask;
4063 return ret;
4064}
4065
817d52f8
JB
4066/*
 4067 * when we wait for progress in the block group caching, it's because
4068 * our allocation attempt failed at least once. So, we must sleep
4069 * and let some progress happen before we try again.
4070 *
4071 * This function will sleep at least once waiting for new free space to
4072 * show up, and then it will check the block group free space numbers
4073 * for our min num_bytes. Another option is to have it go ahead
4074 * and look in the rbtree for a free extent of a given size, but this
4075 * is a good start.
4076 */
4077static noinline int
4078wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4079 u64 num_bytes)
4080{
11833d66 4081 struct btrfs_caching_control *caching_ctl;
817d52f8
JB
4082 DEFINE_WAIT(wait);
4083
11833d66
YZ
4084 caching_ctl = get_caching_control(cache);
4085 if (!caching_ctl)
817d52f8 4086 return 0;
817d52f8 4087
11833d66 4088 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
817d52f8 4089 (cache->free_space >= num_bytes));
11833d66
YZ
4090
4091 put_caching_control(caching_ctl);
4092 return 0;
4093}
4094
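/* wait until caching for this block group has completely finished */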
4095static noinline int
4096wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4097{
4098 struct btrfs_caching_control *caching_ctl;
4099 DEFINE_WAIT(wait);
4100
4101 caching_ctl = get_caching_control(cache);
4102 if (!caching_ctl)
4103 return 0;
4104
4105 wait_event(caching_ctl->wait, block_group_cache_done(cache));
4106
4107 put_caching_control(caching_ctl);
817d52f8
JB
4108 return 0;
4109}
4110
4111enum btrfs_loop_type {
ccf0e725 4112 LOOP_FIND_IDEAL = 0,
817d52f8
JB
4113 LOOP_CACHING_NOWAIT = 1,
4114 LOOP_CACHING_WAIT = 2,
4115 LOOP_ALLOC_CHUNK = 3,
4116 LOOP_NO_EMPTY_SIZE = 4,
4117};
4118
fec577fb
CM
4119/*
 4120 * walks the btree of allocated extents and finds a hole of a given size.
4121 * The key ins is changed to record the hole:
4122 * ins->objectid == block start
62e2749e 4123 * ins->flags = BTRFS_EXTENT_ITEM_KEY
fec577fb
CM
4124 * ins->offset == number of blocks
4125 * Any available blocks before search_start are skipped.
4126 */
d397712b 4127static noinline int find_free_extent(struct btrfs_trans_handle *trans,
98ed5174
CM
4128 struct btrfs_root *orig_root,
4129 u64 num_bytes, u64 empty_size,
4130 u64 search_start, u64 search_end,
4131 u64 hint_byte, struct btrfs_key *ins,
4132 u64 exclude_start, u64 exclude_nr,
4133 int data)
fec577fb 4134{
80eb234a 4135 int ret = 0;
d397712b 4136 struct btrfs_root *root = orig_root->fs_info->extent_root;
fa9c0d79 4137 struct btrfs_free_cluster *last_ptr = NULL;
80eb234a 4138 struct btrfs_block_group_cache *block_group = NULL;
239b14b3 4139 int empty_cluster = 2 * 1024 * 1024;
0ef3e66b 4140 int allowed_chunk_alloc = 0;
ccf0e725 4141 int done_chunk_alloc = 0;
80eb234a 4142 struct btrfs_space_info *space_info;
fa9c0d79
CM
4143 int last_ptr_loop = 0;
4144 int loop = 0;
817d52f8 4145 bool found_uncached_bg = false;
0a24325e 4146 bool failed_cluster_refill = false;
1cdda9b8 4147 bool failed_alloc = false;
ccf0e725
JB
4148 u64 ideal_cache_percent = 0;
4149 u64 ideal_cache_offset = 0;
fec577fb 4150
db94535d 4151 WARN_ON(num_bytes < root->sectorsize);
b1a4d965 4152 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
80eb234a
JB
4153 ins->objectid = 0;
4154 ins->offset = 0;
b1a4d965 4155
2552d17e
JB
4156 space_info = __find_space_info(root->fs_info, data);
4157
0ef3e66b
CM
4158 if (orig_root->ref_cows || empty_size)
4159 allowed_chunk_alloc = 1;
4160
239b14b3 4161 if (data & BTRFS_BLOCK_GROUP_METADATA) {
fa9c0d79 4162 last_ptr = &root->fs_info->meta_alloc_cluster;
536ac8ae
CM
4163 if (!btrfs_test_opt(root, SSD))
4164 empty_cluster = 64 * 1024;
239b14b3
CM
4165 }
4166
fa9c0d79
CM
4167 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD)) {
4168 last_ptr = &root->fs_info->data_alloc_cluster;
4169 }
0f9dd46c 4170
239b14b3 4171 if (last_ptr) {
fa9c0d79
CM
4172 spin_lock(&last_ptr->lock);
4173 if (last_ptr->block_group)
4174 hint_byte = last_ptr->window_start;
4175 spin_unlock(&last_ptr->lock);
239b14b3 4176 }
fa9c0d79 4177
a061fc8d 4178 search_start = max(search_start, first_logical_byte(root, 0));
239b14b3 4179 search_start = max(search_start, hint_byte);
0b86a832 4180
817d52f8 4181 if (!last_ptr)
fa9c0d79 4182 empty_cluster = 0;
fa9c0d79 4183
2552d17e 4184 if (search_start == hint_byte) {
ccf0e725 4185ideal_cache:
2552d17e
JB
4186 block_group = btrfs_lookup_block_group(root->fs_info,
4187 search_start);
817d52f8
JB
4188 /*
4189 * we don't want to use the block group if it doesn't match our
 4190 * allocation bits, or if it's not cached.
ccf0e725
JB
4191 *
4192 * However if we are re-searching with an ideal block group
4193 * picked out then we don't care that the block group is cached.
817d52f8
JB
4194 */
4195 if (block_group && block_group_bits(block_group, data) &&
ccf0e725
JB
4196 (block_group->cached != BTRFS_CACHE_NO ||
4197 search_start == ideal_cache_offset)) {
2552d17e 4198 down_read(&space_info->groups_sem);
44fb5511
CM
4199 if (list_empty(&block_group->list) ||
4200 block_group->ro) {
4201 /*
4202 * someone is removing this block group,
4203 * we can't jump into the have_block_group
4204 * target because our list pointers are not
4205 * valid
4206 */
4207 btrfs_put_block_group(block_group);
4208 up_read(&space_info->groups_sem);
ccf0e725 4209 } else {
44fb5511 4210 goto have_block_group;
ccf0e725 4211 }
2552d17e 4212 } else if (block_group) {
fa9c0d79 4213 btrfs_put_block_group(block_group);
2552d17e 4214 }
42e70e7a 4215 }
2552d17e 4216search:
80eb234a 4217 down_read(&space_info->groups_sem);
2552d17e 4218 list_for_each_entry(block_group, &space_info->block_groups, list) {
6226cb0a 4219 u64 offset;
817d52f8 4220 int cached;
8a1413a2 4221
2552d17e
JB
4222 atomic_inc(&block_group->count);
4223 search_start = block_group->key.objectid;
42e70e7a 4224
2552d17e 4225have_block_group:
817d52f8 4226 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
ccf0e725
JB
4227 u64 free_percent;
4228
4229 free_percent = btrfs_block_group_used(&block_group->item);
4230 free_percent *= 100;
4231 free_percent = div64_u64(free_percent,
4232 block_group->key.offset);
4233 free_percent = 100 - free_percent;
4234 if (free_percent > ideal_cache_percent &&
4235 likely(!block_group->ro)) {
4236 ideal_cache_offset = block_group->key.objectid;
4237 ideal_cache_percent = free_percent;
4238 }
4239
817d52f8 4240 /*
ccf0e725
JB
4241 * We only want to start kthread caching if we are at
4242 * the point where we will wait for caching to make
4243 * progress, or if our ideal search is over and we've
4244 * found somebody to start caching.
817d52f8
JB
4245 */
4246 if (loop > LOOP_CACHING_NOWAIT ||
ccf0e725
JB
4247 (loop > LOOP_FIND_IDEAL &&
4248 atomic_read(&space_info->caching_threads) < 2)) {
817d52f8
JB
4249 ret = cache_block_group(block_group);
4250 BUG_ON(ret);
2552d17e 4251 }
817d52f8
JB
4252 found_uncached_bg = true;
4253
ccf0e725
JB
4254 /*
4255 * If loop is set for cached only, try the next block
4256 * group.
4257 */
4258 if (loop == LOOP_FIND_IDEAL)
817d52f8
JB
4259 goto loop;
4260 }
4261
ccf0e725
JB
4262 cached = block_group_cache_done(block_group);
4263 if (unlikely(!cached))
4264 found_uncached_bg = true;
4265
ea6a478e 4266 if (unlikely(block_group->ro))
2552d17e 4267 goto loop;
0f9dd46c 4268
0a24325e
JB
4269 /*
 4270 * Ok we want to try and use the cluster allocator, so let's look
4271 * there, unless we are on LOOP_NO_EMPTY_SIZE, since we will
4272 * have tried the cluster allocator plenty of times at this
4273 * point and not have found anything, so we are likely way too
 4274 * fragmented for the clustering stuff to find anything, so let's
4275 * just skip it and let the allocator find whatever block it can
4276 * find
4277 */
4278 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
fa9c0d79
CM
4279 /*
4280 * the refill lock keeps out other
4281 * people trying to start a new cluster
4282 */
4283 spin_lock(&last_ptr->refill_lock);
44fb5511
CM
4284 if (last_ptr->block_group &&
4285 (last_ptr->block_group->ro ||
4286 !block_group_bits(last_ptr->block_group, data))) {
4287 offset = 0;
4288 goto refill_cluster;
4289 }
4290
fa9c0d79
CM
4291 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
4292 num_bytes, search_start);
4293 if (offset) {
4294 /* we have a block, we're done */
4295 spin_unlock(&last_ptr->refill_lock);
4296 goto checks;
4297 }
4298
4299 spin_lock(&last_ptr->lock);
4300 /*
4301 * whoops, this cluster doesn't actually point to
4302 * this block group. Get a ref on the block
 4303 * group it does point to and try again
4304 */
4305 if (!last_ptr_loop && last_ptr->block_group &&
4306 last_ptr->block_group != block_group) {
4307
4308 btrfs_put_block_group(block_group);
4309 block_group = last_ptr->block_group;
4310 atomic_inc(&block_group->count);
4311 spin_unlock(&last_ptr->lock);
4312 spin_unlock(&last_ptr->refill_lock);
4313
4314 last_ptr_loop = 1;
4315 search_start = block_group->key.objectid;
44fb5511
CM
4316 /*
4317 * we know this block group is properly
4318 * in the list because
 4319 * btrfs_remove_block_group drops the
4320 * cluster before it removes the block
4321 * group from the list
4322 */
fa9c0d79
CM
4323 goto have_block_group;
4324 }
4325 spin_unlock(&last_ptr->lock);
44fb5511 4326refill_cluster:
fa9c0d79
CM
4327 /*
4328 * this cluster didn't work out, free it and
4329 * start over
4330 */
4331 btrfs_return_cluster_to_free_space(NULL, last_ptr);
4332
4333 last_ptr_loop = 0;
4334
4335 /* allocate a cluster in this block group */
451d7585 4336 ret = btrfs_find_space_cluster(trans, root,
fa9c0d79
CM
4337 block_group, last_ptr,
4338 offset, num_bytes,
4339 empty_cluster + empty_size);
4340 if (ret == 0) {
4341 /*
4342 * now pull our allocation out of this
4343 * cluster
4344 */
4345 offset = btrfs_alloc_from_cluster(block_group,
4346 last_ptr, num_bytes,
4347 search_start);
4348 if (offset) {
4349 /* we found one, proceed */
4350 spin_unlock(&last_ptr->refill_lock);
4351 goto checks;
4352 }
0a24325e
JB
4353 } else if (!cached && loop > LOOP_CACHING_NOWAIT
4354 && !failed_cluster_refill) {
817d52f8
JB
4355 spin_unlock(&last_ptr->refill_lock);
4356
0a24325e 4357 failed_cluster_refill = true;
817d52f8
JB
4358 wait_block_group_cache_progress(block_group,
4359 num_bytes + empty_cluster + empty_size);
4360 goto have_block_group;
fa9c0d79 4361 }
817d52f8 4362
fa9c0d79
CM
4363 /*
4364 * at this point we either didn't find a cluster
4365 * or we weren't able to allocate a block from our
4366 * cluster. Free the cluster we've been trying
4367 * to use, and go to the next block group
4368 */
0a24325e 4369 btrfs_return_cluster_to_free_space(NULL, last_ptr);
fa9c0d79 4370 spin_unlock(&last_ptr->refill_lock);
0a24325e 4371 goto loop;
fa9c0d79
CM
4372 }
4373
6226cb0a
JB
4374 offset = btrfs_find_space_for_alloc(block_group, search_start,
4375 num_bytes, empty_size);
1cdda9b8
JB
4376 /*
4377 * If we didn't find a chunk, and we haven't failed on this
4378 * block group before, and this block group is in the middle of
4379 * caching and we are ok with waiting, then go ahead and wait
4380 * for progress to be made, and set failed_alloc to true.
4381 *
4382 * If failed_alloc is true then we've already waited on this
4383 * block group once and should move on to the next block group.
4384 */
4385 if (!offset && !failed_alloc && !cached &&
4386 loop > LOOP_CACHING_NOWAIT) {
817d52f8 4387 wait_block_group_cache_progress(block_group,
1cdda9b8
JB
4388 num_bytes + empty_size);
4389 failed_alloc = true;
817d52f8 4390 goto have_block_group;
1cdda9b8
JB
4391 } else if (!offset) {
4392 goto loop;
817d52f8 4393 }
fa9c0d79 4394checks:
6226cb0a 4395 search_start = stripe_align(root, offset);
2552d17e 4396 /* move on to the next group */
6226cb0a
JB
4397 if (search_start + num_bytes >= search_end) {
4398 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 4399 goto loop;
6226cb0a 4400 }
25179201 4401
2552d17e
JB
4402 /* move on to the next group */
4403 if (search_start + num_bytes >
6226cb0a
JB
4404 block_group->key.objectid + block_group->key.offset) {
4405 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e 4406 goto loop;
6226cb0a 4407 }
f5a31e16 4408
2552d17e
JB
4409 if (exclude_nr > 0 &&
4410 (search_start + num_bytes > exclude_start &&
4411 search_start < exclude_start + exclude_nr)) {
4412 search_start = exclude_start + exclude_nr;
4413
6226cb0a 4414 btrfs_add_free_space(block_group, offset, num_bytes);
2552d17e
JB
4415 /*
4416 * if search_start is still in this block group
4417 * then we just re-search this block group
f5a31e16 4418 */
2552d17e
JB
4419 if (search_start >= block_group->key.objectid &&
4420 search_start < (block_group->key.objectid +
6226cb0a 4421 block_group->key.offset))
2552d17e 4422 goto have_block_group;
2552d17e 4423 goto loop;
0f9dd46c 4424 }
0b86a832 4425
2552d17e
JB
4426 ins->objectid = search_start;
4427 ins->offset = num_bytes;
d2fb3437 4428
6226cb0a
JB
4429 if (offset < search_start)
4430 btrfs_add_free_space(block_group, offset,
4431 search_start - offset);
4432 BUG_ON(offset > search_start);
4433
11833d66
YZ
4434 update_reserved_extents(block_group, num_bytes, 1);
4435
2552d17e 4436 /* we are all good, lets return */
2552d17e
JB
4437 break;
4438loop:
0a24325e 4439 failed_cluster_refill = false;
1cdda9b8 4440 failed_alloc = false;
fa9c0d79 4441 btrfs_put_block_group(block_group);
2552d17e
JB
4442 }
4443 up_read(&space_info->groups_sem);
4444
ccf0e725
JB
4445 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
 4446 * for them to make caching progress. Also
4447 * determine the best possible bg to cache
4448 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
4449 * caching kthreads as we move along
817d52f8
JB
4450 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
4451 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
4452 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
4453 * again
fa9c0d79 4454 */
817d52f8
JB
4455 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
4456 (found_uncached_bg || empty_size || empty_cluster ||
4457 allowed_chunk_alloc)) {
ccf0e725 4458 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
817d52f8 4459 found_uncached_bg = false;
ccf0e725
JB
4460 loop++;
4461 if (!ideal_cache_percent &&
4462 atomic_read(&space_info->caching_threads))
817d52f8 4463 goto search;
ccf0e725
JB
4464
4465 /*
 4466 * 1 of the following 2 things has happened so far
4467 *
4468 * 1) We found an ideal block group for caching that
4469 * is mostly full and will cache quickly, so we might
4470 * as well wait for it.
4471 *
4472 * 2) We searched for cached only and we didn't find
4473 * anything, and we didn't start any caching kthreads
4474 * either, so chances are we will loop through and
4475 * start a couple caching kthreads, and then come back
4476 * around and just wait for them. This will be slower
4477 * because we will have 2 caching kthreads reading at
4478 * the same time when we could have just started one
4479 * and waited for it to get far enough to give us an
4480 * allocation, so go ahead and go to the wait caching
4481 * loop.
4482 */
4483 loop = LOOP_CACHING_WAIT;
4484 search_start = ideal_cache_offset;
4485 ideal_cache_percent = 0;
4486 goto ideal_cache;
4487 } else if (loop == LOOP_FIND_IDEAL) {
4488 /*
 4489 * Didn't find an uncached bg, wait on anything we find
4490 * next.
4491 */
4492 loop = LOOP_CACHING_WAIT;
4493 goto search;
4494 }
4495
4496 if (loop < LOOP_CACHING_WAIT) {
4497 loop++;
4498 goto search;
817d52f8
JB
4499 }
4500
4501 if (loop == LOOP_ALLOC_CHUNK) {
fa9c0d79
CM
4502 empty_size = 0;
4503 empty_cluster = 0;
4504 }
2552d17e
JB
4505
4506 if (allowed_chunk_alloc) {
4507 ret = do_chunk_alloc(trans, root, num_bytes +
4508 2 * 1024 * 1024, data, 1);
2552d17e 4509 allowed_chunk_alloc = 0;
ccf0e725
JB
4510 done_chunk_alloc = 1;
4511 } else if (!done_chunk_alloc) {
2552d17e
JB
4512 space_info->force_alloc = 1;
4513 }
4514
817d52f8 4515 if (loop < LOOP_NO_EMPTY_SIZE) {
fa9c0d79 4516 loop++;
2552d17e 4517 goto search;
fa9c0d79 4518 }
2552d17e
JB
4519 ret = -ENOSPC;
4520 } else if (!ins->objectid) {
4521 ret = -ENOSPC;
f2654de4 4522 }
0b86a832 4523
80eb234a
JB
4524 /* we found what we needed */
4525 if (ins->objectid) {
4526 if (!(data & BTRFS_BLOCK_GROUP_DATA))
d2fb3437 4527 trans->block_group = block_group->key.objectid;
0f9dd46c 4528
fa9c0d79 4529 btrfs_put_block_group(block_group);
80eb234a 4530 ret = 0;
be744175 4531 }
be744175 4532
0f70abe2 4533 return ret;
fec577fb 4534}
ec44a35c 4535
9ed74f2d
JB
4536static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
4537 int dump_block_groups)
0f9dd46c
JB
4538{
4539 struct btrfs_block_group_cache *cache;
0f9dd46c 4540
9ed74f2d 4541 spin_lock(&info->lock);
d397712b
CM
4542 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
4543 (unsigned long long)(info->total_bytes - info->bytes_used -
9ed74f2d
JB
4544 info->bytes_pinned - info->bytes_reserved -
4545 info->bytes_super),
d397712b 4546 (info->full) ? "" : "not ");
6a63209f 4547 printk(KERN_INFO "space_info total=%llu, pinned=%llu, delalloc=%llu,"
9ed74f2d
JB
4548 " may_use=%llu, used=%llu, root=%llu, super=%llu, reserved=%llu"
4549 "\n",
21380931
JB
4550 (unsigned long long)info->total_bytes,
4551 (unsigned long long)info->bytes_pinned,
4552 (unsigned long long)info->bytes_delalloc,
4553 (unsigned long long)info->bytes_may_use,
9ed74f2d
JB
4554 (unsigned long long)info->bytes_used,
4555 (unsigned long long)info->bytes_root,
4556 (unsigned long long)info->bytes_super,
4557 (unsigned long long)info->bytes_reserved);
4558 spin_unlock(&info->lock);
4559
4560 if (!dump_block_groups)
4561 return;
0f9dd46c 4562
80eb234a 4563 down_read(&info->groups_sem);
c6e30871 4564 list_for_each_entry(cache, &info->block_groups, list) {
0f9dd46c 4565 spin_lock(&cache->lock);
d397712b
CM
4566 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
4567 "%llu pinned %llu reserved\n",
4568 (unsigned long long)cache->key.objectid,
4569 (unsigned long long)cache->key.offset,
4570 (unsigned long long)btrfs_block_group_used(&cache->item),
4571 (unsigned long long)cache->pinned,
4572 (unsigned long long)cache->reserved);
0f9dd46c
JB
4573 btrfs_dump_free_space(cache, bytes);
4574 spin_unlock(&cache->lock);
4575 }
80eb234a 4576 up_read(&info->groups_sem);
0f9dd46c 4577}
e8569813 4578
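/*
 * top level extent allocation entry: optionally pre-allocates chunks,
 * then calls find_free_extent. On ENOSPC the request is halved (down
 * to min_alloc_size) and retried; if it still fails, the space info
 * is dumped.
 */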
11833d66
YZ
4579int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
4580 struct btrfs_root *root,
4581 u64 num_bytes, u64 min_alloc_size,
4582 u64 empty_size, u64 hint_byte,
4583 u64 search_end, struct btrfs_key *ins,
4584 u64 data)
fec577fb
CM
4585{
4586 int ret;
fbdc762b 4587 u64 search_start = 0;
1261ec42 4588 struct btrfs_fs_info *info = root->fs_info;
925baedd 4589
6a63209f 4590 data = btrfs_get_alloc_profile(root, data);
98d20f67 4591again:
0ef3e66b
CM
4592 /*
4593 * the only place that sets empty_size is btrfs_realloc_node, which
4594 * is not called recursively on allocations
4595 */
4596 if (empty_size || root->ref_cows) {
593060d7 4597 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
6324fbf3 4598 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
0ef3e66b
CM
4599 2 * 1024 * 1024,
4600 BTRFS_BLOCK_GROUP_METADATA |
4601 (info->metadata_alloc_profile &
4602 info->avail_metadata_alloc_bits), 0);
6324fbf3
CM
4603 }
4604 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
0ef3e66b 4605 num_bytes + 2 * 1024 * 1024, data, 0);
6324fbf3 4606 }
0b86a832 4607
db94535d
CM
4608 WARN_ON(num_bytes < root->sectorsize);
4609 ret = find_free_extent(trans, root, num_bytes, empty_size,
4610 search_start, search_end, hint_byte, ins,
26b8003f
CM
4611 trans->alloc_exclude_start,
4612 trans->alloc_exclude_nr, data);
3b951516 4613
98d20f67
CM
4614 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
4615 num_bytes = num_bytes >> 1;
0f9dd46c 4616 num_bytes = num_bytes & ~(root->sectorsize - 1);
98d20f67 4617 num_bytes = max(num_bytes, min_alloc_size);
0ef3e66b
CM
4618 do_chunk_alloc(trans, root->fs_info->extent_root,
4619 num_bytes, data, 1);
98d20f67
CM
4620 goto again;
4621 }
817d52f8 4622 if (ret == -ENOSPC) {
0f9dd46c
JB
4623 struct btrfs_space_info *sinfo;
4624
4625 sinfo = __find_space_info(root->fs_info, data);
d397712b
CM
4626 printk(KERN_ERR "btrfs allocation failed flags %llu, "
4627 "wanted %llu\n", (unsigned long long)data,
4628 (unsigned long long)num_bytes);
9ed74f2d 4629 dump_space_info(sinfo, num_bytes, 1);
925baedd 4630 }
0f9dd46c
JB
4631
4632 return ret;
e6dcd2dc
CM
4633}
4634
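/*
 * give back an extent that was reserved but never used: discard it,
 * return it to the free space cache and drop the reservation
 */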
65b51a00
CM
4635int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
4636{
0f9dd46c 4637 struct btrfs_block_group_cache *cache;
1f3c79a2 4638 int ret = 0;
0f9dd46c 4639
0f9dd46c
JB
4640 cache = btrfs_lookup_block_group(root->fs_info, start);
4641 if (!cache) {
d397712b
CM
4642 printk(KERN_ERR "Unable to find block group for %llu\n",
4643 (unsigned long long)start);
0f9dd46c
JB
4644 return -ENOSPC;
4645 }
1f3c79a2
LH
4646
4647 ret = btrfs_discard_extent(root, start, len);
4648
0f9dd46c 4649 btrfs_add_free_space(cache, start, len);
11833d66 4650 update_reserved_extents(cache, len, 0);
fa9c0d79 4651 btrfs_put_block_group(cache);
817d52f8 4652
e6dcd2dc
CM
4653 return ret;
4654}
4655
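/*
 * insert the extent item for a newly allocated data extent along with
 * its inline backref (shared or keyed data ref) and update the block
 * group accounting
 */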
5d4f98a2
YZ
4656static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4657 struct btrfs_root *root,
4658 u64 parent, u64 root_objectid,
4659 u64 flags, u64 owner, u64 offset,
4660 struct btrfs_key *ins, int ref_mod)
e6dcd2dc
CM
4661{
4662 int ret;
5d4f98a2 4663 struct btrfs_fs_info *fs_info = root->fs_info;
e6dcd2dc 4664 struct btrfs_extent_item *extent_item;
5d4f98a2 4665 struct btrfs_extent_inline_ref *iref;
e6dcd2dc 4666 struct btrfs_path *path;
5d4f98a2
YZ
4667 struct extent_buffer *leaf;
4668 int type;
4669 u32 size;
26b8003f 4670
5d4f98a2
YZ
4671 if (parent > 0)
4672 type = BTRFS_SHARED_DATA_REF_KEY;
4673 else
4674 type = BTRFS_EXTENT_DATA_REF_KEY;
58176a96 4675
5d4f98a2 4676 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7bb86316
CM
4677
4678 path = btrfs_alloc_path();
4679 BUG_ON(!path);
47e4bb98 4680
b9473439 4681 path->leave_spinning = 1;
5d4f98a2
YZ
4682 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4683 ins, size);
ccd467d6 4684 BUG_ON(ret);
0f9dd46c 4685
5d4f98a2
YZ
4686 leaf = path->nodes[0];
4687 extent_item = btrfs_item_ptr(leaf, path->slots[0],
47e4bb98 4688 struct btrfs_extent_item);
5d4f98a2
YZ
4689 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
4690 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4691 btrfs_set_extent_flags(leaf, extent_item,
4692 flags | BTRFS_EXTENT_FLAG_DATA);
4693
4694 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
4695 btrfs_set_extent_inline_ref_type(leaf, iref, type);
4696 if (parent > 0) {
4697 struct btrfs_shared_data_ref *ref;
4698 ref = (struct btrfs_shared_data_ref *)(iref + 1);
4699 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4700 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
4701 } else {
4702 struct btrfs_extent_data_ref *ref;
4703 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
4704 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
4705 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
4706 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
4707 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
4708 }
47e4bb98
CM
4709
4710 btrfs_mark_buffer_dirty(path->nodes[0]);
7bb86316 4711 btrfs_free_path(path);
f510cfec 4712
5d4f98a2
YZ
4713 ret = update_block_group(trans, root, ins->objectid, ins->offset,
4714 1, 0);
f5947066 4715 if (ret) {
d397712b
CM
4716 printk(KERN_ERR "btrfs update block group failed for %llu "
4717 "%llu\n", (unsigned long long)ins->objectid,
4718 (unsigned long long)ins->offset);
f5947066
CM
4719 BUG();
4720 }
e6dcd2dc
CM
4721 return ret;
4722}
4723
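/*
 * like alloc_reserved_file_extent, but for tree blocks: the extent
 * item carries a tree_block_info plus an inline tree block backref
 */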
5d4f98a2
YZ
4724static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
4725 struct btrfs_root *root,
4726 u64 parent, u64 root_objectid,
4727 u64 flags, struct btrfs_disk_key *key,
4728 int level, struct btrfs_key *ins)
e6dcd2dc
CM
4729{
4730 int ret;
5d4f98a2
YZ
4731 struct btrfs_fs_info *fs_info = root->fs_info;
4732 struct btrfs_extent_item *extent_item;
4733 struct btrfs_tree_block_info *block_info;
4734 struct btrfs_extent_inline_ref *iref;
4735 struct btrfs_path *path;
4736 struct extent_buffer *leaf;
4737 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
1c2308f8 4738
5d4f98a2
YZ
4739 path = btrfs_alloc_path();
4740 BUG_ON(!path);
56bec294 4741
5d4f98a2
YZ
4742 path->leave_spinning = 1;
4743 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
4744 ins, size);
56bec294 4745 BUG_ON(ret);
5d4f98a2
YZ
4746
4747 leaf = path->nodes[0];
4748 extent_item = btrfs_item_ptr(leaf, path->slots[0],
4749 struct btrfs_extent_item);
4750 btrfs_set_extent_refs(leaf, extent_item, 1);
4751 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
4752 btrfs_set_extent_flags(leaf, extent_item,
4753 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
4754 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
4755
4756 btrfs_set_tree_block_key(leaf, block_info, key);
4757 btrfs_set_tree_block_level(leaf, block_info, level);
4758
4759 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
4760 if (parent > 0) {
4761 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
4762 btrfs_set_extent_inline_ref_type(leaf, iref,
4763 BTRFS_SHARED_BLOCK_REF_KEY);
4764 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
4765 } else {
4766 btrfs_set_extent_inline_ref_type(leaf, iref,
4767 BTRFS_TREE_BLOCK_REF_KEY);
4768 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
4769 }
4770
4771 btrfs_mark_buffer_dirty(leaf);
4772 btrfs_free_path(path);
4773
4774 ret = update_block_group(trans, root, ins->objectid, ins->offset,
4775 1, 0);
4776 if (ret) {
4777 printk(KERN_ERR "btrfs update block group failed for %llu "
4778 "%llu\n", (unsigned long long)ins->objectid,
4779 (unsigned long long)ins->offset);
4780 BUG();
4781 }
4782 return ret;
4783}
4784
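/*
 * record a newly allocated file extent as a delayed data ref; the
 * extent item itself is inserted when the delayed refs are run
 */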
4785int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
4786 struct btrfs_root *root,
4787 u64 root_objectid, u64 owner,
4788 u64 offset, struct btrfs_key *ins)
4789{
4790 int ret;
4791
4792 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
4793
4794 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
4795 0, root_objectid, owner, offset,
4796 BTRFS_ADD_DELAYED_EXTENT, NULL);
e6dcd2dc
CM
4797 return ret;
4798}
e02119d5
CM
4799
4800/*
4801 * this is used by the tree logging recovery code. It records that
4802 * an extent has been allocated and makes sure to clear the free
4803 * space cache bits as well
4804 */
5d4f98a2
YZ
4805int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
4806 struct btrfs_root *root,
4807 u64 root_objectid, u64 owner, u64 offset,
4808 struct btrfs_key *ins)
e02119d5
CM
4809{
4810 int ret;
4811 struct btrfs_block_group_cache *block_group;
11833d66
YZ
4812 struct btrfs_caching_control *caching_ctl;
4813 u64 start = ins->objectid;
4814 u64 num_bytes = ins->offset;
e02119d5 4815
e02119d5 4816 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
817d52f8 4817 cache_block_group(block_group);
11833d66 4818 caching_ctl = get_caching_control(block_group);
e02119d5 4819
11833d66
YZ
4820 if (!caching_ctl) {
4821 BUG_ON(!block_group_cache_done(block_group));
4822 ret = btrfs_remove_free_space(block_group, start, num_bytes);
4823 BUG_ON(ret);
4824 } else {
4825 mutex_lock(&caching_ctl->mutex);
4826
4827 if (start >= caching_ctl->progress) {
4828 ret = add_excluded_extent(root, start, num_bytes);
4829 BUG_ON(ret);
4830 } else if (start + num_bytes <= caching_ctl->progress) {
4831 ret = btrfs_remove_free_space(block_group,
4832 start, num_bytes);
4833 BUG_ON(ret);
4834 } else {
4835 num_bytes = caching_ctl->progress - start;
4836 ret = btrfs_remove_free_space(block_group,
4837 start, num_bytes);
4838 BUG_ON(ret);
4839
4840 start = caching_ctl->progress;
4841 num_bytes = ins->objectid + ins->offset -
4842 caching_ctl->progress;
4843 ret = add_excluded_extent(root, start, num_bytes);
4844 BUG_ON(ret);
4845 }
4846
4847 mutex_unlock(&caching_ctl->mutex);
4848 put_caching_control(caching_ctl);
4849 }
4850
4851 update_reserved_extents(block_group, ins->offset, 1);
fa9c0d79 4852 btrfs_put_block_group(block_group);
5d4f98a2
YZ
4853 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
4854 0, owner, offset, ins, 1);
e02119d5
CM
4855 return ret;
4856}
4857
e6dcd2dc
CM
4858/*
4859 * finds a free extent and does all the dirty work required for allocation
 4860 * returns the key for the extent through ins; the caller creates the
 4861 * buffer for the new block via btrfs_init_new_buffer.
4862 *
4863 * returns 0 if everything worked, non-zero otherwise.
4864 */
5d4f98a2
YZ
4865static int alloc_tree_block(struct btrfs_trans_handle *trans,
4866 struct btrfs_root *root,
4867 u64 num_bytes, u64 parent, u64 root_objectid,
4868 struct btrfs_disk_key *key, int level,
4869 u64 empty_size, u64 hint_byte, u64 search_end,
4870 struct btrfs_key *ins)
e6dcd2dc
CM
4871{
4872 int ret;
5d4f98a2
YZ
4873 u64 flags = 0;
4874
11833d66
YZ
4875 ret = btrfs_reserve_extent(trans, root, num_bytes, num_bytes,
4876 empty_size, hint_byte, search_end,
4877 ins, 0);
817d52f8
JB
4878 if (ret)
4879 return ret;
5d4f98a2
YZ
4880
4881 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
4882 if (parent == 0)
4883 parent = ins->objectid;
4884 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
4885 } else
4886 BUG_ON(parent > 0);
4887
d00aff00 4888 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5d4f98a2
YZ
4889 struct btrfs_delayed_extent_op *extent_op;
4890 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
4891 BUG_ON(!extent_op);
4892 if (key)
4893 memcpy(&extent_op->key, key, sizeof(extent_op->key));
4894 else
4895 memset(&extent_op->key, 0, sizeof(extent_op->key));
4896 extent_op->flags_to_set = flags;
4897 extent_op->update_key = 1;
4898 extent_op->update_flags = 1;
4899 extent_op->is_data = 0;
4900
4901 ret = btrfs_add_delayed_tree_ref(trans, ins->objectid,
4902 ins->offset, parent, root_objectid,
4903 level, BTRFS_ADD_DELAYED_EXTENT,
4904 extent_op);
d00aff00 4905 BUG_ON(ret);
d00aff00 4906 }
86b9f2ec
YZ
4907
4908 if (root_objectid == root->root_key.objectid) {
4909 u64 used;
4910 spin_lock(&root->node_lock);
4911 used = btrfs_root_used(&root->root_item) + num_bytes;
4912 btrfs_set_root_used(&root->root_item, used);
4913 spin_unlock(&root->node_lock);
4914 }
925baedd 4915 return ret;
fec577fb 4916}
65b51a00
CM
4917
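/*
 * set up a freshly allocated tree block: lock it, clean it, mark it
 * uptodate and add it to the proper dirty pages tree (the log root
 * uses dirty_log_pages, everything else uses the transaction's
 * dirty_pages)
 */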
4918struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
4919 struct btrfs_root *root,
4008c04a
CM
4920 u64 bytenr, u32 blocksize,
4921 int level)
65b51a00
CM
4922{
4923 struct extent_buffer *buf;
4924
4925 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
4926 if (!buf)
4927 return ERR_PTR(-ENOMEM);
4928 btrfs_set_header_generation(buf, trans->transid);
4008c04a 4929 btrfs_set_buffer_lockdep_class(buf, level);
65b51a00
CM
4930 btrfs_tree_lock(buf);
4931 clean_tree_block(trans, root, buf);
b4ce94de
CM
4932
4933 btrfs_set_lock_blocking(buf);
65b51a00 4934 btrfs_set_buffer_uptodate(buf);
b4ce94de 4935
d0c803c4 4936 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
8cef4e16
YZ
4937 /*
4938 * we allow two log transactions at a time, use different
 4939 * EXTENT bit to differentiate dirty pages.
4940 */
4941 if (root->log_transid % 2 == 0)
4942 set_extent_dirty(&root->dirty_log_pages, buf->start,
4943 buf->start + buf->len - 1, GFP_NOFS);
4944 else
4945 set_extent_new(&root->dirty_log_pages, buf->start,
4946 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4
CM
4947 } else {
4948 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
65b51a00 4949 buf->start + buf->len - 1, GFP_NOFS);
d0c803c4 4950 }
65b51a00 4951 trans->blocks_used++;
b4ce94de 4952 /* this returns a buffer locked for blocking */
65b51a00
CM
4953 return buf;
4954}
4955
fec577fb
CM
4956/*
4957 * helper function to allocate a block for a given tree
4958 * returns the tree buffer or NULL.
4959 */
5f39d397 4960struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5d4f98a2
YZ
4961 struct btrfs_root *root, u32 blocksize,
4962 u64 parent, u64 root_objectid,
4963 struct btrfs_disk_key *key, int level,
4964 u64 hint, u64 empty_size)
fec577fb 4965{
e2fa7227 4966 struct btrfs_key ins;
fec577fb 4967 int ret;
5f39d397 4968 struct extent_buffer *buf;
fec577fb 4969
5d4f98a2
YZ
4970 ret = alloc_tree_block(trans, root, blocksize, parent, root_objectid,
4971 key, level, empty_size, hint, (u64)-1, &ins);
fec577fb 4972 if (ret) {
54aa1f4d
CM
4973 BUG_ON(ret > 0);
4974 return ERR_PTR(ret);
fec577fb 4975 }
55c69072 4976
4008c04a
CM
4977 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
4978 blocksize, level);
fec577fb
CM
4979 return buf;
4980}
a28ec197 4981
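/*
 * state carried while walking down and back up a subtree that is being
 * dropped. refs[] and flags[] cache the extent info for each level,
 * and stage is either DROP_REFERENCE or UPDATE_BACKREF.
 */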
2c47e605
YZ
4982struct walk_control {
4983 u64 refs[BTRFS_MAX_LEVEL];
4984 u64 flags[BTRFS_MAX_LEVEL];
4985 struct btrfs_key update_progress;
4986 int stage;
4987 int level;
4988 int shared_level;
4989 int update_ref;
4990 int keep_locks;
1c4850e2
YZ
4991 int reada_slot;
4992 int reada_count;
2c47e605
YZ
4993};
4994
4995#define DROP_REFERENCE 1
4996#define UPDATE_BACKREF 2
4997
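/*
 * readahead for the lower level blocks we are about to walk. The
 * window (wc->reada_count) is shrunk or grown based on how far the
 * walk has advanced, and blocks the walk will skip are not read ahead.
 */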
1c4850e2
YZ
4998static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
4999 struct btrfs_root *root,
5000 struct walk_control *wc,
5001 struct btrfs_path *path)
6407bf6d 5002{
1c4850e2
YZ
5003 u64 bytenr;
5004 u64 generation;
5005 u64 refs;
94fcca9f 5006 u64 flags;
1c4850e2 5007 u64 last = 0;
5d4f98a2 5008 u32 nritems;
1c4850e2
YZ
5009 u32 blocksize;
5010 struct btrfs_key key;
5011 struct extent_buffer *eb;
6407bf6d 5012 int ret;
1c4850e2
YZ
5013 int slot;
5014 int nread = 0;
6407bf6d 5015
1c4850e2
YZ
5016 if (path->slots[wc->level] < wc->reada_slot) {
5017 wc->reada_count = wc->reada_count * 2 / 3;
5018 wc->reada_count = max(wc->reada_count, 2);
5019 } else {
5020 wc->reada_count = wc->reada_count * 3 / 2;
5021 wc->reada_count = min_t(int, wc->reada_count,
5022 BTRFS_NODEPTRS_PER_BLOCK(root));
5023 }
7bb86316 5024
1c4850e2
YZ
5025 eb = path->nodes[wc->level];
5026 nritems = btrfs_header_nritems(eb);
5027 blocksize = btrfs_level_size(root, wc->level - 1);
bd56b302 5028
1c4850e2
YZ
5029 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5030 if (nread >= wc->reada_count)
5031 break;
bd56b302 5032
2dd3e67b 5033 cond_resched();
1c4850e2
YZ
5034 bytenr = btrfs_node_blockptr(eb, slot);
5035 generation = btrfs_node_ptr_generation(eb, slot);
2dd3e67b 5036
1c4850e2
YZ
5037 if (slot == path->slots[wc->level])
5038 goto reada;
5d4f98a2 5039
1c4850e2
YZ
5040 if (wc->stage == UPDATE_BACKREF &&
5041 generation <= root->root_key.offset)
bd56b302
CM
5042 continue;
5043
94fcca9f
YZ
5044 /* We don't lock the tree block, it's OK to be racy here */
5045 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5046 &refs, &flags);
5047 BUG_ON(ret);
5048 BUG_ON(refs == 0);
5049
1c4850e2 5050 if (wc->stage == DROP_REFERENCE) {
1c4850e2
YZ
5051 if (refs == 1)
5052 goto reada;
bd56b302 5053
94fcca9f
YZ
5054 if (wc->level == 1 &&
5055 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5056 continue;
1c4850e2
YZ
5057 if (!wc->update_ref ||
5058 generation <= root->root_key.offset)
5059 continue;
5060 btrfs_node_key_to_cpu(eb, &key, slot);
5061 ret = btrfs_comp_cpu_keys(&key,
5062 &wc->update_progress);
5063 if (ret < 0)
5064 continue;
94fcca9f
YZ
5065 } else {
5066 if (wc->level == 1 &&
5067 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5068 continue;
6407bf6d 5069 }
1c4850e2
YZ
5070reada:
5071 ret = readahead_tree_block(root, bytenr, blocksize,
5072 generation);
5073 if (ret)
bd56b302 5074 break;
1c4850e2
YZ
5075 last = bytenr + blocksize;
5076 nread++;
20524f02 5077 }
1c4850e2 5078 wc->reada_slot = slot;
20524f02 5079}
2c47e605 5080
f82d02d9 5081/*
2c47e605
YZ
 5082 * helper to process tree block while walking down the tree.
5083 *
2c47e605
YZ
5084 * when wc->stage == UPDATE_BACKREF, this function updates
5085 * back refs for pointers in the block.
5086 *
5087 * NOTE: return value 1 means we should stop walking down.
f82d02d9 5088 */
2c47e605 5089static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5d4f98a2 5090 struct btrfs_root *root,
2c47e605 5091 struct btrfs_path *path,
94fcca9f 5092 struct walk_control *wc, int lookup_info)
f82d02d9 5093{
2c47e605
YZ
5094 int level = wc->level;
5095 struct extent_buffer *eb = path->nodes[level];
2c47e605 5096 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
f82d02d9
YZ
5097 int ret;
5098
2c47e605
YZ
5099 if (wc->stage == UPDATE_BACKREF &&
5100 btrfs_header_owner(eb) != root->root_key.objectid)
5101 return 1;
f82d02d9 5102
2c47e605
YZ
5103 /*
5104 * when reference count of tree block is 1, it won't increase
5105 * again. once full backref flag is set, we never clear it.
5106 */
94fcca9f
YZ
5107 if (lookup_info &&
5108 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5109 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
2c47e605
YZ
5110 BUG_ON(!path->locks[level]);
5111 ret = btrfs_lookup_extent_info(trans, root,
5112 eb->start, eb->len,
5113 &wc->refs[level],
5114 &wc->flags[level]);
5115 BUG_ON(ret);
5116 BUG_ON(wc->refs[level] == 0);
5117 }
5d4f98a2 5118
2c47e605
YZ
5119 if (wc->stage == DROP_REFERENCE) {
5120 if (wc->refs[level] > 1)
5121 return 1;
f82d02d9 5122
2c47e605
YZ
5123 if (path->locks[level] && !wc->keep_locks) {
5124 btrfs_tree_unlock(eb);
5125 path->locks[level] = 0;
5126 }
5127 return 0;
5128 }
f82d02d9 5129
2c47e605
YZ
5130 /* wc->stage == UPDATE_BACKREF */
5131 if (!(wc->flags[level] & flag)) {
5132 BUG_ON(!path->locks[level]);
5133 ret = btrfs_inc_ref(trans, root, eb, 1);
f82d02d9 5134 BUG_ON(ret);
2c47e605
YZ
5135 ret = btrfs_dec_ref(trans, root, eb, 0);
5136 BUG_ON(ret);
5137 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5138 eb->len, flag, 0);
5139 BUG_ON(ret);
5140 wc->flags[level] |= flag;
5141 }
5142
5143 /*
5144 * the block is shared by multiple trees, so it's not good to
5145 * keep the tree lock
5146 */
5147 if (path->locks[level] && level > 0) {
5148 btrfs_tree_unlock(eb);
5149 path->locks[level] = 0;
5150 }
5151 return 0;
5152}
5153
1c4850e2
YZ
5154/*
 5155 * helper to process tree block pointer.
 5156 *
 5157 * when wc->stage == DROP_REFERENCE, this function checks
 5158 * reference count of the block pointed to. if the block
 5159 * is shared and we need to update back refs for the subtree
 5160 * rooted at the block, this function changes wc->stage to
 5161 * UPDATE_BACKREF. if the block is shared and there is no
 5162 * need to update back refs, this function drops the reference
 5163 * to the block.
5164 *
5165 * NOTE: return value 1 means we should stop walking down.
5166 */
5167static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5168 struct btrfs_root *root,
5169 struct btrfs_path *path,
94fcca9f 5170 struct walk_control *wc, int *lookup_info)
1c4850e2
YZ
5171{
5172 u64 bytenr;
5173 u64 generation;
5174 u64 parent;
5175 u32 blocksize;
5176 struct btrfs_key key;
5177 struct extent_buffer *next;
5178 int level = wc->level;
5179 int reada = 0;
5180 int ret = 0;
5181
5182 generation = btrfs_node_ptr_generation(path->nodes[level],
5183 path->slots[level]);
5184 /*
5185 * if the lower level block was created before the snapshot
5186 * was created, we know there is no need to update back refs
5187 * for the subtree
5188 */
5189 if (wc->stage == UPDATE_BACKREF &&
94fcca9f
YZ
5190 generation <= root->root_key.offset) {
5191 *lookup_info = 1;
1c4850e2 5192 return 1;
94fcca9f 5193 }
1c4850e2
YZ
5194
5195 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5196 blocksize = btrfs_level_size(root, level - 1);
5197
5198 next = btrfs_find_tree_block(root, bytenr, blocksize);
5199 if (!next) {
5200 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5201 reada = 1;
5202 }
5203 btrfs_tree_lock(next);
5204 btrfs_set_lock_blocking(next);
5205
94fcca9f
YZ
5206 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5207 &wc->refs[level - 1],
5208 &wc->flags[level - 1]);
5209 BUG_ON(ret);
5210 BUG_ON(wc->refs[level - 1] == 0);
5211 *lookup_info = 0;
1c4850e2 5212
94fcca9f 5213 if (wc->stage == DROP_REFERENCE) {
1c4850e2 5214 if (wc->refs[level - 1] > 1) {
94fcca9f
YZ
5215 if (level == 1 &&
5216 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5217 goto skip;
5218
1c4850e2
YZ
5219 if (!wc->update_ref ||
5220 generation <= root->root_key.offset)
5221 goto skip;
5222
5223 btrfs_node_key_to_cpu(path->nodes[level], &key,
5224 path->slots[level]);
5225 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
5226 if (ret < 0)
5227 goto skip;
5228
5229 wc->stage = UPDATE_BACKREF;
5230 wc->shared_level = level - 1;
5231 }
94fcca9f
YZ
5232 } else {
5233 if (level == 1 &&
5234 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5235 goto skip;
1c4850e2
YZ
5236 }
5237
5238 if (!btrfs_buffer_uptodate(next, generation)) {
5239 btrfs_tree_unlock(next);
5240 free_extent_buffer(next);
5241 next = NULL;
94fcca9f 5242 *lookup_info = 1;
1c4850e2
YZ
5243 }
5244
5245 if (!next) {
5246 if (reada && level == 1)
5247 reada_walk_down(trans, root, wc, path);
5248 next = read_tree_block(root, bytenr, blocksize, generation);
5249 btrfs_tree_lock(next);
5250 btrfs_set_lock_blocking(next);
5251 }
5252
5253 level--;
5254 BUG_ON(level != btrfs_header_level(next));
5255 path->nodes[level] = next;
5256 path->slots[level] = 0;
5257 path->locks[level] = 1;
5258 wc->level = level;
5259 if (wc->level == 1)
5260 wc->reada_slot = 0;
5261 return 0;
5262skip:
5263 wc->refs[level - 1] = 0;
5264 wc->flags[level - 1] = 0;
94fcca9f
YZ
5265 if (wc->stage == DROP_REFERENCE) {
5266 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
5267 parent = path->nodes[level]->start;
5268 } else {
5269 BUG_ON(root->root_key.objectid !=
5270 btrfs_header_owner(path->nodes[level]));
5271 parent = 0;
5272 }
1c4850e2 5273
94fcca9f
YZ
5274 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
5275 root->root_key.objectid, level - 1, 0);
5276 BUG_ON(ret);
1c4850e2 5277 }
1c4850e2
YZ
5278 btrfs_tree_unlock(next);
5279 free_extent_buffer(next);
94fcca9f 5280 *lookup_info = 1;
1c4850e2
YZ
5281 return 1;
5282}
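/*
 * Illustration only -- not part of the btrfs code.  The user-space sketch
 * below models the core decision do_walk_down() makes for a child pointer
 * (descend, drop our reference and skip, or switch the walk to
 * UPDATE_BACKREF).  It deliberately ignores the FULL_BACKREF and
 * update_progress special cases, and child_action() is a made-up name.
 */
#if 0 /* standalone illustration */
#include <stdio.h>

enum stage  { DROP_REFERENCE, UPDATE_BACKREF };
enum action { DESCEND, SKIP_AND_DROP_REF, SWITCH_TO_UPDATE_BACKREF };

static enum action child_action(enum stage stage, unsigned long long refs,
				int created_before_snapshot, int update_ref)
{
	/* unshared subtrees and the UPDATE_BACKREF pass are simply walked */
	if (stage == UPDATE_BACKREF || refs == 1)
		return DESCEND;

	/* refs > 1: the subtree below this child is shared with a snapshot */
	if (!update_ref || created_before_snapshot)
		return SKIP_AND_DROP_REF;

	/* shared and its backrefs must stay valid: convert them first */
	return SWITCH_TO_UPDATE_BACKREF;
}

int main(void)
{
	printf("%d\n", child_action(DROP_REFERENCE, 2, 0, 1));
	return 0;
}
#endif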
5283
2c47e605
YZ
5284/*
5285 * helper to process a tree block while walking up the tree.
5286 *
5287 * when wc->stage == DROP_REFERENCE, this function drops
5288 * reference count on the block.
5289 *
5290 * when wc->stage == UPDATE_BACKREF, this function changes
5291 * wc->stage back to DROP_REFERENCE if we changed wc->stage
5292 * to UPDATE_BACKREF previously while processing the block.
5293 *
5294 * NOTE: return value 1 means we should stop walking up.
5295 */
5296static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
5297 struct btrfs_root *root,
5298 struct btrfs_path *path,
5299 struct walk_control *wc)
5300{
5301 int ret = 0;
5302 int level = wc->level;
5303 struct extent_buffer *eb = path->nodes[level];
5304 u64 parent = 0;
5305
5306 if (wc->stage == UPDATE_BACKREF) {
5307 BUG_ON(wc->shared_level < level);
5308 if (level < wc->shared_level)
5309 goto out;
5310
2c47e605
YZ
5311 ret = find_next_key(path, level + 1, &wc->update_progress);
5312 if (ret > 0)
5313 wc->update_ref = 0;
5314
5315 wc->stage = DROP_REFERENCE;
5316 wc->shared_level = -1;
5317 path->slots[level] = 0;
5318
5319 /*
5320 * check the reference count again if the block isn't locked.
5321 * we should start walking down the tree again if the reference
5322 * count is one.
5323 */
5324 if (!path->locks[level]) {
5325 BUG_ON(level == 0);
5326 btrfs_tree_lock(eb);
5327 btrfs_set_lock_blocking(eb);
5328 path->locks[level] = 1;
5329
5330 ret = btrfs_lookup_extent_info(trans, root,
5331 eb->start, eb->len,
5332 &wc->refs[level],
5333 &wc->flags[level]);
f82d02d9 5334 BUG_ON(ret);
2c47e605
YZ
5335 BUG_ON(wc->refs[level] == 0);
5336 if (wc->refs[level] == 1) {
5337 btrfs_tree_unlock(eb);
5338 path->locks[level] = 0;
5339 return 1;
5340 }
f82d02d9 5341 }
2c47e605 5342 }
f82d02d9 5343
2c47e605
YZ
5344 /* wc->stage == DROP_REFERENCE */
5345 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
5d4f98a2 5346
2c47e605
YZ
5347 if (wc->refs[level] == 1) {
5348 if (level == 0) {
5349 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5350 ret = btrfs_dec_ref(trans, root, eb, 1);
5351 else
5352 ret = btrfs_dec_ref(trans, root, eb, 0);
5353 BUG_ON(ret);
5354 }
5355 /* make block locked assertion in clean_tree_block happy */
5356 if (!path->locks[level] &&
5357 btrfs_header_generation(eb) == trans->transid) {
5358 btrfs_tree_lock(eb);
5359 btrfs_set_lock_blocking(eb);
5360 path->locks[level] = 1;
5361 }
5362 clean_tree_block(trans, root, eb);
5363 }
5364
5365 if (eb == root->node) {
5366 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5367 parent = eb->start;
5368 else
5369 BUG_ON(root->root_key.objectid !=
5370 btrfs_header_owner(eb));
5371 } else {
5372 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
5373 parent = path->nodes[level + 1]->start;
5374 else
5375 BUG_ON(root->root_key.objectid !=
5376 btrfs_header_owner(path->nodes[level + 1]));
f82d02d9 5377 }
f82d02d9 5378
2c47e605
YZ
5379 ret = btrfs_free_extent(trans, root, eb->start, eb->len, parent,
5380 root->root_key.objectid, level, 0);
f82d02d9 5381 BUG_ON(ret);
2c47e605
YZ
5382out:
5383 wc->refs[level] = 0;
5384 wc->flags[level] = 0;
5385 return ret;
5386}
5387
5388static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
5389 struct btrfs_root *root,
5390 struct btrfs_path *path,
5391 struct walk_control *wc)
5392{
2c47e605 5393 int level = wc->level;
94fcca9f 5394 int lookup_info = 1;
2c47e605
YZ
5395 int ret;
5396
5397 while (level >= 0) {
1c4850e2
YZ
5398 if (path->slots[level] >=
5399 btrfs_header_nritems(path->nodes[level]))
5400 break;
f82d02d9 5401
94fcca9f 5402 ret = walk_down_proc(trans, root, path, wc, lookup_info);
2c47e605
YZ
5403 if (ret > 0)
5404 break;
5405
5406 if (level == 0)
5407 break;
5408
94fcca9f 5409 ret = do_walk_down(trans, root, path, wc, &lookup_info);
1c4850e2
YZ
5410 if (ret > 0) {
5411 path->slots[level]++;
5412 continue;
5413 }
5414 level = wc->level;
f82d02d9 5415 }
f82d02d9
YZ
5416 return 0;
5417}
5418
d397712b 5419static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
98ed5174 5420 struct btrfs_root *root,
f82d02d9 5421 struct btrfs_path *path,
2c47e605 5422 struct walk_control *wc, int max_level)
20524f02 5423{
2c47e605 5424 int level = wc->level;
20524f02 5425 int ret;
9f3a7427 5426
2c47e605
YZ
5427 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
5428 while (level < max_level && path->nodes[level]) {
5429 wc->level = level;
5430 if (path->slots[level] + 1 <
5431 btrfs_header_nritems(path->nodes[level])) {
5432 path->slots[level]++;
20524f02
CM
5433 return 0;
5434 } else {
2c47e605
YZ
5435 ret = walk_up_proc(trans, root, path, wc);
5436 if (ret > 0)
5437 return 0;
bd56b302 5438
2c47e605
YZ
5439 if (path->locks[level]) {
5440 btrfs_tree_unlock(path->nodes[level]);
5441 path->locks[level] = 0;
f82d02d9 5442 }
2c47e605
YZ
5443 free_extent_buffer(path->nodes[level]);
5444 path->nodes[level] = NULL;
5445 level++;
20524f02
CM
5446 }
5447 }
5448 return 1;
5449}
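/*
 * walk_down_tree() and walk_up_tree() cooperate to form a resumable
 * post-order traversal: walk_down_tree() pushes nodes onto path->nodes[]
 * until it reaches a leaf or a subtree that do_walk_down() decided to skip,
 * and walk_up_tree() then drops or frees the finished blocks and pops
 * levels until it finds a node with unvisited slots.  walk_up_tree()
 * returns 1 only once everything below max_level has been processed.
 */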
5450
9aca1d51 5451/*
2c47e605
YZ
5452 * drop a subvolume tree.
5453 *
5454 * this function traverses the tree freeing any blocks that are only
5455 * referenced by the tree.
5456 *
5457 * when a shared tree block is found, this function decreases its
5458 * reference count by one. if update_ref is true, this function
5459 * also makes sure backrefs for the shared block and all lower level
5460 * blocks are properly updated.
9aca1d51 5461 */
2c47e605 5462int btrfs_drop_snapshot(struct btrfs_root *root, int update_ref)
20524f02 5463{
5caf2a00 5464 struct btrfs_path *path;
2c47e605
YZ
5465 struct btrfs_trans_handle *trans;
5466 struct btrfs_root *tree_root = root->fs_info->tree_root;
9f3a7427 5467 struct btrfs_root_item *root_item = &root->root_item;
2c47e605
YZ
5468 struct walk_control *wc;
5469 struct btrfs_key key;
5470 int err = 0;
5471 int ret;
5472 int level;
20524f02 5473
5caf2a00
CM
5474 path = btrfs_alloc_path();
5475 BUG_ON(!path);
20524f02 5476
2c47e605
YZ
5477 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5478 BUG_ON(!wc);
5479
5480 trans = btrfs_start_transaction(tree_root, 1);
5481
9f3a7427 5482 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
2c47e605 5483 level = btrfs_header_level(root->node);
5d4f98a2
YZ
5484 path->nodes[level] = btrfs_lock_root_node(root);
5485 btrfs_set_lock_blocking(path->nodes[level]);
9f3a7427 5486 path->slots[level] = 0;
5d4f98a2 5487 path->locks[level] = 1;
2c47e605
YZ
5488 memset(&wc->update_progress, 0,
5489 sizeof(wc->update_progress));
9f3a7427 5490 } else {
9f3a7427 5491 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
2c47e605
YZ
5492 memcpy(&wc->update_progress, &key,
5493 sizeof(wc->update_progress));
5494
6702ed49 5495 level = root_item->drop_level;
2c47e605 5496 BUG_ON(level == 0);
6702ed49 5497 path->lowest_level = level;
2c47e605
YZ
5498 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5499 path->lowest_level = 0;
5500 if (ret < 0) {
5501 err = ret;
9f3a7427
CM
5502 goto out;
5503 }
1c4850e2 5504 WARN_ON(ret > 0);
2c47e605 5505
7d9eb12c
CM
5506 /*
5507 * unlock our path, this is safe because only this
5508 * function is allowed to delete this snapshot
5509 */
5d4f98a2 5510 btrfs_unlock_up_safe(path, 0);
2c47e605
YZ
5511
5512 level = btrfs_header_level(root->node);
5513 while (1) {
5514 btrfs_tree_lock(path->nodes[level]);
5515 btrfs_set_lock_blocking(path->nodes[level]);
5516
5517 ret = btrfs_lookup_extent_info(trans, root,
5518 path->nodes[level]->start,
5519 path->nodes[level]->len,
5520 &wc->refs[level],
5521 &wc->flags[level]);
5522 BUG_ON(ret);
5523 BUG_ON(wc->refs[level] == 0);
5524
5525 if (level == root_item->drop_level)
5526 break;
5527
5528 btrfs_tree_unlock(path->nodes[level]);
5529 WARN_ON(wc->refs[level] != 1);
5530 level--;
5531 }
9f3a7427 5532 }
2c47e605
YZ
5533
5534 wc->level = level;
5535 wc->shared_level = -1;
5536 wc->stage = DROP_REFERENCE;
5537 wc->update_ref = update_ref;
5538 wc->keep_locks = 0;
1c4850e2 5539 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
2c47e605 5540
d397712b 5541 while (1) {
2c47e605
YZ
5542 ret = walk_down_tree(trans, root, path, wc);
5543 if (ret < 0) {
5544 err = ret;
20524f02 5545 break;
2c47e605 5546 }
9aca1d51 5547
2c47e605
YZ
5548 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
5549 if (ret < 0) {
5550 err = ret;
20524f02 5551 break;
2c47e605
YZ
5552 }
5553
5554 if (ret > 0) {
5555 BUG_ON(wc->stage != DROP_REFERENCE);
e7a84565
CM
5556 break;
5557 }
2c47e605
YZ
5558
5559 if (wc->stage == DROP_REFERENCE) {
5560 level = wc->level;
5561 btrfs_node_key(path->nodes[level],
5562 &root_item->drop_progress,
5563 path->slots[level]);
5564 root_item->drop_level = level;
5565 }
5566
5567 BUG_ON(wc->level == 0);
5568 if (trans->transaction->in_commit ||
5569 trans->transaction->delayed_refs.flushing) {
5570 ret = btrfs_update_root(trans, tree_root,
5571 &root->root_key,
5572 root_item);
5573 BUG_ON(ret);
5574
5575 btrfs_end_transaction(trans, tree_root);
5576 trans = btrfs_start_transaction(tree_root, 1);
5577 } else {
5578 unsigned long update;
c3e69d58
CM
5579 update = trans->delayed_ref_updates;
5580 trans->delayed_ref_updates = 0;
5581 if (update)
2c47e605
YZ
5582 btrfs_run_delayed_refs(trans, tree_root,
5583 update);
c3e69d58 5584 }
20524f02 5585 }
2c47e605
YZ
5586 btrfs_release_path(root, path);
5587 BUG_ON(err);
5588
5589 ret = btrfs_del_root(trans, tree_root, &root->root_key);
5590 BUG_ON(ret);
5591
76dda93c
YZ
5592 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
5593 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
5594 NULL, NULL);
5595 BUG_ON(ret < 0);
5596 if (ret > 0) {
5597 ret = btrfs_del_orphan_item(trans, tree_root,
5598 root->root_key.objectid);
5599 BUG_ON(ret);
5600 }
5601 }
5602
5603 if (root->in_radix) {
5604 btrfs_free_fs_root(tree_root->fs_info, root);
5605 } else {
5606 free_extent_buffer(root->node);
5607 free_extent_buffer(root->commit_root);
5608 kfree(root);
5609 }
9f3a7427 5610out:
2c47e605
YZ
5611 btrfs_end_transaction(trans, tree_root);
5612 kfree(wc);
5caf2a00 5613 btrfs_free_path(path);
2c47e605 5614 return err;
20524f02 5615}
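/*
 * Minimal, hypothetical caller sketch (not from the kernel tree): it only
 * shows how the entry point above is meant to be used.  The caller hands
 * its root over to btrfs_drop_snapshot(), which frees the whole subvolume
 * and resumes from root_item->drop_progress if an earlier attempt was
 * interrupted.  drop_dead_snapshot() is a made-up name.
 */
#if 0 /* illustration only */
static void drop_dead_snapshot(struct btrfs_root *root)
{
	int err;

	/* update_ref == 0: no other tree needs its backrefs rewritten */
	err = btrfs_drop_snapshot(root, 0);
	if (err)
		printk(KERN_ERR "btrfs: error %d while dropping snapshot\n",
		       err);
}
#endif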
9078a3e1 5616
2c47e605
YZ
5617/*
5618 * drop subtree rooted at tree block 'node'.
5619 *
5620 * NOTE: this function will unlock and release tree block 'node'
5621 */
f82d02d9
YZ
5622int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
5623 struct btrfs_root *root,
5624 struct extent_buffer *node,
5625 struct extent_buffer *parent)
5626{
5627 struct btrfs_path *path;
2c47e605 5628 struct walk_control *wc;
f82d02d9
YZ
5629 int level;
5630 int parent_level;
5631 int ret = 0;
5632 int wret;
5633
2c47e605
YZ
5634 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5635
f82d02d9
YZ
5636 path = btrfs_alloc_path();
5637 BUG_ON(!path);
5638
2c47e605
YZ
5639 wc = kzalloc(sizeof(*wc), GFP_NOFS);
5640 BUG_ON(!wc);
5641
b9447ef8 5642 btrfs_assert_tree_locked(parent);
f82d02d9
YZ
5643 parent_level = btrfs_header_level(parent);
5644 extent_buffer_get(parent);
5645 path->nodes[parent_level] = parent;
5646 path->slots[parent_level] = btrfs_header_nritems(parent);
5647
b9447ef8 5648 btrfs_assert_tree_locked(node);
f82d02d9 5649 level = btrfs_header_level(node);
f82d02d9
YZ
5650 path->nodes[level] = node;
5651 path->slots[level] = 0;
2c47e605
YZ
5652 path->locks[level] = 1;
5653
5654 wc->refs[parent_level] = 1;
5655 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5656 wc->level = level;
5657 wc->shared_level = -1;
5658 wc->stage = DROP_REFERENCE;
5659 wc->update_ref = 0;
5660 wc->keep_locks = 1;
1c4850e2 5661 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
f82d02d9
YZ
5662
5663 while (1) {
2c47e605
YZ
5664 wret = walk_down_tree(trans, root, path, wc);
5665 if (wret < 0) {
f82d02d9 5666 ret = wret;
f82d02d9 5667 break;
2c47e605 5668 }
f82d02d9 5669
2c47e605 5670 wret = walk_up_tree(trans, root, path, wc, parent_level);
f82d02d9
YZ
5671 if (wret < 0)
5672 ret = wret;
5673 if (wret != 0)
5674 break;
5675 }
5676
2c47e605 5677 kfree(wc);
f82d02d9
YZ
5678 btrfs_free_path(path);
5679 return ret;
5680}
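/*
 * note: btrfs_drop_subtree() runs the same two-phase walk as
 * btrfs_drop_snapshot(), but with keep_locks set and update_ref cleared,
 * and the BUG_ON above limits it to the tree reloc root.  the parent block
 * is installed in the walk_control as an already-processed full backref
 * node, so the walk only drops 'node' and the blocks below it.
 */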
5681
5d4f98a2 5682#if 0
8e7bf94f
CM
5683static unsigned long calc_ra(unsigned long start, unsigned long last,
5684 unsigned long nr)
5685{
5686 return min(last, start + nr - 1);
5687}
5688
d397712b 5689static noinline int relocate_inode_pages(struct inode *inode, u64 start,
98ed5174 5690 u64 len)
edbd8d4e
CM
5691{
5692 u64 page_start;
5693 u64 page_end;
1a40e23b 5694 unsigned long first_index;
edbd8d4e 5695 unsigned long last_index;
edbd8d4e
CM
5696 unsigned long i;
5697 struct page *page;
d1310b2e 5698 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4313b399 5699 struct file_ra_state *ra;
3eaa2885 5700 struct btrfs_ordered_extent *ordered;
1a40e23b
ZY
5701 unsigned int total_read = 0;
5702 unsigned int total_dirty = 0;
5703 int ret = 0;
4313b399
CM
5704
5705 ra = kzalloc(sizeof(*ra), GFP_NOFS);
edbd8d4e
CM
5706
5707 mutex_lock(&inode->i_mutex);
1a40e23b 5708 first_index = start >> PAGE_CACHE_SHIFT;
edbd8d4e
CM
5709 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
5710
1a40e23b
ZY
5711 /* make sure the dirty trick played by the caller works */
5712 ret = invalidate_inode_pages2_range(inode->i_mapping,
5713 first_index, last_index);
5714 if (ret)
5715 goto out_unlock;
8e7bf94f 5716
4313b399 5717 file_ra_state_init(ra, inode->i_mapping);
edbd8d4e 5718
1a40e23b
ZY
5719 for (i = first_index ; i <= last_index; i++) {
5720 if (total_read % ra->ra_pages == 0) {
8e7bf94f 5721 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
1a40e23b 5722 calc_ra(i, last_index, ra->ra_pages));
8e7bf94f
CM
5723 }
5724 total_read++;
3eaa2885
CM
5725again:
5726 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
1a40e23b 5727 BUG_ON(1);
edbd8d4e 5728 page = grab_cache_page(inode->i_mapping, i);
a061fc8d 5729 if (!page) {
1a40e23b 5730 ret = -ENOMEM;
edbd8d4e 5731 goto out_unlock;
a061fc8d 5732 }
edbd8d4e
CM
5733 if (!PageUptodate(page)) {
5734 btrfs_readpage(NULL, page);
5735 lock_page(page);
5736 if (!PageUptodate(page)) {
5737 unlock_page(page);
5738 page_cache_release(page);
1a40e23b 5739 ret = -EIO;
edbd8d4e
CM
5740 goto out_unlock;
5741 }
5742 }
ec44a35c 5743 wait_on_page_writeback(page);
3eaa2885 5744
edbd8d4e
CM
5745 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
5746 page_end = page_start + PAGE_CACHE_SIZE - 1;
d1310b2e 5747 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
edbd8d4e 5748
3eaa2885
CM
5749 ordered = btrfs_lookup_ordered_extent(inode, page_start);
5750 if (ordered) {
5751 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
5752 unlock_page(page);
5753 page_cache_release(page);
5754 btrfs_start_ordered_extent(inode, ordered, 1);
5755 btrfs_put_ordered_extent(ordered);
5756 goto again;
5757 }
5758 set_page_extent_mapped(page);
5759
1a40e23b
ZY
5760 if (i == first_index)
5761 set_extent_bits(io_tree, page_start, page_end,
5762 EXTENT_BOUNDARY, GFP_NOFS);
1f80e4db 5763 btrfs_set_extent_delalloc(inode, page_start, page_end);
1a40e23b 5764
a061fc8d 5765 set_page_dirty(page);
1a40e23b 5766 total_dirty++;
edbd8d4e 5767
d1310b2e 5768 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
edbd8d4e
CM
5769 unlock_page(page);
5770 page_cache_release(page);
5771 }
5772
5773out_unlock:
ec44a35c 5774 kfree(ra);
edbd8d4e 5775 mutex_unlock(&inode->i_mutex);
1a40e23b
ZY
5776 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
5777 return ret;
edbd8d4e
CM
5778}
5779
d397712b 5780static noinline int relocate_data_extent(struct inode *reloc_inode,
1a40e23b
ZY
5781 struct btrfs_key *extent_key,
5782 u64 offset)
5783{
5784 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
5785 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
5786 struct extent_map *em;
6643558d
YZ
5787 u64 start = extent_key->objectid - offset;
5788 u64 end = start + extent_key->offset - 1;
bf4ef679 5789
1a40e23b
ZY
5790 em = alloc_extent_map(GFP_NOFS);
5791 BUG_ON(!em || IS_ERR(em));
bf4ef679 5792
6643558d 5793 em->start = start;
1a40e23b 5794 em->len = extent_key->offset;
c8b97818 5795 em->block_len = extent_key->offset;
1a40e23b
ZY
5796 em->block_start = extent_key->objectid;
5797 em->bdev = root->fs_info->fs_devices->latest_bdev;
5798 set_bit(EXTENT_FLAG_PINNED, &em->flags);
5799
5800 /* setup extent map to cheat btrfs_readpage */
6643558d 5801 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
1a40e23b
ZY
5802 while (1) {
5803 int ret;
890871be 5804 write_lock(&em_tree->lock);
1a40e23b 5805 ret = add_extent_mapping(em_tree, em);
890871be 5806 write_unlock(&em_tree->lock);
1a40e23b
ZY
5807 if (ret != -EEXIST) {
5808 free_extent_map(em);
bf4ef679
CM
5809 break;
5810 }
6643558d 5811 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
bf4ef679 5812 }
6643558d 5813 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
bf4ef679 5814
6643558d 5815 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
1a40e23b 5816}
edbd8d4e 5817
1a40e23b
ZY
5818struct btrfs_ref_path {
5819 u64 extent_start;
5820 u64 nodes[BTRFS_MAX_LEVEL];
5821 u64 root_objectid;
5822 u64 root_generation;
5823 u64 owner_objectid;
1a40e23b
ZY
5824 u32 num_refs;
5825 int lowest_level;
5826 int current_level;
f82d02d9
YZ
5827 int shared_level;
5828
5829 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
5830 u64 new_nodes[BTRFS_MAX_LEVEL];
1a40e23b 5831};
7d9eb12c 5832
1a40e23b 5833struct disk_extent {
c8b97818 5834 u64 ram_bytes;
1a40e23b
ZY
5835 u64 disk_bytenr;
5836 u64 disk_num_bytes;
5837 u64 offset;
5838 u64 num_bytes;
c8b97818
CM
5839 u8 compression;
5840 u8 encryption;
5841 u16 other_encoding;
1a40e23b 5842};
4313b399 5843
1a40e23b
ZY
5844static int is_cowonly_root(u64 root_objectid)
5845{
5846 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
5847 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
5848 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
5849 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
0403e47e
YZ
5850 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5851 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
1a40e23b
ZY
5852 return 1;
5853 return 0;
5854}
edbd8d4e 5855
d397712b 5856static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
1a40e23b
ZY
5857 struct btrfs_root *extent_root,
5858 struct btrfs_ref_path *ref_path,
5859 int first_time)
5860{
5861 struct extent_buffer *leaf;
5862 struct btrfs_path *path;
5863 struct btrfs_extent_ref *ref;
5864 struct btrfs_key key;
5865 struct btrfs_key found_key;
5866 u64 bytenr;
5867 u32 nritems;
5868 int level;
5869 int ret = 1;
edbd8d4e 5870
1a40e23b
ZY
5871 path = btrfs_alloc_path();
5872 if (!path)
5873 return -ENOMEM;
bf4ef679 5874
1a40e23b
ZY
5875 if (first_time) {
5876 ref_path->lowest_level = -1;
5877 ref_path->current_level = -1;
f82d02d9 5878 ref_path->shared_level = -1;
1a40e23b
ZY
5879 goto walk_up;
5880 }
5881walk_down:
5882 level = ref_path->current_level - 1;
5883 while (level >= -1) {
5884 u64 parent;
5885 if (level < ref_path->lowest_level)
5886 break;
bf4ef679 5887
d397712b 5888 if (level >= 0)
1a40e23b 5889 bytenr = ref_path->nodes[level];
d397712b 5890 else
1a40e23b 5891 bytenr = ref_path->extent_start;
1a40e23b 5892 BUG_ON(bytenr == 0);
bf4ef679 5893
1a40e23b
ZY
5894 parent = ref_path->nodes[level + 1];
5895 ref_path->nodes[level + 1] = 0;
5896 ref_path->current_level = level;
5897 BUG_ON(parent == 0);
0ef3e66b 5898
1a40e23b
ZY
5899 key.objectid = bytenr;
5900 key.offset = parent + 1;
5901 key.type = BTRFS_EXTENT_REF_KEY;
edbd8d4e 5902
1a40e23b
ZY
5903 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5904 if (ret < 0)
edbd8d4e 5905 goto out;
1a40e23b 5906 BUG_ON(ret == 0);
7d9eb12c 5907
1a40e23b
ZY
5908 leaf = path->nodes[0];
5909 nritems = btrfs_header_nritems(leaf);
5910 if (path->slots[0] >= nritems) {
5911 ret = btrfs_next_leaf(extent_root, path);
5912 if (ret < 0)
5913 goto out;
5914 if (ret > 0)
5915 goto next;
5916 leaf = path->nodes[0];
5917 }
0ef3e66b 5918
1a40e23b
ZY
5919 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5920 if (found_key.objectid == bytenr &&
f82d02d9
YZ
5921 found_key.type == BTRFS_EXTENT_REF_KEY) {
5922 if (level < ref_path->shared_level)
5923 ref_path->shared_level = level;
1a40e23b 5924 goto found;
f82d02d9 5925 }
1a40e23b
ZY
5926next:
5927 level--;
5928 btrfs_release_path(extent_root, path);
d899e052 5929 cond_resched();
1a40e23b
ZY
5930 }
5931 /* reached lowest level */
5932 ret = 1;
5933 goto out;
5934walk_up:
5935 level = ref_path->current_level;
5936 while (level < BTRFS_MAX_LEVEL - 1) {
5937 u64 ref_objectid;
d397712b
CM
5938
5939 if (level >= 0)
1a40e23b 5940 bytenr = ref_path->nodes[level];
d397712b 5941 else
1a40e23b 5942 bytenr = ref_path->extent_start;
d397712b 5943
1a40e23b 5944 BUG_ON(bytenr == 0);
edbd8d4e 5945
1a40e23b
ZY
5946 key.objectid = bytenr;
5947 key.offset = 0;
5948 key.type = BTRFS_EXTENT_REF_KEY;
edbd8d4e 5949
1a40e23b
ZY
5950 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
5951 if (ret < 0)
5952 goto out;
edbd8d4e 5953
1a40e23b
ZY
5954 leaf = path->nodes[0];
5955 nritems = btrfs_header_nritems(leaf);
5956 if (path->slots[0] >= nritems) {
5957 ret = btrfs_next_leaf(extent_root, path);
5958 if (ret < 0)
5959 goto out;
5960 if (ret > 0) {
5961 /* the extent was freed by someone */
5962 if (ref_path->lowest_level == level)
5963 goto out;
5964 btrfs_release_path(extent_root, path);
5965 goto walk_down;
5966 }
5967 leaf = path->nodes[0];
5968 }
edbd8d4e 5969
1a40e23b
ZY
5970 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5971 if (found_key.objectid != bytenr ||
5972 found_key.type != BTRFS_EXTENT_REF_KEY) {
5973 /* the extent was freed by someone */
5974 if (ref_path->lowest_level == level) {
5975 ret = 1;
5976 goto out;
5977 }
5978 btrfs_release_path(extent_root, path);
5979 goto walk_down;
5980 }
5981found:
5982 ref = btrfs_item_ptr(leaf, path->slots[0],
5983 struct btrfs_extent_ref);
5984 ref_objectid = btrfs_ref_objectid(leaf, ref);
5985 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
5986 if (first_time) {
5987 level = (int)ref_objectid;
5988 BUG_ON(level >= BTRFS_MAX_LEVEL);
5989 ref_path->lowest_level = level;
5990 ref_path->current_level = level;
5991 ref_path->nodes[level] = bytenr;
5992 } else {
5993 WARN_ON(ref_objectid != level);
5994 }
5995 } else {
5996 WARN_ON(level != -1);
5997 }
5998 first_time = 0;
bf4ef679 5999
1a40e23b
ZY
6000 if (ref_path->lowest_level == level) {
6001 ref_path->owner_objectid = ref_objectid;
1a40e23b
ZY
6002 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6003 }
bf4ef679 6004
7d9eb12c 6005 /*
1a40e23b
ZY
6006 * the block is a tree root or the block isn't in a reference
6007 * counted tree.
7d9eb12c 6008 */
1a40e23b
ZY
6009 if (found_key.objectid == found_key.offset ||
6010 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6011 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6012 ref_path->root_generation =
6013 btrfs_ref_generation(leaf, ref);
6014 if (level < 0) {
6015 /* special reference from the tree log */
6016 ref_path->nodes[0] = found_key.offset;
6017 ref_path->current_level = 0;
6018 }
6019 ret = 0;
6020 goto out;
6021 }
7d9eb12c 6022
1a40e23b
ZY
6023 level++;
6024 BUG_ON(ref_path->nodes[level] != 0);
6025 ref_path->nodes[level] = found_key.offset;
6026 ref_path->current_level = level;
bf4ef679 6027
1a40e23b
ZY
6028 /*
6029 * the reference was created in the running transaction,
6030 * no need to continue walking up.
6031 */
6032 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6033 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6034 ref_path->root_generation =
6035 btrfs_ref_generation(leaf, ref);
6036 ret = 0;
6037 goto out;
7d9eb12c
CM
6038 }
6039
1a40e23b 6040 btrfs_release_path(extent_root, path);
d899e052 6041 cond_resched();
7d9eb12c 6042 }
1a40e23b
ZY
6043 /* reached max tree level, but no tree root found. */
6044 BUG();
edbd8d4e 6045out:
1a40e23b
ZY
6046 btrfs_free_path(path);
6047 return ret;
edbd8d4e
CM
6048}
6049
1a40e23b
ZY
6050static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6051 struct btrfs_root *extent_root,
6052 struct btrfs_ref_path *ref_path,
6053 u64 extent_start)
a061fc8d 6054{
1a40e23b
ZY
6055 memset(ref_path, 0, sizeof(*ref_path));
6056 ref_path->extent_start = extent_start;
a061fc8d 6057
1a40e23b 6058 return __next_ref_path(trans, extent_root, ref_path, 1);
a061fc8d
CM
6059}
6060
1a40e23b
ZY
6061static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6062 struct btrfs_root *extent_root,
6063 struct btrfs_ref_path *ref_path)
edbd8d4e 6064{
1a40e23b
ZY
6065 return __next_ref_path(trans, extent_root, ref_path, 0);
6066}
6067
d397712b 6068static noinline int get_new_locations(struct inode *reloc_inode,
1a40e23b
ZY
6069 struct btrfs_key *extent_key,
6070 u64 offset, int no_fragment,
6071 struct disk_extent **extents,
6072 int *nr_extents)
6073{
6074 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6075 struct btrfs_path *path;
6076 struct btrfs_file_extent_item *fi;
edbd8d4e 6077 struct extent_buffer *leaf;
1a40e23b
ZY
6078 struct disk_extent *exts = *extents;
6079 struct btrfs_key found_key;
6080 u64 cur_pos;
6081 u64 last_byte;
edbd8d4e 6082 u32 nritems;
1a40e23b
ZY
6083 int nr = 0;
6084 int max = *nr_extents;
6085 int ret;
edbd8d4e 6086
1a40e23b
ZY
6087 WARN_ON(!no_fragment && *extents);
6088 if (!exts) {
6089 max = 1;
6090 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6091 if (!exts)
6092 return -ENOMEM;
a061fc8d 6093 }
edbd8d4e 6094
1a40e23b
ZY
6095 path = btrfs_alloc_path();
6096 BUG_ON(!path);
edbd8d4e 6097
1a40e23b
ZY
6098 cur_pos = extent_key->objectid - offset;
6099 last_byte = extent_key->objectid + extent_key->offset;
6100 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6101 cur_pos, 0);
6102 if (ret < 0)
6103 goto out;
6104 if (ret > 0) {
6105 ret = -ENOENT;
6106 goto out;
6107 }
edbd8d4e 6108
1a40e23b 6109 while (1) {
edbd8d4e
CM
6110 leaf = path->nodes[0];
6111 nritems = btrfs_header_nritems(leaf);
1a40e23b
ZY
6112 if (path->slots[0] >= nritems) {
6113 ret = btrfs_next_leaf(root, path);
a061fc8d
CM
6114 if (ret < 0)
6115 goto out;
1a40e23b
ZY
6116 if (ret > 0)
6117 break;
bf4ef679 6118 leaf = path->nodes[0];
a061fc8d 6119 }
edbd8d4e
CM
6120
6121 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1a40e23b
ZY
6122 if (found_key.offset != cur_pos ||
6123 found_key.type != BTRFS_EXTENT_DATA_KEY ||
6124 found_key.objectid != reloc_inode->i_ino)
edbd8d4e
CM
6125 break;
6126
1a40e23b
ZY
6127 fi = btrfs_item_ptr(leaf, path->slots[0],
6128 struct btrfs_file_extent_item);
6129 if (btrfs_file_extent_type(leaf, fi) !=
6130 BTRFS_FILE_EXTENT_REG ||
6131 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
edbd8d4e 6132 break;
1a40e23b
ZY
6133
6134 if (nr == max) {
6135 struct disk_extent *old = exts;
6136 max *= 2;
6137 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
6138 memcpy(exts, old, sizeof(*exts) * nr);
6139 if (old != *extents)
6140 kfree(old);
a061fc8d 6141 }
edbd8d4e 6142
1a40e23b
ZY
6143 exts[nr].disk_bytenr =
6144 btrfs_file_extent_disk_bytenr(leaf, fi);
6145 exts[nr].disk_num_bytes =
6146 btrfs_file_extent_disk_num_bytes(leaf, fi);
6147 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6148 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
c8b97818
CM
6149 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6150 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6151 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6152 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6153 fi);
d899e052
YZ
6154 BUG_ON(exts[nr].offset > 0);
6155 BUG_ON(exts[nr].compression || exts[nr].encryption);
6156 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
edbd8d4e 6157
1a40e23b
ZY
6158 cur_pos += exts[nr].num_bytes;
6159 nr++;
6160
6161 if (cur_pos + offset >= last_byte)
6162 break;
6163
6164 if (no_fragment) {
6165 ret = 1;
edbd8d4e 6166 goto out;
1a40e23b
ZY
6167 }
6168 path->slots[0]++;
6169 }
6170
1f80e4db 6171 BUG_ON(cur_pos + offset > last_byte);
1a40e23b
ZY
6172 if (cur_pos + offset < last_byte) {
6173 ret = -ENOENT;
6174 goto out;
edbd8d4e
CM
6175 }
6176 ret = 0;
6177out:
1a40e23b
ZY
6178 btrfs_free_path(path);
6179 if (ret) {
6180 if (exts != *extents)
6181 kfree(exts);
6182 } else {
6183 *extents = exts;
6184 *nr_extents = nr;
6185 }
6186 return ret;
6187}
6188
d397712b 6189static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6190 struct btrfs_root *root,
6191 struct btrfs_path *path,
6192 struct btrfs_key *extent_key,
6193 struct btrfs_key *leaf_key,
6194 struct btrfs_ref_path *ref_path,
6195 struct disk_extent *new_extents,
6196 int nr_extents)
6197{
6198 struct extent_buffer *leaf;
6199 struct btrfs_file_extent_item *fi;
6200 struct inode *inode = NULL;
6201 struct btrfs_key key;
6202 u64 lock_start = 0;
6203 u64 lock_end = 0;
6204 u64 num_bytes;
6205 u64 ext_offset;
86288a19 6206 u64 search_end = (u64)-1;
1a40e23b 6207 u32 nritems;
3bb1a1bc 6208 int nr_scaned = 0;
1a40e23b 6209 int extent_locked = 0;
d899e052 6210 int extent_type;
1a40e23b
ZY
6211 int ret;
6212
3bb1a1bc 6213 memcpy(&key, leaf_key, sizeof(key));
1a40e23b 6214 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
3bb1a1bc
YZ
6215 if (key.objectid < ref_path->owner_objectid ||
6216 (key.objectid == ref_path->owner_objectid &&
6217 key.type < BTRFS_EXTENT_DATA_KEY)) {
6218 key.objectid = ref_path->owner_objectid;
6219 key.type = BTRFS_EXTENT_DATA_KEY;
6220 key.offset = 0;
6221 }
1a40e23b
ZY
6222 }
6223
6224 while (1) {
6225 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
6226 if (ret < 0)
6227 goto out;
6228
6229 leaf = path->nodes[0];
6230 nritems = btrfs_header_nritems(leaf);
6231next:
6232 if (extent_locked && ret > 0) {
6233 /*
6234 * the file extent item was modified by someone
6235 * before the extent got locked.
6236 */
1a40e23b
ZY
6237 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6238 lock_end, GFP_NOFS);
6239 extent_locked = 0;
6240 }
6241
6242 if (path->slots[0] >= nritems) {
3bb1a1bc 6243 if (++nr_scaned > 2)
1a40e23b
ZY
6244 break;
6245
6246 BUG_ON(extent_locked);
6247 ret = btrfs_next_leaf(root, path);
6248 if (ret < 0)
6249 goto out;
6250 if (ret > 0)
6251 break;
6252 leaf = path->nodes[0];
6253 nritems = btrfs_header_nritems(leaf);
6254 }
6255
6256 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
6257
6258 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
6259 if ((key.objectid > ref_path->owner_objectid) ||
6260 (key.objectid == ref_path->owner_objectid &&
6261 key.type > BTRFS_EXTENT_DATA_KEY) ||
86288a19 6262 key.offset >= search_end)
1a40e23b
ZY
6263 break;
6264 }
6265
6266 if (inode && key.objectid != inode->i_ino) {
6267 BUG_ON(extent_locked);
6268 btrfs_release_path(root, path);
6269 mutex_unlock(&inode->i_mutex);
6270 iput(inode);
6271 inode = NULL;
6272 continue;
6273 }
6274
6275 if (key.type != BTRFS_EXTENT_DATA_KEY) {
6276 path->slots[0]++;
6277 ret = 1;
6278 goto next;
6279 }
6280 fi = btrfs_item_ptr(leaf, path->slots[0],
6281 struct btrfs_file_extent_item);
d899e052
YZ
6282 extent_type = btrfs_file_extent_type(leaf, fi);
6283 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
6284 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
1a40e23b
ZY
6285 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
6286 extent_key->objectid)) {
6287 path->slots[0]++;
6288 ret = 1;
6289 goto next;
6290 }
6291
6292 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6293 ext_offset = btrfs_file_extent_offset(leaf, fi);
6294
86288a19
YZ
6295 if (search_end == (u64)-1) {
6296 search_end = key.offset - ext_offset +
6297 btrfs_file_extent_ram_bytes(leaf, fi);
6298 }
1a40e23b
ZY
6299
6300 if (!extent_locked) {
6301 lock_start = key.offset;
6302 lock_end = lock_start + num_bytes - 1;
6303 } else {
6643558d
YZ
6304 if (lock_start > key.offset ||
6305 lock_end + 1 < key.offset + num_bytes) {
6306 unlock_extent(&BTRFS_I(inode)->io_tree,
6307 lock_start, lock_end, GFP_NOFS);
6308 extent_locked = 0;
6309 }
1a40e23b
ZY
6310 }
6311
6312 if (!inode) {
6313 btrfs_release_path(root, path);
6314
6315 inode = btrfs_iget_locked(root->fs_info->sb,
6316 key.objectid, root);
6317 if (inode->i_state & I_NEW) {
6318 BTRFS_I(inode)->root = root;
6319 BTRFS_I(inode)->location.objectid =
6320 key.objectid;
6321 BTRFS_I(inode)->location.type =
6322 BTRFS_INODE_ITEM_KEY;
6323 BTRFS_I(inode)->location.offset = 0;
6324 btrfs_read_locked_inode(inode);
6325 unlock_new_inode(inode);
6326 }
6327 /*
6328 * some code calls btrfs_commit_transaction while
6329 * holding the i_mutex, so we can't use mutex_lock
6330 * here.
6331 */
6332 if (is_bad_inode(inode) ||
6333 !mutex_trylock(&inode->i_mutex)) {
6334 iput(inode);
6335 inode = NULL;
6336 key.offset = (u64)-1;
6337 goto skip;
6338 }
6339 }
6340
6341 if (!extent_locked) {
6342 struct btrfs_ordered_extent *ordered;
6343
6344 btrfs_release_path(root, path);
6345
6346 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6347 lock_end, GFP_NOFS);
6348 ordered = btrfs_lookup_first_ordered_extent(inode,
6349 lock_end);
6350 if (ordered &&
6351 ordered->file_offset <= lock_end &&
6352 ordered->file_offset + ordered->len > lock_start) {
6353 unlock_extent(&BTRFS_I(inode)->io_tree,
6354 lock_start, lock_end, GFP_NOFS);
6355 btrfs_start_ordered_extent(inode, ordered, 1);
6356 btrfs_put_ordered_extent(ordered);
6357 key.offset += num_bytes;
6358 goto skip;
6359 }
6360 if (ordered)
6361 btrfs_put_ordered_extent(ordered);
6362
1a40e23b
ZY
6363 extent_locked = 1;
6364 continue;
6365 }
6366
6367 if (nr_extents == 1) {
6368 /* update extent pointer in place */
1a40e23b
ZY
6369 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6370 new_extents[0].disk_bytenr);
6371 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6372 new_extents[0].disk_num_bytes);
1a40e23b
ZY
6373 btrfs_mark_buffer_dirty(leaf);
6374
6375 btrfs_drop_extent_cache(inode, key.offset,
6376 key.offset + num_bytes - 1, 0);
6377
6378 ret = btrfs_inc_extent_ref(trans, root,
6379 new_extents[0].disk_bytenr,
6380 new_extents[0].disk_num_bytes,
6381 leaf->start,
6382 root->root_key.objectid,
6383 trans->transid,
3bb1a1bc 6384 key.objectid);
1a40e23b
ZY
6385 BUG_ON(ret);
6386
6387 ret = btrfs_free_extent(trans, root,
6388 extent_key->objectid,
6389 extent_key->offset,
6390 leaf->start,
6391 btrfs_header_owner(leaf),
6392 btrfs_header_generation(leaf),
3bb1a1bc 6393 key.objectid, 0);
1a40e23b
ZY
6394 BUG_ON(ret);
6395
6396 btrfs_release_path(root, path);
6397 key.offset += num_bytes;
6398 } else {
d899e052
YZ
6399 BUG_ON(1);
6400#if 0
1a40e23b
ZY
6401 u64 alloc_hint;
6402 u64 extent_len;
6403 int i;
6404 /*
6405 * drop the old extent pointer first, then insert the
6406 * new pointers one by one
6407 */
6408 btrfs_release_path(root, path);
6409 ret = btrfs_drop_extents(trans, root, inode, key.offset,
6410 key.offset + num_bytes,
6411 key.offset, &alloc_hint);
6412 BUG_ON(ret);
6413
6414 for (i = 0; i < nr_extents; i++) {
6415 if (ext_offset >= new_extents[i].num_bytes) {
6416 ext_offset -= new_extents[i].num_bytes;
6417 continue;
6418 }
6419 extent_len = min(new_extents[i].num_bytes -
6420 ext_offset, num_bytes);
6421
6422 ret = btrfs_insert_empty_item(trans, root,
6423 path, &key,
6424 sizeof(*fi));
6425 BUG_ON(ret);
6426
6427 leaf = path->nodes[0];
6428 fi = btrfs_item_ptr(leaf, path->slots[0],
6429 struct btrfs_file_extent_item);
6430 btrfs_set_file_extent_generation(leaf, fi,
6431 trans->transid);
6432 btrfs_set_file_extent_type(leaf, fi,
6433 BTRFS_FILE_EXTENT_REG);
6434 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6435 new_extents[i].disk_bytenr);
6436 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6437 new_extents[i].disk_num_bytes);
c8b97818
CM
6438 btrfs_set_file_extent_ram_bytes(leaf, fi,
6439 new_extents[i].ram_bytes);
6440
6441 btrfs_set_file_extent_compression(leaf, fi,
6442 new_extents[i].compression);
6443 btrfs_set_file_extent_encryption(leaf, fi,
6444 new_extents[i].encryption);
6445 btrfs_set_file_extent_other_encoding(leaf, fi,
6446 new_extents[i].other_encoding);
6447
1a40e23b
ZY
6448 btrfs_set_file_extent_num_bytes(leaf, fi,
6449 extent_len);
6450 ext_offset += new_extents[i].offset;
6451 btrfs_set_file_extent_offset(leaf, fi,
6452 ext_offset);
6453 btrfs_mark_buffer_dirty(leaf);
6454
6455 btrfs_drop_extent_cache(inode, key.offset,
6456 key.offset + extent_len - 1, 0);
6457
6458 ret = btrfs_inc_extent_ref(trans, root,
6459 new_extents[i].disk_bytenr,
6460 new_extents[i].disk_num_bytes,
6461 leaf->start,
6462 root->root_key.objectid,
3bb1a1bc 6463 trans->transid, key.objectid);
1a40e23b
ZY
6464 BUG_ON(ret);
6465 btrfs_release_path(root, path);
6466
a76a3cd4 6467 inode_add_bytes(inode, extent_len);
1a40e23b
ZY
6468
6469 ext_offset = 0;
6470 num_bytes -= extent_len;
6471 key.offset += extent_len;
6472
6473 if (num_bytes == 0)
6474 break;
6475 }
6476 BUG_ON(i >= nr_extents);
d899e052 6477#endif
1a40e23b
ZY
6478 }
6479
6480 if (extent_locked) {
1a40e23b
ZY
6481 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6482 lock_end, GFP_NOFS);
6483 extent_locked = 0;
6484 }
6485skip:
6486 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
86288a19 6487 key.offset >= search_end)
1a40e23b
ZY
6488 break;
6489
6490 cond_resched();
6491 }
6492 ret = 0;
6493out:
6494 btrfs_release_path(root, path);
6495 if (inode) {
6496 mutex_unlock(&inode->i_mutex);
6497 if (extent_locked) {
1a40e23b
ZY
6498 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
6499 lock_end, GFP_NOFS);
6500 }
6501 iput(inode);
6502 }
6503 return ret;
6504}
6505
1a40e23b
ZY
6506int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
6507 struct btrfs_root *root,
6508 struct extent_buffer *buf, u64 orig_start)
6509{
6510 int level;
6511 int ret;
6512
6513 BUG_ON(btrfs_header_generation(buf) != trans->transid);
6514 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6515
6516 level = btrfs_header_level(buf);
6517 if (level == 0) {
6518 struct btrfs_leaf_ref *ref;
6519 struct btrfs_leaf_ref *orig_ref;
6520
6521 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
6522 if (!orig_ref)
6523 return -ENOENT;
6524
6525 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
6526 if (!ref) {
6527 btrfs_free_leaf_ref(root, orig_ref);
6528 return -ENOMEM;
6529 }
6530
6531 ref->nritems = orig_ref->nritems;
6532 memcpy(ref->extents, orig_ref->extents,
6533 sizeof(ref->extents[0]) * ref->nritems);
6534
6535 btrfs_free_leaf_ref(root, orig_ref);
6536
6537 ref->root_gen = trans->transid;
6538 ref->bytenr = buf->start;
6539 ref->owner = btrfs_header_owner(buf);
6540 ref->generation = btrfs_header_generation(buf);
bd56b302 6541
1a40e23b
ZY
6542 ret = btrfs_add_leaf_ref(root, ref, 0);
6543 WARN_ON(ret);
6544 btrfs_free_leaf_ref(root, ref);
6545 }
6546 return 0;
6547}
6548
d397712b 6549static noinline int invalidate_extent_cache(struct btrfs_root *root,
1a40e23b
ZY
6550 struct extent_buffer *leaf,
6551 struct btrfs_block_group_cache *group,
6552 struct btrfs_root *target_root)
6553{
6554 struct btrfs_key key;
6555 struct inode *inode = NULL;
6556 struct btrfs_file_extent_item *fi;
6557 u64 num_bytes;
6558 u64 skip_objectid = 0;
6559 u32 nritems;
6560 u32 i;
6561
6562 nritems = btrfs_header_nritems(leaf);
6563 for (i = 0; i < nritems; i++) {
6564 btrfs_item_key_to_cpu(leaf, &key, i);
6565 if (key.objectid == skip_objectid ||
6566 key.type != BTRFS_EXTENT_DATA_KEY)
6567 continue;
6568 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6569 if (btrfs_file_extent_type(leaf, fi) ==
6570 BTRFS_FILE_EXTENT_INLINE)
6571 continue;
6572 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6573 continue;
6574 if (!inode || inode->i_ino != key.objectid) {
6575 iput(inode);
6576 inode = btrfs_ilookup(target_root->fs_info->sb,
6577 key.objectid, target_root, 1);
6578 }
6579 if (!inode) {
6580 skip_objectid = key.objectid;
6581 continue;
6582 }
6583 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6584
6585 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6586 key.offset + num_bytes - 1, GFP_NOFS);
1a40e23b
ZY
6587 btrfs_drop_extent_cache(inode, key.offset,
6588 key.offset + num_bytes - 1, 1);
1a40e23b
ZY
6589 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
6590 key.offset + num_bytes - 1, GFP_NOFS);
6591 cond_resched();
6592 }
6593 iput(inode);
6594 return 0;
6595}
6596
d397712b 6597static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6598 struct btrfs_root *root,
6599 struct extent_buffer *leaf,
6600 struct btrfs_block_group_cache *group,
6601 struct inode *reloc_inode)
6602{
6603 struct btrfs_key key;
6604 struct btrfs_key extent_key;
6605 struct btrfs_file_extent_item *fi;
6606 struct btrfs_leaf_ref *ref;
6607 struct disk_extent *new_extent;
6608 u64 bytenr;
6609 u64 num_bytes;
6610 u32 nritems;
6611 u32 i;
6612 int ext_index;
6613 int nr_extent;
6614 int ret;
6615
6616 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
6617 BUG_ON(!new_extent);
6618
6619 ref = btrfs_lookup_leaf_ref(root, leaf->start);
6620 BUG_ON(!ref);
6621
6622 ext_index = -1;
6623 nritems = btrfs_header_nritems(leaf);
6624 for (i = 0; i < nritems; i++) {
6625 btrfs_item_key_to_cpu(leaf, &key, i);
6626 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
6627 continue;
6628 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
6629 if (btrfs_file_extent_type(leaf, fi) ==
6630 BTRFS_FILE_EXTENT_INLINE)
6631 continue;
6632 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
6633 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
6634 if (bytenr == 0)
6635 continue;
6636
6637 ext_index++;
6638 if (bytenr >= group->key.objectid + group->key.offset ||
6639 bytenr + num_bytes <= group->key.objectid)
6640 continue;
6641
6642 extent_key.objectid = bytenr;
6643 extent_key.offset = num_bytes;
6644 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
6645 nr_extent = 1;
6646 ret = get_new_locations(reloc_inode, &extent_key,
6647 group->key.objectid, 1,
6648 &new_extent, &nr_extent);
6649 if (ret > 0)
6650 continue;
6651 BUG_ON(ret < 0);
6652
6653 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
6654 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
6655 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
6656 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
6657
1a40e23b
ZY
6658 btrfs_set_file_extent_disk_bytenr(leaf, fi,
6659 new_extent->disk_bytenr);
6660 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
6661 new_extent->disk_num_bytes);
1a40e23b
ZY
6662 btrfs_mark_buffer_dirty(leaf);
6663
6664 ret = btrfs_inc_extent_ref(trans, root,
6665 new_extent->disk_bytenr,
6666 new_extent->disk_num_bytes,
6667 leaf->start,
6668 root->root_key.objectid,
3bb1a1bc 6669 trans->transid, key.objectid);
1a40e23b 6670 BUG_ON(ret);
56bec294 6671
1a40e23b
ZY
6672 ret = btrfs_free_extent(trans, root,
6673 bytenr, num_bytes, leaf->start,
6674 btrfs_header_owner(leaf),
6675 btrfs_header_generation(leaf),
3bb1a1bc 6676 key.objectid, 0);
1a40e23b
ZY
6677 BUG_ON(ret);
6678 cond_resched();
6679 }
6680 kfree(new_extent);
6681 BUG_ON(ext_index + 1 != ref->nritems);
6682 btrfs_free_leaf_ref(root, ref);
6683 return 0;
6684}
6685
f82d02d9
YZ
6686int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
6687 struct btrfs_root *root)
1a40e23b
ZY
6688{
6689 struct btrfs_root *reloc_root;
f82d02d9 6690 int ret;
1a40e23b
ZY
6691
6692 if (root->reloc_root) {
6693 reloc_root = root->reloc_root;
6694 root->reloc_root = NULL;
6695 list_add(&reloc_root->dead_list,
6696 &root->fs_info->dead_reloc_roots);
f82d02d9
YZ
6697
6698 btrfs_set_root_bytenr(&reloc_root->root_item,
6699 reloc_root->node->start);
6700 btrfs_set_root_level(&root->root_item,
6701 btrfs_header_level(reloc_root->node));
6702 memset(&reloc_root->root_item.drop_progress, 0,
6703 sizeof(struct btrfs_disk_key));
6704 reloc_root->root_item.drop_level = 0;
6705
6706 ret = btrfs_update_root(trans, root->fs_info->tree_root,
6707 &reloc_root->root_key,
6708 &reloc_root->root_item);
6709 BUG_ON(ret);
1a40e23b
ZY
6710 }
6711 return 0;
6712}
6713
6714int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
6715{
6716 struct btrfs_trans_handle *trans;
6717 struct btrfs_root *reloc_root;
6718 struct btrfs_root *prev_root = NULL;
6719 struct list_head dead_roots;
6720 int ret;
6721 unsigned long nr;
6722
6723 INIT_LIST_HEAD(&dead_roots);
6724 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
6725
6726 while (!list_empty(&dead_roots)) {
6727 reloc_root = list_entry(dead_roots.prev,
6728 struct btrfs_root, dead_list);
6729 list_del_init(&reloc_root->dead_list);
6730
6731 BUG_ON(reloc_root->commit_root != NULL);
6732 while (1) {
6733 trans = btrfs_join_transaction(root, 1);
6734 BUG_ON(!trans);
6735
6736 mutex_lock(&root->fs_info->drop_mutex);
6737 ret = btrfs_drop_snapshot(trans, reloc_root);
6738 if (ret != -EAGAIN)
6739 break;
6740 mutex_unlock(&root->fs_info->drop_mutex);
6741
6742 nr = trans->blocks_used;
6743 ret = btrfs_end_transaction(trans, root);
6744 BUG_ON(ret);
6745 btrfs_btree_balance_dirty(root, nr);
6746 }
6747
6748 free_extent_buffer(reloc_root->node);
6749
6750 ret = btrfs_del_root(trans, root->fs_info->tree_root,
6751 &reloc_root->root_key);
6752 BUG_ON(ret);
6753 mutex_unlock(&root->fs_info->drop_mutex);
6754
6755 nr = trans->blocks_used;
6756 ret = btrfs_end_transaction(trans, root);
6757 BUG_ON(ret);
6758 btrfs_btree_balance_dirty(root, nr);
6759
6760 kfree(prev_root);
6761 prev_root = reloc_root;
6762 }
6763 if (prev_root) {
6764 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
6765 kfree(prev_root);
6766 }
6767 return 0;
6768}
6769
6770int btrfs_add_dead_reloc_root(struct btrfs_root *root)
6771{
6772 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
6773 return 0;
6774}
6775
6776int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
6777{
6778 struct btrfs_root *reloc_root;
6779 struct btrfs_trans_handle *trans;
6780 struct btrfs_key location;
6781 int found;
6782 int ret;
6783
6784 mutex_lock(&root->fs_info->tree_reloc_mutex);
6785 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
6786 BUG_ON(ret);
6787 found = !list_empty(&root->fs_info->dead_reloc_roots);
6788 mutex_unlock(&root->fs_info->tree_reloc_mutex);
6789
6790 if (found) {
6791 trans = btrfs_start_transaction(root, 1);
6792 BUG_ON(!trans);
6793 ret = btrfs_commit_transaction(trans, root);
6794 BUG_ON(ret);
6795 }
6796
6797 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
6798 location.offset = (u64)-1;
6799 location.type = BTRFS_ROOT_ITEM_KEY;
6800
6801 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
6802 BUG_ON(!reloc_root);
6803 btrfs_orphan_cleanup(reloc_root);
6804 return 0;
6805}
6806
d397712b 6807static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6808 struct btrfs_root *root)
6809{
6810 struct btrfs_root *reloc_root;
6811 struct extent_buffer *eb;
6812 struct btrfs_root_item *root_item;
6813 struct btrfs_key root_key;
6814 int ret;
6815
6816 BUG_ON(!root->ref_cows);
6817 if (root->reloc_root)
6818 return 0;
6819
6820 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
6821 BUG_ON(!root_item);
6822
6823 ret = btrfs_copy_root(trans, root, root->commit_root,
6824 &eb, BTRFS_TREE_RELOC_OBJECTID);
6825 BUG_ON(ret);
6826
6827 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
6828 root_key.offset = root->root_key.objectid;
6829 root_key.type = BTRFS_ROOT_ITEM_KEY;
6830
6831 memcpy(root_item, &root->root_item, sizeof(root_item));
6832 btrfs_set_root_refs(root_item, 0);
6833 btrfs_set_root_bytenr(root_item, eb->start);
6834 btrfs_set_root_level(root_item, btrfs_header_level(eb));
84234f3a 6835 btrfs_set_root_generation(root_item, trans->transid);
1a40e23b
ZY
6836
6837 btrfs_tree_unlock(eb);
6838 free_extent_buffer(eb);
6839
6840 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
6841 &root_key, root_item);
6842 BUG_ON(ret);
6843 kfree(root_item);
6844
6845 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
6846 &root_key);
6847 BUG_ON(!reloc_root);
6848 reloc_root->last_trans = trans->transid;
6849 reloc_root->commit_root = NULL;
6850 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
6851
6852 root->reloc_root = reloc_root;
6853 return 0;
6854}
6855
6856/*
6857 * Core function of space balance.
6858 *
6859 * The idea is to use reloc trees to relocate tree blocks in reference
f82d02d9
YZ
6860 * counted roots. There is one reloc tree for each subvol, and all
6861 * reloc trees share the same root key objectid. Reloc trees are snapshots
6862 * of the latest committed roots of subvols (root->commit_root).
6863 *
6864 * To relocate a tree block referenced by a subvol, there are two steps.
6865 * COW the block through the subvol's reloc tree, then update the block
6866 * pointer in the subvol to point to the new block. Since all reloc trees
6867 * share the same root key objectid, doing special handling for tree blocks
6868 * owned by them is easy. Once a tree block has been COWed in one reloc
6869 * tree, we can use the resulting new block directly when the same block
6870 * needs to be COWed again through another reloc tree. In this way, relocated
6871 * tree blocks are shared between reloc trees, so they are also shared
6872 * between subvols.
1a40e23b 6873 */
d397712b 6874static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6875 struct btrfs_root *root,
6876 struct btrfs_path *path,
6877 struct btrfs_key *first_key,
6878 struct btrfs_ref_path *ref_path,
6879 struct btrfs_block_group_cache *group,
6880 struct inode *reloc_inode)
6881{
6882 struct btrfs_root *reloc_root;
6883 struct extent_buffer *eb = NULL;
6884 struct btrfs_key *keys;
6885 u64 *nodes;
6886 int level;
f82d02d9 6887 int shared_level;
1a40e23b 6888 int lowest_level = 0;
1a40e23b
ZY
6889 int ret;
6890
6891 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
6892 lowest_level = ref_path->owner_objectid;
6893
f82d02d9 6894 if (!root->ref_cows) {
1a40e23b
ZY
6895 path->lowest_level = lowest_level;
6896 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
6897 BUG_ON(ret < 0);
6898 path->lowest_level = 0;
6899 btrfs_release_path(root, path);
6900 return 0;
6901 }
6902
1a40e23b
ZY
6903 mutex_lock(&root->fs_info->tree_reloc_mutex);
6904 ret = init_reloc_tree(trans, root);
6905 BUG_ON(ret);
6906 reloc_root = root->reloc_root;
6907
f82d02d9
YZ
6908 shared_level = ref_path->shared_level;
6909 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
1a40e23b 6910
f82d02d9
YZ
6911 keys = ref_path->node_keys;
6912 nodes = ref_path->new_nodes;
6913 memset(&keys[shared_level + 1], 0,
6914 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
6915 memset(&nodes[shared_level + 1], 0,
6916 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
1a40e23b 6917
f82d02d9
YZ
6918 if (nodes[lowest_level] == 0) {
6919 path->lowest_level = lowest_level;
6920 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6921 0, 1);
6922 BUG_ON(ret);
6923 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
6924 eb = path->nodes[level];
6925 if (!eb || eb == reloc_root->node)
6926 break;
6927 nodes[level] = eb->start;
6928 if (level == 0)
6929 btrfs_item_key_to_cpu(eb, &keys[level], 0);
6930 else
6931 btrfs_node_key_to_cpu(eb, &keys[level], 0);
6932 }
2b82032c
YZ
6933 if (nodes[0] &&
6934 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
f82d02d9
YZ
6935 eb = path->nodes[0];
6936 ret = replace_extents_in_leaf(trans, reloc_root, eb,
6937 group, reloc_inode);
6938 BUG_ON(ret);
6939 }
6940 btrfs_release_path(reloc_root, path);
6941 } else {
1a40e23b 6942 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
f82d02d9 6943 lowest_level);
1a40e23b
ZY
6944 BUG_ON(ret);
6945 }
6946
1a40e23b
ZY
6947 /*
6948 * replace tree blocks in the fs tree with tree blocks in
6949 * the reloc tree.
6950 */
6951 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
6952 BUG_ON(ret < 0);
6953
6954 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
f82d02d9
YZ
6955 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
6956 0, 0);
6957 BUG_ON(ret);
6958 extent_buffer_get(path->nodes[0]);
6959 eb = path->nodes[0];
6960 btrfs_release_path(reloc_root, path);
1a40e23b
ZY
6961 ret = invalidate_extent_cache(reloc_root, eb, group, root);
6962 BUG_ON(ret);
6963 free_extent_buffer(eb);
6964 }
1a40e23b 6965
f82d02d9 6966 mutex_unlock(&root->fs_info->tree_reloc_mutex);
1a40e23b 6967 path->lowest_level = 0;
1a40e23b
ZY
6968 return 0;
6969}
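/*
 * Illustration only -- not part of the btrfs code.  The comment above says
 * that once a tree block has been COWed through one reloc tree, every other
 * reloc tree reuses the resulting copy.  The user-space sketch below models
 * that with a small old-bytenr -> new-bytenr table; reloc_cow_once() and the
 * fake "+ 0x100000" allocation are made up for the example.
 */
#if 0 /* standalone illustration */
#include <stdio.h>

#define MAX_RELOCATED 64

static unsigned long long old_bytenr[MAX_RELOCATED];
static unsigned long long new_bytenr[MAX_RELOCATED];
static int nr_relocated;

/* return the relocated copy of 'bytenr', creating it only the first time */
static unsigned long long reloc_cow_once(unsigned long long bytenr)
{
	int i;

	for (i = 0; i < nr_relocated; i++)
		if (old_bytenr[i] == bytenr)
			return new_bytenr[i];	/* shared between reloc trees */

	if (nr_relocated == MAX_RELOCATED)
		return 0;			/* table full in this toy model */

	old_bytenr[nr_relocated] = bytenr;
	new_bytenr[nr_relocated] = bytenr + 0x100000; /* pretend allocation */
	return new_bytenr[nr_relocated++];
}

int main(void)
{
	/* two subvols referencing the same block get the same new copy */
	printf("%llx\n", reloc_cow_once(0x4000));
	printf("%llx\n", reloc_cow_once(0x4000));
	return 0;
}
#endif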
6970
d397712b 6971static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6972 struct btrfs_root *root,
6973 struct btrfs_path *path,
6974 struct btrfs_key *first_key,
6975 struct btrfs_ref_path *ref_path)
6976{
6977 int ret;
1a40e23b
ZY
6978
6979 ret = relocate_one_path(trans, root, path, first_key,
6980 ref_path, NULL, NULL);
6981 BUG_ON(ret);
6982
1a40e23b
ZY
6983 return 0;
6984}
6985
d397712b 6986static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
1a40e23b
ZY
6987 struct btrfs_root *extent_root,
6988 struct btrfs_path *path,
6989 struct btrfs_key *extent_key)
6990{
6991 int ret;
6992
1a40e23b
ZY
6993 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
6994 if (ret)
6995 goto out;
6996 ret = btrfs_del_item(trans, extent_root, path);
6997out:
6998 btrfs_release_path(extent_root, path);
1a40e23b
ZY
6999 return ret;
7000}
7001
d397712b 7002static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
1a40e23b
ZY
7003 struct btrfs_ref_path *ref_path)
7004{
7005 struct btrfs_key root_key;
7006
7007 root_key.objectid = ref_path->root_objectid;
7008 root_key.type = BTRFS_ROOT_ITEM_KEY;
7009 if (is_cowonly_root(ref_path->root_objectid))
7010 root_key.offset = 0;
7011 else
7012 root_key.offset = (u64)-1;
7013
7014 return btrfs_read_fs_root_no_name(fs_info, &root_key);
7015}
7016
d397712b 7017static noinline int relocate_one_extent(struct btrfs_root *extent_root,
1a40e23b
ZY
7018 struct btrfs_path *path,
7019 struct btrfs_key *extent_key,
7020 struct btrfs_block_group_cache *group,
7021 struct inode *reloc_inode, int pass)
7022{
7023 struct btrfs_trans_handle *trans;
7024 struct btrfs_root *found_root;
7025 struct btrfs_ref_path *ref_path = NULL;
7026 struct disk_extent *new_extents = NULL;
7027 int nr_extents = 0;
7028 int loops;
7029 int ret;
7030 int level;
7031 struct btrfs_key first_key;
7032 u64 prev_block = 0;
7033
1a40e23b
ZY
7034
7035 trans = btrfs_start_transaction(extent_root, 1);
7036 BUG_ON(!trans);
7037
7038 if (extent_key->objectid == 0) {
7039 ret = del_extent_zero(trans, extent_root, path, extent_key);
7040 goto out;
7041 }
7042
7043 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7044 if (!ref_path) {
d397712b
CM
7045 ret = -ENOMEM;
7046 goto out;
1a40e23b
ZY
7047 }
7048
7049 for (loops = 0; ; loops++) {
7050 if (loops == 0) {
7051 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7052 extent_key->objectid);
7053 } else {
7054 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7055 }
7056 if (ret < 0)
7057 goto out;
7058 if (ret > 0)
7059 break;
7060
7061 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7062 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7063 continue;
7064
7065 found_root = read_ref_root(extent_root->fs_info, ref_path);
7066 BUG_ON(!found_root);
7067 /*
7068 * for a reference counted tree, only process reference paths
7069 * rooted at the latest committed root.
7070 */
7071 if (found_root->ref_cows &&
7072 ref_path->root_generation != found_root->root_key.offset)
7073 continue;
7074
7075 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7076 if (pass == 0) {
7077 /*
7078 * copy data extents to new locations
7079 */
7080 u64 group_start = group->key.objectid;
7081 ret = relocate_data_extent(reloc_inode,
7082 extent_key,
7083 group_start);
7084 if (ret < 0)
7085 goto out;
7086 break;
7087 }
7088 level = 0;
7089 } else {
7090 level = ref_path->owner_objectid;
7091 }
7092
7093 if (prev_block != ref_path->nodes[level]) {
7094 struct extent_buffer *eb;
7095 u64 block_start = ref_path->nodes[level];
7096 u64 block_size = btrfs_level_size(found_root, level);
7097
7098 eb = read_tree_block(found_root, block_start,
7099 block_size, 0);
7100 btrfs_tree_lock(eb);
7101 BUG_ON(level != btrfs_header_level(eb));
7102
7103 if (level == 0)
7104 btrfs_item_key_to_cpu(eb, &first_key, 0);
7105 else
7106 btrfs_node_key_to_cpu(eb, &first_key, 0);
7107
7108 btrfs_tree_unlock(eb);
7109 free_extent_buffer(eb);
7110 prev_block = block_start;
7111 }
7112
24562425 7113 mutex_lock(&extent_root->fs_info->trans_mutex);
e4404d6e 7114 btrfs_record_root_in_trans(found_root);
24562425 7115 mutex_unlock(&extent_root->fs_info->trans_mutex);
e4404d6e
YZ
7116 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7117 /*
7118 * try to update data extent references while
7119 * keeping metadata shared between snapshots.
7120 */
7121 if (pass == 1) {
7122 ret = relocate_one_path(trans, found_root,
7123 path, &first_key, ref_path,
7124 group, reloc_inode);
7125 if (ret < 0)
7126 goto out;
7127 continue;
7128 }
1a40e23b
ZY
7129 /*
7130 * use fallback method to process the remaining
7131 * references.
7132 */
7133 if (!new_extents) {
7134 u64 group_start = group->key.objectid;
d899e052
YZ
7135 new_extents = kmalloc(sizeof(*new_extents),
7136 GFP_NOFS);
7137 nr_extents = 1;
1a40e23b
ZY
7138 ret = get_new_locations(reloc_inode,
7139 extent_key,
d899e052 7140 group_start, 1,
7141 &new_extents,
7142 &nr_extents);
d899e052 7143 if (ret)
7144 goto out;
7145 }
1a40e23b
ZY
7146 ret = replace_one_extent(trans, found_root,
7147 path, extent_key,
7148 &first_key, ref_path,
7149 new_extents, nr_extents);
e4404d6e 7150 } else {
1a40e23b
ZY
7151 ret = relocate_tree_block(trans, found_root, path,
7152 &first_key, ref_path);
1a40e23b
ZY
7153 }
7154 if (ret < 0)
7155 goto out;
7156 }
7157 ret = 0;
7158out:
7159 btrfs_end_transaction(trans, extent_root);
7160 kfree(new_extents);
7161 kfree(ref_path);
1a40e23b
ZY
7162 return ret;
7163}
5d4f98a2 7164#endif
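/*
 * Overview of the relocation helpers above (summary wording is the
 * editor's; the behaviour described is taken from the code itself): each
 * extent in the shrinking block group is walked by relocate_one_extent().
 * On pass 0 data extents are copied into the relocation inode via
 * relocate_data_extent(); on pass 1 the file extent references are
 * rewritten in place with relocate_one_path() so metadata stays shared
 * between snapshots; whatever references remain are handled by the
 * get_new_locations() + replace_one_extent() fallback, and tree blocks go
 * through relocate_tree_block().
 */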
1a40e23b 7165
ec44a35c
CM
7166static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7167{
7168 u64 num_devices;
7169 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7170 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7171
2b82032c 7172 num_devices = root->fs_info->fs_devices->rw_devices;
ec44a35c
CM
7173 if (num_devices == 1) {
7174 stripped |= BTRFS_BLOCK_GROUP_DUP;
7175 stripped = flags & ~stripped;
7176
7177 /* turn raid0 into single device chunks */
7178 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7179 return stripped;
7180
7181 /* turn mirroring into duplication */
7182 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7183 BTRFS_BLOCK_GROUP_RAID10))
7184 return stripped | BTRFS_BLOCK_GROUP_DUP;
7185 return flags;
7186 } else {
7187 /* they already had raid on here, just return */
ec44a35c
CM
7188 if (flags & stripped)
7189 return flags;
7190
7191 stripped |= BTRFS_BLOCK_GROUP_DUP;
7192 stripped = flags & ~stripped;
7193
7194 /* switch duplicated blocks with raid1 */
7195 if (flags & BTRFS_BLOCK_GROUP_DUP)
7196 return stripped | BTRFS_BLOCK_GROUP_RAID1;
7197
7198 /* turn single device chunks into raid0 */
7199 return stripped | BTRFS_BLOCK_GROUP_RAID0;
7200 }
7201 return flags;
7202}
7203
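The conversion rules above are easiest to see with concrete flag values. Below is a small stand-alone sketch (not kernel code) that mirrors the same decision logic; the BLOCK_GROUP_* macros and convert_flags() are local names introduced here so the example compiles on its own, with bit values intended to match the ctree.h definitions of this era.

#include <stdio.h>
#include <stdint.h>

/* local copies of the profile bits (illustrative only) */
#define BLOCK_GROUP_RAID0  (1ULL << 3)
#define BLOCK_GROUP_RAID1  (1ULL << 4)
#define BLOCK_GROUP_DUP    (1ULL << 5)
#define BLOCK_GROUP_RAID10 (1ULL << 6)

/* same decision logic as update_block_group_flags() above */
static uint64_t convert_flags(uint64_t flags, uint64_t num_devices)
{
	uint64_t stripped = BLOCK_GROUP_RAID0 | BLOCK_GROUP_RAID1 |
			    BLOCK_GROUP_RAID10;

	if (num_devices == 1) {
		stripped |= BLOCK_GROUP_DUP;
		stripped = flags & ~stripped;

		if (flags & BLOCK_GROUP_RAID0)		/* raid0 -> single */
			return stripped;
		if (flags & (BLOCK_GROUP_RAID1 | BLOCK_GROUP_RAID10))
			return stripped | BLOCK_GROUP_DUP; /* mirror -> dup */
		return flags;
	}

	if (flags & stripped)				/* already raid */
		return flags;

	stripped |= BLOCK_GROUP_DUP;
	stripped = flags & ~stripped;

	if (flags & BLOCK_GROUP_DUP)			/* dup -> raid1 */
		return stripped | BLOCK_GROUP_RAID1;
	return stripped | BLOCK_GROUP_RAID0;		/* single -> raid0 */
}

int main(void)
{
	/* RAID1 on a filesystem reduced to one device becomes DUP */
	printf("%llx\n", (unsigned long long)convert_flags(BLOCK_GROUP_RAID1, 1));
	/* DUP gains a second device and becomes RAID1 */
	printf("%llx\n", (unsigned long long)convert_flags(BLOCK_GROUP_DUP, 2));
	return 0;
}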
b2950863 7204static int __alloc_chunk_for_shrink(struct btrfs_root *root,
7205 struct btrfs_block_group_cache *shrink_block_group,
7206 int force)
7207{
7208 struct btrfs_trans_handle *trans;
7209 u64 new_alloc_flags;
7210 u64 calc;
7211
c286ac48 7212 spin_lock(&shrink_block_group->lock);
5d4f98a2
YZ
7213 if (btrfs_block_group_used(&shrink_block_group->item) +
7214 shrink_block_group->reserved > 0) {
c286ac48 7215 spin_unlock(&shrink_block_group->lock);
c286ac48 7216
0ef3e66b 7217 trans = btrfs_start_transaction(root, 1);
c286ac48 7218 spin_lock(&shrink_block_group->lock);
7d9eb12c 7219
0ef3e66b
CM
7220 new_alloc_flags = update_block_group_flags(root,
7221 shrink_block_group->flags);
7222 if (new_alloc_flags != shrink_block_group->flags) {
7223 calc =
7224 btrfs_block_group_used(&shrink_block_group->item);
7225 } else {
7226 calc = shrink_block_group->key.offset;
7227 }
c286ac48
CM
7228 spin_unlock(&shrink_block_group->lock);
7229
0ef3e66b
CM
7230 do_chunk_alloc(trans, root->fs_info->extent_root,
7231 calc + 2 * 1024 * 1024, new_alloc_flags, force);
7d9eb12c 7232
0ef3e66b 7233 btrfs_end_transaction(trans, root);
c286ac48
CM
7234 } else
7235 spin_unlock(&shrink_block_group->lock);
0ef3e66b
CM
7236 return 0;
7237}
7238
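/*
 * Note on __alloc_chunk_for_shrink() above: before a block group is
 * drained, a replacement chunk is allocated up front (the group's used
 * bytes plus a 2MB cushion) so the live extents have somewhere to land,
 * and the flags are first run through update_block_group_flags() so the
 * new chunk uses a profile the current number of writeable devices can
 * actually satisfy.
 */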
5d4f98a2
YZ
7239
7240int btrfs_prepare_block_group_relocation(struct btrfs_root *root,
7241 struct btrfs_block_group_cache *group)
7243{
7244 __alloc_chunk_for_shrink(root, group, 1);
7245 set_block_group_readonly(group);
7246 return 0;
7247}
7248
ba1bf481
JB
7249/*
7250 * checks to see if it's even possible to relocate this block group.
7251 *
7252 * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
7253 * ok to go ahead and try.
7254 */
7255int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
1a40e23b 7256{
ba1bf481
JB
7257 struct btrfs_block_group_cache *block_group;
7258 struct btrfs_space_info *space_info;
7259 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
7260 struct btrfs_device *device;
7261 int full = 0;
7262 int ret = 0;
1a40e23b 7263
ba1bf481 7264 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1a40e23b 7265
ba1bf481
JB
7266 /* odd, couldn't find the block group, leave it alone */
7267 if (!block_group)
7268 return -1;
1a40e23b 7269
ba1bf481
JB
7270 /* no bytes used, we're good */
7271 if (!btrfs_block_group_used(&block_group->item))
7272 goto out;
7273
ba1bf481
JB
7274 space_info = block_group->space_info;
7275 spin_lock(&space_info->lock);
17d217fe 7276
ba1bf481 7277 full = space_info->full;
17d217fe 7278
ba1bf481
JB
7279 /*
7280 * if this is the last block group we have in this space, we can't
7281 * relocate it unless we're able to allocate a new chunk below.
7282 *
7283 * Otherwise, we need to make sure we have room in the space to handle
7284 * all of the extents from this block group. If we can, we're good
ba1bf481 7285 */
7ce618db
CM
7286 if ((space_info->total_bytes != block_group->key.offset) &&
7287 (space_info->bytes_used + space_info->bytes_reserved +
7288 space_info->bytes_pinned + space_info->bytes_readonly +
7289 btrfs_block_group_used(&block_group->item) <
7ce618db 7290 space_info->total_bytes)) {
ba1bf481
JB
7291 spin_unlock(&space_info->lock);
7292 goto out;
17d217fe 7293 }
ba1bf481 7294 spin_unlock(&space_info->lock);
ea8c2819 7295
ba1bf481
JB
7296 /*
7297 * ok we don't have enough space, but maybe we have free space on our
7298 * devices to allocate new chunks for relocation, so loop through our
7299 * alloc devices and guess if we have enough space. However, if we
7300 * were marked as full, then we know there aren't enough chunks, and we
7301 * can just return.
7302 */
7303 ret = -1;
7304 if (full)
7305 goto out;
ea8c2819 7306
ba1bf481
JB
7307 mutex_lock(&root->fs_info->chunk_mutex);
7308 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
7309 u64 min_free = btrfs_block_group_used(&block_group->item);
7310 u64 dev_offset, max_avail;
56bec294 7311
ba1bf481
JB
7312 /*
7313 * check to make sure we can actually find a chunk with enough
7314 * space to fit our block group in.
7315 */
7316 if (device->total_bytes > device->bytes_used + min_free) {
7317 ret = find_free_dev_extent(NULL, device, min_free,
7318 &dev_offset, &max_avail);
7319 if (!ret)
73e48b27 7320 break;
ba1bf481 7321 ret = -1;
725c8463 7322 }
edbd8d4e 7323 }
ba1bf481 7324 mutex_unlock(&root->fs_info->chunk_mutex);
edbd8d4e 7325out:
ba1bf481 7326 btrfs_put_block_group(block_group);
edbd8d4e
CM
7327 return ret;
7328}
7329
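As a rough illustration of the space check above, here is a stand-alone sketch (not kernel code) with made-up numbers; struct space_sample and room_in_space() are names introduced here, with fields mirroring btrfs_space_info. It models only the central inequality; the "last block group" and per-device free-space fallbacks in the real function are not reproduced.

#include <stdio.h>
#include <stdint.h>

struct space_sample {
	uint64_t total_bytes;
	uint64_t bytes_used;
	uint64_t bytes_reserved;
	uint64_t bytes_pinned;
	uint64_t bytes_readonly;
};

/* mirrors the inequality above: everything already accounted for, plus the
 * used bytes of the group being evacuated, must stay below the space total */
static int room_in_space(const struct space_sample *s, uint64_t group_used)
{
	return s->bytes_used + s->bytes_reserved + s->bytes_pinned +
	       s->bytes_readonly + group_used < s->total_bytes;
}

int main(void)
{
	struct space_sample s = {
		.total_bytes	= 10ULL << 30,	/* 10 GiB of data space */
		.bytes_used	= 6ULL << 30,
		.bytes_reserved	= 256ULL << 20,
		.bytes_pinned	= 128ULL << 20,
		.bytes_readonly	= 1ULL << 30,
	};

	/* a group with 600 MiB used: ~7.96 GiB of demand < 10 GiB -> ok */
	printf("room to relocate: %d\n", room_in_space(&s, 600ULL << 20));
	return 0;
}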
b2950863
CH
7330static int find_first_block_group(struct btrfs_root *root,
7331 struct btrfs_path *path, struct btrfs_key *key)
0b86a832 7332{
925baedd 7333 int ret = 0;
0b86a832
CM
7334 struct btrfs_key found_key;
7335 struct extent_buffer *leaf;
7336 int slot;
edbd8d4e 7337
0b86a832
CM
7338 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
7339 if (ret < 0)
925baedd
CM
7340 goto out;
7341
d397712b 7342 while (1) {
0b86a832 7343 slot = path->slots[0];
edbd8d4e 7344 leaf = path->nodes[0];
0b86a832
CM
7345 if (slot >= btrfs_header_nritems(leaf)) {
7346 ret = btrfs_next_leaf(root, path);
7347 if (ret == 0)
7348 continue;
7349 if (ret < 0)
925baedd 7350 goto out;
0b86a832 7351 break;
edbd8d4e 7352 }
0b86a832 7353 btrfs_item_key_to_cpu(leaf, &found_key, slot);
edbd8d4e 7354
0b86a832 7355 if (found_key.objectid >= key->objectid &&
7356 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
7357 ret = 0;
7358 goto out;
7359 }
0b86a832 7360 path->slots[0]++;
edbd8d4e 7361 }
0b86a832 7362 ret = -ENOENT;
925baedd 7363out:
0b86a832 7364 return ret;
edbd8d4e
CM
7365}
7366
1a40e23b
ZY
7367int btrfs_free_block_groups(struct btrfs_fs_info *info)
7368{
7369 struct btrfs_block_group_cache *block_group;
4184ea7f 7370 struct btrfs_space_info *space_info;
11833d66 7371 struct btrfs_caching_control *caching_ctl;
1a40e23b
ZY
7372 struct rb_node *n;
7373
11833d66
YZ
7374 down_write(&info->extent_commit_sem);
7375 while (!list_empty(&info->caching_block_groups)) {
7376 caching_ctl = list_entry(info->caching_block_groups.next,
7377 struct btrfs_caching_control, list);
7378 list_del(&caching_ctl->list);
7379 put_caching_control(caching_ctl);
7380 }
7381 up_write(&info->extent_commit_sem);
7382
1a40e23b
ZY
7383 spin_lock(&info->block_group_cache_lock);
7384 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
7385 block_group = rb_entry(n, struct btrfs_block_group_cache,
7386 cache_node);
1a40e23b
ZY
7387 rb_erase(&block_group->cache_node,
7388 &info->block_group_cache_tree);
d899e052
YZ
7389 spin_unlock(&info->block_group_cache_lock);
7390
80eb234a 7391 down_write(&block_group->space_info->groups_sem);
1a40e23b 7392 list_del(&block_group->list);
80eb234a 7393 up_write(&block_group->space_info->groups_sem);
d2fb3437 7394
817d52f8 7395 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 7396 wait_block_group_cache_done(block_group);
817d52f8
JB
7397
7398 btrfs_remove_free_space_cache(block_group);
7399
d2fb3437 7400 WARN_ON(atomic_read(&block_group->count) != 1);
1a40e23b 7401 kfree(block_group);
d899e052
YZ
7402
7403 spin_lock(&info->block_group_cache_lock);
1a40e23b
ZY
7404 }
7405 spin_unlock(&info->block_group_cache_lock);
4184ea7f
CM
7406
7407 /* now that all the block groups are freed, go through and
7408 * free all the space_info structs. This is only called during
7409 * the final stages of unmount, and so we know nobody is
7410 * using them. We call synchronize_rcu() once before we start,
7411 * just to be on the safe side.
7412 */
7413 synchronize_rcu();
7414
7415	while (!list_empty(&info->space_info)) {
7416 space_info = list_entry(info->space_info.next,
7417 struct btrfs_space_info,
7418 list);
7419
7420 list_del(&space_info->list);
7421 kfree(space_info);
7422 }
1a40e23b
ZY
7423 return 0;
7424}
7425
9078a3e1
CM
7426int btrfs_read_block_groups(struct btrfs_root *root)
7427{
7428 struct btrfs_path *path;
7429 int ret;
9078a3e1 7430 struct btrfs_block_group_cache *cache;
be744175 7431 struct btrfs_fs_info *info = root->fs_info;
6324fbf3 7432 struct btrfs_space_info *space_info;
9078a3e1
CM
7433 struct btrfs_key key;
7434 struct btrfs_key found_key;
5f39d397 7435 struct extent_buffer *leaf;
96b5179d 7436
be744175 7437 root = info->extent_root;
9078a3e1 7438 key.objectid = 0;
0b86a832 7439 key.offset = 0;
9078a3e1 7440 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
9078a3e1
CM
7441 path = btrfs_alloc_path();
7442 if (!path)
7443 return -ENOMEM;
7444
d397712b 7445 while (1) {
0b86a832
CM
7446 ret = find_first_block_group(root, path, &key);
7447 if (ret > 0) {
7448 ret = 0;
7449 goto error;
9078a3e1 7450 }
0b86a832
CM
7451 if (ret != 0)
7452 goto error;
7453
5f39d397
CM
7454 leaf = path->nodes[0];
7455 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8f18cf13 7456 cache = kzalloc(sizeof(*cache), GFP_NOFS);
9078a3e1 7457 if (!cache) {
0b86a832 7458 ret = -ENOMEM;
9078a3e1
CM
7459 break;
7460 }
3e1ad54f 7461
d2fb3437 7462 atomic_set(&cache->count, 1);
c286ac48 7463 spin_lock_init(&cache->lock);
6226cb0a 7464 spin_lock_init(&cache->tree_lock);
817d52f8 7465 cache->fs_info = info;
0f9dd46c 7466 INIT_LIST_HEAD(&cache->list);
fa9c0d79 7467 INIT_LIST_HEAD(&cache->cluster_list);
96303081
JB
7468
7469 /*
7470 * we only want to have 32k of ram per block group for keeping
7471 * track of free space, and if we pass 1/2 of that we want to
7472 * start converting things over to using bitmaps
7473 */
7474 cache->extents_thresh = ((1024 * 32) / 2) /
7475 sizeof(struct btrfs_free_space);
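		/*
		 * rough numbers (illustrative): 32k / 2 = 16384 bytes, and with
		 * sizeof(struct btrfs_free_space) on the order of a few tens of
		 * bytes on 64-bit, that allows a few hundred individually tracked
		 * free extents per block group before switching to bitmaps.
		 */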
7476
5f39d397
CM
7477 read_extent_buffer(leaf, &cache->item,
7478 btrfs_item_ptr_offset(leaf, path->slots[0]),
7479 sizeof(cache->item));
9078a3e1 7480 memcpy(&cache->key, &found_key, sizeof(found_key));
0b86a832 7481
9078a3e1
CM
7482 key.objectid = found_key.objectid + found_key.offset;
7483 btrfs_release_path(root, path);
0b86a832 7484 cache->flags = btrfs_block_group_flags(&cache->item);
817d52f8
JB
7485 cache->sectorsize = root->sectorsize;
7486
817d52f8
JB
7487 /*
7488 * check for two cases, either we are full, and therefore
7489 * don't need to bother with the caching work since we won't
7490 * find any space, or we are empty, and we can just add all
7491		 * the space in and be done with it. This saves us a lot of
7492 * time, particularly in the full case.
7493 */
7494 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
1b2da372 7495 exclude_super_stripes(root, cache);
11833d66 7496 cache->last_byte_to_unpin = (u64)-1;
817d52f8 7497 cache->cached = BTRFS_CACHE_FINISHED;
1b2da372 7498 free_excluded_extents(root, cache);
817d52f8 7499 } else if (btrfs_block_group_used(&cache->item) == 0) {
11833d66
YZ
7500 exclude_super_stripes(root, cache);
7501 cache->last_byte_to_unpin = (u64)-1;
817d52f8
JB
7502 cache->cached = BTRFS_CACHE_FINISHED;
7503 add_new_free_space(cache, root->fs_info,
7504 found_key.objectid,
7505 found_key.objectid +
7506 found_key.offset);
11833d66 7507 free_excluded_extents(root, cache);
817d52f8 7508 }
96b5179d 7509
6324fbf3
CM
7510 ret = update_space_info(info, cache->flags, found_key.offset,
7511 btrfs_block_group_used(&cache->item),
7512 &space_info);
7513 BUG_ON(ret);
7514 cache->space_info = space_info;
1b2da372
JB
7515 spin_lock(&cache->space_info->lock);
7516 cache->space_info->bytes_super += cache->bytes_super;
7517 spin_unlock(&cache->space_info->lock);
7518
80eb234a
JB
7519 down_write(&space_info->groups_sem);
7520 list_add_tail(&cache->list, &space_info->block_groups);
7521 up_write(&space_info->groups_sem);
0f9dd46c
JB
7522
7523 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7524 BUG_ON(ret);
75ccf47d
CM
7525
7526 set_avail_alloc_bits(root->fs_info, cache->flags);
2b82032c
YZ
7527 if (btrfs_chunk_readonly(root, cache->key.objectid))
7528 set_block_group_readonly(cache);
9078a3e1 7529 }
0b86a832
CM
7530 ret = 0;
7531error:
9078a3e1 7532 btrfs_free_path(path);
0b86a832 7533 return ret;
9078a3e1 7534}
6324fbf3
CM
7535
7536int btrfs_make_block_group(struct btrfs_trans_handle *trans,
7537 struct btrfs_root *root, u64 bytes_used,
e17cade2 7538 u64 type, u64 chunk_objectid, u64 chunk_offset,
7539 u64 size)
7540{
7541 int ret;
6324fbf3
CM
7542 struct btrfs_root *extent_root;
7543 struct btrfs_block_group_cache *cache;
6324fbf3
CM
7544
7545 extent_root = root->fs_info->extent_root;
6324fbf3 7546
12fcfd22 7547 root->fs_info->last_trans_log_full_commit = trans->transid;
e02119d5 7548
8f18cf13 7549 cache = kzalloc(sizeof(*cache), GFP_NOFS);
0f9dd46c
JB
7550 if (!cache)
7551 return -ENOMEM;
7552
e17cade2 7553 cache->key.objectid = chunk_offset;
6324fbf3 7554 cache->key.offset = size;
d2fb3437 7555 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
96303081
JB
7556 cache->sectorsize = root->sectorsize;
7557
7558 /*
7559 * we only want to have 32k of ram per block group for keeping track
7560 * of free space, and if we pass 1/2 of that we want to start
7561 * converting things over to using bitmaps
7562 */
7563 cache->extents_thresh = ((1024 * 32) / 2) /
7564 sizeof(struct btrfs_free_space);
d2fb3437 7565 atomic_set(&cache->count, 1);
c286ac48 7566 spin_lock_init(&cache->lock);
6226cb0a 7567 spin_lock_init(&cache->tree_lock);
0f9dd46c 7568 INIT_LIST_HEAD(&cache->list);
fa9c0d79 7569 INIT_LIST_HEAD(&cache->cluster_list);
0ef3e66b 7570
6324fbf3 7571 btrfs_set_block_group_used(&cache->item, bytes_used);
6324fbf3
CM
7572 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
7573 cache->flags = type;
7574 btrfs_set_block_group_flags(&cache->item, type);
7575
11833d66 7576 cache->last_byte_to_unpin = (u64)-1;
817d52f8 7577 cache->cached = BTRFS_CACHE_FINISHED;
11833d66 7578 exclude_super_stripes(root, cache);
96303081 7579
817d52f8
JB
7580 add_new_free_space(cache, root->fs_info, chunk_offset,
7581 chunk_offset + size);
7582
11833d66
YZ
7583 free_excluded_extents(root, cache);
7584
6324fbf3
CM
7585 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
7586 &cache->space_info);
7587 BUG_ON(ret);
1b2da372
JB
7588
7589 spin_lock(&cache->space_info->lock);
7590 cache->space_info->bytes_super += cache->bytes_super;
7591 spin_unlock(&cache->space_info->lock);
7592
80eb234a
JB
7593 down_write(&cache->space_info->groups_sem);
7594 list_add_tail(&cache->list, &cache->space_info->block_groups);
7595 up_write(&cache->space_info->groups_sem);
6324fbf3 7596
0f9dd46c
JB
7597 ret = btrfs_add_block_group_cache(root->fs_info, cache);
7598 BUG_ON(ret);
c286ac48 7599
6324fbf3
CM
7600 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
7601 sizeof(cache->item));
7602 BUG_ON(ret);
7603
d18a2c44 7604 set_avail_alloc_bits(extent_root->fs_info, type);
925baedd 7605
6324fbf3
CM
7606 return 0;
7607}
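/*
 * Typical caller (a sketch, not taken from this file): the chunk allocator
 * is expected to call this once a new chunk has been mapped, roughly as
 *
 *	btrfs_make_block_group(trans, extent_root, 0, type,
 *			       BTRFS_FIRST_CHUNK_TREE_OBJECTID,
 *			       chunk_start, chunk_size);
 *
 * i.e. a brand-new, empty block group covering the chunk's logical range.
 */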
1a40e23b
ZY
7608
7609int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
7610 struct btrfs_root *root, u64 group_start)
7611{
7612 struct btrfs_path *path;
7613 struct btrfs_block_group_cache *block_group;
44fb5511 7614 struct btrfs_free_cluster *cluster;
1a40e23b
ZY
7615 struct btrfs_key key;
7616 int ret;
7617
1a40e23b
ZY
7618 root = root->fs_info->extent_root;
7619
7620 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
7621 BUG_ON(!block_group);
c146afad 7622 BUG_ON(!block_group->ro);
1a40e23b
ZY
7623
7624 memcpy(&key, &block_group->key, sizeof(key));
7625
44fb5511
CM
7626 /* make sure this block group isn't part of an allocation cluster */
7627 cluster = &root->fs_info->data_alloc_cluster;
7628 spin_lock(&cluster->refill_lock);
7629 btrfs_return_cluster_to_free_space(block_group, cluster);
7630 spin_unlock(&cluster->refill_lock);
7631
7632 /*
7633 * make sure this block group isn't part of a metadata
7634 * allocation cluster
7635 */
7636 cluster = &root->fs_info->meta_alloc_cluster;
7637 spin_lock(&cluster->refill_lock);
7638 btrfs_return_cluster_to_free_space(block_group, cluster);
7639 spin_unlock(&cluster->refill_lock);
7640
1a40e23b
ZY
7641 path = btrfs_alloc_path();
7642 BUG_ON(!path);
7643
3dfdb934 7644 spin_lock(&root->fs_info->block_group_cache_lock);
1a40e23b
ZY
7645 rb_erase(&block_group->cache_node,
7646 &root->fs_info->block_group_cache_tree);
3dfdb934 7647 spin_unlock(&root->fs_info->block_group_cache_lock);
817d52f8 7648
80eb234a 7649 down_write(&block_group->space_info->groups_sem);
44fb5511
CM
7650 /*
7651 * we must use list_del_init so people can check to see if they
7652 * are still on the list after taking the semaphore
7653 */
7654 list_del_init(&block_group->list);
80eb234a 7655 up_write(&block_group->space_info->groups_sem);
1a40e23b 7656
817d52f8 7657 if (block_group->cached == BTRFS_CACHE_STARTED)
11833d66 7658 wait_block_group_cache_done(block_group);
817d52f8
JB
7659
7660 btrfs_remove_free_space_cache(block_group);
7661
c146afad
YZ
7662 spin_lock(&block_group->space_info->lock);
7663 block_group->space_info->total_bytes -= block_group->key.offset;
7664 block_group->space_info->bytes_readonly -= block_group->key.offset;
7665 spin_unlock(&block_group->space_info->lock);
283bb197
CM
7666
7667 btrfs_clear_space_info_full(root->fs_info);
c146afad 7668
fa9c0d79
CM
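	/*
	 * two references are dropped here: the one taken by
	 * btrfs_lookup_block_group() above, and the one that was held on
	 * behalf of the block group cache tree the group was just erased
	 * from.
	 */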
7669 btrfs_put_block_group(block_group);
7670 btrfs_put_block_group(block_group);
1a40e23b
ZY
7671
7672 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
7673 if (ret > 0)
7674 ret = -EIO;
7675 if (ret < 0)
7676 goto out;
7677
7678 ret = btrfs_del_item(trans, root, path);
7679out:
7680 btrfs_free_path(path);
7681 return ret;
7682}
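/*
 * Removal sequence, as implemented above: detach the group from any data
 * or metadata allocation clusters, unhook it from the block group cache
 * tree and its space_info list, wait for and drop its free space cache,
 * subtract its size from the space_info totals, and finally delete the
 * BLOCK_GROUP_ITEM from the extent tree. The caller is expected to have
 * made the group read-only and relocated its contents beforehand (hence
 * the BUG_ON(!block_group->ro)).
 */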