/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"

#define BTRFS_ROOT_TRANS_TAG 0

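/*
 * drop one reference on a transaction.  The final reference removes the
 * transaction from the global transaction list and frees it
 */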
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
	WARN_ON(transaction->use_count == 0);
	transaction->use_count--;
	if (transaction->use_count == 0) {
		list_del_init(&transaction->list);
		memset(transaction, 0, sizeof(*transaction));
		kmem_cache_free(btrfs_transaction_cachep, transaction);
	}
}

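/*
 * make the current root node the new commit root, dropping the
 * reference held on the old commit root
 */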
static noinline void switch_commit_root(struct btrfs_root *root)
{
	free_extent_buffer(root->commit_root);
	root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;
	cur_trans = root->fs_info->running_transaction;
	if (!cur_trans) {
		cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
					     GFP_NOFS);
		BUG_ON(!cur_trans);
		root->fs_info->generation++;
		cur_trans->num_writers = 1;
		cur_trans->num_joined = 0;
		cur_trans->transid = root->fs_info->generation;
		init_waitqueue_head(&cur_trans->writer_wait);
		init_waitqueue_head(&cur_trans->commit_wait);
		cur_trans->in_commit = 0;
		cur_trans->blocked = 0;
		cur_trans->use_count = 1;
		cur_trans->commit_done = 0;
		cur_trans->start_time = get_seconds();

		cur_trans->delayed_refs.root = RB_ROOT;
		cur_trans->delayed_refs.num_entries = 0;
		cur_trans->delayed_refs.num_heads_ready = 0;
		cur_trans->delayed_refs.num_heads = 0;
		cur_trans->delayed_refs.flushing = 0;
		cur_trans->delayed_refs.run_delayed_start = 0;
		spin_lock_init(&cur_trans->delayed_refs.lock);

		INIT_LIST_HEAD(&cur_trans->pending_snapshots);
		list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
		extent_io_tree_init(&cur_trans->dirty_pages,
				    root->fs_info->btree_inode->i_mapping,
				    GFP_NOFS);
		spin_lock(&root->fs_info->new_trans_lock);
		root->fs_info->running_transaction = cur_trans;
		spin_unlock(&root->fs_info->new_trans_lock);
	} else {
		cur_trans->num_writers++;
		cur_trans->num_joined++;
	}

	return 0;
}

/*
 * this does all the record keeping required to make sure that a
 * reference counted root is properly recorded in a given transaction.
 * This is required to make sure the old root from before we joined the
 * transaction is deleted when the transaction commits
 */
static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	if (root->ref_cows && root->last_trans < trans->transid) {
		WARN_ON(root == root->fs_info->extent_root);
		WARN_ON(root->commit_root != root->node);

		radix_tree_tag_set(&root->fs_info->fs_roots_radix,
				   (unsigned long)root->root_key.objectid,
				   BTRFS_ROOT_TRANS_TAG);
		root->last_trans = trans->transid;
		btrfs_init_reloc_root(trans, root);
	}
	return 0;
}

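/*
 * locked version of record_root_in_trans.  This takes the trans_mutex
 * and returns early if the root was already recorded in this transaction
 */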
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	if (!root->ref_cows)
		return 0;

	mutex_lock(&root->fs_info->trans_mutex);
	if (root->last_trans == trans->transid) {
		mutex_unlock(&root->fs_info->trans_mutex);
		return 0;
	}

	record_root_in_trans(trans, root);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/* wait for commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans;

	cur_trans = root->fs_info->running_transaction;
	if (cur_trans && cur_trans->blocked) {
		DEFINE_WAIT(wait);
		cur_trans->use_count++;
		while (1) {
			prepare_to_wait(&root->fs_info->transaction_wait, &wait,
					TASK_UNINTERRUPTIBLE);
			if (!cur_trans->blocked)
				break;
			mutex_unlock(&root->fs_info->trans_mutex);
			schedule();
			mutex_lock(&root->fs_info->trans_mutex);
		}
		finish_wait(&root->fs_info->transaction_wait, &wait);
		put_transaction(cur_trans);
	}
}

enum btrfs_trans_type {
	TRANS_START,
	TRANS_JOIN,
	TRANS_USERSPACE,
};

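/*
 * decide if this caller should wait for a blocked transaction before
 * joining.  We never wait during log replay, and TRANS_START callers
 * only wait when no ioctl transaction is open
 */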
static int may_wait_transaction(struct btrfs_root *root, int type)
{
	if (!root->fs_info->log_root_recovering &&
	    ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
	     type == TRANS_USERSPACE))
		return 1;
	return 0;
}

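/*
 * common code to allocate a transaction handle, join the running
 * transaction and reserve metadata space for num_items items
 */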
static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
						    u64 num_items, int type)
{
	struct btrfs_trans_handle *h;
	struct btrfs_transaction *cur_trans;
	int retries = 0;
	int ret;
again:
	h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
	if (!h)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&root->fs_info->trans_mutex);
	if (may_wait_transaction(root, type))
		wait_current_trans(root);

	ret = join_transaction(root);
	BUG_ON(ret);

	cur_trans = root->fs_info->running_transaction;
	cur_trans->use_count++;
	mutex_unlock(&root->fs_info->trans_mutex);

	h->transid = cur_trans->transid;
	h->transaction = cur_trans;
	h->blocks_used = 0;
	h->block_group = 0;
	h->bytes_reserved = 0;
	h->delayed_ref_updates = 0;
	h->block_rsv = NULL;

	smp_mb();
	if (cur_trans->blocked && may_wait_transaction(root, type)) {
		btrfs_commit_transaction(h, root);
		goto again;
	}

	if (num_items > 0) {
		ret = btrfs_trans_reserve_metadata(h, root, num_items,
						   &retries);
		if (ret == -EAGAIN) {
			btrfs_commit_transaction(h, root);
			goto again;
		}
		if (ret < 0) {
			btrfs_end_transaction(h, root);
			return ERR_PTR(ret);
		}
	}

	mutex_lock(&root->fs_info->trans_mutex);
	record_root_in_trans(h, root);
	mutex_unlock(&root->fs_info->trans_mutex);

	if (!current->journal_info && type != TRANS_USERSPACE)
		current->journal_info = h;
	return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
						   int num_items)
{
	return start_transaction(root, num_items, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
						  int num_blocks)
{
	return start_transaction(root, 0, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
							 int num_blocks)
{
	return start_transaction(r, 0, TRANS_USERSPACE);
}

/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
				    struct btrfs_transaction *commit)
{
	DEFINE_WAIT(wait);
	mutex_lock(&root->fs_info->trans_mutex);
	while (!commit->commit_done) {
		prepare_to_wait(&commit->commit_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit->commit_done)
			break;
		mutex_unlock(&root->fs_info->trans_mutex);
		schedule();
		mutex_lock(&root->fs_info->trans_mutex);
	}
	mutex_unlock(&root->fs_info->trans_mutex);
	finish_wait(&commit->commit_wait, &wait);
	return 0;
}

#if 0
/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
	struct btrfs_fs_info *info = root->fs_info;
	int harder_count = 0;

harder:
	if (atomic_read(&info->throttles)) {
		DEFINE_WAIT(wait);
		int thr;
		thr = atomic_read(&info->throttle_gen);

		do {
			prepare_to_wait(&info->transaction_throttle,
					&wait, TASK_UNINTERRUPTIBLE);
			if (!atomic_read(&info->throttles)) {
				finish_wait(&info->transaction_throttle, &wait);
				break;
			}
			schedule();
			finish_wait(&info->transaction_throttle, &wait);
		} while (thr == atomic_read(&info->throttle_gen));
		harder_count++;

		if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
		    harder_count < 2)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
		    harder_count < 10)
			goto harder;

		if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
		    harder_count < 20)
			goto harder;
	}
}
#endif

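/*
 * throttle new callers while the running transaction is blocked for
 * commit, unless an ioctl transaction is open
 */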
void btrfs_throttle(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	if (!root->fs_info->open_ioctl_trans)
		wait_current_trans(root);
	mutex_unlock(&root->fs_info->trans_mutex);
}

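/*
 * end a transaction handle: run a few batches of delayed refs, release
 * any reserved metadata and drop our writer count against the running
 * transaction
 */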
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root, int throttle)
{
	struct btrfs_transaction *cur_trans;
	struct btrfs_fs_info *info = root->fs_info;
	int count = 0;

	while (count < 4) {
		unsigned long cur = trans->delayed_ref_updates;
		trans->delayed_ref_updates = 0;
		if (cur &&
		    trans->transaction->delayed_refs.num_heads_ready > 64) {
			trans->delayed_ref_updates = 0;

			/*
			 * do a full flush if the transaction is trying
			 * to close
			 */
			if (trans->transaction->delayed_refs.flushing)
				cur = 0;
			btrfs_run_delayed_refs(trans, root, cur);
		} else {
			break;
		}
		count++;
	}

	btrfs_trans_release_metadata(trans, root);

	mutex_lock(&info->trans_mutex);
	cur_trans = info->running_transaction;
	WARN_ON(cur_trans != trans->transaction);
	WARN_ON(cur_trans->num_writers < 1);
	cur_trans->num_writers--;

	if (waitqueue_active(&cur_trans->writer_wait))
		wake_up(&cur_trans->writer_wait);
	put_transaction(cur_trans);
	mutex_unlock(&info->trans_mutex);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	memset(trans, 0, sizeof(*trans));
	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (throttle)
		btrfs_run_delayed_iputs(root);

	return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root)
{
	return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
			       struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;
		while (start <= end) {
			cond_resched();

			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;

			btree_lock_page_hook(page);
			if (!page->mapping) {
				unlock_page(page);
				page_cache_release(page);
				continue;
			}

			if (PageWriteback(page)) {
				if (PageDirty(page))
					wait_on_page_writeback(page);
				else {
					unlock_page(page);
					page_cache_release(page);
					continue;
				}
			}
			err = write_one_page(page, 0);
			if (err)
				werr = err;
			page_cache_release(page);
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
			      struct extent_io_tree *dirty_pages, int mark)
{
	int ret;
	int err = 0;
	int werr = 0;
	struct page *page;
	struct inode *btree_inode = root->fs_info->btree_inode;
	u64 start = 0;
	u64 end;
	unsigned long index;

	while (1) {
		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
					    mark);
		if (ret)
			break;

		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
		while (start <= end) {
			index = start >> PAGE_CACHE_SHIFT;
			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
			page = find_get_page(btree_inode->i_mapping, index);
			if (!page)
				continue;
			if (PageDirty(page)) {
				btree_lock_page_hook(page);
				wait_on_page_writeback(page);
				err = write_one_page(page, 0);
				if (err)
					werr = err;
			}
			wait_on_page_writeback(page);
			page_cache_release(page);
			cond_resched();
		}
	}
	if (err)
		werr = err;
	return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
					struct extent_io_tree *dirty_pages,
					int mark)
{
	int ret;
	int ret2;

	ret = btrfs_write_marked_extents(root, dirty_pages, mark);
	ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
	return ret || ret2;
}

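/*
 * write and wait on all the dirty btree blocks for this transaction.  If
 * there is no running transaction, fall back to flushing the whole btree
 * inode
 */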
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root)
{
	if (!trans || !trans->transaction) {
		struct inode *btree_inode;
		btree_inode = root->fs_info->btree_inode;
		return filemap_write_and_wait(btree_inode->i_mapping);
	}
	return btrfs_write_and_wait_marked_extents(root,
					&trans->transaction->dirty_pages,
					EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops until the cowonly root stops changing while its root
 * pointer is being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root)
{
	int ret;
	u64 old_root_bytenr;
	u64 old_root_used;
	struct btrfs_root *tree_root = root->fs_info->tree_root;

	old_root_used = btrfs_root_used(&root->root_item);
	btrfs_write_dirty_block_groups(trans, root);

	while (1) {
		old_root_bytenr = btrfs_root_bytenr(&root->root_item);
		if (old_root_bytenr == root->node->start &&
		    old_root_used == btrfs_root_used(&root->root_item))
			break;

		btrfs_set_root_node(&root->root_item, root->node);
		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		BUG_ON(ret);

		old_root_used = btrfs_root_used(&root->root_item);
		ret = btrfs_write_dirty_block_groups(trans, root);
		BUG_ON(ret);
	}

	if (root != root->fs_info->extent_root)
		switch_commit_root(root);

	return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
					 struct btrfs_root *root)
{
	struct btrfs_fs_info *fs_info = root->fs_info;
	struct list_head *next;
	struct extent_buffer *eb;
	int ret;

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	eb = btrfs_lock_root_node(fs_info->tree_root);
	btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	while (!list_empty(&fs_info->dirty_cowonly_roots)) {
		next = fs_info->dirty_cowonly_roots.next;
		list_del_init(next);
		root = list_entry(next, struct btrfs_root, dirty_list);

		update_cowonly_root(trans, root);
	}

	down_write(&fs_info->extent_commit_sem);
	switch_commit_root(fs_info->extent_root);
	up_write(&fs_info->extent_commit_sem);

	return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This adds the
 * given root to the list of dead roots that need to be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
	mutex_lock(&root->fs_info->trans_mutex);
	list_add(&root->root_list, &root->fs_info->dead_roots);
	mutex_unlock(&root->fs_info->trans_mutex);
	return 0;
}

/*
 * update the root pointers in the tree of tree roots for every fs root
 * that changed during this transaction
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root)
{
	struct btrfs_root *gang[8];
	struct btrfs_fs_info *fs_info = root->fs_info;
	int i;
	int ret;
	int err = 0;

	while (1) {
		ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
						 (void **)gang, 0,
						 ARRAY_SIZE(gang),
						 BTRFS_ROOT_TRANS_TAG);
		if (ret == 0)
			break;
		for (i = 0; i < ret; i++) {
			root = gang[i];
			radix_tree_tag_clear(&fs_info->fs_roots_radix,
					(unsigned long)root->root_key.objectid,
					BTRFS_ROOT_TRANS_TAG);

			btrfs_free_log(trans, root);
			btrfs_update_reloc_root(trans, root);

			if (root->commit_root != root->node) {
				switch_commit_root(root);
				btrfs_set_root_node(&root->root_item,
						    root->node);
			}

			err = btrfs_update_root(trans, fs_info->tree_root,
						&root->root_key,
						&root->root_item);
			if (err)
				break;
		}
	}
	return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
	struct btrfs_fs_info *info = root->fs_info;
	int ret;
	struct btrfs_trans_handle *trans;
	unsigned long nr;

	smp_mb();
	if (root->defrag_running)
		return 0;
	trans = btrfs_start_transaction(root, 1);
	while (1) {
		root->defrag_running = 1;
		ret = btrfs_defrag_leaves(trans, root, cacheonly);
		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(info->tree_root, nr);
		cond_resched();

		trans = btrfs_start_transaction(root, 1);
		if (root->fs_info->closing || ret != -EAGAIN)
			break;
	}
	root->defrag_running = 0;
	smp_mb();
	btrfs_end_transaction(trans, root);
	return 0;
}

#if 0
/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
	DEFINE_WAIT(wait);

	mutex_lock(&info->trans_mutex);
	while (info->running_transaction &&
	       info->running_transaction->delayed_refs.flushing) {
		prepare_to_wait(&info->transaction_wait, &wait,
				TASK_UNINTERRUPTIBLE);
		mutex_unlock(&info->trans_mutex);

		schedule();

		mutex_lock(&info->trans_mutex);
		finish_wait(&info->transaction_wait, &wait);
	}
	mutex_unlock(&info->trans_mutex);
	return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
	struct btrfs_trans_handle *trans;
	struct btrfs_root *tree_root = root->fs_info->tree_root;
	unsigned long nr;
	int ret;

	while (1) {
		/*
		 * we don't want to jump in and create a bunch of
		 * delayed refs if the transaction is starting to close
		 */
		wait_transaction_pre_flush(tree_root->fs_info);
		trans = btrfs_start_transaction(tree_root, 1);

		/*
		 * we've joined a transaction, make sure it isn't
		 * closing right now
		 */
		if (trans->transaction->delayed_refs.flushing) {
			btrfs_end_transaction(trans, tree_root);
			continue;
		}

		ret = btrfs_drop_snapshot(trans, root);
		if (ret != -EAGAIN)
			break;

		ret = btrfs_update_root(trans, tree_root,
					&root->root_key,
					&root->root_item);
		if (ret)
			break;

		nr = trans->blocks_used;
		ret = btrfs_end_transaction(trans, tree_root);
		BUG_ON(ret);

		btrfs_btree_balance_dirty(tree_root, nr);
		cond_resched();
	}
	BUG_ON(ret);

	ret = btrfs_del_root(trans, tree_root, &root->root_key);
	BUG_ON(ret);

	nr = trans->blocks_used;
	ret = btrfs_end_transaction(trans, tree_root);
	BUG_ON(ret);

	free_extent_buffer(root->node);
	free_extent_buffer(root->commit_root);
	kfree(root);

	btrfs_btree_balance_dirty(tree_root, nr);
	return ret;
}
#endif

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
				   struct btrfs_fs_info *fs_info,
				   struct btrfs_pending_snapshot *pending)
{
	struct btrfs_key key;
	struct btrfs_root_item *new_root_item;
	struct btrfs_root *tree_root = fs_info->tree_root;
	struct btrfs_root *root = pending->root;
	struct btrfs_root *parent_root;
	struct inode *parent_inode;
	struct dentry *dentry;
	struct extent_buffer *tmp;
	struct extent_buffer *old;
	int ret;
	u64 index = 0;
	u64 objectid;

	new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
	if (!new_root_item) {
		pending->error = -ENOMEM;
		goto fail;
	}

	ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
	if (ret) {
		pending->error = ret;
		goto fail;
	}

	key.objectid = objectid;
	key.offset = (u64)-1;
	key.type = BTRFS_ROOT_ITEM_KEY;

	trans->block_rsv = &pending->block_rsv;

	dentry = pending->dentry;
	parent_inode = dentry->d_parent->d_inode;
	parent_root = BTRFS_I(parent_inode)->root;
	record_root_in_trans(trans, parent_root);

	/*
	 * insert the directory item
	 */
	ret = btrfs_set_inode_index(parent_inode, &index);
	BUG_ON(ret);
	ret = btrfs_insert_dir_item(trans, parent_root,
				    dentry->d_name.name, dentry->d_name.len,
				    parent_inode->i_ino, &key,
				    BTRFS_FT_DIR, index);
	BUG_ON(ret);

	btrfs_i_size_write(parent_inode, parent_inode->i_size +
					 dentry->d_name.len * 2);
	ret = btrfs_update_inode(trans, parent_root, parent_inode);
	BUG_ON(ret);

	record_root_in_trans(trans, root);
	btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
	memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

	old = btrfs_lock_root_node(root);
	btrfs_cow_block(trans, root, old, NULL, 0, &old);
	btrfs_set_lock_blocking(old);

	btrfs_copy_root(trans, root, old, &tmp, objectid);
	btrfs_tree_unlock(old);
	free_extent_buffer(old);

	btrfs_set_root_node(new_root_item, tmp);
	/* record when the snapshot was created in key.offset */
	key.offset = trans->transid;
	ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
	btrfs_tree_unlock(tmp);
	free_extent_buffer(tmp);
	BUG_ON(ret);

	/*
	 * insert root back/forward references
	 */
	ret = btrfs_add_root_ref(trans, tree_root, objectid,
				 parent_root->root_key.objectid,
				 parent_inode->i_ino, index,
				 dentry->d_name.name, dentry->d_name.len);
	BUG_ON(ret);

	key.offset = (u64)-1;
	pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
	BUG_ON(IS_ERR(pending->snap));
fail:
	kfree(new_root_item);
	btrfs_block_rsv_release(root, &pending->block_rsv, (u64)-1);
	return 0;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
					     struct btrfs_fs_info *fs_info)
{
	struct btrfs_pending_snapshot *pending;
	struct list_head *head = &trans->transaction->pending_snapshots;
	int ret;

	list_for_each_entry(pending, head, list) {
		ret = create_pending_snapshot(trans, fs_info, pending);
		BUG_ON(ret);
	}
	return 0;
}

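/*
 * copy the current chunk root and tree root pointers into the super
 * block copy for the next commit
 */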
static void update_super_roots(struct btrfs_root *root)
{
	struct btrfs_root_item *root_item;
	struct btrfs_super_block *super;

	super = &root->fs_info->super_copy;

	root_item = &root->fs_info->chunk_root->root_item;
	super->chunk_root = root_item->bytenr;
	super->chunk_root_generation = root_item->generation;
	super->chunk_root_level = root_item->level;

	root_item = &root->fs_info->tree_root->root_item;
	super->root = root_item->bytenr;
	super->generation = root_item->generation;
	super->root_level = root_item->level;
}

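/*
 * return 1 if the running transaction is currently committing
 */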
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
	int ret = 0;
	spin_lock(&info->new_trans_lock);
	if (info->running_transaction)
		ret = info->running_transaction->in_commit;
	spin_unlock(&info->new_trans_lock);
	return ret;
}

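/*
 * commit the running transaction: flush the delayed refs, wait for the
 * other writers to finish, write all the dirty tree blocks and then the
 * super block
 */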
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	unsigned long joined = 0;
	unsigned long timeout = 1;
	struct btrfs_transaction *cur_trans;
	struct btrfs_transaction *prev_trans = NULL;
	DEFINE_WAIT(wait);
	int ret;
	int should_grow = 0;
	unsigned long now = get_seconds();
	int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

	btrfs_run_ordered_operations(root, 0);

	/* make a pass through all the delayed refs we have so far.
	 * Any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	btrfs_trans_release_metadata(trans, root);

	cur_trans = trans->transaction;
	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;

	ret = btrfs_run_delayed_refs(trans, root, 0);
	BUG_ON(ret);

	mutex_lock(&root->fs_info->trans_mutex);
	if (cur_trans->in_commit) {
		cur_trans->use_count++;
		mutex_unlock(&root->fs_info->trans_mutex);
		btrfs_end_transaction(trans, root);

		ret = wait_for_commit(root, cur_trans);
		BUG_ON(ret);

		mutex_lock(&root->fs_info->trans_mutex);
		put_transaction(cur_trans);
		mutex_unlock(&root->fs_info->trans_mutex);

		return 0;
	}

	trans->transaction->in_commit = 1;
	trans->transaction->blocked = 1;
	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (!prev_trans->commit_done) {
			prev_trans->use_count++;
			mutex_unlock(&root->fs_info->trans_mutex);

			wait_for_commit(root, prev_trans);

			mutex_lock(&root->fs_info->trans_mutex);
			put_transaction(prev_trans);
		}
	}

	if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
		should_grow = 1;

	do {
		int snap_pending = 0;
		joined = cur_trans->num_joined;
		if (!list_empty(&trans->transaction->pending_snapshots))
			snap_pending = 1;

		WARN_ON(cur_trans != trans->transaction);
		prepare_to_wait(&cur_trans->writer_wait, &wait,
				TASK_UNINTERRUPTIBLE);

		if (cur_trans->num_writers > 1)
			timeout = MAX_SCHEDULE_TIMEOUT;
		else if (should_grow)
			timeout = 1;

		mutex_unlock(&root->fs_info->trans_mutex);

		if (flush_on_commit || snap_pending) {
			btrfs_start_delalloc_inodes(root, 1);
			ret = btrfs_wait_ordered_extents(root, 0, 1);
			BUG_ON(ret);
		}

		/*
		 * rename doesn't use btrfs_join_transaction, so once we
		 * set the transaction to blocked above, we aren't going
		 * to get any new ordered operations.  We can safely run
		 * it here and know for sure that nothing new will be added
		 * to the list
		 */
		btrfs_run_ordered_operations(root, 1);

		smp_mb();
		if (cur_trans->num_writers > 1 || should_grow)
			schedule_timeout(timeout);

		mutex_lock(&root->fs_info->trans_mutex);
		finish_wait(&cur_trans->writer_wait, &wait);
	} while (cur_trans->num_writers > 1 ||
		 (should_grow && cur_trans->num_joined != joined));

	ret = create_pending_snapshots(trans, root->fs_info);
	BUG_ON(ret);

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	BUG_ON(ret);

	WARN_ON(cur_trans != trans->transaction);

	/* commit_fs_roots is responsible for getting the
	 * various roots consistent with each other.  Every pointer
	 * in the tree of tree roots has to point to the most up to date
	 * root for every subvolume and other tree.  So, we have to keep
	 * the tree logging code from jumping in and changing any
	 * of the trees.
	 *
	 * At this point in the commit, there can't be any tree-log
	 * writers, but a little lower down we drop the trans mutex
	 * and let new people in.  By holding the tree_log_mutex
	 * from now until after the super is written, we avoid races
	 * with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	BUG_ON(ret);

	/* commit_fs_roots gets rid of all the tree log roots, it is now
	 * safe to free the root of tree log roots
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	BUG_ON(ret);

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;
	spin_lock(&root->fs_info->new_trans_lock);
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->new_trans_lock);

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	switch_commit_root(root->fs_info->tree_root);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	switch_commit_root(root->fs_info->chunk_root);

	update_super_roots(root);

	if (!root->fs_info->log_root_recovering) {
		btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
		btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
	}

	memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
	       sizeof(root->fs_info->super_copy));

	trans->transaction->blocked = 0;

	wake_up(&root->fs_info->transaction_wait);

	mutex_unlock(&root->fs_info->trans_mutex);
	ret = btrfs_write_and_wait_transaction(trans, root);
	BUG_ON(ret);
	write_ctree_super(trans, root, 0);

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	mutex_lock(&root->fs_info->trans_mutex);

	cur_trans->commit_done = 1;

	root->fs_info->last_trans_committed = cur_trans->transid;

	wake_up(&cur_trans->commit_wait);

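	/* one put for our handle's reference, one for the reference
	 * taken when this transaction became the running transaction
	 */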
	put_transaction(cur_trans);
	put_transaction(cur_trans);

	mutex_unlock(&root->fs_info->trans_mutex);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
	LIST_HEAD(list);
	struct btrfs_fs_info *fs_info = root->fs_info;

	mutex_lock(&fs_info->trans_mutex);
	list_splice_init(&fs_info->dead_roots, &list);
	mutex_unlock(&fs_info->trans_mutex);

	while (!list_empty(&list)) {
		root = list_entry(list.next, struct btrfs_root, root_list);
		list_del(&root->root_list);

		if (btrfs_header_backref_rev(root->node) <
		    BTRFS_MIXED_BACKREF_REV)
			btrfs_drop_snapshot(root, 0);
		else
			btrfs_drop_snapshot(root, 1);
	}
	return 0;
}