/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"

#define BTRFS_ROOT_TRANS_TAG 0

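/*
 * drop a reference on a transaction.  The last reference pulls the
 * transaction off the global list and frees it.
 */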
static noinline void put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(transaction->use_count == 0);
        transaction->use_count--;
        if (transaction->use_count == 0) {
                list_del_init(&transaction->list);
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

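/*
 * point root->commit_root at the current root node, dropping our
 * reference on the old commit root.
 */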
static noinline void switch_commit_root(struct btrfs_root *root)
{
        free_extent_buffer(root->commit_root);
        root->commit_root = btrfs_root_node(root);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;
        cur_trans = root->fs_info->running_transaction;
        if (!cur_trans) {
                cur_trans = kmem_cache_alloc(btrfs_transaction_cachep,
                                             GFP_NOFS);
                BUG_ON(!cur_trans);
                root->fs_info->generation++;
                cur_trans->num_writers = 1;
                cur_trans->num_joined = 0;
                cur_trans->transid = root->fs_info->generation;
                init_waitqueue_head(&cur_trans->writer_wait);
                init_waitqueue_head(&cur_trans->commit_wait);
                cur_trans->in_commit = 0;
                cur_trans->blocked = 0;
                cur_trans->use_count = 1;
                cur_trans->commit_done = 0;
                cur_trans->start_time = get_seconds();

                cur_trans->delayed_refs.root = RB_ROOT;
                cur_trans->delayed_refs.num_entries = 0;
                cur_trans->delayed_refs.num_heads_ready = 0;
                cur_trans->delayed_refs.num_heads = 0;
                cur_trans->delayed_refs.flushing = 0;
                cur_trans->delayed_refs.run_delayed_start = 0;
                spin_lock_init(&cur_trans->delayed_refs.lock);

                INIT_LIST_HEAD(&cur_trans->pending_snapshots);
                list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
                extent_io_tree_init(&cur_trans->dirty_pages,
                                     root->fs_info->btree_inode->i_mapping,
                                     GFP_NOFS);
                spin_lock(&root->fs_info->new_trans_lock);
                root->fs_info->running_transaction = cur_trans;
                spin_unlock(&root->fs_info->new_trans_lock);
        } else {
                cur_trans->num_writers++;
                cur_trans->num_joined++;
        }

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction.  This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static noinline int record_root_in_trans(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        if (root->ref_cows && root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                           (unsigned long)root->root_key.objectid,
                           BTRFS_ROOT_TRANS_TAG);
                root->last_trans = trans->transid;
                btrfs_init_reloc_root(trans, root);
        }
        return 0;
}

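/*
 * wrapper around record_root_in_trans that takes trans_mutex, for callers
 * outside the transaction code.
 */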
int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!root->ref_cows)
                return 0;

        mutex_lock(&root->fs_info->trans_mutex);
        if (root->last_trans == trans->transid) {
                mutex_unlock(&root->fs_info->trans_mutex);
                return 0;
        }

        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->trans_mutex);
        return 0;
}

/* wait for commit against the current transaction to become unblocked.
 * When this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && cur_trans->blocked) {
                DEFINE_WAIT(wait);
                cur_trans->use_count++;
                while (1) {
                        prepare_to_wait(&root->fs_info->transaction_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (cur_trans->blocked) {
                                mutex_unlock(&root->fs_info->trans_mutex);
                                schedule();
                                mutex_lock(&root->fs_info->trans_mutex);
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                        } else {
                                finish_wait(&root->fs_info->transaction_wait,
                                            &wait);
                                break;
                        }
                }
                put_transaction(cur_trans);
        }
}

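/*
 * flavors of transaction start: TRANS_JOIN hops into the running
 * transaction without waiting, while TRANS_START and TRANS_USERSPACE may
 * first wait for a blocked commit to finish.
 */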
enum btrfs_trans_type {
        TRANS_START,
        TRANS_JOIN,
        TRANS_USERSPACE,
};

static struct btrfs_trans_handle *start_transaction(struct btrfs_root *root,
                                             int num_blocks, int type)
{
        struct btrfs_trans_handle *h =
                kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        int ret;

        mutex_lock(&root->fs_info->trans_mutex);
        if (!root->fs_info->log_root_recovering &&
            ((type == TRANS_START && !root->fs_info->open_ioctl_trans) ||
             type == TRANS_USERSPACE))
                wait_current_trans(root);
        ret = join_transaction(root);
        BUG_ON(ret);

        h->transid = root->fs_info->running_transaction->transid;
        h->transaction = root->fs_info->running_transaction;
        h->blocks_reserved = num_blocks;
        h->blocks_used = 0;
        h->block_group = 0;
        h->alloc_exclude_nr = 0;
        h->alloc_exclude_start = 0;
        h->delayed_ref_updates = 0;

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;

        root->fs_info->running_transaction->use_count++;
        record_root_in_trans(h, root);
        mutex_unlock(&root->fs_info->trans_mutex);
        return h;
}

struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_blocks)
{
        return start_transaction(root, num_blocks, TRANS_START);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root,
                                                  int num_blocks)
{
        return start_transaction(root, num_blocks, TRANS_JOIN);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r,
                                                         int num_blocks)
{
        return start_transaction(r, num_blocks, TRANS_USERSPACE);
}

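/*
 * a typical caller pairs these with btrfs_end_transaction(), roughly
 * (a sketch, not code from this file):
 *
 *	trans = btrfs_start_transaction(root, 1);
 *	... cow and modify btree blocks ...
 *	btrfs_end_transaction(trans, root);
 */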
/* wait for a transaction commit to be fully complete */
static noinline int wait_for_commit(struct btrfs_root *root,
                                    struct btrfs_transaction *commit)
{
        DEFINE_WAIT(wait);
        mutex_lock(&root->fs_info->trans_mutex);
        while (!commit->commit_done) {
                prepare_to_wait(&commit->commit_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                if (commit->commit_done)
                        break;
                mutex_unlock(&root->fs_info->trans_mutex);
                schedule();
                mutex_lock(&root->fs_info->trans_mutex);
        }
        mutex_unlock(&root->fs_info->trans_mutex);
        finish_wait(&commit->commit_wait, &wait);
        return 0;
}

#if 0
/*
 * rate limit against the drop_snapshot code.  This helps to slow down new
 * operations if the drop_snapshot code isn't able to keep up.
 */
static void throttle_on_drops(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        int harder_count = 0;

harder:
        if (atomic_read(&info->throttles)) {
                DEFINE_WAIT(wait);
                int thr;
                thr = atomic_read(&info->throttle_gen);

                do {
                        prepare_to_wait(&info->transaction_throttle,
                                        &wait, TASK_UNINTERRUPTIBLE);
                        if (!atomic_read(&info->throttles)) {
                                finish_wait(&info->transaction_throttle, &wait);
                                break;
                        }
                        schedule();
                        finish_wait(&info->transaction_throttle, &wait);
                } while (thr == atomic_read(&info->throttle_gen));
                harder_count++;

                if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 &&
                    harder_count < 2)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 &&
                    harder_count < 10)
                        goto harder;

                if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 &&
                    harder_count < 20)
                        goto harder;
        }
}
#endif

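/*
 * throttle new writers by waiting for a blocked commit to finish, unless
 * a userspace ioctl transaction is open.
 */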
void btrfs_throttle(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->trans_mutex);
        if (!root->fs_info->open_ioctl_trans)
                wait_current_trans(root);
        mutex_unlock(&root->fs_info->trans_mutex);
}

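/*
 * common end-of-transaction work: run a few batches of delayed refs, then
 * drop this writer's count and wake anyone waiting on the commit.
 */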
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *info = root->fs_info;
        int count = 0;

        while (count < 4) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;

                        /*
                         * do a full flush if the transaction is trying
                         * to close
                         */
                        if (trans->transaction->delayed_refs.flushing)
                                cur = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
                }
                count++;
        }

        mutex_lock(&info->trans_mutex);
        cur_trans = info->running_transaction;
        WARN_ON(cur_trans != trans->transaction);
        WARN_ON(cur_trans->num_writers < 1);
        cur_trans->num_writers--;

        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        put_transaction(cur_trans);
        mutex_unlock(&info->trans_mutex);

        if (current->journal_info == trans)
                current->journal_info = NULL;
        memset(trans, 0, sizeof(*trans));
        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (throttle)
                btrfs_run_delayed_iputs(root);

        return 0;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int err = 0;
        int werr = 0;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start = 0;
        u64 end;
        unsigned long index;

        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            mark);
                if (ret)
                        break;
                while (start <= end) {
                        cond_resched();

                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;

                        btree_lock_page_hook(page);
                        if (!page->mapping) {
                                unlock_page(page);
                                page_cache_release(page);
                                continue;
                        }

                        if (PageWriteback(page)) {
                                if (PageDirty(page))
                                        wait_on_page_writeback(page);
                                else {
                                        unlock_page(page);
                                        page_cache_release(page);
                                        continue;
                                }
                        }
                        err = write_one_page(page, 0);
                        if (err)
                                werr = err;
                        page_cache_release(page);
                }
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit.  We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int err = 0;
        int werr = 0;
        struct page *page;
        struct inode *btree_inode = root->fs_info->btree_inode;
        u64 start = 0;
        u64 end;
        unsigned long index;

        while (1) {
                ret = find_first_extent_bit(dirty_pages, start, &start, &end,
                                            mark);
                if (ret)
                        break;

                clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
                while (start <= end) {
                        index = start >> PAGE_CACHE_SHIFT;
                        start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
                        page = find_get_page(btree_inode->i_mapping, index);
                        if (!page)
                                continue;
                        if (PageDirty(page)) {
                                btree_lock_page_hook(page);
                                wait_on_page_writeback(page);
                                err = write_one_page(page, 0);
                                if (err)
                                        werr = err;
                        }
                        wait_on_page_writeback(page);
                        page_cache_release(page);
                        cond_resched();
                }
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees.  This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;

        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);
        return ret || ret2;
}

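/*
 * write and wait on all the dirty btree blocks for this transaction.  If
 * there is no running transaction, flush the whole btree inode instead.
 */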
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                BUG_ON(ret);

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                BUG_ON(ret);
        }

        if (root != root->fs_info->extent_root)
                switch_commit_root(root);

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        eb = btrfs_lock_root_node(fs_info->tree_root);
        btrfs_cow_block(trans, fs_info->tree_root, eb, NULL, 0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                update_cowonly_root(trans, root);
        }

        down_write(&fs_info->extent_commit_sem);
        switch_commit_root(fs_info->extent_root);
        up_write(&fs_info->extent_commit_sem);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted.  This adds a given
 * root to the list of dead roots that need to be deleted
 */
int btrfs_add_dead_root(struct btrfs_root *root)
{
        mutex_lock(&root->fs_info->trans_mutex);
        list_add(&root->root_list, &root->fs_info->dead_roots);
        mutex_unlock(&root->fs_info->trans_mutex);
        return 0;
}

/*
 * update all the fs and subvolume tree roots on disk
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);

                        if (root->commit_root != root->node) {
                                switch_commit_root(root);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        if (err)
                                break;
                }
        }
        return err;
}

/*
 * defrag a given btree.  If cacheonly == 1, this won't read from the disk,
 * otherwise every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root, int cacheonly)
{
        struct btrfs_fs_info *info = root->fs_info;
        int ret;
        struct btrfs_trans_handle *trans;
        unsigned long nr;

        smp_mb();
        if (root->defrag_running)
                return 0;
        trans = btrfs_start_transaction(root, 1);
        while (1) {
                root->defrag_running = 1;
                ret = btrfs_defrag_leaves(trans, root, cacheonly);
                nr = trans->blocks_used;
                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root, nr);
                cond_resched();

                trans = btrfs_start_transaction(root, 1);
                if (root->fs_info->closing || ret != -EAGAIN)
                        break;
        }
        root->defrag_running = 0;
        smp_mb();
        btrfs_end_transaction(trans, root);
        return 0;
}

#if 0
/*
 * when dropping snapshots, we generate a ton of delayed refs, and it makes
 * sense not to join the transaction while it is trying to flush the current
 * queue of delayed refs out.
 *
 * This is used by the drop snapshot code only
 */
static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info)
{
        DEFINE_WAIT(wait);

        mutex_lock(&info->trans_mutex);
        while (info->running_transaction &&
               info->running_transaction->delayed_refs.flushing) {
                prepare_to_wait(&info->transaction_wait, &wait,
                                TASK_UNINTERRUPTIBLE);
                mutex_unlock(&info->trans_mutex);

                schedule();

                mutex_lock(&info->trans_mutex);
                finish_wait(&info->transaction_wait, &wait);
        }
        mutex_unlock(&info->trans_mutex);
        return 0;
}

/*
 * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on
 * all of them
 */
int btrfs_drop_dead_root(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;
        struct btrfs_root *tree_root = root->fs_info->tree_root;
        unsigned long nr;
        int ret;

        while (1) {
                /*
                 * we don't want to jump in and create a bunch of
                 * delayed refs if the transaction is starting to close
                 */
                wait_transaction_pre_flush(tree_root->fs_info);
                trans = btrfs_start_transaction(tree_root, 1);

                /*
                 * we've joined a transaction, make sure it isn't
                 * closing right now
                 */
                if (trans->transaction->delayed_refs.flushing) {
                        btrfs_end_transaction(trans, tree_root);
                        continue;
                }

                ret = btrfs_drop_snapshot(trans, root);
                if (ret != -EAGAIN)
                        break;

                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        break;

                nr = trans->blocks_used;
                ret = btrfs_end_transaction(trans, tree_root);
                BUG_ON(ret);

                btrfs_btree_balance_dirty(tree_root, nr);
                cond_resched();
        }
        BUG_ON(ret);

        ret = btrfs_del_root(trans, tree_root, &root->root_key);
        BUG_ON(ret);

        nr = trans->blocks_used;
        ret = btrfs_end_transaction(trans, tree_root);
        BUG_ON(ret);

        free_extent_buffer(root->node);
        free_extent_buffer(root->commit_root);
        kfree(root);

        btrfs_btree_balance_dirty(tree_root, nr);
        return ret;
}
#endif

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit.  This does the actual creation
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        int ret;
        u64 objectid;

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                ret = -ENOMEM;
                goto fail;
        }
        ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid);
        if (ret)
                goto fail;

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));

        key.objectid = objectid;
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);

        old = btrfs_lock_root_node(root);
        btrfs_cow_block(trans, root, old, NULL, 0, &old);
        btrfs_set_lock_blocking(old);

        btrfs_copy_root(trans, root, old, &tmp, objectid);
        btrfs_tree_unlock(old);
        free_extent_buffer(old);

        btrfs_set_root_node(new_root_item, tmp);
        ret = btrfs_insert_root(trans, root->fs_info->tree_root, &key,
                                new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret)
                goto fail;

        key.offset = (u64)-1;
        memcpy(&pending->root_key, &key, sizeof(key));
fail:
        kfree(new_root_item);
        return ret;
}

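/*
 * add the directory entry and root backref for a snapshot created earlier
 * in this commit
 */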
static noinline int finish_pending_snapshot(struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        int ret;
        int namelen;
        u64 index = 0;
        struct btrfs_trans_handle *trans;
        struct inode *parent_inode;
        struct btrfs_root *parent_root;

        parent_inode = pending->dentry->d_parent->d_inode;
        parent_root = BTRFS_I(parent_inode)->root;
        trans = btrfs_join_transaction(parent_root, 1);

        /*
         * insert the directory item
         */
        namelen = strlen(pending->name);
        ret = btrfs_set_inode_index(parent_inode, &index);
        ret = btrfs_insert_dir_item(trans, parent_root,
                            pending->name, namelen,
                            parent_inode->i_ino,
                            &pending->root_key, BTRFS_FT_DIR, index);

        if (ret)
                goto fail;

        btrfs_i_size_write(parent_inode, parent_inode->i_size + namelen * 2);
        ret = btrfs_update_inode(trans, parent_root, parent_inode);
        BUG_ON(ret);

        ret = btrfs_add_root_ref(trans, parent_root->fs_info->tree_root,
                                 pending->root_key.objectid,
                                 parent_root->root_key.objectid,
                                 parent_inode->i_ino, index, pending->name,
                                 namelen);

        BUG_ON(ret);

fail:
        btrfs_end_transaction(trans, fs_info->fs_root);
        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        list_for_each_entry(pending, head, list) {
                ret = create_pending_snapshot(trans, fs_info, pending);
                BUG_ON(ret);
        }
        return 0;
}

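/*
 * do the directory inserts for every snapshot created during this commit,
 * then free each pending_snapshot
 */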
static noinline int finish_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret;

        while (!list_empty(head)) {
                pending = list_entry(head->next,
                                     struct btrfs_pending_snapshot, list);
                ret = finish_pending_snapshot(fs_info, pending);
                BUG_ON(ret);
                list_del(&pending->list);
                kfree(pending->name);
                kfree(pending);
        }
        return 0;
}

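/*
 * copy the latest tree root and chunk root pointers into the in-memory
 * copy of the super block, ready for the next super block write.
 */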
static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = &root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
}

int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        int ret = 0;
        spin_lock(&info->new_trans_lock);
        if (info->running_transaction)
                ret = info->running_transaction->in_commit;
        spin_unlock(&info->new_trans_lock);
        return ret;
}

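/*
 * commit the running transaction.  In rough order: flush the delayed
 * refs, wait for (or absorb) the other writers, create any pending
 * snapshots, commit the fs and cowonly roots, write and wait on the
 * dirty btree blocks and the super block, then wake up everyone waiting
 * on the commit.
 */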
int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root)
{
        unsigned long joined = 0;
        unsigned long timeout = 1;
        struct btrfs_transaction *cur_trans;
        struct btrfs_transaction *prev_trans = NULL;
        DEFINE_WAIT(wait);
        int ret;
        int should_grow = 0;
        unsigned long now = get_seconds();
        int flush_on_commit = btrfs_test_opt(root, FLUSHONCOMMIT);

        btrfs_run_ordered_operations(root, 0);

        /* make a pass through all the delayed refs we have so far;
         * any running procs may add more while we are here
         */
        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);

        cur_trans = trans->transaction;
        /*
         * set the flushing flag so procs in this transaction have to
         * start sending their work down.
         */
        cur_trans->delayed_refs.flushing = 1;

        ret = btrfs_run_delayed_refs(trans, root, 0);
        BUG_ON(ret);

        mutex_lock(&root->fs_info->trans_mutex);
        if (cur_trans->in_commit) {
                cur_trans->use_count++;
                mutex_unlock(&root->fs_info->trans_mutex);
                btrfs_end_transaction(trans, root);

                ret = wait_for_commit(root, cur_trans);
                BUG_ON(ret);

                mutex_lock(&root->fs_info->trans_mutex);
                put_transaction(cur_trans);
                mutex_unlock(&root->fs_info->trans_mutex);

                return 0;
        }

        trans->transaction->in_commit = 1;
        trans->transaction->blocked = 1;
        if (cur_trans->list.prev != &root->fs_info->trans_list) {
                prev_trans = list_entry(cur_trans->list.prev,
                                        struct btrfs_transaction, list);
                if (!prev_trans->commit_done) {
                        prev_trans->use_count++;
                        mutex_unlock(&root->fs_info->trans_mutex);

                        wait_for_commit(root, prev_trans);

                        mutex_lock(&root->fs_info->trans_mutex);
                        put_transaction(prev_trans);
                }
        }

        if (now < cur_trans->start_time || now - cur_trans->start_time < 1)
                should_grow = 1;

        do {
                int snap_pending = 0;
                joined = cur_trans->num_joined;
                if (!list_empty(&trans->transaction->pending_snapshots))
                        snap_pending = 1;

                WARN_ON(cur_trans != trans->transaction);
                prepare_to_wait(&cur_trans->writer_wait, &wait,
                                TASK_UNINTERRUPTIBLE);

                if (cur_trans->num_writers > 1)
                        timeout = MAX_SCHEDULE_TIMEOUT;
                else if (should_grow)
                        timeout = 1;

                mutex_unlock(&root->fs_info->trans_mutex);

                if (flush_on_commit || snap_pending) {
                        btrfs_start_delalloc_inodes(root, 1);
                        ret = btrfs_wait_ordered_extents(root, 0, 1);
                        BUG_ON(ret);
                }

                /*
                 * rename doesn't use btrfs_join_transaction, so once we
                 * set the transaction to blocked above, we aren't going
                 * to get any new ordered operations.  We can safely run
                 * it here and know for sure that nothing new will be
                 * added to the list
                 */
                btrfs_run_ordered_operations(root, 1);

                smp_mb();
                if (cur_trans->num_writers > 1 || should_grow)
                        schedule_timeout(timeout);

                mutex_lock(&root->fs_info->trans_mutex);
                finish_wait(&cur_trans->writer_wait, &wait);
        } while (cur_trans->num_writers > 1 ||
                 (should_grow && cur_trans->num_joined != joined));

        ret = create_pending_snapshots(trans, root->fs_info);
        BUG_ON(ret);

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        BUG_ON(ret);

        WARN_ON(cur_trans != trans->transaction);

        /* commit_cowonly_roots is responsible for getting the
         * various roots consistent with each other.  Every pointer
         * in the tree of tree roots has to point to the most up to date
         * root for every subvolume and other tree.  So, we have to keep
         * the tree logging code from jumping in and changing any
         * of the trees.
         *
         * At this point in the commit, there can't be any tree-log
         * writers, but a little lower down we drop the trans mutex
         * and let new people in.  By holding the tree_log_mutex
         * from now until after the super is written, we avoid races
         * with the tree-log code.
         */
        mutex_lock(&root->fs_info->tree_log_mutex);

        ret = commit_fs_roots(trans, root);
        BUG_ON(ret);

        /* commit_fs_roots gets rid of all the tree log roots; it is now
         * safe to free the log root tree
         */
        btrfs_free_log_root_tree(trans, root->fs_info);

        ret = commit_cowonly_roots(trans, root);
        BUG_ON(ret);

        btrfs_prepare_extent_commit(trans, root);

        cur_trans = root->fs_info->running_transaction;
        spin_lock(&root->fs_info->new_trans_lock);
        root->fs_info->running_transaction = NULL;
        spin_unlock(&root->fs_info->new_trans_lock);

        btrfs_set_root_node(&root->fs_info->tree_root->root_item,
                            root->fs_info->tree_root->node);
        switch_commit_root(root->fs_info->tree_root);

        btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
                            root->fs_info->chunk_root->node);
        switch_commit_root(root->fs_info->chunk_root);

        update_super_roots(root);

        if (!root->fs_info->log_root_recovering) {
                btrfs_set_super_log_root(&root->fs_info->super_copy, 0);
                btrfs_set_super_log_root_level(&root->fs_info->super_copy, 0);
        }

        memcpy(&root->fs_info->super_for_commit, &root->fs_info->super_copy,
               sizeof(root->fs_info->super_copy));

        trans->transaction->blocked = 0;

        wake_up(&root->fs_info->transaction_wait);

        mutex_unlock(&root->fs_info->trans_mutex);
        ret = btrfs_write_and_wait_transaction(trans, root);
        BUG_ON(ret);
        write_ctree_super(trans, root, 0);

        /*
         * the super is written, we can safely allow the tree-loggers
         * to go about their business
         */
        mutex_unlock(&root->fs_info->tree_log_mutex);

        btrfs_finish_extent_commit(trans, root);

        /* do the directory inserts of any pending snapshot creations */
        finish_pending_snapshots(trans, root->fs_info);

        mutex_lock(&root->fs_info->trans_mutex);

        cur_trans->commit_done = 1;

        root->fs_info->last_trans_committed = cur_trans->transid;

        wake_up(&cur_trans->commit_wait);

        /* one put for the ref taken when this transaction was created and
         * one for the ref held by this trans handle
         */
        put_transaction(cur_trans);
        put_transaction(cur_trans);

        mutex_unlock(&root->fs_info->trans_mutex);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        kmem_cache_free(btrfs_trans_handle_cachep, trans);

        if (current != root->fs_info->transaction_kthread)
                btrfs_run_delayed_iputs(root);

        return ret;
}

/*
 * interface function to delete all the snapshots we have scheduled for
 * deletion
 */
int btrfs_clean_old_snapshots(struct btrfs_root *root)
{
        LIST_HEAD(list);
        struct btrfs_fs_info *fs_info = root->fs_info;

        mutex_lock(&fs_info->trans_mutex);
        list_splice_init(&fs_info->dead_roots, &list);
        mutex_unlock(&fs_info->trans_mutex);

        while (!list_empty(&list)) {
                root = list_entry(list.next, struct btrfs_root, root_list);
                list_del(&root->root_list);

                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
                        btrfs_drop_snapshot(root, 0);
                else
                        btrfs_drop_snapshot(root, 1);
        }
        return 0;
}