/*
 * linux/fs/jbd2/transaction.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Generic filesystem transaction handling code; part of the ext2fs
 * journaling system.
 *
 * This file manages transactions (compound commits managed by the
 * journaling code) and handles (individual atomic operations by the
 * filesystem).
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/hrtimer.h>
#include <linux/backing-dev.h>
#include <linux/module.h>
static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);

/*
 * jbd2_get_transaction: obtain a new transaction_t object.
 *
 * Simply allocate and initialise a new transaction.  Create it in
 * RUNNING state and add it to the current journal (which should not
 * have an existing running transaction: we only make a new transaction
 * once we have started to commit the old one).
 *
 * Preconditions:
 *      The journal MUST be locked.  We don't perform atomic mallocs on the
 *      new transaction and we can't block without protecting against other
 *      processes trying to touch the journal while it is in transition.
 *
 */

static transaction_t *
jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
{
        transaction->t_journal = journal;
        transaction->t_state = T_RUNNING;
        transaction->t_start_time = ktime_get();
        transaction->t_tid = journal->j_transaction_sequence++;
        transaction->t_expires = jiffies + journal->j_commit_interval;
        spin_lock_init(&transaction->t_handle_lock);
        atomic_set(&transaction->t_updates, 0);
        atomic_set(&transaction->t_outstanding_credits, 0);
        atomic_set(&transaction->t_handle_count, 0);
        INIT_LIST_HEAD(&transaction->t_inode_list);
        INIT_LIST_HEAD(&transaction->t_private_list);

        /* Set up the commit timer for the new transaction. */
        journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
        add_timer(&journal->j_commit_timer);

        J_ASSERT(journal->j_running_transaction == NULL);
        journal->j_running_transaction = transaction;
        transaction->t_max_wait = 0;
        transaction->t_start = jiffies;

        return transaction;
}

/*
 * Handle management.
 *
 * A handle_t is an object which represents a single atomic update to a
 * filesystem, and which tracks all of the modifications which form part
 * of that one update.
 */

/*
 * start_this_handle: Given a handle, deal with any locking or stalling
 * needed to make sure that there is enough journal space for the handle
 * to begin.  Attach the handle to a transaction and set up the
 * transaction's buffer credits.
 */

static int start_this_handle(journal_t *journal, handle_t *handle,
                             int gfp_mask)
{
        transaction_t *transaction;
        int needed;
        int nblocks = handle->h_buffer_credits;
        transaction_t *new_transaction = NULL;
        unsigned long ts = jiffies;

        if (nblocks > journal->j_max_transaction_buffers) {
                printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
                       current->comm, nblocks,
                       journal->j_max_transaction_buffers);
                return -ENOSPC;
        }

alloc_transaction:
        if (!journal->j_running_transaction) {
                new_transaction = kzalloc(sizeof(*new_transaction), gfp_mask);
                if (!new_transaction) {
                        /*
                         * If __GFP_FS is not present, then we may be
                         * being called from inside the fs writeback
                         * layer, so we MUST NOT fail.  Since
                         * __GFP_NOFAIL is going away, we will arrange
                         * to retry the allocation ourselves.
                         */
                        if ((gfp_mask & __GFP_FS) == 0) {
                                congestion_wait(BLK_RW_ASYNC, HZ/50);
                                goto alloc_transaction;
                        }
                        return -ENOMEM;
                }
        }

        jbd_debug(3, "New handle %p going live.\n", handle);

        /*
         * We need to hold j_state_lock until t_updates has been incremented,
         * for proper journal barrier handling
         */
repeat:
        read_lock(&journal->j_state_lock);
        if (is_journal_aborted(journal) ||
            (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
                read_unlock(&journal->j_state_lock);
                kfree(new_transaction);
                return -EROFS;
        }

        /* Wait on the journal's transaction barrier if necessary */
        if (journal->j_barrier_count) {
                read_unlock(&journal->j_state_lock);
                wait_event(journal->j_wait_transaction_locked,
                                journal->j_barrier_count == 0);
                goto repeat;
        }

        if (!journal->j_running_transaction) {
                read_unlock(&journal->j_state_lock);
                if (!new_transaction)
                        goto alloc_transaction;
                write_lock(&journal->j_state_lock);
                if (!journal->j_running_transaction) {
                        jbd2_get_transaction(journal, new_transaction);
                        new_transaction = NULL;
                }
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        transaction = journal->j_running_transaction;

        /*
         * If the current transaction is locked down for commit, wait for the
         * lock to be released.
         */
        if (transaction->t_state == T_LOCKED) {
                DEFINE_WAIT(wait);

                prepare_to_wait(&journal->j_wait_transaction_locked,
                                        &wait, TASK_UNINTERRUPTIBLE);
                read_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * If there is not enough space left in the log to write all potential
         * buffers requested by this operation, we need to stall pending a log
         * checkpoint to free some more log space.
         */
        needed = atomic_add_return(nblocks,
                                   &transaction->t_outstanding_credits);

        if (needed > journal->j_max_transaction_buffers) {
                /*
                 * If the current transaction is already too large, then start
                 * to commit it: we can then go back and attach this handle to
                 * a new transaction.
                 */
                DEFINE_WAIT(wait);

                jbd_debug(2, "Handle %p starting new commit...\n", handle);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
                                TASK_UNINTERRUPTIBLE);
                __jbd2_log_start_commit(journal, transaction->t_tid);
                read_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_transaction_locked, &wait);
                goto repeat;
        }

        /*
         * The commit code assumes that it can get enough log space
         * without forcing a checkpoint.  This is *critical* for
         * correctness: a checkpoint of a buffer which is also
         * associated with a committing transaction creates a deadlock,
         * so commit simply cannot force through checkpoints.
         *
         * We must therefore ensure the necessary space in the journal
         * *before* starting to dirty potentially checkpointed buffers
         * in the new transaction.
         *
         * The worst part is, any transaction currently committing can
         * reduce the free space arbitrarily.  Be careful to account for
         * those buffers when checkpointing.
         */

        /*
         * @@@ AKPM: This seems rather over-defensive.  We're giving commit
         * a _lot_ of headroom: 1/4 of the journal plus the size of
         * the committing transaction.  Really, we only need to give it
         * committing_transaction->t_outstanding_credits plus "enough" for
         * the log control blocks.
         * Also, this test is inconsistent with the matching one in
         * jbd2_journal_extend().
         */
        if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
                jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
                atomic_sub(nblocks, &transaction->t_outstanding_credits);
                read_unlock(&journal->j_state_lock);
                write_lock(&journal->j_state_lock);
                if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
                        __jbd2_log_wait_for_space(journal);
                write_unlock(&journal->j_state_lock);
                goto repeat;
        }

        /* OK, account for the buffers that this operation expects to
         * use and add the handle to the running transaction.
         *
         * In order for t_max_wait to be reliable, it must be
         * protected by a lock.  But doing so will mean that
         * start_this_handle() can not be run in parallel on SMP
         * systems, which limits our scalability.  So we only enable
         * it when debugging is enabled.  We may want to use a
         * separate flag, eventually, so we can enable this
         * independently of debugging.
         */
#ifdef CONFIG_JBD2_DEBUG
        if (jbd2_journal_enable_debug &&
            time_after(transaction->t_start, ts)) {
                ts = jbd2_time_diff(ts, transaction->t_start);
                spin_lock(&transaction->t_handle_lock);
                if (ts > transaction->t_max_wait)
                        transaction->t_max_wait = ts;
                spin_unlock(&transaction->t_handle_lock);
        }
#endif
        handle->h_transaction = transaction;
        atomic_inc(&transaction->t_updates);
        atomic_inc(&transaction->t_handle_count);
        jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
                  handle, nblocks,
                  atomic_read(&transaction->t_outstanding_credits),
                  __jbd2_log_space_left(journal));
        read_unlock(&journal->j_state_lock);

        lock_map_acquire(&handle->h_lockdep_map);
        kfree(new_transaction);
        return 0;
}

static struct lock_class_key jbd2_handle_key;

/* Allocate a new handle.  This should probably be in a slab... */
static handle_t *new_handle(int nblocks)
{
        handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
        if (!handle)
                return NULL;
        memset(handle, 0, sizeof(*handle));
        handle->h_buffer_credits = nblocks;
        handle->h_ref = 1;

        lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
                                                &jbd2_handle_key, 0);

        return handle;
}

/**
 * handle_t *jbd2_journal_start() - Obtain a new handle.
 * @journal: Journal to start transaction on.
 * @nblocks: number of block buffers we might modify
 * @gfp_mask: allocation mask used if a new transaction must be allocated
 *
 * We make sure that the transaction can guarantee at least nblocks of
 * modified buffers in the log.  We block until the log can guarantee
 * that much space.
 *
 * This function is visible to journal users (like ext3fs), so is not
 * called with the journal already locked.
 *
 * Return a pointer to a newly allocated handle, or an ERR_PTR() value
 * on failure.
 */
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
{
        handle_t *handle = journal_current_handle();
        int err;

        if (!journal)
                return ERR_PTR(-EROFS);

        if (handle) {
                J_ASSERT(handle->h_transaction->t_journal == journal);
                handle->h_ref++;
                return handle;
        }

        handle = new_handle(nblocks);
        if (!handle)
                return ERR_PTR(-ENOMEM);

        current->journal_info = handle;

        err = start_this_handle(journal, handle, gfp_mask);
        if (err < 0) {
                jbd2_free_handle(handle);
                current->journal_info = NULL;
                handle = ERR_PTR(err);
                goto out;
        }
out:
        return handle;
}
EXPORT_SYMBOL(jbd2__journal_start);

handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
{
        return jbd2__journal_start(journal, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_start);
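
/*
 * Illustrative sketch (not part of this file): the typical caller-side
 * pattern for a short metadata update, in the style of a filesystem such
 * as ext4.  "bh" and the credit count are placeholders and error handling
 * is abbreviated.
 *
 *      handle_t *handle = jbd2_journal_start(journal, 1);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      err = jbd2_journal_get_write_access(handle, bh);
 *      if (!err) {
 *              ... modify bh->b_data ...
 *              err = jbd2_journal_dirty_metadata(handle, bh);
 *      }
 *      err2 = jbd2_journal_stop(handle);
 *      if (!err)
 *              err = err2;
 *
 * The credit count passed to jbd2_journal_start() must cover every buffer
 * the update may dirty.
 */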

/**
 * int jbd2_journal_extend() - extend buffer credits.
 * @handle:  handle to 'extend'
 * @nblocks: nr blocks to try to extend by.
 *
 * Some transactions, such as large extends and truncates, can be done
 * atomically all at once or in several stages.  The operation requests
 * a credit for a number of buffer modifications in advance, but can
 * extend its credit if it needs more.
 *
 * jbd2_journal_extend tries to give the running handle more buffer credits.
 * It does not guarantee the allocation - this is best-effort only.
 * The calling process MUST be able to deal cleanly with a failure to
 * extend here.
 *
 * Return 0 on success, non-zero on failure.
 *
 * return code < 0 implies an error
 * return code > 0 implies normal transaction-full status.
 */
int jbd2_journal_extend(handle_t *handle, int nblocks)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int result;
        int wanted;

        result = -EIO;
        if (is_handle_aborted(handle))
                goto out;

        result = 1;

        read_lock(&journal->j_state_lock);

        /* Don't extend a locked-down transaction! */
        if (handle->h_transaction->t_state != T_RUNNING) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction not running\n", handle, nblocks);
                goto error_out;
        }

        spin_lock(&transaction->t_handle_lock);
        wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;

        if (wanted > journal->j_max_transaction_buffers) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "transaction too large\n", handle, nblocks);
                goto unlock;
        }

        if (wanted > __jbd2_log_space_left(journal)) {
                jbd_debug(3, "denied handle %p %d blocks: "
                          "insufficient log space\n", handle, nblocks);
                goto unlock;
        }

        handle->h_buffer_credits += nblocks;
        atomic_add(nblocks, &transaction->t_outstanding_credits);
        result = 0;

        jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
unlock:
        spin_unlock(&transaction->t_handle_lock);
error_out:
        read_unlock(&journal->j_state_lock);
out:
        return result;
}

/**
 * int jbd2_journal_restart() - restart a handle.
 * @handle:  handle to restart
 * @nblocks: nr credits requested
 *
 * Restart a handle for a multi-transaction filesystem
 * operation.
 *
 * If the jbd2_journal_extend() call above fails to grant new buffer credits
 * to a running handle, a call to jbd2_journal_restart will commit the
 * handle's transaction so far and reattach the handle to a new
 * transaction capable of guaranteeing the requested number of
 * credits.
 */
int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int ret;

        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
                return 0;

        /*
         * First unlink the handle from its current transaction, and start the
         * commit on that.
         */
        J_ASSERT(atomic_read(&transaction->t_updates) > 0);
        J_ASSERT(journal_current_handle() == handle);

        read_lock(&journal->j_state_lock);
        spin_lock(&transaction->t_handle_lock);
        atomic_sub(handle->h_buffer_credits,
                   &transaction->t_outstanding_credits);
        if (atomic_dec_and_test(&transaction->t_updates))
                wake_up(&journal->j_wait_updates);
        spin_unlock(&transaction->t_handle_lock);

        jbd_debug(2, "restarting handle %p\n", handle);
        __jbd2_log_start_commit(journal, transaction->t_tid);
        read_unlock(&journal->j_state_lock);

        lock_map_release(&handle->h_lockdep_map);
        handle->h_buffer_credits = nblocks;
        ret = start_this_handle(journal, handle, gfp_mask);
        return ret;
}
EXPORT_SYMBOL(jbd2__journal_restart);

int jbd2_journal_restart(handle_t *handle, int nblocks)
{
        return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
}
EXPORT_SYMBOL(jbd2_journal_restart);
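
/*
 * Illustrative sketch (not part of this file): extend-or-restart, the
 * usual way long-running operations such as truncate keep going when a
 * handle runs short of credits.  Names are placeholders.
 *
 *      if (handle->h_buffer_credits < needed) {
 *              err = jbd2_journal_extend(handle, needed);
 *              if (err > 0)
 *                      err = jbd2_journal_restart(handle, needed);
 *              if (err)
 *                      goto out;
 *      }
 *
 * A positive return from jbd2_journal_extend() means the transaction was
 * full, so the caller falls back to jbd2_journal_restart(), which commits
 * the old transaction and attaches the handle to a new one.  Any buffers
 * dirtied before the restart belong to the old transaction.
 */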

/**
 * void jbd2_journal_lock_updates() - establish a transaction barrier.
 * @journal:  Journal to establish a barrier on.
 *
 * This locks out any further updates from being started, and blocks
 * until all existing updates have completed, returning only once the
 * journal is in a quiescent state with no updates running.
 *
 * The journal lock should not be held on entry.
 */
void jbd2_journal_lock_updates(journal_t *journal)
{
        DEFINE_WAIT(wait);

        write_lock(&journal->j_state_lock);
        ++journal->j_barrier_count;

        /* Wait until there are no running updates */
        while (1) {
                transaction_t *transaction = journal->j_running_transaction;

                if (!transaction)
                        break;

                spin_lock(&transaction->t_handle_lock);
                if (!atomic_read(&transaction->t_updates)) {
                        spin_unlock(&transaction->t_handle_lock);
                        break;
                }
                prepare_to_wait(&journal->j_wait_updates, &wait,
                                TASK_UNINTERRUPTIBLE);
                spin_unlock(&transaction->t_handle_lock);
                write_unlock(&journal->j_state_lock);
                schedule();
                finish_wait(&journal->j_wait_updates, &wait);
                write_lock(&journal->j_state_lock);
        }
        write_unlock(&journal->j_state_lock);

        /*
         * We have now established a barrier against other normal updates, but
         * we also need to barrier against other jbd2_journal_lock_updates() calls
         * to make sure that we serialise special journal-locked operations
         * too.
         */
        mutex_lock(&journal->j_barrier);
}

/**
 * void jbd2_journal_unlock_updates() - release the transaction barrier
 * @journal:  Journal to release the barrier on.
 *
 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
 *
 * Should be called without the journal lock held.
 */
void jbd2_journal_unlock_updates(journal_t *journal)
{
        J_ASSERT(journal->j_barrier_count != 0);

        mutex_unlock(&journal->j_barrier);
        write_lock(&journal->j_state_lock);
        --journal->j_barrier_count;
        write_unlock(&journal->j_state_lock);
        wake_up(&journal->j_wait_transaction_locked);
}
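
/*
 * Illustrative sketch (not part of this file): the barrier pair is used
 * to quiesce the journal around operations that must not race with
 * updates, e.g. an ioctl-driven journal flush:
 *
 *      jbd2_journal_lock_updates(journal);
 *      jbd2_journal_flush(journal);
 *      jbd2_journal_unlock_updates(journal);
 *
 * jbd2_journal_flush() lives elsewhere in this subsystem; whether a given
 * caller wants it inside the lock/unlock pair is filesystem policy.
 */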

static void warn_dirty_buffer(struct buffer_head *bh)
{
        char b[BDEVNAME_SIZE];

        printk(KERN_WARNING
               "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
               "There's a risk of filesystem corruption in case of system "
               "crash.\n",
               bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
}

/*
 * If the buffer is already part of the current transaction, then there
 * is nothing we need to do.  If it is already part of a prior
 * transaction which we are still committing to disk, then we need to
 * make sure that we do not overwrite the old copy: we do copy-out to
 * preserve the copy going to disk.  We also account the buffer against
 * the handle's metadata buffer credits (unless the buffer is already
 * part of the transaction, that is).
 *
 */
static int
do_get_write_access(handle_t *handle, struct journal_head *jh,
                        int force_copy)
{
        struct buffer_head *bh;
        transaction_t *transaction;
        journal_t *journal;
        int error;
        char *frozen_buffer = NULL;
        int need_copy = 0;

        if (is_handle_aborted(handle))
                return -EROFS;

        transaction = handle->h_transaction;
        journal = transaction->t_journal;

        jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);

        JBUFFER_TRACE(jh, "entry");
repeat:
        bh = jh2bh(jh);

        /* @@@ Need to check for errors here at some point. */

        lock_buffer(bh);
        jbd_lock_bh_state(bh);

        /* We now hold the buffer lock so it is safe to query the buffer
         * state.  Is the buffer dirty?
         *
         * If so, there are two possibilities.  The buffer may be
         * non-journaled, and undergoing a quite legitimate writeback.
         * Otherwise, it is journaled, and we don't expect dirty buffers
         * in that state (the buffers should be marked JBD_Dirty
         * instead.)  So either the IO is being done under our own
         * control and this is a bug, or it's a third party IO such as
         * dump(8) (which may leave the buffer scheduled for read ---
         * ie. locked but not dirty) or tune2fs (which may actually have
         * the buffer dirtied, ugh.)  */

        if (buffer_dirty(bh)) {
                /*
                 * First question: is this buffer already part of the current
                 * transaction or the existing committing transaction?
                 */
                if (jh->b_transaction) {
                        J_ASSERT_JH(jh,
                                jh->b_transaction == transaction ||
                                jh->b_transaction ==
                                        journal->j_committing_transaction);
                        if (jh->b_next_transaction)
                                J_ASSERT_JH(jh, jh->b_next_transaction ==
                                                        transaction);
                        warn_dirty_buffer(bh);
                }
                /*
                 * In any case we need to clean the dirty flag and we must
                 * do it under the buffer lock to be sure we don't race
                 * with running write-out.
                 */
                JBUFFER_TRACE(jh, "Journalling dirty buffer");
                clear_buffer_dirty(bh);
                set_buffer_jbddirty(bh);
        }

        unlock_buffer(bh);

        error = -EROFS;
        if (is_handle_aborted(handle)) {
                jbd_unlock_bh_state(bh);
                goto out;
        }
        error = 0;

        /*
         * The buffer is already part of this transaction if b_transaction or
         * b_next_transaction points to it
         */
        if (jh->b_transaction == transaction ||
            jh->b_next_transaction == transaction)
                goto done;

        /*
         * this is the first time this transaction is touching this buffer,
         * reset the modified flag
         */
        jh->b_modified = 0;

        /*
         * If there is already a copy-out version of this buffer, then we don't
         * need to make another one
         */
        if (jh->b_frozen_data) {
                JBUFFER_TRACE(jh, "has frozen data");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                jh->b_next_transaction = transaction;
                goto done;
        }

        /* Is there data here we need to preserve? */

        if (jh->b_transaction && jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "owned by older transaction");
                J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);

                /* There is one case we have to be very careful about.
                 * If the committing transaction is currently writing
                 * this buffer out to disk and has NOT made a copy-out,
                 * then we cannot modify the buffer contents at all
                 * right now.  The essence of copy-out is that it is the
                 * extra copy, not the primary copy, which gets
                 * journaled.  If the primary copy is already going to
                 * disk then we cannot do copy-out here. */

                if (jh->b_jlist == BJ_Shadow) {
                        DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
                        wait_queue_head_t *wqh;

                        wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);

                        JBUFFER_TRACE(jh, "on shadow: sleep");
                        jbd_unlock_bh_state(bh);
                        /* commit wakes up all shadow buffers after IO */
                        for ( ; ; ) {
                                prepare_to_wait(wqh, &wait.wait,
                                                TASK_UNINTERRUPTIBLE);
                                if (jh->b_jlist != BJ_Shadow)
                                        break;
                                schedule();
                        }
                        finish_wait(wqh, &wait.wait);
                        goto repeat;
                }

                /* Only do the copy if the currently-owning transaction
                 * still needs it.  If it is on the Forget list, the
                 * committing transaction is past that stage.  The
                 * buffer had better remain locked during the kmalloc,
                 * but that should be true --- we hold the journal lock
                 * still and the buffer is already on the BUF_JOURNAL
                 * list so won't be flushed.
                 *
                 * Subtle point, though: if this is a get_undo_access,
                 * then we will be relying on the frozen_data to contain
                 * the new value of the committed_data record after the
                 * transaction, so we HAVE to force the frozen_data copy
                 * in that case. */

                if (jh->b_jlist != BJ_Forget || force_copy) {
                        JBUFFER_TRACE(jh, "generate frozen data");
                        if (!frozen_buffer) {
                                JBUFFER_TRACE(jh, "allocate memory for buffer");
                                jbd_unlock_bh_state(bh);
                                frozen_buffer =
                                        jbd2_alloc(jh2bh(jh)->b_size,
                                                         GFP_NOFS);
                                if (!frozen_buffer) {
                                        printk(KERN_EMERG
                                               "%s: OOM for frozen_buffer\n",
                                               __func__);
                                        JBUFFER_TRACE(jh, "oom!");
                                        error = -ENOMEM;
                                        jbd_lock_bh_state(bh);
                                        goto done;
                                }
                                goto repeat;
                        }
                        jh->b_frozen_data = frozen_buffer;
                        frozen_buffer = NULL;
                        need_copy = 1;
                }
                jh->b_next_transaction = transaction;
        }

        /*
         * Finally, if the buffer is not journaled right now, we need to make
         * sure it doesn't get written to disk before the caller actually
         * commits the new data
         */
        if (!jh->b_transaction) {
                JBUFFER_TRACE(jh, "no transaction");
                J_ASSERT_JH(jh, !jh->b_next_transaction);
                jh->b_transaction = transaction;
                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                spin_lock(&journal->j_list_lock);
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
                spin_unlock(&journal->j_list_lock);
        }

done:
        if (need_copy) {
                struct page *page;
                int offset;
                char *source;

                J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
                            "Possible IO failure.\n");
                page = jh2bh(jh)->b_page;
                offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
                source = kmap_atomic(page, KM_USER0);
                memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
                kunmap_atomic(source, KM_USER0);

                /*
                 * Now that the frozen data is saved off, we need to store
                 * any matching triggers.
                 */
                jh->b_frozen_triggers = jh->b_triggers;
        }
        jbd_unlock_bh_state(bh);

        /*
         * If we are about to journal a buffer, then any revoke pending on it is
         * no longer valid
         */
        jbd2_journal_cancel_revoke(handle, jh);

out:
        if (unlikely(frozen_buffer))    /* It's usually NULL */
                jbd2_free(frozen_buffer, bh->b_size);

        JBUFFER_TRACE(jh, "exit");
        return error;
}

/**
 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
 * @handle: transaction to add buffer modifications to
 * @bh:     bh to be used for metadata writes
 *
 * Returns an error code or 0 on success.
 *
 * In full data journalling mode the buffer may be of type BJ_AsyncData,
 * because we're write()ing a buffer which is also part of a shared mapping.
 */

int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
{
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int rc;

        /* We do not want to get caught playing with fields which the
         * log thread also manipulates.  Make sure that the buffer
         * completes any outstanding IO before proceeding. */
        rc = do_get_write_access(handle, jh, 0);
        jbd2_journal_put_journal_head(jh);
        return rc;
}

/*
 * When the user wants to journal a newly created buffer_head
 * (ie. getblk() returned a new buffer and we are going to populate it
 * manually rather than reading off disk), then we need to keep the
 * buffer_head locked until it has been completely filled with new
 * data.  In this case, we should be able to make the assertion that
 * the bh is not already part of an existing transaction.
 *
 * The buffer should already be locked by the caller by this point.
 * There is no lock ranking violation: it was a newly created,
 * unlocked buffer beforehand. */

/**
 * int jbd2_journal_get_create_access() - notify intent to use newly created bh
 * @handle: transaction to add the new buffer to
 * @bh: new buffer.
 *
 * Call this if you create a new bh.
 */
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        int err;

        jbd_debug(5, "journal_head %p\n", jh);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
        err = 0;

        JBUFFER_TRACE(jh, "entry");
        /*
         * The buffer may already belong to this transaction due to pre-zeroing
         * in the filesystem's new_block code.  It may also be on the previous,
         * committing transaction's lists, but it HAS to be in Forget state in
         * that case: the transaction must have deleted the buffer for it to be
         * reused here.
         */
        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);
        J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
                jh->b_transaction == NULL ||
                (jh->b_transaction == journal->j_committing_transaction &&
                          jh->b_jlist == BJ_Forget)));

        J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
        J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));

        if (jh->b_transaction == NULL) {
                /*
                 * Previous jbd2_journal_forget() could have left the buffer
                 * with jbddirty bit set because it was being committed. When
                 * the commit finished, we've filed the buffer for
                 * checkpointing and marked it dirty. Now we are reallocating
                 * the buffer so the transaction freeing it must have
                 * committed and so it's safe to clear the dirty bit.
                 */
                clear_buffer_dirty(jh2bh(jh));
                jh->b_transaction = transaction;

                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "file as BJ_Reserved");
                __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
        } else if (jh->b_transaction == journal->j_committing_transaction) {
                /* first access by this transaction */
                jh->b_modified = 0;

                JBUFFER_TRACE(jh, "set next transaction");
                jh->b_next_transaction = transaction;
        }
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);

        /*
         * akpm: I added this.  ext3_alloc_branch can pick up new indirect
         * blocks which contain freed but then revoked metadata.  We need
         * to cancel the revoke in case we end up freeing it yet again
         * and then reallocating it as data - this would cause a second
         * revoke, which hits an assertion error.
         */
        JBUFFER_TRACE(jh, "cancelling revoke");
        jbd2_journal_cancel_revoke(handle, jh);
        jbd2_journal_put_journal_head(jh);
out:
        return err;
}
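
/*
 * Illustrative sketch (not part of this file): journaling a freshly
 * allocated block.  The caller keeps the buffer locked until it has been
 * fully initialised, per the comment above.  Names are placeholders.
 *
 *      bh = sb_getblk(sb, blocknr);
 *      lock_buffer(bh);
 *      err = jbd2_journal_get_create_access(handle, bh);
 *      if (!err) {
 *              memset(bh->b_data, 0, bh->b_size);
 *              set_buffer_uptodate(bh);
 *      }
 *      unlock_buffer(bh);
 *      if (!err)
 *              err = jbd2_journal_dirty_metadata(handle, bh);
 */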

/**
 * int jbd2_journal_get_undo_access() -  Notify intent to modify metadata with
 *     non-rewindable consequences
 * @handle: transaction
 * @bh: buffer to undo
 *
 * Sometimes there is a need to distinguish between metadata which has
 * been committed to disk and that which has not.  The ext3fs code uses
 * this for freeing and allocating space, we have to make sure that we
 * do not reuse freed space until the deallocation has been committed,
 * since if we overwrote that space we would make the delete
 * un-rewindable in case of a crash.
 *
 * To deal with that, jbd2_journal_get_undo_access requests write access to a
 * buffer for parts of non-rewindable operations such as delete
 * operations on the bitmaps.  The journaling code must keep a copy of
 * the buffer's contents prior to the undo_access call until such time
 * as we know that the buffer has definitely been committed to disk.
 *
 * We never need to know which transaction the committed data is part
 * of, buffers touched here are guaranteed to be dirtied later and so
 * will be committed to a new transaction in due course, at which point
 * we can discard the old committed data pointer.
 *
 * Returns error number or 0 on success.
 */
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
{
        int err;
        struct journal_head *jh = jbd2_journal_add_journal_head(bh);
        char *committed_data = NULL;

        JBUFFER_TRACE(jh, "entry");

        /*
         * Do this first --- it can drop the journal lock, so we want to
         * make sure that obtaining the committed_data is done
         * atomically wrt. completion of any outstanding commits.
         */
        err = do_get_write_access(handle, jh, 1);
        if (err)
                goto out;

repeat:
        if (!jh->b_committed_data) {
                committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
                if (!committed_data) {
                        printk(KERN_EMERG "%s: No memory for committed data\n",
                                __func__);
                        err = -ENOMEM;
                        goto out;
                }
        }

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data) {
                /* Copy out the current buffer contents into the
                 * preserved, committed copy. */
                JBUFFER_TRACE(jh, "generate b_committed data");
                if (!committed_data) {
                        jbd_unlock_bh_state(bh);
                        goto repeat;
                }

                jh->b_committed_data = committed_data;
                committed_data = NULL;
                memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
        }
        jbd_unlock_bh_state(bh);
out:
        jbd2_journal_put_journal_head(jh);
        if (unlikely(committed_data))
                jbd2_free(committed_data, bh->b_size);
        return err;
}
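
/*
 * Illustrative sketch (not part of this file): undo access around a
 * bitmap update in an allocator's free path, the canonical user of this
 * interface.  Names are placeholders.
 *
 *      err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *      if (err)
 *              goto out;
 *      ... clear the relevant bits in bitmap_bh->b_data ...
 *      err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 *
 * The allocator can then consult jh->b_committed_data rather than b_data
 * when deciding whether a block may be reused before the free commits.
 */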

/**
 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
 * @bh: buffer to trigger on
 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
 *
 * Set any triggers on this journal_head.  This is always safe, because
 * triggers for a committing buffer will be saved off, and triggers for
 * a running transaction will match the buffer in that transaction.
 *
 * Call with NULL to clear the triggers.
 */
void jbd2_journal_set_triggers(struct buffer_head *bh,
                               struct jbd2_buffer_trigger_type *type)
{
        struct journal_head *jh = bh2jh(bh);

        jh->b_triggers = type;
}

void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
                                struct jbd2_buffer_trigger_type *triggers)
{
        struct buffer_head *bh = jh2bh(jh);

        if (!triggers || !triggers->t_commit)
                return;

        triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
}

void jbd2_buffer_abort_trigger(struct journal_head *jh,
                               struct jbd2_buffer_trigger_type *triggers)
{
        if (!triggers || !triggers->t_abort)
                return;

        triggers->t_abort(triggers, jh2bh(jh));
}
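
/*
 * Illustrative sketch (not part of this file): a trigger type as a
 * client such as OCFS2 might define one, assuming the callback
 * signatures implied by the two helpers above.
 *
 *      static void my_commit_trigger(struct jbd2_buffer_trigger_type *type,
 *                                    struct buffer_head *bh,
 *                                    void *mapped_data, size_t size)
 *      {
 *              ... e.g. recompute a block checksum in mapped_data ...
 *      }
 *
 *      static struct jbd2_buffer_trigger_type my_triggers = {
 *              .t_commit = my_commit_trigger,
 *      };
 *
 *      jbd2_journal_set_triggers(bh, &my_triggers);
 */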

/**
 * int jbd2_journal_dirty_metadata() -  mark a buffer as containing dirty metadata
 * @handle: transaction to add buffer to.
 * @bh: buffer to mark
 *
 * Mark dirty metadata which needs to be journaled as part of the current
 * transaction.
 *
 * The buffer is placed on the transaction's metadata list and is marked
 * as belonging to the transaction.
 *
 * Returns error number or 0 on success.
 *
 * Special care needs to be taken if the buffer already belongs to the
 * current committing transaction (in which case we should have frozen
 * data present for that commit).  In that case, we don't relink the
 * buffer: that only gets done when the old transaction finally
 * completes its commit.
 */
int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh = bh2jh(bh);

        jbd_debug(5, "journal_head %p\n", jh);
        JBUFFER_TRACE(jh, "entry");
        if (is_handle_aborted(handle))
                goto out;

        jbd_lock_bh_state(bh);

        if (jh->b_modified == 0) {
                /*
                 * This buffer has been modified and is becoming part
                 * of the transaction.  This needs to be done once per
                 * transaction -bzzz
                 */
                jh->b_modified = 1;
                J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
                handle->h_buffer_credits--;
        }

        /*
         * fastpath, to avoid expensive locking.  If this buffer is already
         * on the running transaction's metadata list there is nothing to do.
         * Nobody can take it off again because there is a handle open.
         * I _think_ we're OK here with SMP barriers - a mistaken decision will
         * result in this test being false, so we go in and take the locks.
         */
        if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
                JBUFFER_TRACE(jh, "fastpath");
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_running_transaction);
                goto out_unlock_bh;
        }

        set_buffer_jbddirty(bh);

        /*
         * Metadata already on the current transaction list doesn't
         * need to be filed.  Metadata on another transaction's list must
         * be committing, and will be refiled once the commit completes:
         * leave it alone for now.
         */
        if (jh->b_transaction != transaction) {
                JBUFFER_TRACE(jh, "already on other transaction");
                J_ASSERT_JH(jh, jh->b_transaction ==
                                        journal->j_committing_transaction);
                J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
                /* And this case is illegal: we can't reuse another
                 * transaction's data buffer, ever. */
                goto out_unlock_bh;
        }

        /* That test should have eliminated the following case: */
        J_ASSERT_JH(jh, jh->b_frozen_data == NULL);

        JBUFFER_TRACE(jh, "file as BJ_Metadata");
        spin_lock(&journal->j_list_lock);
        __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
        spin_unlock(&journal->j_list_lock);
out_unlock_bh:
        jbd_unlock_bh_state(bh);
out:
        JBUFFER_TRACE(jh, "exit");
        return 0;
}

/*
 * jbd2_journal_release_buffer: undo a get_write_access without any buffer
 * updates, if the update decided in the end that it didn't need access.
 *
 */
void
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
{
        BUFFER_TRACE(bh, "entry");
}

/**
 * int jbd2_journal_forget() - bforget() for potentially-journaled buffers.
 * @handle: transaction handle
 * @bh:     bh to 'forget'
 *
 * We can only do the bforget if there are no commits pending against the
 * buffer.  If the buffer is dirty in the current running transaction we
 * can safely unlink it.
 *
 * bh may not be a journalled buffer at all - it may be a non-JBD
 * buffer which came off the hashtable.  Check for this.
 *
 * Decrements bh->b_count by one.
 *
 * Allow this call even if the handle has aborted --- it may be part of
 * the caller's cleanup after an abort.
 */
int jbd2_journal_forget(handle_t *handle, struct buffer_head *bh)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        struct journal_head *jh;
        int drop_reserve = 0;
        int err = 0;
        int was_modified = 0;

        BUFFER_TRACE(bh, "entry");

        jbd_lock_bh_state(bh);
        spin_lock(&journal->j_list_lock);

        if (!buffer_jbd(bh))
                goto not_jbd;
        jh = bh2jh(bh);

        /* Critical error: attempting to delete a bitmap buffer, maybe?
         * Don't do any jbd operations, and return an error. */
        if (!J_EXPECT_JH(jh, !jh->b_committed_data,
                         "inconsistent data on disk")) {
                err = -EIO;
                goto not_jbd;
        }

        /* keep track of whether or not this transaction modified us */
        was_modified = jh->b_modified;

        /*
         * The buffer's going from the transaction, we must drop
         * all references -bzzz
         */
        jh->b_modified = 0;

        if (jh->b_transaction == handle->h_transaction) {
                J_ASSERT_JH(jh, !jh->b_frozen_data);

                /* If we are forgetting a buffer which is already part
                 * of this transaction, then we can just drop it from
                 * the transaction immediately. */
                clear_buffer_dirty(bh);
                clear_buffer_jbddirty(bh);

                JBUFFER_TRACE(jh, "belongs to current transaction: unfile");

                /*
                 * we only want to drop a reference if this transaction
                 * modified the buffer
                 */
                if (was_modified)
                        drop_reserve = 1;

                /*
                 * We are no longer going to journal this buffer.
                 * However, the commit of this transaction is still
                 * important to the buffer: the delete that we are now
                 * processing might obsolete an old log entry, so by
                 * committing, we can satisfy the buffer's checkpoint.
                 *
                 * So, if we have a checkpoint on the buffer, we should
                 * now refile the buffer on our BJ_Forget list so that
                 * we know to remove the checkpoint after we commit.
                 */

                if (jh->b_cp_transaction) {
                        __jbd2_journal_temp_unlink_buffer(jh);
                        __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
                } else {
                        __jbd2_journal_unfile_buffer(jh);
                        jbd2_journal_remove_journal_head(bh);
                        __brelse(bh);
                        if (!buffer_jbd(bh)) {
                                spin_unlock(&journal->j_list_lock);
                                jbd_unlock_bh_state(bh);
                                __bforget(bh);
                                goto drop;
                        }
                }
        } else if (jh->b_transaction) {
                J_ASSERT_JH(jh, (jh->b_transaction ==
                                 journal->j_committing_transaction));
                /* However, if the buffer is still owned by a prior
                 * (committing) transaction, we can't drop it yet... */
                JBUFFER_TRACE(jh, "belongs to older transaction");
                /* ... but we CAN drop it from the new transaction if we
                 * have also modified it since the original commit. */

                if (jh->b_next_transaction) {
                        J_ASSERT(jh->b_next_transaction == transaction);
                        jh->b_next_transaction = NULL;

                        /*
                         * only drop a reference if this transaction modified
                         * the buffer
                         */
                        if (was_modified)
                                drop_reserve = 1;
                }
        }

not_jbd:
        spin_unlock(&journal->j_list_lock);
        jbd_unlock_bh_state(bh);
        __brelse(bh);
drop:
        if (drop_reserve) {
                /* no need to reserve log space for this block -bzzz */
                handle->h_buffer_credits++;
        }
        return err;
}
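
/*
 * Illustrative sketch (not part of this file): a metadata free path.
 * Names are placeholders.
 *
 *      bh = sb_find_get_block(sb, blocknr);
 *      if (bh)
 *              err = jbd2_journal_forget(handle, bh);
 *
 * sb_find_get_block() returns the buffer with an elevated reference
 * count, which jbd2_journal_forget() consumes ("Decrements bh->b_count
 * by one", per the comment above), so no extra brelse() follows.
 */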

/**
 * int jbd2_journal_stop() - complete a transaction
 * @handle: transaction to complete.
 *
 * All done for a particular handle.
 *
 * There is not much action needed here.  We just return any remaining
 * buffer credits to the transaction and remove the handle.  The only
 * complication is that we need to start a commit operation if the
 * filesystem is marked for synchronous update.
 *
 * jbd2_journal_stop itself will not usually return an error, but it may
 * do so in unusual circumstances.  In particular, expect it to
 * return -EIO if a jbd2_journal_abort has been executed since the
 * transaction began.
 */
int jbd2_journal_stop(handle_t *handle)
{
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal = transaction->t_journal;
        int err, wait_for_commit = 0;
        tid_t tid;
        pid_t pid;

        J_ASSERT(journal_current_handle() == handle);

        if (is_handle_aborted(handle))
                err = -EIO;
        else {
                J_ASSERT(atomic_read(&transaction->t_updates) > 0);
                err = 0;
        }

        if (--handle->h_ref > 0) {
                jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
                          handle->h_ref);
                return err;
        }

        jbd_debug(4, "Handle %p going down\n", handle);

        /*
         * Implement synchronous transaction batching.  If the handle
         * was synchronous, don't force a commit immediately.  Let's
         * yield and let another thread piggyback onto this
         * transaction.  Keep doing that while new threads continue to
         * arrive.  It doesn't cost much - we're about to run a commit
         * and sleep on IO anyway.  Speeds up many-threaded, many-dir
         * operations by 30x or more...
         *
         * We try and optimize the sleep time against what the
         * underlying disk can do, instead of having a static sleep
         * time.  This is useful for the case where our storage is so
         * fast that it is more optimal to go ahead and force a flush
         * and wait for the transaction to be committed than it is to
         * wait for an arbitrary amount of time for new writers to
         * join the transaction.  We achieve this by measuring how
         * long it takes to commit a transaction, and compare it with
         * how long this transaction has been running, and if run time
         * < commit time then we sleep for the delta and commit.  This
         * greatly helps super fast disks that would see slowdowns as
         * more threads started doing fsyncs.
         *
         * But don't do this if this process was the most recent one
         * to perform a synchronous write.  We do this to detect the
         * case where a single process is doing a stream of sync
         * writes.  No point in waiting for joiners in that case.
         */
        pid = current->pid;
        if (handle->h_sync && journal->j_last_sync_writer != pid) {
                u64 commit_time, trans_time;

                journal->j_last_sync_writer = pid;

                read_lock(&journal->j_state_lock);
                commit_time = journal->j_average_commit_time;
                read_unlock(&journal->j_state_lock);

                trans_time = ktime_to_ns(ktime_sub(ktime_get(),
                                                   transaction->t_start_time));

                commit_time = max_t(u64, commit_time,
                                    1000*journal->j_min_batch_time);
                commit_time = min_t(u64, commit_time,
                                    1000*journal->j_max_batch_time);

                if (trans_time < commit_time) {
                        ktime_t expires = ktime_add_ns(ktime_get(),
                                                       commit_time);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
                }
        }
1353
1354         if (handle->h_sync)
1355                 transaction->t_synchronous_commit = 1;
1356         current->journal_info = NULL;
1357         atomic_sub(handle->h_buffer_credits,
1358                    &transaction->t_outstanding_credits);
1359
1360         /*
1361          * If the handle is marked SYNC, we need to set another commit
1362          * going!  We also want to force a commit if the current
1363          * transaction is occupying too much of the log, or if the
1364          * transaction is too old now.
1365          */
1366         if (handle->h_sync ||
1367             (atomic_read(&transaction->t_outstanding_credits) >
1368              journal->j_max_transaction_buffers) ||
1369             time_after_eq(jiffies, transaction->t_expires)) {
1370                 /* Do this even for aborted journals: an abort still
1371                  * completes the commit thread, it just doesn't write
1372                  * anything to disk. */
1373
1374                 jbd_debug(2, "transaction too old, requesting commit for "
1375                                         "handle %p\n", handle);
1376                 /* This is non-blocking */
1377                 jbd2_log_start_commit(journal, transaction->t_tid);
1378
1379                 /*
1380                  * Special case: JBD2_SYNC synchronous updates require us
1381                  * to wait for the commit to complete.
1382                  */
1383                 if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1384                         wait_for_commit = 1;
1385         }
1386
1387         /*
1388          * Once we drop t_updates, if it goes to zero the transaction
1389          * could start committing on us and eventually disappear.  So
1390          * once we do this, we must not dereference the transaction
1391          * pointer again.
1392          */
1393         tid = transaction->t_tid;
1394         if (atomic_dec_and_test(&transaction->t_updates)) {
1395                 wake_up(&journal->j_wait_updates);
1396                 if (journal->j_barrier_count)
1397                         wake_up(&journal->j_wait_transaction_locked);
1398         }
1399
1400         if (wait_for_commit)
1401                 err = jbd2_log_wait_commit(journal, tid);
1402
1403         lock_map_release(&handle->h_lockdep_map);
1404
1405         jbd2_free_handle(handle);
1406         return err;
1407 }
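
/*
 * Illustrative sketch, not part of jbd2: the typical caller pattern
 * that ends in jbd2_journal_stop().  The helper name, the buffer_head
 * argument and the single-credit reservation are assumptions made for
 * the example.
 */
#if 0
static int example_modify_one_block(journal_t *journal,
                                    struct buffer_head *bh)
{
        handle_t *handle;
        int err, err2;

        /* Reserve one buffer credit; this joins the running
         * transaction or starts a new one. */
        handle = jbd2_journal_start(journal, 1);
        if (IS_ERR(handle))
                return PTR_ERR(handle);

        /* Declare the intent to modify the buffer under this handle. */
        err = jbd2_journal_get_write_access(handle, bh);
        if (!err) {
                /* ... modify bh->b_data here ... */
                err = jbd2_journal_dirty_metadata(handle, bh);
        }

        /* Drop the handle; as seen above, this may batch, kick or
         * even wait for a commit. */
        err2 = jbd2_journal_stop(handle);
        return err ? err : err2;
}
#endif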
1408
1409 /**
1410  * int jbd2_journal_force_commit() - force any uncommitted transactions
1411  * @journal: journal to force
1412  *
1413  * For synchronous operations: force any uncommitted transactions
1414  * to disk.  May seem kludgy, but it reuses all the handle batching
1415  * code in a very simple manner.
1416  */
1417 int jbd2_journal_force_commit(journal_t *journal)
1418 {
1419         handle_t *handle;
1420         int ret;
1421
1422         handle = jbd2_journal_start(journal, 1);
1423         if (IS_ERR(handle)) {
1424                 ret = PTR_ERR(handle);
1425         } else {
1426                 handle->h_sync = 1;
1427                 ret = jbd2_journal_stop(handle);
1428         }
1429         return ret;
1430 }
1431
1432 /*
1433  *
1434  * List management code snippets: various functions for manipulating the
1435  * transaction buffer lists.
1436  *
1437  */
1438
1439 /*
1440  * Append a buffer to a transaction list, given the transaction's list head
1441  * pointer.
1442  *
1443  * j_list_lock is held.
1444  *
1445  * jbd_lock_bh_state(jh2bh(jh)) is held.
1446  */
1447
1448 static inline void
1449 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1450 {
1451         if (!*list) {
1452                 jh->b_tnext = jh->b_tprev = jh;
1453                 *list = jh;
1454         } else {
1455                 /* Insert at the tail of the list to preserve order */
1456                 struct journal_head *first = *list, *last = first->b_tprev;
1457                 jh->b_tprev = last;
1458                 jh->b_tnext = first;
1459                 last->b_tnext = first->b_tprev = jh;
1460         }
1461 }
1462
1463 /*
1464  * Remove a buffer from a transaction list, given the transaction's list
1465  * head pointer.
1466  *
1467  * Called with j_list_lock held, and the journal may not be locked.
1468  *
1469  * jbd_lock_bh_state(jh2bh(jh)) is held.
1470  */
1471
1472 static inline void
1473 __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
1474 {
1475         if (*list == jh) {
1476                 *list = jh->b_tnext;
1477                 if (*list == jh)
1478                         *list = NULL;
1479         }
1480         jh->b_tprev->b_tnext = jh->b_tnext;
1481         jh->b_tnext->b_tprev = jh->b_tprev;
1482 }
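
/*
 * Illustrative sketch, not part of jbd2: how one of these circular
 * b_tnext/b_tprev lists is walked.  The callback parameter is an
 * assumption made for the example.
 */
#if 0
static void example_walk_blist(struct journal_head *list,
                               void (*fn)(struct journal_head *jh))
{
        struct journal_head *jh = list;

        if (!jh)
                return;
        do {
                fn(jh);
                jh = jh->b_tnext;
        } while (jh != list);   /* circular: done once back at the head */
}
#endif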
1483
1484 /*
1485  * Remove a buffer from the appropriate transaction list.
1486  *
1487  * Note that this function can *change* the value of
1488  * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
1489  * t_log_list or t_reserved_list.  If the caller is holding onto a copy of one
1490  * of these pointers, it could go bad.  Generally the caller needs to re-read
1491  * the pointer from the transaction_t.
1492  *
1493  * Called under j_list_lock.  The journal may not be locked.
1494  */
1495 void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1496 {
1497         struct journal_head **list = NULL;
1498         transaction_t *transaction;
1499         struct buffer_head *bh = jh2bh(jh);
1500
1501         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1502         transaction = jh->b_transaction;
1503         if (transaction)
1504                 assert_spin_locked(&transaction->t_journal->j_list_lock);
1505
1506         J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1507         if (jh->b_jlist != BJ_None)
1508                 J_ASSERT_JH(jh, transaction != NULL);
1509
1510         switch (jh->b_jlist) {
1511         case BJ_None:
1512                 return;
1513         case BJ_Metadata:
1514                 transaction->t_nr_buffers--;
1515                 J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
1516                 list = &transaction->t_buffers;
1517                 break;
1518         case BJ_Forget:
1519                 list = &transaction->t_forget;
1520                 break;
1521         case BJ_IO:
1522                 list = &transaction->t_iobuf_list;
1523                 break;
1524         case BJ_Shadow:
1525                 list = &transaction->t_shadow_list;
1526                 break;
1527         case BJ_LogCtl:
1528                 list = &transaction->t_log_list;
1529                 break;
1530         case BJ_Reserved:
1531                 list = &transaction->t_reserved_list;
1532                 break;
1533         }
1534
1535         __blist_del_buffer(list, jh);
1536         jh->b_jlist = BJ_None;
1537         if (test_clear_buffer_jbddirty(bh))
1538                 mark_buffer_dirty(bh);  /* Expose it to the VM */
1539 }
1540
1541 void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1542 {
1543         __jbd2_journal_temp_unlink_buffer(jh);
1544         jh->b_transaction = NULL;
1545 }
1546
1547 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1548 {
1549         jbd_lock_bh_state(jh2bh(jh));
1550         spin_lock(&journal->j_list_lock);
1551         __jbd2_journal_unfile_buffer(jh);
1552         spin_unlock(&journal->j_list_lock);
1553         jbd_unlock_bh_state(jh2bh(jh));
1554 }
1555
1556 /*
1557  * Called from jbd2_journal_try_to_free_buffers().
1558  *
1559  * Called under jbd_lock_bh_state(bh)
1560  */
1561 static void
1562 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
1563 {
1564         struct journal_head *jh;
1565
1566         jh = bh2jh(bh);
1567
1568         if (buffer_locked(bh) || buffer_dirty(bh))
1569                 goto out;
1570
1571         if (jh->b_next_transaction != NULL)
1572                 goto out;
1573
1574         spin_lock(&journal->j_list_lock);
1575         if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
1576                 /* written-back checkpointed metadata buffer */
1577                 if (jh->b_jlist == BJ_None) {
1578                         JBUFFER_TRACE(jh, "remove from checkpoint list");
1579                         __jbd2_journal_remove_checkpoint(jh);
1580                         jbd2_journal_remove_journal_head(bh);
1581                         __brelse(bh);
1582                 }
1583         }
1584         spin_unlock(&journal->j_list_lock);
1585 out:
1586         return;
1587 }
1588
1589 /**
1590  * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
1591  * @journal: journal for operation
1592  * @page: to try and free
1593  * @gfp_mask: we use the mask to detect how hard we should try to release
1594  * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for commit code to
1595  * release the buffers.
1596  *
1597  *
1598  * For all the buffers on this page,
1599  * if they are fully written out ordered data, move them onto BUF_CLEAN
1600  * so try_to_free_buffers() can reap them.
1601  *
1602  * This function calls try_to_free_buffers() itself and returns non-zero
1603  * if the page's buffers could be freed.  If any buffer on the page is
1604  * still attached to the journal (buffer_jbd) after the cleanup pass
1605  * below, we bail out and return zero instead.
1606  *
1607  * This complicates JBD locking somewhat.  We aren't protected by the
1608  * BKL here.  We wish to remove the buffer from its committing or
1609  * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
1610  *
1611  * This may *change* the value of transaction_t->t_datalist, so anyone
1612  * who looks at t_datalist needs to lock against this function.
1613  *
1614  * Even worse, someone may be doing a jbd2_journal_dirty_data on this
1615  * buffer.  So we need to lock against that.  jbd2_journal_dirty_data()
1616  * will come out of the lock with the buffer dirty, which makes it
1617  * ineligible for release here.
1618  *
1619  * Who else is affected by this?  hmm...  Really the only contender
1620  * is do_get_write_access() - it could be looking at the buffer while
1621  * journal_try_to_free_buffer() is changing its state.  But that
1622  * cannot happen because we never reallocate freed data as metadata
1623  * while the data is part of a transaction.  Yes?
1624  *
1625  * Return 0 on failure, 1 on success
1626  */
1627 int jbd2_journal_try_to_free_buffers(journal_t *journal,
1628                                 struct page *page, gfp_t gfp_mask)
1629 {
1630         struct buffer_head *head;
1631         struct buffer_head *bh;
1632         int ret = 0;
1633
1634         J_ASSERT(PageLocked(page));
1635
1636         head = page_buffers(page);
1637         bh = head;
1638         do {
1639                 struct journal_head *jh;
1640
1641                 /*
1642                  * We take our own ref against the journal_head here to avoid
1643                  * having to add tons of locking around each instance of
1644                  * jbd2_journal_remove_journal_head() and
1645                  * jbd2_journal_put_journal_head().
1646                  */
1647                 jh = jbd2_journal_grab_journal_head(bh);
1648                 if (!jh)
1649                         continue;
1650
1651                 jbd_lock_bh_state(bh);
1652                 __journal_try_to_free_buffer(journal, bh);
1653                 jbd2_journal_put_journal_head(jh);
1654                 jbd_unlock_bh_state(bh);
1655                 if (buffer_jbd(bh))
1656                         goto busy;
1657         } while ((bh = bh->b_this_page) != head);
1658
1659         ret = try_to_free_buffers(page);
1660
1661 busy:
1662         return ret;
1663 }
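
/*
 * Illustrative sketch, not part of jbd2: how a client filesystem might
 * wire this up as its ->releasepage address_space operation.  The
 * example_get_journal() helper is a hypothetical stand-in; a real
 * filesystem derives the journal from the page's inode.
 */
#if 0
static int example_releasepage(struct page *page, gfp_t wait)
{
        journal_t *journal = example_get_journal(page); /* hypothetical */

        if (!page_has_buffers(page))
                return 0;
        return jbd2_journal_try_to_free_buffers(journal, page, wait);
}
#endif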
1664
1665 /*
1666  * This buffer is no longer needed.  If it is on an older transaction's
1667  * checkpoint list we need to record it on this transaction's forget list
1668  * to pin this buffer (and hence its checkpointing transaction) down until
1669  * this transaction commits.  If the buffer isn't on a checkpoint list, we
1670  * release it.
1671  * Returns non-zero if JBD no longer has an interest in the buffer.
1672  *
1673  * Called under j_list_lock.
1674  *
1675  * Called under jbd_lock_bh_state(bh).
1676  */
1677 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1678 {
1679         int may_free = 1;
1680         struct buffer_head *bh = jh2bh(jh);
1681
1682         __jbd2_journal_unfile_buffer(jh);
1683
1684         if (jh->b_cp_transaction) {
1685                 JBUFFER_TRACE(jh, "on running+cp transaction");
1686                 /*
1687                  * We don't want to write the buffer anymore, clear the
1688                  * bit so that we don't confuse checks in
1689                  * __journal_file_buffer
1690                  */
1691                 clear_buffer_dirty(bh);
1692                 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1693                 may_free = 0;
1694         } else {
1695                 JBUFFER_TRACE(jh, "on running transaction");
1696                 jbd2_journal_remove_journal_head(bh);
1697                 __brelse(bh);
1698         }
1699         return may_free;
1700 }
1701
1702 /*
1703  * jbd2_journal_invalidatepage
1704  *
1705  * This code is tricky.  It has a number of cases to deal with.
1706  *
1707  * There are two invariants which this code relies on:
1708  *
1709  * i_size must be updated on disk before we start calling invalidatepage on the
1710  * data.
1711  *
1712  *  This is done in ext3 by defining an ext3_setattr method which
1713  *  updates i_size before truncate gets going.  By maintaining this
1714  *  invariant, we can be sure that it is safe to throw away any buffers
1715  *  attached to the current transaction: once the transaction commits,
1716  *  we know that the data will not be needed.
1717  *
1718  *  Note however that we can *not* throw away data belonging to the
1719  *  previous, committing transaction!
1720  *
1721  * Any disk blocks which *are* part of the previous, committing
1722  * transaction (and which therefore cannot be discarded immediately) are
1723  * not going to be reused in the new running transaction
1724  *
1725  *  The bitmap committed_data images guarantee this: any block which is
1726  *  allocated in one transaction and removed in the next will be marked
1727  *  as in-use in the committed_data bitmap, so cannot be reused until
1728  *  the next transaction to delete the block commits.  This means that
1729  *  leaving committing buffers dirty is quite safe: the disk blocks
1730  *  cannot be reallocated to a different file and so buffer aliasing is
1731  *  not possible.
1732  *
1733  *
1734  * The above applies mainly to ordered data mode.  In writeback mode we
1735  * don't make guarantees about the order in which data hits disk --- in
1736  * particular we don't guarantee that new dirty data is flushed before
1737  * transaction commit --- so it is always safe just to discard data
1738  * immediately in that mode.  --sct
1739  */
1740
1741 /*
1742  * The journal_unmap_buffer helper function returns zero if the buffer
1743  * concerned remains pinned as an anonymous buffer belonging to an older
1744  * transaction.
1745  *
1746  * We're outside-transaction here.  Either or both of j_running_transaction
1747  * and j_committing_transaction may be NULL.
1748  */
1749 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1750 {
1751         transaction_t *transaction;
1752         struct journal_head *jh;
1753         int may_free = 1;
1754         int ret;
1755
1756         BUFFER_TRACE(bh, "entry");
1757
1758         /*
1759          * It is safe to proceed here without the j_list_lock because the
1760          * buffers cannot be stolen by try_to_free_buffers as long as we are
1761          * holding the page lock. --sct
1762          */
1763
1764         if (!buffer_jbd(bh))
1765                 goto zap_buffer_unlocked;
1766
1767         /* OK, we have data buffer in journaled mode */
1768         write_lock(&journal->j_state_lock);
1769         jbd_lock_bh_state(bh);
1770         spin_lock(&journal->j_list_lock);
1771
1772         jh = jbd2_journal_grab_journal_head(bh);
1773         if (!jh)
1774                 goto zap_buffer_no_jh;
1775
1776         /*
1777          * We cannot remove the buffer from checkpoint lists until the
1778          * transaction adding inode to orphan list (let's call it T)
1779          * is committed.  Otherwise if the transaction changing the
1780          * buffer would be cleaned from the journal before T is
1781          * committed, a crash would cause the correct contents of
1782          * the buffer to be lost.  On the other hand we have to
1783          * clear the buffer dirty bit no later than the moment when the
1784          * transaction marking the buffer as freed in the filesystem
1785          * structures is committed because from that moment on the
1786          * buffer can be reallocated and used by a different page.
1787          * Since the block hasn't been freed yet but the inode has
1788          * already been added to orphan list, it is safe for us to add
1789          * the buffer to BJ_Forget list of the newest transaction.
1790          */
1791         transaction = jh->b_transaction;
1792         if (transaction == NULL) {
1793                 /* First case: not on any transaction.  If it
1794                  * has no checkpoint link, then we can zap it:
1795                  * it's a writeback-mode buffer so we don't care
1796                  * if it hits disk safely. */
1797                 if (!jh->b_cp_transaction) {
1798                         JBUFFER_TRACE(jh, "not on any transaction: zap");
1799                         goto zap_buffer;
1800                 }
1801
1802                 if (!buffer_dirty(bh)) {
1803                         /* bdflush has written it.  We can drop it now */
1804                         goto zap_buffer;
1805                 }
1806
1807                 /* OK, it must be in the journal but still not
1808                  * written fully to disk: it's metadata or
1809                  * journaled data... */
1810
1811                 if (journal->j_running_transaction) {
1812                         /* ... and once the current transaction has
1813                          * committed, the buffer won't be needed any
1814                          * longer. */
1815                         JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
1816                         ret = __dispose_buffer(jh,
1817                                         journal->j_running_transaction);
1818                         jbd2_journal_put_journal_head(jh);
1819                         spin_unlock(&journal->j_list_lock);
1820                         jbd_unlock_bh_state(bh);
1821                         write_unlock(&journal->j_state_lock);
1822                         return ret;
1823                 } else {
1824                         /* There is no currently-running transaction. So the
1825                          * orphan record which we wrote for this file must have
1826                          * passed into commit.  We must attach this buffer to
1827                          * the committing transaction, if it exists. */
1828                         if (journal->j_committing_transaction) {
1829                                 JBUFFER_TRACE(jh, "give to committing trans");
1830                                 ret = __dispose_buffer(jh,
1831                                         journal->j_committing_transaction);
1832                                 jbd2_journal_put_journal_head(jh);
1833                                 spin_unlock(&journal->j_list_lock);
1834                                 jbd_unlock_bh_state(bh);
1835                                 write_unlock(&journal->j_state_lock);
1836                                 return ret;
1837                         } else {
1838                                 /* The orphan record's transaction has
1839                                  * committed.  We can cleanse this buffer */
1840                                 clear_buffer_jbddirty(bh);
1841                                 goto zap_buffer;
1842                         }
1843                 }
1844         } else if (transaction == journal->j_committing_transaction) {
1845                 JBUFFER_TRACE(jh, "on committing transaction");
1846                 /*
1847                  * The buffer is committing, we simply cannot touch
1848                  * it. So we just set j_next_transaction to the
1849                  * running transaction (if there is one) and mark
1850                  * buffer as freed so that commit code knows it should
1851                  * clear dirty bits when it is done with the buffer.
1852                  */
1853                 set_buffer_freed(bh);
1854                 if (journal->j_running_transaction && buffer_jbddirty(bh))
1855                         jh->b_next_transaction = journal->j_running_transaction;
1856                 jbd2_journal_put_journal_head(jh);
1857                 spin_unlock(&journal->j_list_lock);
1858                 jbd_unlock_bh_state(bh);
1859                 write_unlock(&journal->j_state_lock);
1860                 return 0;
1861         } else {
1862                 /* Good, the buffer belongs to the running transaction.
1863                  * We are writing our own transaction's data, not any
1864                  * previous one's, so it is safe to throw it away
1865                  * (remember that we expect the filesystem to have set
1866                  * i_size already for this truncate so recovery will not
1867                  * expose the disk blocks we are discarding here.) */
1868                 J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
1869                 JBUFFER_TRACE(jh, "on running transaction");
1870                 may_free = __dispose_buffer(jh, transaction);
1871         }
1872
1873 zap_buffer:
1874         jbd2_journal_put_journal_head(jh);
1875 zap_buffer_no_jh:
1876         spin_unlock(&journal->j_list_lock);
1877         jbd_unlock_bh_state(bh);
1878         write_unlock(&journal->j_state_lock);
1879 zap_buffer_unlocked:
1880         clear_buffer_dirty(bh);
1881         J_ASSERT_BH(bh, !buffer_jbddirty(bh));
1882         clear_buffer_mapped(bh);
1883         clear_buffer_req(bh);
1884         clear_buffer_new(bh);
1885         bh->b_bdev = NULL;
1886         return may_free;
1887 }
1888
1889 /**
1890  * void jbd2_journal_invalidatepage()
1891  * @journal: journal to use for flush...
1892  * @page:    page to flush
1893  * @offset:  offset within the page from which to start invalidating
1894  *
1895  * Reap page buffers containing data after offset in page.
1896  *
1897  */
1898 void jbd2_journal_invalidatepage(journal_t *journal,
1899                       struct page *page,
1900                       unsigned long offset)
1901 {
1902         struct buffer_head *head, *bh, *next;
1903         unsigned int curr_off = 0;
1904         int may_free = 1;
1905
1906         if (!PageLocked(page))
1907                 BUG();
1908         if (!page_has_buffers(page))
1909                 return;
1910
1911         /* We will potentially be playing with lists other than just the
1912          * data lists (especially for journaled data mode), so be
1913          * cautious in our locking. */
1914
1915         head = bh = page_buffers(page);
1916         do {
1917                 unsigned int next_off = curr_off + bh->b_size;
1918                 next = bh->b_this_page;
1919
1920                 if (offset <= curr_off) {
1921                         /* This block is wholly outside the truncation point */
1922                         lock_buffer(bh);
1923                         may_free &= journal_unmap_buffer(journal, bh);
1924                         unlock_buffer(bh);
1925                 }
1926                 curr_off = next_off;
1927                 bh = next;
1928
1929         } while (bh != head);
1930
1931         if (!offset) {
1932                 if (may_free && try_to_free_buffers(page))
1933                         J_ASSERT(!page_has_buffers(page));
1934         }
1935 }
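
/*
 * Illustrative sketch, not part of jbd2: forwarding a filesystem's
 * ->invalidatepage address_space operation to the journal.  The
 * example_get_journal() helper is again a hypothetical stand-in.
 */
#if 0
static void example_invalidatepage(struct page *page, unsigned long offset)
{
        journal_t *journal = example_get_journal(page); /* hypothetical */

        jbd2_journal_invalidatepage(journal, page, offset);
}
#endif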
1936
1937 /*
1938  * File a buffer on the given transaction list.
1939  */
1940 void __jbd2_journal_file_buffer(struct journal_head *jh,
1941                         transaction_t *transaction, int jlist)
1942 {
1943         struct journal_head **list = NULL;
1944         int was_dirty = 0;
1945         struct buffer_head *bh = jh2bh(jh);
1946
1947         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1948         assert_spin_locked(&transaction->t_journal->j_list_lock);
1949
1950         J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1951         J_ASSERT_JH(jh, jh->b_transaction == transaction ||
1952                                 jh->b_transaction == NULL);
1953
1954         if (jh->b_transaction && jh->b_jlist == jlist)
1955                 return;
1956
1957         if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
1958             jlist == BJ_Shadow || jlist == BJ_Forget) {
1959                 /*
1960                  * For metadata buffers, we track dirty bit in buffer_jbddirty
1961                  * instead of buffer_dirty. We should not see a dirty bit set
1962                  * here because we clear it in do_get_write_access but e.g.
1963                  * tune2fs can modify the sb and set the dirty bit at any time
1964                  * so we try to gracefully handle that.
1965                  */
1966                 if (buffer_dirty(bh))
1967                         warn_dirty_buffer(bh);
1968                 if (test_clear_buffer_dirty(bh) ||
1969                     test_clear_buffer_jbddirty(bh))
1970                         was_dirty = 1;
1971         }
1972
1973         if (jh->b_transaction)
1974                 __jbd2_journal_temp_unlink_buffer(jh);
1975         jh->b_transaction = transaction;
1976
1977         switch (jlist) {
1978         case BJ_None:
1979                 J_ASSERT_JH(jh, !jh->b_committed_data);
1980                 J_ASSERT_JH(jh, !jh->b_frozen_data);
1981                 return;
1982         case BJ_Metadata:
1983                 transaction->t_nr_buffers++;
1984                 list = &transaction->t_buffers;
1985                 break;
1986         case BJ_Forget:
1987                 list = &transaction->t_forget;
1988                 break;
1989         case BJ_IO:
1990                 list = &transaction->t_iobuf_list;
1991                 break;
1992         case BJ_Shadow:
1993                 list = &transaction->t_shadow_list;
1994                 break;
1995         case BJ_LogCtl:
1996                 list = &transaction->t_log_list;
1997                 break;
1998         case BJ_Reserved:
1999                 list = &transaction->t_reserved_list;
2000                 break;
2001         }
2002
2003         __blist_add_buffer(list, jh);
2004         jh->b_jlist = jlist;
2005
2006         if (was_dirty)
2007                 set_buffer_jbddirty(bh);
2008 }
2009
2010 void jbd2_journal_file_buffer(struct journal_head *jh,
2011                                 transaction_t *transaction, int jlist)
2012 {
2013         jbd_lock_bh_state(jh2bh(jh));
2014         spin_lock(&transaction->t_journal->j_list_lock);
2015         __jbd2_journal_file_buffer(jh, transaction, jlist);
2016         spin_unlock(&transaction->t_journal->j_list_lock);
2017         jbd_unlock_bh_state(jh2bh(jh));
2018 }
2019
2020 /*
2021  * Remove a buffer from its current buffer list in preparation for
2022  * dropping it from its current transaction entirely.  If the buffer has
2023  * already started to be used by a subsequent transaction, refile the
2024  * buffer on that transaction's metadata list.
2025  *
2026  * Called under journal->j_list_lock
2027  *
2028  * Called under jbd_lock_bh_state(jh2bh(jh))
2029  */
2030 void __jbd2_journal_refile_buffer(struct journal_head *jh)
2031 {
2032         int was_dirty, jlist;
2033         struct buffer_head *bh = jh2bh(jh);
2034
2035         J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
2036         if (jh->b_transaction)
2037                 assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
2038
2039         /* If the buffer is now unused, just drop it. */
2040         if (jh->b_next_transaction == NULL) {
2041                 __jbd2_journal_unfile_buffer(jh);
2042                 return;
2043         }
2044
2045         /*
2046          * It has been modified by a later transaction: add it to the new
2047          * transaction's metadata list.
2048          */
2049
2050         was_dirty = test_clear_buffer_jbddirty(bh);
2051         __jbd2_journal_temp_unlink_buffer(jh);
2052         jh->b_transaction = jh->b_next_transaction;
2053         jh->b_next_transaction = NULL;
2054         if (buffer_freed(bh))
2055                 jlist = BJ_Forget;
2056         else if (jh->b_modified)
2057                 jlist = BJ_Metadata;
2058         else
2059                 jlist = BJ_Reserved;
2060         __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
2061         J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
2062
2063         if (was_dirty)
2064                 set_buffer_jbddirty(bh);
2065 }
2066
2067 /*
2068  * For the unlocked version of this call, also make sure that any
2069  * hanging journal_head is cleaned up if necessary.
2070  *
2071  * __jbd2_journal_refile_buffer is usually called as part of a single locked
2072  * operation on a buffer_head, in which the caller is probably going to
2073  * be hooking the journal_head onto other lists.  In that case it is up
2074  * to the caller to remove the journal_head if necessary.  For the
2075  * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
2076  * doing anything else to the buffer so we need to do the cleanup
2077  * ourselves to avoid a jh leak.
2078  *
2079  * *** The journal_head may be freed by this call! ***
2080  */
2081 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2082 {
2083         struct buffer_head *bh = jh2bh(jh);
2084
2085         jbd_lock_bh_state(bh);
2086         spin_lock(&journal->j_list_lock);
2087
2088         __jbd2_journal_refile_buffer(jh);
2089         jbd_unlock_bh_state(bh);
2090         jbd2_journal_remove_journal_head(bh);
2091
2092         spin_unlock(&journal->j_list_lock);
2093         __brelse(bh);
2094 }
2095
2096 /*
2097  * File inode in the inode list of the handle's transaction
2098  */
2099 int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
2100 {
2101         transaction_t *transaction = handle->h_transaction;
2102         journal_t *journal = transaction->t_journal;
2103
2104         if (is_handle_aborted(handle))
2105                 return -EIO;
2106
2107         jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
2108                         transaction->t_tid);
2109
2110         /*
2111          * First check, without taking the lock, whether the inode is
2112          * already on the transaction's lists. Note that this check is safe
2113          * without the lock as we cannot race with somebody removing the inode
2114          * from the transaction. The reason is that we remove inode from the
2115          * transaction only in journal_release_jbd_inode() and when we commit
2116          * the transaction. We are guarded from the first case by holding
2117          * a reference to the inode. We are safe against the second case
2118          * because if jinode->i_transaction == transaction, commit code
2119          * cannot touch the transaction because we hold reference to it,
2120          * and if jinode->i_next_transaction == transaction, commit code
2121          * will only file the inode where we want it.
2122          */
2123         if (jinode->i_transaction == transaction ||
2124             jinode->i_next_transaction == transaction)
2125                 return 0;
2126
2127         spin_lock(&journal->j_list_lock);
2128
2129         if (jinode->i_transaction == transaction ||
2130             jinode->i_next_transaction == transaction)
2131                 goto done;
2132
2133         /* On some different transaction's list - should be
2134          * the committing one */
2135         if (jinode->i_transaction) {
2136                 J_ASSERT(jinode->i_next_transaction == NULL);
2137                 J_ASSERT(jinode->i_transaction ==
2138                                         journal->j_committing_transaction);
2139                 jinode->i_next_transaction = transaction;
2140                 goto done;
2141         }
2142         /* Not on any transaction list... */
2143         J_ASSERT(!jinode->i_next_transaction);
2144         jinode->i_transaction = transaction;
2145         list_add(&jinode->i_list, &transaction->t_inode_list);
2146 done:
2147         spin_unlock(&journal->j_list_lock);
2148
2149         return 0;
2150 }
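
/*
 * Illustrative sketch, not part of jbd2: in ordered mode a filesystem
 * files the inode on the running transaction when it allocates or
 * extends blocks, so that commit writes the data pages out first.  The
 * helper name and the jbd2_inode argument are assumptions made for the
 * example.
 */
#if 0
static int example_ordered_block_alloc(handle_t *handle,
                                       struct jbd2_inode *jinode)
{
        /* ... allocate/map the new blocks under the handle ... */

        /* Ensure the inode's data is flushed before the transaction
         * is allowed to commit. */
        return jbd2_journal_file_inode(handle, jinode);
}
#endif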
2151
2152 /*
2153  * File truncate and transaction commit interact with each other in a
2154  * non-trivial way.  If a transaction writing data block A is
2155  * committing, we cannot discard the data by truncate until we have
2156  * committing, we cannot discard the data by truncate until we have
2157  * written them.  Otherwise if we crashed after the transaction with the
2158  * write has committed but before the transaction with the truncate has
2159  * helper to solve this problem.  It starts writeout of the truncated
2160  * part in case it is in the committing transaction.
2161  *
2162  * Filesystem code must call this function when the inode is journaled in
2163  * ordered mode, before truncation happens and after the inode has been
2164  * placed on the orphan list with the new inode size. The second condition
2165  * avoids the race where someone writes new data and we start
2166  * committing the transaction after this function has been called but
2167  * before a transaction for truncate is started (and furthermore it
2168  * allows us to optimize the case where the addition to orphan list
2169  * happens in the same transaction as write --- we don't have to write
2170  * any data in that case).
2171  */
2172 int jbd2_journal_begin_ordered_truncate(journal_t *journal,
2173                                         struct jbd2_inode *jinode,
2174                                         loff_t new_size)
2175 {
2176         transaction_t *inode_trans, *commit_trans;
2177         int ret = 0;
2178
2179         /* This is a quick check to avoid locking if not necessary */
2180         if (!jinode->i_transaction)
2181                 goto out;
2182         /* Locks are here just to force reading of recent values; it is
2183          * enough that the transaction was not committing before we started
2184          * a transaction adding the inode to orphan list */
2185         read_lock(&journal->j_state_lock);
2186         commit_trans = journal->j_committing_transaction;
2187         read_unlock(&journal->j_state_lock);
2188         spin_lock(&journal->j_list_lock);
2189         inode_trans = jinode->i_transaction;
2190         spin_unlock(&journal->j_list_lock);
2191         if (inode_trans == commit_trans) {
2192                 ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
2193                         new_size, LLONG_MAX);
2194                 if (ret)
2195                         jbd2_journal_abort(journal, ret);
2196         }
2197 out:
2198         return ret;
2199 }
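
/*
 * Illustrative sketch, not part of jbd2: the calling pattern described
 * above, from a filesystem's truncate path.  The helper name is an
 * assumption; the ordering (orphan list first, this call second, then
 * the truncate transaction) follows the comment above.
 */
#if 0
static int example_setattr_truncate(journal_t *journal,
                                    struct jbd2_inode *jinode,
                                    loff_t new_size)
{
        int err;

        /* The inode is assumed to already be on the orphan list with
         * its new size recorded. */
        err = jbd2_journal_begin_ordered_truncate(journal, jinode,
                                                  new_size);
        if (err)
                return err;

        /* ... now start the handle that performs the truncate ... */
        return 0;
}
#endif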