2 * linux/fs/jbd2/transaction.c
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
6 * Copyright 1998 Red Hat corp --- All Rights Reserved
8 * This file is part of the Linux kernel and is made available under
9 * the terms of the GNU General Public License, version 2, or at your
10 * option, any later version, incorporated herein by reference.
12 * Generic filesystem transaction handling code; part of the ext2fs
15 * This file manages transactions (compound commits managed by the
16 * journaling code) and handles (individual atomic operations by the
20 #include <linux/time.h>
22 #include <linux/jbd2.h>
23 #include <linux/errno.h>
24 #include <linux/slab.h>
25 #include <linux/timer.h>
27 #include <linux/highmem.h>
28 #include <linux/hrtimer.h>
29 #include <linux/backing-dev.h>
30 #include <linux/module.h>
32 static void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh);
35 * jbd2_get_transaction: obtain a new transaction_t object.
37 * Simply allocate and initialise a new transaction. Create it in
38 * RUNNING state and add it to the current journal (which should not
39 * have an existing running transaction: we only make a new transaction
40 * once we have started to commit the old one).
43 * The journal MUST be locked. We don't perform atomic mallocs on the
44 * new transaction and we can't block without protecting against other
45 * processes trying to touch the journal while it is in transition.
49 static transaction_t *
50 jbd2_get_transaction(journal_t *journal, transaction_t *transaction)
52 transaction->t_journal = journal;
53 transaction->t_state = T_RUNNING;
54 transaction->t_start_time = ktime_get();
55 transaction->t_tid = journal->j_transaction_sequence++;
56 transaction->t_expires = jiffies + journal->j_commit_interval;
57 spin_lock_init(&transaction->t_handle_lock);
58 atomic_set(&transaction->t_updates, 0);
59 atomic_set(&transaction->t_outstanding_credits, 0);
60 atomic_set(&transaction->t_handle_count, 0);
61 INIT_LIST_HEAD(&transaction->t_inode_list);
62 INIT_LIST_HEAD(&transaction->t_private_list);
64 /* Set up the commit timer for the new transaction. */
65 journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires);
66 add_timer(&journal->j_commit_timer);
68 J_ASSERT(journal->j_running_transaction == NULL);
69 journal->j_running_transaction = transaction;
70 transaction->t_max_wait = 0;
71 transaction->t_start = jiffies;
79 * A handle_t is an object which represents a single atomic update to a
80 * filesystem, and which tracks all of the modifications which form part
85 * start_this_handle: Given a handle, deal with any locking or stalling
86 * needed to make sure that there is enough journal space for the handle
87 * to begin. Attach the handle to a transaction and set up the
88 * transaction's buffer credits.
91 static int start_this_handle(journal_t *journal, handle_t *handle,
94 transaction_t *transaction;
96 int nblocks = handle->h_buffer_credits;
97 transaction_t *new_transaction = NULL;
98 unsigned long ts = jiffies;
100 if (nblocks > journal->j_max_transaction_buffers) {
101 printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n",
102 current->comm, nblocks,
103 journal->j_max_transaction_buffers);
108 if (!journal->j_running_transaction) {
109 new_transaction = kzalloc(sizeof(*new_transaction), gfp_mask);
110 if (!new_transaction) {
112 * If __GFP_FS is not present, then we may be
113 * being called from inside the fs writeback
114 * layer, so we MUST NOT fail. Since
115 * __GFP_NOFAIL is going away, we will arrange
116 * to retry the allocation ourselves.
118 if ((gfp_mask & __GFP_FS) == 0) {
119 congestion_wait(BLK_RW_ASYNC, HZ/50);
120 goto alloc_transaction;
126 jbd_debug(3, "New handle %p going live.\n", handle);
129 * We need to hold j_state_lock until t_updates has been incremented,
130 * for proper journal barrier handling
133 read_lock(&journal->j_state_lock);
134 if (is_journal_aborted(journal) ||
135 (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
136 read_unlock(&journal->j_state_lock);
137 kfree(new_transaction);
141 /* Wait on the journal's transaction barrier if necessary */
142 if (journal->j_barrier_count) {
143 read_unlock(&journal->j_state_lock);
144 wait_event(journal->j_wait_transaction_locked,
145 journal->j_barrier_count == 0);
149 if (!journal->j_running_transaction) {
150 read_unlock(&journal->j_state_lock);
151 if (!new_transaction)
152 goto alloc_transaction;
153 write_lock(&journal->j_state_lock);
154 if (!journal->j_running_transaction) {
155 jbd2_get_transaction(journal, new_transaction);
156 new_transaction = NULL;
158 write_unlock(&journal->j_state_lock);
162 transaction = journal->j_running_transaction;
165 * If the current transaction is locked down for commit, wait for the
166 * lock to be released.
168 if (transaction->t_state == T_LOCKED) {
171 prepare_to_wait(&journal->j_wait_transaction_locked,
172 &wait, TASK_UNINTERRUPTIBLE);
173 read_unlock(&journal->j_state_lock);
175 finish_wait(&journal->j_wait_transaction_locked, &wait);
180 * If there is not enough space left in the log to write all potential
181 * buffers requested by this operation, we need to stall pending a log
182 * checkpoint to free some more log space.
184 needed = atomic_add_return(nblocks,
185 &transaction->t_outstanding_credits);
187 if (needed > journal->j_max_transaction_buffers) {
189 * If the current transaction is already too large, then start
190 * to commit it: we can then go back and attach this handle to
195 jbd_debug(2, "Handle %p starting new commit...\n", handle);
196 atomic_sub(nblocks, &transaction->t_outstanding_credits);
197 prepare_to_wait(&journal->j_wait_transaction_locked, &wait,
198 TASK_UNINTERRUPTIBLE);
199 __jbd2_log_start_commit(journal, transaction->t_tid);
200 read_unlock(&journal->j_state_lock);
202 finish_wait(&journal->j_wait_transaction_locked, &wait);
207 * The commit code assumes that it can get enough log space
208 * without forcing a checkpoint. This is *critical* for
209 * correctness: a checkpoint of a buffer which is also
210 * associated with a committing transaction creates a deadlock,
211 * so commit simply cannot force through checkpoints.
213 * We must therefore ensure the necessary space in the journal
214 * *before* starting to dirty potentially checkpointed buffers
215 * in the new transaction.
217 * The worst part is, any transaction currently committing can
218 * reduce the free space arbitrarily. Be careful to account for
219 * those buffers when checkpointing.
223 * @@@ AKPM: This seems rather over-defensive. We're giving commit
224 * a _lot_ of headroom: 1/4 of the journal plus the size of
225 * the committing transaction. Really, we only need to give it
226 * committing_transaction->t_outstanding_credits plus "enough" for
227 * the log control blocks.
228 * Also, this test is inconsistent with the matching one in
229 * jbd2_journal_extend().
231 if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
232 jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
233 atomic_sub(nblocks, &transaction->t_outstanding_credits);
234 read_unlock(&journal->j_state_lock);
235 write_lock(&journal->j_state_lock);
236 if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
237 __jbd2_log_wait_for_space(journal);
238 write_unlock(&journal->j_state_lock);
242 /* OK, account for the buffers that this operation expects to
243 * use and add the handle to the running transaction.
245 * In order for t_max_wait to be reliable, it must be
246 * protected by a lock. But doing so will mean that
247 * start_this_handle() can not be run in parallel on SMP
248 * systems, which limits our scalability. So we only enable
249 * it when debugging is enabled. We may want to use a
250 * separate flag, eventually, so we can enable this
251 * independently of debugging.
253 #ifdef CONFIG_JBD2_DEBUG
254 if (jbd2_journal_enable_debug &&
255 time_after(transaction->t_start, ts)) {
256 ts = jbd2_time_diff(ts, transaction->t_start);
257 spin_lock(&transaction->t_handle_lock);
258 if (ts > transaction->t_max_wait)
259 transaction->t_max_wait = ts;
260 spin_unlock(&transaction->t_handle_lock);
263 handle->h_transaction = transaction;
264 atomic_inc(&transaction->t_updates);
265 atomic_inc(&transaction->t_handle_count);
266 jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n",
268 atomic_read(&transaction->t_outstanding_credits),
269 __jbd2_log_space_left(journal));
270 read_unlock(&journal->j_state_lock);
272 lock_map_acquire(&handle->h_lockdep_map);
273 kfree(new_transaction);
277 static struct lock_class_key jbd2_handle_key;
279 /* Allocate a new handle. This should probably be in a slab... */
280 static handle_t *new_handle(int nblocks)
282 handle_t *handle = jbd2_alloc_handle(GFP_NOFS);
285 memset(handle, 0, sizeof(*handle));
286 handle->h_buffer_credits = nblocks;
289 lockdep_init_map(&handle->h_lockdep_map, "jbd2_handle",
290 &jbd2_handle_key, 0);
296 * handle_t *jbd2_journal_start() - Obtain a new handle.
297 * @journal: Journal to start transaction on.
298 * @nblocks: number of block buffers we might modify
300 * We make sure that the transaction can guarantee at least nblocks of
301 * modified buffers in the log. We block until the log can guarantee
304 * This function is visible to journal users (like ext3fs), so is not
305 * called with the journal already locked.
307 * Return a pointer to a newly allocated handle, or an ERR_PTR() value on failure
309 handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask)
311 handle_t *handle = journal_current_handle();
315 return ERR_PTR(-EROFS);
318 J_ASSERT(handle->h_transaction->t_journal == journal);
323 handle = new_handle(nblocks);
325 return ERR_PTR(-ENOMEM);
327 current->journal_info = handle;
329 err = start_this_handle(journal, handle, gfp_mask);
331 jbd2_free_handle(handle);
332 current->journal_info = NULL;
333 handle = ERR_PTR(err);
339 EXPORT_SYMBOL(jbd2__journal_start);
342 handle_t *jbd2_journal_start(journal_t *journal, int nblocks)
344 return jbd2__journal_start(journal, nblocks, GFP_NOFS);
346 EXPORT_SYMBOL(jbd2_journal_start);
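/*
 * Illustrative sketch (not part of the original file): a filesystem caller
 * would typically bracket a metadata update with a handle obtained here:
 *
 *	handle = jbd2_journal_start(journal, needed_credits);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	err = jbd2_journal_get_write_access(handle, bh);
 *	if (!err) {
 *		... modify the buffer contents ...
 *		err = jbd2_journal_dirty_metadata(handle, bh);
 *	}
 *	err2 = jbd2_journal_stop(handle);
 *
 * "needed_credits" is a caller-chosen upper bound on the number of buffers
 * the update may dirty; the variable names above are placeholders.
 */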
350 * int jbd2_journal_extend() - extend buffer credits.
351 * @handle: handle to 'extend'
352 * @nblocks: nr blocks to try to extend by.
354 * Some transactions, such as large extends and truncates, can be done
355 * atomically all at once or in several stages. The operation requests
356 * a credit for a number of buffer modifications in advance, but can
357 * extend its credit if it needs more.
359 * jbd2_journal_extend tries to give the running handle more buffer credits.
360 * It does not guarantee that allocation - this is a best-effort only.
361 * The calling process MUST be able to deal cleanly with a failure to
364 * Return 0 on success, non-zero on failure.
366 * return code < 0 implies an error
367 * return code > 0 implies normal transaction-full status.
369 int jbd2_journal_extend(handle_t *handle, int nblocks)
371 transaction_t *transaction = handle->h_transaction;
372 journal_t *journal = transaction->t_journal;
377 if (is_handle_aborted(handle))
382 read_lock(&journal->j_state_lock);
384 /* Don't extend a locked-down transaction! */
385 if (handle->h_transaction->t_state != T_RUNNING) {
386 jbd_debug(3, "denied handle %p %d blocks: "
387 "transaction not running\n", handle, nblocks);
391 spin_lock(&transaction->t_handle_lock);
392 wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks;
394 if (wanted > journal->j_max_transaction_buffers) {
395 jbd_debug(3, "denied handle %p %d blocks: "
396 "transaction too large\n", handle, nblocks);
400 if (wanted > __jbd2_log_space_left(journal)) {
401 jbd_debug(3, "denied handle %p %d blocks: "
402 "insufficient log space\n", handle, nblocks);
406 handle->h_buffer_credits += nblocks;
407 atomic_add(nblocks, &transaction->t_outstanding_credits);
410 jbd_debug(3, "extended handle %p by %d\n", handle, nblocks);
412 spin_unlock(&transaction->t_handle_lock);
414 read_unlock(&journal->j_state_lock);
421 * int jbd2_journal_restart() - restart a handle .
422 * @handle: handle to restart
423 * @nblocks: nr credits requested
425 * Restart a handle for a multi-transaction filesystem
428 * If the jbd2_journal_extend() call above fails to grant new buffer credits
429 * to a running handle, a call to jbd2_journal_restart will commit the
430 * handle's transaction so far and reattach the handle to a new
431 * transaction capable of guaranteeing the requested number of
434 int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask)
436 transaction_t *transaction = handle->h_transaction;
437 journal_t *journal = transaction->t_journal;
440 /* If we've had an abort of any type, don't even think about
441 * actually doing the restart! */
442 if (is_handle_aborted(handle))
446 * First unlink the handle from its current transaction, and start the
449 J_ASSERT(atomic_read(&transaction->t_updates) > 0);
450 J_ASSERT(journal_current_handle() == handle);
452 read_lock(&journal->j_state_lock);
453 spin_lock(&transaction->t_handle_lock);
454 atomic_sub(handle->h_buffer_credits,
455 &transaction->t_outstanding_credits);
456 if (atomic_dec_and_test(&transaction->t_updates))
457 wake_up(&journal->j_wait_updates);
458 spin_unlock(&transaction->t_handle_lock);
460 jbd_debug(2, "restarting handle %p\n", handle);
461 __jbd2_log_start_commit(journal, transaction->t_tid);
462 read_unlock(&journal->j_state_lock);
464 lock_map_release(&handle->h_lockdep_map);
465 handle->h_buffer_credits = nblocks;
466 ret = start_this_handle(journal, handle, gfp_mask);
469 EXPORT_SYMBOL(jbd2__journal_restart);
472 int jbd2_journal_restart(handle_t *handle, int nblocks)
474 return jbd2__journal_restart(handle, nblocks, GFP_NOFS);
476 EXPORT_SYMBOL(jbd2_journal_restart);
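/*
 * Illustrative sketch: a caller that runs out of credits mid-operation
 * would typically try jbd2_journal_extend() first and fall back to
 * jbd2_journal_restart(), which commits the work done so far, e.g.:
 *
 *	if (jbd2_journal_extend(handle, extra) != 0)
 *		err = jbd2_journal_restart(handle, total_credits);
 *
 * "extra" and "total_credits" are placeholders; after a restart the handle
 * is attached to a fresh transaction, so credits granted by the old one no
 * longer apply.
 */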
479 * void jbd2_journal_lock_updates () - establish a transaction barrier.
480 * @journal: Journal to establish a barrier on.
482 * This locks out any further updates from being started, and blocks
483 * until all existing updates have completed, returning only once the
484 * journal is in a quiescent state with no updates running.
486 * The journal lock should not be held on entry.
488 void jbd2_journal_lock_updates(journal_t *journal)
492 write_lock(&journal->j_state_lock);
493 ++journal->j_barrier_count;
495 /* Wait until there are no running updates */
497 transaction_t *transaction = journal->j_running_transaction;
502 spin_lock(&transaction->t_handle_lock);
503 if (!atomic_read(&transaction->t_updates)) {
504 spin_unlock(&transaction->t_handle_lock);
507 prepare_to_wait(&journal->j_wait_updates, &wait,
508 TASK_UNINTERRUPTIBLE);
509 spin_unlock(&transaction->t_handle_lock);
510 write_unlock(&journal->j_state_lock);
512 finish_wait(&journal->j_wait_updates, &wait);
513 write_lock(&journal->j_state_lock);
515 write_unlock(&journal->j_state_lock);
518 * We have now established a barrier against other normal updates, but
519 * we also need to barrier against other jbd2_journal_lock_updates() calls
520 * to make sure that we serialise special journal-locked operations
523 mutex_lock(&journal->j_barrier);
527 * void jbd2_journal_unlock_updates (journal_t* journal) - release barrier
528 * @journal: Journal to release the barrier on.
530 * Release a transaction barrier obtained with jbd2_journal_lock_updates().
532 * Should be called without the journal lock held.
534 void jbd2_journal_unlock_updates (journal_t *journal)
536 J_ASSERT(journal->j_barrier_count != 0);
538 mutex_unlock(&journal->j_barrier);
539 write_lock(&journal->j_state_lock);
540 --journal->j_barrier_count;
541 write_unlock(&journal->j_state_lock);
542 wake_up(&journal->j_wait_transaction_locked);
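/*
 * Illustrative sketch: the barrier pair above quiesces the journal around
 * operations that must not race with running handles, e.g.:
 *
 *	jbd2_journal_lock_updates(journal);
 *	... perform the special journal-locked operation ...
 *	jbd2_journal_unlock_updates(journal);
 */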
545 static void warn_dirty_buffer(struct buffer_head *bh)
547 char b[BDEVNAME_SIZE];
550 "JBD: Spotted dirty metadata buffer (dev = %s, blocknr = %llu). "
551 "There's a risk of filesystem corruption in case of system "
553 bdevname(bh->b_bdev, b), (unsigned long long)bh->b_blocknr);
557 * If the buffer is already part of the current transaction, then there
558 * is nothing we need to do. If it is already part of a prior
559 * transaction which we are still committing to disk, then we need to
560 * make sure that we do not overwrite the old copy: we do copy-out to
561 * preserve the copy going to disk. We also account the buffer against
562 * the handle's metadata buffer credits (unless the buffer is already
563 * part of the transaction, that is).
567 do_get_write_access(handle_t *handle, struct journal_head *jh,
570 struct buffer_head *bh;
571 transaction_t *transaction;
574 char *frozen_buffer = NULL;
577 if (is_handle_aborted(handle))
580 transaction = handle->h_transaction;
581 journal = transaction->t_journal;
583 jbd_debug(5, "buffer_head %p, force_copy %d\n", jh, force_copy);
585 JBUFFER_TRACE(jh, "entry");
589 /* @@@ Need to check for errors here at some point. */
592 jbd_lock_bh_state(bh);
594 /* We now hold the buffer lock so it is safe to query the buffer
595 * state. Is the buffer dirty?
597 * If so, there are two possibilities. The buffer may be
598 * non-journaled, and undergoing a quite legitimate writeback.
599 * Otherwise, it is journaled, and we don't expect dirty buffers
600 * in that state (the buffers should be marked JBD_Dirty
601 * instead.) So either the IO is being done under our own
602 * control and this is a bug, or it's a third party IO such as
603 * dump(8) (which may leave the buffer scheduled for read ---
604 * ie. locked but not dirty) or tune2fs (which may actually have
605 * the buffer dirtied, ugh.) */
607 if (buffer_dirty(bh)) {
609 * First question: is this buffer already part of the current
610 * transaction or the existing committing transaction?
612 if (jh->b_transaction) {
614 jh->b_transaction == transaction ||
616 journal->j_committing_transaction);
617 if (jh->b_next_transaction)
618 J_ASSERT_JH(jh, jh->b_next_transaction ==
620 warn_dirty_buffer(bh);
623 * In any case we need to clean the dirty flag and we must
624 * do it under the buffer lock to be sure we don't race
625 * with running write-out.
627 JBUFFER_TRACE(jh, "Journalling dirty buffer");
628 clear_buffer_dirty(bh);
629 set_buffer_jbddirty(bh);
635 if (is_handle_aborted(handle)) {
636 jbd_unlock_bh_state(bh);
642 * The buffer is already part of this transaction if b_transaction or
643 * b_next_transaction points to it
645 if (jh->b_transaction == transaction ||
646 jh->b_next_transaction == transaction)
650 * this is the first time this transaction is touching this buffer,
651 * reset the modified flag
656 * If there is already a copy-out version of this buffer, then we don't
657 * need to make another one
659 if (jh->b_frozen_data) {
660 JBUFFER_TRACE(jh, "has frozen data");
661 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
662 jh->b_next_transaction = transaction;
666 /* Is there data here we need to preserve? */
668 if (jh->b_transaction && jh->b_transaction != transaction) {
669 JBUFFER_TRACE(jh, "owned by older transaction");
670 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
671 J_ASSERT_JH(jh, jh->b_transaction ==
672 journal->j_committing_transaction);
674 /* There is one case we have to be very careful about.
675 * If the committing transaction is currently writing
676 * this buffer out to disk and has NOT made a copy-out,
677 * then we cannot modify the buffer contents at all
678 * right now. The essence of copy-out is that it is the
679 * extra copy, not the primary copy, which gets
680 * journaled. If the primary copy is already going to
681 * disk then we cannot do copy-out here. */
683 if (jh->b_jlist == BJ_Shadow) {
684 DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow);
685 wait_queue_head_t *wqh;
687 wqh = bit_waitqueue(&bh->b_state, BH_Unshadow);
689 JBUFFER_TRACE(jh, "on shadow: sleep");
690 jbd_unlock_bh_state(bh);
691 /* commit wakes up all shadow buffers after IO */
693 prepare_to_wait(wqh, &wait.wait,
694 TASK_UNINTERRUPTIBLE);
695 if (jh->b_jlist != BJ_Shadow)
699 finish_wait(wqh, &wait.wait);
703 /* Only do the copy if the currently-owning transaction
704 * still needs it. If it is on the Forget list, the
705 * committing transaction is past that stage. The
706 * buffer had better remain locked during the kmalloc,
707 * but that should be true --- we hold the journal lock
708 * still and the buffer is already on the BUF_JOURNAL
709 * list so won't be flushed.
711 * Subtle point, though: if this is a get_undo_access,
712 * then we will be relying on the frozen_data to contain
713 * the new value of the committed_data record after the
714 * transaction, so we HAVE to force the frozen_data copy
717 if (jh->b_jlist != BJ_Forget || force_copy) {
718 JBUFFER_TRACE(jh, "generate frozen data");
719 if (!frozen_buffer) {
720 JBUFFER_TRACE(jh, "allocate memory for buffer");
721 jbd_unlock_bh_state(bh);
723 jbd2_alloc(jh2bh(jh)->b_size,
725 if (!frozen_buffer) {
727 "%s: OOM for frozen_buffer\n",
729 JBUFFER_TRACE(jh, "oom!");
731 jbd_lock_bh_state(bh);
736 jh->b_frozen_data = frozen_buffer;
737 frozen_buffer = NULL;
740 jh->b_next_transaction = transaction;
745 * Finally, if the buffer is not journaled right now, we need to make
746 * sure it doesn't get written to disk before the caller actually
747 * commits the new data
749 if (!jh->b_transaction) {
750 JBUFFER_TRACE(jh, "no transaction");
751 J_ASSERT_JH(jh, !jh->b_next_transaction);
752 jh->b_transaction = transaction;
753 JBUFFER_TRACE(jh, "file as BJ_Reserved");
754 spin_lock(&journal->j_list_lock);
755 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
756 spin_unlock(&journal->j_list_lock);
765 J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)),
766 "Possible IO failure.\n");
767 page = jh2bh(jh)->b_page;
768 offset = ((unsigned long) jh2bh(jh)->b_data) & ~PAGE_MASK;
769 source = kmap_atomic(page, KM_USER0);
770 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
771 kunmap_atomic(source, KM_USER0);
774 * Now that the frozen data is saved off, we need to store
775 * any matching triggers.
777 jh->b_frozen_triggers = jh->b_triggers;
779 jbd_unlock_bh_state(bh);
782 * If we are about to journal a buffer, then any revoke pending on it is
785 jbd2_journal_cancel_revoke(handle, jh);
788 if (unlikely(frozen_buffer)) /* It's usually NULL */
789 jbd2_free(frozen_buffer, bh->b_size);
791 JBUFFER_TRACE(jh, "exit");
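/*
 * Illustrative summary: the paths above leave the buffer in one of three
 * states - already attached to this transaction (nothing more to do),
 * owned by the committing transaction with b_next_transaction pointing
 * here (after an optional copy-out into b_frozen_data), or freshly filed
 * on this transaction's BJ_Reserved list.
 */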
796 * int jbd2_journal_get_write_access() - notify intent to modify a buffer for metadata (not data) update.
797 * @handle: transaction to add buffer modifications to
798 * @bh: bh to be used for metadata writes
801 * Returns an error code or 0 on success.
803 * In full data journalling mode the buffer may be of type BJ_AsyncData,
804 * because we're write()ing a buffer which is also part of a shared mapping.
807 int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
809 struct journal_head *jh = jbd2_journal_add_journal_head(bh);
812 /* We do not want to get caught playing with fields which the
813 * log thread also manipulates. Make sure that the buffer
814 * completes any outstanding IO before proceeding. */
815 rc = do_get_write_access(handle, jh, 0);
816 jbd2_journal_put_journal_head(jh);
822 * When the user wants to journal a newly created buffer_head
823 * (ie. getblk() returned a new buffer and we are going to populate it
824 * manually rather than reading off disk), then we need to keep the
825 * buffer_head locked until it has been completely filled with new
826 * data. In this case, we should be able to make the assertion that
827 * the bh is not already part of an existing transaction.
829 * The buffer should already be locked by the caller by this point.
830 * There is no lock ranking violation: it was a newly created,
831 * unlocked buffer beforehand. */
834 * int jbd2_journal_get_create_access () - notify intent to use newly created bh
835 * @handle: transaction to add the new buffer to
838 * Call this if you create a new bh.
840 int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
842 transaction_t *transaction = handle->h_transaction;
843 journal_t *journal = transaction->t_journal;
844 struct journal_head *jh = jbd2_journal_add_journal_head(bh);
847 jbd_debug(5, "journal_head %p\n", jh);
849 if (is_handle_aborted(handle))
853 JBUFFER_TRACE(jh, "entry");
855 * The buffer may already belong to this transaction due to pre-zeroing
856 * in the filesystem's new_block code. It may also be on the previous,
857 * committing transaction's lists, but it HAS to be in Forget state in
858 * that case: the transaction must have deleted the buffer for it to be
861 jbd_lock_bh_state(bh);
862 spin_lock(&journal->j_list_lock);
863 J_ASSERT_JH(jh, (jh->b_transaction == transaction ||
864 jh->b_transaction == NULL ||
865 (jh->b_transaction == journal->j_committing_transaction &&
866 jh->b_jlist == BJ_Forget)));
868 J_ASSERT_JH(jh, jh->b_next_transaction == NULL);
869 J_ASSERT_JH(jh, buffer_locked(jh2bh(jh)));
871 if (jh->b_transaction == NULL) {
873 * Previous jbd2_journal_forget() could have left the buffer
874 * with jbddirty bit set because it was being committed. When
875 * the commit finished, we've filed the buffer for
876 * checkpointing and marked it dirty. Now we are reallocating
877 * the buffer so the transaction freeing it must have
878 * committed and so it's safe to clear the dirty bit.
880 clear_buffer_dirty(jh2bh(jh));
881 jh->b_transaction = transaction;
883 /* first access by this transaction */
886 JBUFFER_TRACE(jh, "file as BJ_Reserved");
887 __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved);
888 } else if (jh->b_transaction == journal->j_committing_transaction) {
889 /* first access by this transaction */
892 JBUFFER_TRACE(jh, "set next transaction");
893 jh->b_next_transaction = transaction;
895 spin_unlock(&journal->j_list_lock);
896 jbd_unlock_bh_state(bh);
899 * akpm: I added this. ext3_alloc_branch can pick up new indirect
900 * blocks which contain freed but then revoked metadata. We need
901 * to cancel the revoke in case we end up freeing it yet again
902 * and then reallocating it as data - this would cause a second revoke,
903 * which hits an assertion error.
905 JBUFFER_TRACE(jh, "cancelling revoke");
906 jbd2_journal_cancel_revoke(handle, jh);
907 jbd2_journal_put_journal_head(jh);
913 * int jbd2_journal_get_undo_access() - Notify intent to modify metadata with
914 * non-rewindable consequences
915 * @handle: transaction
916 * @bh: buffer to undo
919 * Sometimes there is a need to distinguish between metadata which has
920 * been committed to disk and that which has not. The ext3fs code uses
921 * this for freeing and allocating space, we have to make sure that we
922 * do not reuse freed space until the deallocation has been committed,
923 * since if we overwrote that space we would make the delete
924 * un-rewindable in case of a crash.
926 * To deal with that, jbd2_journal_get_undo_access requests write access to a
927 * buffer for parts of non-rewindable operations such as delete
928 * operations on the bitmaps. The journaling code must keep a copy of
929 * the buffer's contents prior to the undo_access call until such time
930 * as we know that the buffer has definitely been committed to disk.
932 * We never need to know which transaction the committed data is part
933 * of, buffers touched here are guaranteed to be dirtied later and so
934 * will be committed to a new transaction in due course, at which point
935 * we can discard the old committed data pointer.
937 * Returns error number or 0 on success.
939 int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
942 struct journal_head *jh = jbd2_journal_add_journal_head(bh);
943 char *committed_data = NULL;
945 JBUFFER_TRACE(jh, "entry");
948 * Do this first --- it can drop the journal lock, so we want to
949 * make sure that obtaining the committed_data is done
950 * atomically wrt. completion of any outstanding commits.
952 err = do_get_write_access(handle, jh, 1);
957 if (!jh->b_committed_data) {
958 committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
959 if (!committed_data) {
960 printk(KERN_EMERG "%s: No memory for committed data\n",
967 jbd_lock_bh_state(bh);
968 if (!jh->b_committed_data) {
969 /* Copy out the current buffer contents into the
970 * preserved, committed copy. */
971 JBUFFER_TRACE(jh, "generate b_committed data");
972 if (!committed_data) {
973 jbd_unlock_bh_state(bh);
977 jh->b_committed_data = committed_data;
978 committed_data = NULL;
979 memcpy(jh->b_committed_data, bh->b_data, bh->b_size);
981 jbd_unlock_bh_state(bh);
983 jbd2_journal_put_journal_head(jh);
984 if (unlikely(committed_data))
985 jbd2_free(committed_data, bh->b_size);
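/*
 * Illustrative sketch: a bitmap-style caller pairs undo access with a
 * normal metadata update, e.g.:
 *
 *	err = jbd2_journal_get_undo_access(handle, bitmap_bh);
 *	... clear the bits being freed ...
 *	err = jbd2_journal_dirty_metadata(handle, bitmap_bh);
 *
 * "bitmap_bh" is a placeholder; the preserved b_committed_data copy is
 * what lets allocation decisions avoid blocks whose freeing has not yet
 * committed.
 */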
990 * void jbd2_journal_set_triggers() - Add triggers for commit writeout
991 * @bh: buffer to trigger on
992 * @type: struct jbd2_buffer_trigger_type containing the trigger(s).
994 * Set any triggers on this journal_head. This is always safe, because
995 * triggers for a committing buffer will be saved off, and triggers for
996 * a running transaction will match the buffer in that transaction.
998 * Call with NULL to clear the triggers.
1000 void jbd2_journal_set_triggers(struct buffer_head *bh,
1001 struct jbd2_buffer_trigger_type *type)
1003 struct journal_head *jh = bh2jh(bh);
1005 jh->b_triggers = type;
1008 void jbd2_buffer_commit_trigger(struct journal_head *jh, void *mapped_data,
1009 struct jbd2_buffer_trigger_type *triggers)
1011 struct buffer_head *bh = jh2bh(jh);
1013 if (!triggers || !triggers->t_commit)
1016 triggers->t_commit(triggers, bh, mapped_data, bh->b_size);
1019 void jbd2_buffer_abort_trigger(struct journal_head *jh,
1020 struct jbd2_buffer_trigger_type *triggers)
1022 if (!triggers || !triggers->t_abort)
1025 triggers->t_abort(triggers, jh2bh(jh));
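/*
 * Illustrative sketch (hypothetical names): a client that wants to patch
 * up the frozen copy at commit time could register triggers along these
 * lines:
 *
 *	static void my_commit(struct jbd2_buffer_trigger_type *type,
 *			      struct buffer_head *bh, void *mapped_data,
 *			      size_t size)
 *	{
 *		... recompute a checksum in mapped_data ...
 *	}
 *
 *	static struct jbd2_buffer_trigger_type my_triggers = {
 *		.t_commit = my_commit,
 *	};
 *
 *	jbd2_journal_set_triggers(bh, &my_triggers);
 *
 * The callback arguments mirror the calls made just above; the "my_*"
 * identifiers are placeholders.
 */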
1031 * int jbd2_journal_dirty_metadata() - mark a buffer as containing dirty metadata
1032 * @handle: transaction to add buffer to.
1033 * @bh: buffer to mark
1035 * mark dirty metadata which needs to be journaled as part of the current
1038 * The buffer is placed on the transaction's metadata list and is marked
1039 * as belonging to the transaction.
1041 * Returns error number or 0 on success.
1043 * Special care needs to be taken if the buffer already belongs to the
1044 * current committing transaction (in which case we should have frozen
1045 * data present for that commit). In that case, we don't relink the
1046 * buffer: that only gets done when the old transaction finally
1047 * completes its commit.
1049 int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
1051 transaction_t *transaction = handle->h_transaction;
1052 journal_t *journal = transaction->t_journal;
1053 struct journal_head *jh = bh2jh(bh);
1055 jbd_debug(5, "journal_head %p\n", jh);
1056 JBUFFER_TRACE(jh, "entry");
1057 if (is_handle_aborted(handle))
1060 jbd_lock_bh_state(bh);
1062 if (jh->b_modified == 0) {
1064 * This buffer has been modified and is becoming part
1065 * of the transaction. This needs to be done
1066 * once per transaction -bzzz
1069 J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
1070 handle->h_buffer_credits--;
1074 * fastpath, to avoid expensive locking. If this buffer is already
1075 * on the running transaction's metadata list there is nothing to do.
1076 * Nobody can take it off again because there is a handle open.
1077 * I _think_ we're OK here with SMP barriers - a mistaken decision will
1078 * result in this test being false, so we go in and take the locks.
1080 if (jh->b_transaction == transaction && jh->b_jlist == BJ_Metadata) {
1081 JBUFFER_TRACE(jh, "fastpath");
1082 J_ASSERT_JH(jh, jh->b_transaction ==
1083 journal->j_running_transaction);
1087 set_buffer_jbddirty(bh);
1090 * Metadata already on the current transaction list doesn't
1091 * need to be filed. Metadata on another transaction's list must
1092 * be committing, and will be refiled once the commit completes:
1093 * leave it alone for now.
1095 if (jh->b_transaction != transaction) {
1096 JBUFFER_TRACE(jh, "already on other transaction");
1097 J_ASSERT_JH(jh, jh->b_transaction ==
1098 journal->j_committing_transaction);
1099 J_ASSERT_JH(jh, jh->b_next_transaction == transaction);
1100 /* And this case is illegal: we can't reuse another
1101 * transaction's data buffer, ever. */
1105 /* That test should have eliminated the following case: */
1106 J_ASSERT_JH(jh, jh->b_frozen_data == NULL);
1108 JBUFFER_TRACE(jh, "file as BJ_Metadata");
1109 spin_lock(&journal->j_list_lock);
1110 __jbd2_journal_file_buffer(jh, handle->h_transaction, BJ_Metadata);
1111 spin_unlock(&journal->j_list_lock);
1113 jbd_unlock_bh_state(bh);
1115 JBUFFER_TRACE(jh, "exit");
1120 * jbd2_journal_release_buffer: undo a get_write_access without any buffer
1121 * updates, if the update decided in the end that it didn't need access.
1125 jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh)
1127 BUFFER_TRACE(bh, "entry");
1131 * void jbd2_journal_forget() - bforget() for potentially-journaled buffers.
1132 * @handle: transaction handle
1133 * @bh: bh to 'forget'
1135 * We can only do the bforget if there are no commits pending against the
1136 * buffer. If the buffer is dirty in the current running transaction we
1137 * can safely unlink it.
1139 * bh may not be a journalled buffer at all - it may be a non-JBD
1140 * buffer which came off the hashtable. Check for this.
1142 * Decrements bh->b_count by one.
1144 * Allow this call even if the handle has aborted --- it may be part of
1145 * the caller's cleanup after an abort.
1147 int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
1149 transaction_t *transaction = handle->h_transaction;
1150 journal_t *journal = transaction->t_journal;
1151 struct journal_head *jh;
1152 int drop_reserve = 0;
1154 int was_modified = 0;
1156 BUFFER_TRACE(bh, "entry");
1158 jbd_lock_bh_state(bh);
1159 spin_lock(&journal->j_list_lock);
1161 if (!buffer_jbd(bh))
1165 /* Critical error: attempting to delete a bitmap buffer, maybe?
1166 * Don't do any jbd operations, and return an error. */
1167 if (!J_EXPECT_JH(jh, !jh->b_committed_data,
1168 "inconsistent data on disk")) {
1173 /* keep track of whether or not this transaction modified us */
1174 was_modified = jh->b_modified;
1177 * The buffer's going from the transaction, we must drop
1178 * all references -bzzz
1182 if (jh->b_transaction == handle->h_transaction) {
1183 J_ASSERT_JH(jh, !jh->b_frozen_data);
1185 /* If we are forgetting a buffer which is already part
1186 * of this transaction, then we can just drop it from
1187 * the transaction immediately. */
1188 clear_buffer_dirty(bh);
1189 clear_buffer_jbddirty(bh);
1191 JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
1194 * we only want to drop a reference if this transaction
1195 * modified the buffer
1201 * We are no longer going to journal this buffer.
1202 * However, the commit of this transaction is still
1203 * important to the buffer: the delete that we are now
1204 * processing might obsolete an old log entry, so by
1205 * committing, we can satisfy the buffer's checkpoint.
1207 * So, if we have a checkpoint on the buffer, we should
1208 * now refile the buffer on our BJ_Forget list so that
1209 * we know to remove the checkpoint after we commit.
1212 if (jh->b_cp_transaction) {
1213 __jbd2_journal_temp_unlink_buffer(jh);
1214 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1216 __jbd2_journal_unfile_buffer(jh);
1217 jbd2_journal_remove_journal_head(bh);
1219 if (!buffer_jbd(bh)) {
1220 spin_unlock(&journal->j_list_lock);
1221 jbd_unlock_bh_state(bh);
1226 } else if (jh->b_transaction) {
1227 J_ASSERT_JH(jh, (jh->b_transaction ==
1228 journal->j_committing_transaction));
1229 /* However, if the buffer is still owned by a prior
1230 * (committing) transaction, we can't drop it yet... */
1231 JBUFFER_TRACE(jh, "belongs to older transaction");
1232 /* ... but we CAN drop it from the new transaction if we
1233 * have also modified it since the original commit. */
1235 if (jh->b_next_transaction) {
1236 J_ASSERT(jh->b_next_transaction == transaction);
1237 jh->b_next_transaction = NULL;
1240 * only drop a reference if this transaction modified
1249 spin_unlock(&journal->j_list_lock);
1250 jbd_unlock_bh_state(bh);
1254 /* no need to reserve log space for this block -bzzz */
1255 handle->h_buffer_credits++;
1261 * int jbd2_journal_stop() - complete a transaction
1262 * @handle: transaction to complete.
1264 * All done for a particular handle.
1266 * There is not much action needed here. We just return any remaining
1267 * buffer credits to the transaction and remove the handle. The only
1268 * complication is that we need to start a commit operation if the
1269 * filesystem is marked for synchronous update.
1271 * jbd2_journal_stop itself will not usually return an error, but it may
1272 * do so in unusual circumstances. In particular, expect it to
1273 * return -EIO if a jbd2_journal_abort has been executed since the
1274 * transaction began.
1276 int jbd2_journal_stop(handle_t *handle)
1278 transaction_t *transaction = handle->h_transaction;
1279 journal_t *journal = transaction->t_journal;
1280 int err, wait_for_commit = 0;
1284 J_ASSERT(journal_current_handle() == handle);
1286 if (is_handle_aborted(handle))
1289 J_ASSERT(atomic_read(&transaction->t_updates) > 0);
1293 if (--handle->h_ref > 0) {
1294 jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
1299 jbd_debug(4, "Handle %p going down\n", handle);
1302 * Implement synchronous transaction batching. If the handle
1303 * was synchronous, don't force a commit immediately. Let's
1304 * yield and let another thread piggyback onto this
1305 * transaction. Keep doing that while new threads continue to
1306 * arrive. It doesn't cost much - we're about to run a commit
1307 * and sleep on IO anyway. Speeds up many-threaded, many-dir
1308 * operations by 30x or more...
1310 * We try and optimize the sleep time against what the
1311 * underlying disk can do, instead of having a static sleep
1312 * time. This is useful for the case where our storage is so
1313 * fast that it is more optimal to go ahead and force a flush
1314 * and wait for the transaction to be committed than it is to
1315 * wait for an arbitrary amount of time for new writers to
1316 * join the transaction. We achieve this by measuring how
1317 * long it takes to commit a transaction, and compare it with
1318 * how long this transaction has been running, and if run time
1319 * < commit time then we sleep for the delta and commit. This
1320 * greatly helps super fast disks that would see slowdowns as
1321 * more threads started doing fsyncs.
1323 * But don't do this if this process was the most recent one
1324 * to perform a synchronous write. We do this to detect the
1325 * case where a single process is doing a stream of sync
1326 * writes. No point in waiting for joiners in that case.
1329 if (handle->h_sync && journal->j_last_sync_writer != pid) {
1330 u64 commit_time, trans_time;
1332 journal->j_last_sync_writer = pid;
1334 read_lock(&journal->j_state_lock);
1335 commit_time = journal->j_average_commit_time;
1336 read_unlock(&journal->j_state_lock);
1338 trans_time = ktime_to_ns(ktime_sub(ktime_get(),
1339 transaction->t_start_time));
1341 commit_time = max_t(u64, commit_time,
1342 1000*journal->j_min_batch_time);
1343 commit_time = min_t(u64, commit_time,
1344 1000*journal->j_max_batch_time);
1346 if (trans_time < commit_time) {
1347 ktime_t expires = ktime_add_ns(ktime_get(),
1349 set_current_state(TASK_UNINTERRUPTIBLE);
1350 schedule_hrtimeout(&expires, HRTIMER_MODE_ABS);
1355 transaction->t_synchronous_commit = 1;
1356 current->journal_info = NULL;
1357 atomic_sub(handle->h_buffer_credits,
1358 &transaction->t_outstanding_credits);
1361 * If the handle is marked SYNC, we need to set another commit
1362 * going! We also want to force a commit if the current
1363 * transaction is occupying too much of the log, or if the
1364 * transaction is too old now.
1366 if (handle->h_sync ||
1367 (atomic_read(&transaction->t_outstanding_credits) >
1368 journal->j_max_transaction_buffers) ||
1369 time_after_eq(jiffies, transaction->t_expires)) {
1370 /* Do this even for aborted journals: an abort still
1371 * completes the commit thread, it just doesn't write
1372 * anything to disk. */
1374 jbd_debug(2, "transaction too old, requesting commit for "
1375 "handle %p\n", handle);
1376 /* This is non-blocking */
1377 jbd2_log_start_commit(journal, transaction->t_tid);
1380 * Special case: JBD2_SYNC synchronous updates require us
1381 * to wait for the commit to complete.
1383 if (handle->h_sync && !(current->flags & PF_MEMALLOC))
1384 wait_for_commit = 1;
1388 * Once we drop t_updates, if it goes to zero the transaction
1389 * could start committing on us and eventually disappear. So
1390 * once we do this, we must not dereference transaction
1393 tid = transaction->t_tid;
1394 if (atomic_dec_and_test(&transaction->t_updates)) {
1395 wake_up(&journal->j_wait_updates);
1396 if (journal->j_barrier_count)
1397 wake_up(&journal->j_wait_transaction_locked);
1400 if (wait_for_commit)
1401 err = jbd2_log_wait_commit(journal, tid);
1403 lock_map_release(&handle->h_lockdep_map);
1405 jbd2_free_handle(handle);
1410 * int jbd2_journal_force_commit() - force any uncommitted transactions
1411 * @journal: journal to force
1413 * For synchronous operations: force any uncommitted transactions
1414 * to disk. May seem kludgy, but it reuses all the handle batching
1415 * code in a very simple manner.
1417 int jbd2_journal_force_commit(journal_t *journal)
1422 handle = jbd2_journal_start(journal, 1);
1423 if (IS_ERR(handle)) {
1424 ret = PTR_ERR(handle);
1427 ret = jbd2_journal_stop(handle);
1434 * List management code snippets: various functions for manipulating the
1435 * transaction buffer lists.
1440 * Append a buffer to a transaction list, given the transaction's list head
1443 * j_list_lock is held.
1445 * jbd_lock_bh_state(jh2bh(jh)) is held.
1449 __blist_add_buffer(struct journal_head **list, struct journal_head *jh)
1452 jh->b_tnext = jh->b_tprev = jh;
1455 /* Insert at the tail of the list to preserve order */
1456 struct journal_head *first = *list, *last = first->b_tprev;
1458 jh->b_tnext = first;
1459 last->b_tnext = first->b_tprev = jh;
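/*
 * Illustrative note: the transaction lists are circular and doubly linked
 * through b_tnext/b_tprev, so *list points at the head and the head's
 * b_tprev is the tail used for the append above.
 */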
1464 * Remove a buffer from a transaction list, given the transaction's list
1467 * Called with j_list_lock held, and the journal may not be locked.
1469 * jbd_lock_bh_state(jh2bh(jh)) is held.
1473 __blist_del_buffer(struct journal_head **list, struct journal_head *jh)
1476 *list = jh->b_tnext;
1480 jh->b_tprev->b_tnext = jh->b_tnext;
1481 jh->b_tnext->b_tprev = jh->b_tprev;
1485 * Remove a buffer from the appropriate transaction list.
1487 * Note that this function can *change* the value of
1488 * bh->b_transaction->t_buffers, t_forget, t_iobuf_list, t_shadow_list,
1489 * t_log_list or t_reserved_list. If the caller is holding onto a copy of one
1490 * of these pointers, it could go bad. Generally the caller needs to re-read
1491 * the pointer from the transaction_t.
1493 * Called under j_list_lock. The journal may not be locked.
1495 void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh)
1497 struct journal_head **list = NULL;
1498 transaction_t *transaction;
1499 struct buffer_head *bh = jh2bh(jh);
1501 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1502 transaction = jh->b_transaction;
1504 assert_spin_locked(&transaction->t_journal->j_list_lock);
1506 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1507 if (jh->b_jlist != BJ_None)
1508 J_ASSERT_JH(jh, transaction != NULL);
1510 switch (jh->b_jlist) {
1514 transaction->t_nr_buffers--;
1515 J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0);
1516 list = &transaction->t_buffers;
1519 list = &transaction->t_forget;
1522 list = &transaction->t_iobuf_list;
1525 list = &transaction->t_shadow_list;
1528 list = &transaction->t_log_list;
1531 list = &transaction->t_reserved_list;
1535 __blist_del_buffer(list, jh);
1536 jh->b_jlist = BJ_None;
1537 if (test_clear_buffer_jbddirty(bh))
1538 mark_buffer_dirty(bh); /* Expose it to the VM */
1541 void __jbd2_journal_unfile_buffer(struct journal_head *jh)
1543 __jbd2_journal_temp_unlink_buffer(jh);
1544 jh->b_transaction = NULL;
1547 void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh)
1549 jbd_lock_bh_state(jh2bh(jh));
1550 spin_lock(&journal->j_list_lock);
1551 __jbd2_journal_unfile_buffer(jh);
1552 spin_unlock(&journal->j_list_lock);
1553 jbd_unlock_bh_state(jh2bh(jh));
1557 * Called from jbd2_journal_try_to_free_buffers().
1559 * Called under jbd_lock_bh_state(bh)
1562 __journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh)
1564 struct journal_head *jh;
1568 if (buffer_locked(bh) || buffer_dirty(bh))
1571 if (jh->b_next_transaction != NULL)
1574 spin_lock(&journal->j_list_lock);
1575 if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) {
1576 /* written-back checkpointed metadata buffer */
1577 if (jh->b_jlist == BJ_None) {
1578 JBUFFER_TRACE(jh, "remove from checkpoint list");
1579 __jbd2_journal_remove_checkpoint(jh);
1580 jbd2_journal_remove_journal_head(bh);
1584 spin_unlock(&journal->j_list_lock);
1590 * int jbd2_journal_try_to_free_buffers() - try to free page buffers.
1591 * @journal: journal for operation
1592 * @page: to try and free
1593 * @gfp_mask: we use the mask to detect how hard we should try to release
1594 * buffers. If __GFP_WAIT and __GFP_FS are set, we wait for the commit code to
1595 * release the buffers.
1598 * For all the buffers on this page,
1599 * if they are fully written out ordered data, move them onto BUF_CLEAN
1600 * so try_to_free_buffers() can reap them.
1602 * This function returns non-zero if we wish try_to_free_buffers()
1603 * to be called. We do this if the page is releasable by try_to_free_buffers().
1604 * We also do it if the page has locked or dirty buffers and the caller wants
1605 * us to perform sync or async writeout.
1607 * This complicates JBD locking somewhat. We aren't protected by the
1608 * BKL here. We wish to remove the buffer from its committing or
1609 * running transaction's ->t_datalist via __jbd2_journal_unfile_buffer.
1611 * This may *change* the value of transaction_t->t_datalist, so anyone
1612 * who looks at t_datalist needs to lock against this function.
1614 * Even worse, someone may be doing a jbd2_journal_dirty_data on this
1615 * buffer. So we need to lock against that. jbd2_journal_dirty_data()
1616 * will come out of the lock with the buffer dirty, which makes it
1617 * ineligible for release here.
1619 * Who else is affected by this? hmm... Really the only contender
1620 * is do_get_write_access() - it could be looking at the buffer while
1621 * journal_try_to_free_buffer() is changing its state. But that
1622 * cannot happen because we never reallocate freed data as metadata
1623 * while the data is part of a transaction. Yes?
1625 * Return 0 on failure, 1 on success
1627 int jbd2_journal_try_to_free_buffers(journal_t *journal,
1628 struct page *page, gfp_t gfp_mask)
1630 struct buffer_head *head;
1631 struct buffer_head *bh;
1634 J_ASSERT(PageLocked(page));
1636 head = page_buffers(page);
1639 struct journal_head *jh;
1642 * We take our own ref against the journal_head here to avoid
1643 * having to add tons of locking around each instance of
1644 * jbd2_journal_remove_journal_head() and
1645 * jbd2_journal_put_journal_head().
1647 jh = jbd2_journal_grab_journal_head(bh);
1651 jbd_lock_bh_state(bh);
1652 __journal_try_to_free_buffer(journal, bh);
1653 jbd2_journal_put_journal_head(jh);
1654 jbd_unlock_bh_state(bh);
1657 } while ((bh = bh->b_this_page) != head);
1659 ret = try_to_free_buffers(page);
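/*
 * Illustrative note: filesystems typically reach this function from their
 * ->releasepage address_space operation, passing the page and the reclaim
 * gfp_mask straight through.
 */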
1666 * This buffer is no longer needed. If it is on an older transaction's
1667 * checkpoint list we need to record it on this transaction's forget list
1668 * to pin this buffer (and hence its checkpointing transaction) down until
1669 * this transaction commits. If the buffer isn't on a checkpoint list, we
1671 * Returns non-zero if JBD no longer has an interest in the buffer.
1673 * Called under j_list_lock.
1675 * Called under jbd_lock_bh_state(bh).
1677 static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction)
1680 struct buffer_head *bh = jh2bh(jh);
1682 __jbd2_journal_unfile_buffer(jh);
1684 if (jh->b_cp_transaction) {
1685 JBUFFER_TRACE(jh, "on running+cp transaction");
1687 * We don't want to write the buffer anymore, clear the
1688 * bit so that we don't confuse checks in
1689 * __journal_file_buffer
1691 clear_buffer_dirty(bh);
1692 __jbd2_journal_file_buffer(jh, transaction, BJ_Forget);
1695 JBUFFER_TRACE(jh, "on running transaction");
1696 jbd2_journal_remove_journal_head(bh);
1703 * jbd2_journal_invalidatepage
1705 * This code is tricky. It has a number of cases to deal with.
1707 * There are two invariants which this code relies on:
1709 * i_size must be updated on disk before we start calling invalidatepage on the
1712 * This is done in ext3 by defining an ext3_setattr method which
1713 * updates i_size before truncate gets going. By maintaining this
1714 * invariant, we can be sure that it is safe to throw away any buffers
1715 * attached to the current transaction: once the transaction commits,
1716 * we know that the data will not be needed.
1718 * Note however that we can *not* throw away data belonging to the
1719 * previous, committing transaction!
1721 * Any disk blocks which *are* part of the previous, committing
1722 * transaction (and which therefore cannot be discarded immediately) are
1723 * not going to be reused in the new running transaction
1725 * The bitmap committed_data images guarantee this: any block which is
1726 * allocated in one transaction and removed in the next will be marked
1727 * as in-use in the committed_data bitmap, so cannot be reused until
1728 * the next transaction to delete the block commits. This means that
1729 * leaving committing buffers dirty is quite safe: the disk blocks
1730 * cannot be reallocated to a different file and so buffer aliasing is
1734 * The above applies mainly to ordered data mode. In writeback mode we
1735 * don't make guarantees about the order in which data hits disk --- in
1736 * particular we don't guarantee that new dirty data is flushed before
1737 * transaction commit --- so it is always safe just to discard data
1738 * immediately in that mode. --sct
1742 * The journal_unmap_buffer helper function returns zero if the buffer
1743 * concerned remains pinned as an anonymous buffer belonging to an older
1746 * We're outside-transaction here. Either or both of j_running_transaction
1747 * and j_committing_transaction may be NULL.
1749 static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
1751 transaction_t *transaction;
1752 struct journal_head *jh;
1756 BUFFER_TRACE(bh, "entry");
1759 * It is safe to proceed here without the j_list_lock because the
1760 * buffers cannot be stolen by try_to_free_buffers as long as we are
1761 * holding the page lock. --sct
1764 if (!buffer_jbd(bh))
1765 goto zap_buffer_unlocked;
1767 /* OK, we have data buffer in journaled mode */
1768 write_lock(&journal->j_state_lock);
1769 jbd_lock_bh_state(bh);
1770 spin_lock(&journal->j_list_lock);
1772 jh = jbd2_journal_grab_journal_head(bh);
1774 goto zap_buffer_no_jh;
1777 * We cannot remove the buffer from checkpoint lists until the
1778 * transaction adding inode to orphan list (let's call it T)
1779 * is committed. Otherwise if the transaction changing the
1780 * buffer would be cleaned from the journal before T is
1781 * committed, a crash will cause that the correct contents of
1782 * the buffer will be lost. On the other hand we have to
1783 * clear the buffer dirty bit at latest at the moment when the
1784 * transaction marking the buffer as freed in the filesystem
1785 * structures is committed because from that moment on the
1786 * buffer can be reallocated and used by a different page.
1787 * Since the block hasn't been freed yet but the inode has
1788 * already been added to orphan list, it is safe for us to add
1789 * the buffer to BJ_Forget list of the newest transaction.
1791 transaction = jh->b_transaction;
1792 if (transaction == NULL) {
1793 /* First case: not on any transaction. If it
1794 * has no checkpoint link, then we can zap it:
1795 * it's a writeback-mode buffer so we don't care
1796 * if it hits disk safely. */
1797 if (!jh->b_cp_transaction) {
1798 JBUFFER_TRACE(jh, "not on any transaction: zap");
1802 if (!buffer_dirty(bh)) {
1803 /* bdflush has written it. We can drop it now */
1807 /* OK, it must be in the journal but still not
1808 * written fully to disk: it's metadata or
1809 * journaled data... */
1811 if (journal->j_running_transaction) {
1812 /* ... and once the current transaction has
1813 * committed, the buffer won't be needed any
1815 JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget");
1816 ret = __dispose_buffer(jh,
1817 journal->j_running_transaction);
1818 jbd2_journal_put_journal_head(jh);
1819 spin_unlock(&journal->j_list_lock);
1820 jbd_unlock_bh_state(bh);
1821 write_unlock(&journal->j_state_lock);
1824 /* There is no currently-running transaction. So the
1825 * orphan record which we wrote for this file must have
1826 * passed into commit. We must attach this buffer to
1827 * the committing transaction, if it exists. */
1828 if (journal->j_committing_transaction) {
1829 JBUFFER_TRACE(jh, "give to committing trans");
1830 ret = __dispose_buffer(jh,
1831 journal->j_committing_transaction);
1832 jbd2_journal_put_journal_head(jh);
1833 spin_unlock(&journal->j_list_lock);
1834 jbd_unlock_bh_state(bh);
1835 write_unlock(&journal->j_state_lock);
1838 /* The orphan record's transaction has
1839 * committed. We can cleanse this buffer */
1840 clear_buffer_jbddirty(bh);
1844 } else if (transaction == journal->j_committing_transaction) {
1845 JBUFFER_TRACE(jh, "on committing transaction");
1847 * The buffer is committing, we simply cannot touch
1848 * it. So we just set b_next_transaction to the
1849 * running transaction (if there is one) and mark
1850 * buffer as freed so that commit code knows it should
1851 * clear dirty bits when it is done with the buffer.
1853 set_buffer_freed(bh);
1854 if (journal->j_running_transaction && buffer_jbddirty(bh))
1855 jh->b_next_transaction = journal->j_running_transaction;
1856 jbd2_journal_put_journal_head(jh);
1857 spin_unlock(&journal->j_list_lock);
1858 jbd_unlock_bh_state(bh);
1859 write_unlock(&journal->j_state_lock);
1862 /* Good, the buffer belongs to the running transaction.
1863 * We are writing our own transaction's data, not any
1864 * previous one's, so it is safe to throw it away
1865 * (remember that we expect the filesystem to have set
1866 * i_size already for this truncate so recovery will not
1867 * expose the disk blocks we are discarding here.) */
1868 J_ASSERT_JH(jh, transaction == journal->j_running_transaction);
1869 JBUFFER_TRACE(jh, "on running transaction");
1870 may_free = __dispose_buffer(jh, transaction);
1874 jbd2_journal_put_journal_head(jh);
1876 spin_unlock(&journal->j_list_lock);
1877 jbd_unlock_bh_state(bh);
1878 write_unlock(&journal->j_state_lock);
1879 zap_buffer_unlocked:
1880 clear_buffer_dirty(bh);
1881 J_ASSERT_BH(bh, !buffer_jbddirty(bh));
1882 clear_buffer_mapped(bh);
1883 clear_buffer_req(bh);
1884 clear_buffer_new(bh);
1890 * void jbd2_journal_invalidatepage()
1891 * @journal: journal to use for flush...
1892 * @page: page to flush
1893 * @offset: start of the range to invalidate.
1895 * Reap page buffers containing data after offset in page.
1898 void jbd2_journal_invalidatepage(journal_t *journal,
1900 unsigned long offset)
1902 struct buffer_head *head, *bh, *next;
1903 unsigned int curr_off = 0;
1906 if (!PageLocked(page))
1908 if (!page_has_buffers(page))
1911 /* We will potentially be playing with lists other than just the
1912 * data lists (especially for journaled data mode), so be
1913 * cautious in our locking. */
1915 head = bh = page_buffers(page);
1917 unsigned int next_off = curr_off + bh->b_size;
1918 next = bh->b_this_page;
1920 if (offset <= curr_off) {
1921 /* This block is wholly outside the truncation point */
1923 may_free &= journal_unmap_buffer(journal, bh);
1926 curr_off = next_off;
1929 } while (bh != head);
1932 if (may_free && try_to_free_buffers(page))
1933 J_ASSERT(!page_has_buffers(page));
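/*
 * Illustrative note: this is normally wired up through the filesystem's
 * ->invalidatepage address_space operation during truncate, which is why
 * the i_size invariant described above matters.
 */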
1938 * File a buffer on the given transaction list.
1940 void __jbd2_journal_file_buffer(struct journal_head *jh,
1941 transaction_t *transaction, int jlist)
1943 struct journal_head **list = NULL;
1945 struct buffer_head *bh = jh2bh(jh);
1947 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
1948 assert_spin_locked(&transaction->t_journal->j_list_lock);
1950 J_ASSERT_JH(jh, jh->b_jlist < BJ_Types);
1951 J_ASSERT_JH(jh, jh->b_transaction == transaction ||
1952 jh->b_transaction == NULL);
1954 if (jh->b_transaction && jh->b_jlist == jlist)
1955 return;
1957 if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
1958 jlist == BJ_Shadow || jlist == BJ_Forget) {
1959 /*
1960 * For metadata buffers, we track dirty bit in buffer_jbddirty
1961 * instead of buffer_dirty. We should not see a dirty bit set
1962 * here because we clear it in do_get_write_access but e.g.
1963 * tune2fs can modify the sb and set the dirty bit at any time
1964 * so we try to gracefully handle that.
1965 */
1966 if (buffer_dirty(bh))
1967 warn_dirty_buffer(bh);
1968 if (test_clear_buffer_dirty(bh) ||
1969 test_clear_buffer_jbddirty(bh))
1970 was_dirty = 1;
1971 }
1973 if (jh->b_transaction)
1974 __jbd2_journal_temp_unlink_buffer(jh);
1975 jh->b_transaction = transaction;
1977 switch (jlist) {
1978 case BJ_None:
1979 J_ASSERT_JH(jh, !jh->b_committed_data);
1980 J_ASSERT_JH(jh, !jh->b_frozen_data);
1981 return;
1982 case BJ_Metadata:
1983 transaction->t_nr_buffers++;
1984 list = &transaction->t_buffers;
1985 break;
1986 case BJ_Forget:
1987 list = &transaction->t_forget;
1988 break;
1989 case BJ_IO:
1990 list = &transaction->t_iobuf_list;
1991 break;
1992 case BJ_Shadow:
1993 list = &transaction->t_shadow_list;
1994 break;
1995 case BJ_LogCtl:
1996 list = &transaction->t_log_list;
1997 break;
1998 case BJ_Reserved:
1999 list = &transaction->t_reserved_list;
2000 break;
2001 }
2003 __blist_add_buffer(list, jh);
2004 jh->b_jlist = jlist;
2006 if (was_dirty)
2007 set_buffer_jbddirty(bh);
2008 }
2010 void jbd2_journal_file_buffer(struct journal_head *jh,
2011 transaction_t *transaction, int jlist)
2012 {
2013 jbd_lock_bh_state(jh2bh(jh));
2014 spin_lock(&transaction->t_journal->j_list_lock);
2015 __jbd2_journal_file_buffer(jh, transaction, jlist);
2016 spin_unlock(&transaction->t_journal->j_list_lock);
2017 jbd_unlock_bh_state(jh2bh(jh));
2018 }
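/*
 * Illustrative sketch (editor's addition): the locked wrapper above is the
 * form used when neither j_list_lock nor the buffer's state lock is held
 * yet, e.g. the commit path filing a freshly allocated descriptor buffer on
 * the committing transaction's log-control list. "descriptor" and
 * "commit_transaction" are assumed to be supplied by the caller:
 *
 *	jbd2_journal_file_buffer(descriptor, commit_transaction, BJ_LogCtl);
 *
 * Callers that already hold both locks call __jbd2_journal_file_buffer()
 * directly, exactly as the wrapper itself does.
 */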
2020 /*
2021 * Remove a buffer from its current buffer list in preparation for
2022 * dropping it from its current transaction entirely. If the buffer has
2023 * already started to be used by a subsequent transaction, refile the
2024 * buffer on that transaction's metadata list.
2026 * Called under journal->j_list_lock
2028 * Called under jbd_lock_bh_state(jh2bh(jh))
2029 */
2030 void __jbd2_journal_refile_buffer(struct journal_head *jh)
2031 {
2032 int was_dirty, jlist;
2033 struct buffer_head *bh = jh2bh(jh);
2035 J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh));
2036 if (jh->b_transaction)
2037 assert_spin_locked(&jh->b_transaction->t_journal->j_list_lock);
2039 /* If the buffer is now unused, just drop it. */
2040 if (jh->b_next_transaction == NULL) {
2041 __jbd2_journal_unfile_buffer(jh);
2042 return;
2043 }
2045 /*
2046 * It has been modified by a later transaction: add it to the new
2047 * transaction's metadata list.
2048 */
2050 was_dirty = test_clear_buffer_jbddirty(bh);
2051 __jbd2_journal_temp_unlink_buffer(jh);
2052 jh->b_transaction = jh->b_next_transaction;
2053 jh->b_next_transaction = NULL;
2054 if (buffer_freed(bh))
2055 jlist = BJ_Forget;
2056 else if (jh->b_modified)
2057 jlist = BJ_Metadata;
2058 else
2059 jlist = BJ_Reserved;
2060 __jbd2_journal_file_buffer(jh, jh->b_transaction, jlist);
2061 J_ASSERT_JH(jh, jh->b_transaction->t_state == T_RUNNING);
2063 if (was_dirty)
2064 set_buffer_jbddirty(bh);
2065 }
2067 /*
2068 * For the unlocked version of this call, also make sure that any
2069 * hanging journal_head is cleaned up if necessary.
2071 * __jbd2_journal_refile_buffer is usually called as part of a single locked
2072 * operation on a buffer_head, in which the caller is probably going to
2073 * be hooking the journal_head onto other lists. In that case it is up
2074 * to the caller to remove the journal_head if necessary. For the
2075 * unlocked jbd2_journal_refile_buffer call, the caller isn't going to be
2076 * doing anything else to the buffer so we need to do the cleanup
2077 * ourselves to avoid a jh leak.
2079 * *** The journal_head may be freed by this call! ***
2080 */
2081 void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh)
2082 {
2083 struct buffer_head *bh = jh2bh(jh);
2085 jbd_lock_bh_state(bh);
2086 spin_lock(&journal->j_list_lock);
2088 __jbd2_journal_refile_buffer(jh);
2089 jbd_unlock_bh_state(bh);
2090 jbd2_journal_remove_journal_head(bh);
2092 spin_unlock(&journal->j_list_lock);
2093 __brelse(bh);
2094 }
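/*
 * Illustrative sketch (editor's addition): the commit path uses the refile
 * primitives when it tears down the committing transaction's lists, along
 * these lines (heavily simplified from fs/jbd2/commit.c):
 *
 *	spin_lock(&journal->j_list_lock);
 *	while (commit_transaction->t_forget) {
 *		struct journal_head *jh = commit_transaction->t_forget;
 *
 *		jbd_lock_bh_state(jh2bh(jh));
 *		...
 *		__jbd2_journal_refile_buffer(jh);
 *		jbd_unlock_bh_state(jh2bh(jh));
 *	}
 *	spin_unlock(&journal->j_list_lock);
 *
 * The real loop also handles checkpoint lists, buffer freeing and lock
 * dropping; this only shows where the refile call sits.
 */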
2096 /*
2097 * File inode in the inode list of the handle's transaction
2098 */
2099 int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
2100 {
2101 transaction_t *transaction = handle->h_transaction;
2102 journal_t *journal = transaction->t_journal;
2104 if (is_handle_aborted(handle))
2105 return -EIO;
2107 jbd_debug(4, "Adding inode %lu, tid:%d\n", jinode->i_vfs_inode->i_ino,
2108 transaction->t_tid);
2110 /*
2111 * First check whether inode isn't already on the transaction's
2112 * lists without taking the lock. Note that this check is safe
2113 * without the lock as we cannot race with somebody removing inode
2114 * from the transaction. The reason is that we remove inode from the
2115 * transaction only in journal_release_jbd_inode() and when we commit
2116 * the transaction. We are guarded from the first case by holding
2117 * a reference to the inode. We are safe against the second case
2118 * because if jinode->i_transaction == transaction, commit code
2119 * cannot touch the transaction because we hold reference to it,
2120 * and if jinode->i_next_transaction == transaction, commit code
2121 * will only file the inode where we want it.
2122 */
2123 if (jinode->i_transaction == transaction ||
2124 jinode->i_next_transaction == transaction)
2125 return 0;
2127 spin_lock(&journal->j_list_lock);
2129 if (jinode->i_transaction == transaction ||
2130 jinode->i_next_transaction == transaction)
2131 goto done;
2133 /* On some different transaction's list - should be
2134 * the committing one */
2135 if (jinode->i_transaction) {
2136 J_ASSERT(jinode->i_next_transaction == NULL);
2137 J_ASSERT(jinode->i_transaction ==
2138 journal->j_committing_transaction);
2139 jinode->i_next_transaction = transaction;
2140 goto done;
2141 }
2142 /* Not on any transaction list... */
2143 J_ASSERT(!jinode->i_next_transaction);
2144 jinode->i_transaction = transaction;
2145 list_add(&jinode->i_list, &transaction->t_inode_list);
2146 done:
2147 spin_unlock(&journal->j_list_lock);
2149 return 0;
2150 }
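/*
 * Illustrative sketch (editor's addition): in data=ordered mode a filesystem
 * calls jbd2_journal_file_inode() after allocating blocks under a handle, so
 * that the inode's data is written out before the transaction commits.
 * EXAMPLE_I() is a hypothetical accessor for the fs-private inode that
 * embeds a struct jbd2_inode (ext4 keeps one in ext4_inode_info).
 */
#if 0
static int example_order_data(handle_t *handle, struct inode *inode)
{
	struct jbd2_inode *jinode = &EXAMPLE_I(inode)->jinode;

	return jbd2_journal_file_inode(handle, jinode);
}
#endif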
2152 /*
2153 * File truncate and transaction commit interact with each other in a
2154 * non-trivial way. If a transaction writing data block A is
2155 * committing, we cannot discard the data by truncate until we have
2156 * written them. Otherwise if we crashed after the transaction with
2157 * write has committed but before the transaction with truncate has
2158 * committed, we could see stale data in block A. This function is a
2159 * helper to solve this problem. It starts writeout of the truncated
2160 * part in case it is in the committing transaction.
2162 * Filesystem code must call this function when inode is journaled in
2163 * ordered mode before truncation happens and after the inode has been
2164 * placed on orphan list with the new inode size. The second condition
2165 * avoids the race that someone writes new data and we start
2166 * committing the transaction after this function has been called but
2167 * before a transaction for truncate is started (and furthermore it
2168 * allows us to optimize the case where the addition to orphan list
2169 * happens in the same transaction as write --- we don't have to write
2170 * any data in such case).
2171 */
2172 int jbd2_journal_begin_ordered_truncate(journal_t *journal,
2173 struct jbd2_inode *jinode,
2174 loff_t new_size)
2175 {
2176 transaction_t *inode_trans, *commit_trans;
2177 int ret = 0;
2179 /* This is a quick check to avoid locking if not necessary */
2180 if (!jinode->i_transaction)
2181 goto out;
2182 /* Locks are here just to force reading of recent values, it is
2183 * enough that the transaction was not committing before we started
2184 * a transaction adding the inode to orphan list */
2185 read_lock(&journal->j_state_lock);
2186 commit_trans = journal->j_committing_transaction;
2187 read_unlock(&journal->j_state_lock);
2188 spin_lock(&journal->j_list_lock);
2189 inode_trans = jinode->i_transaction;
2190 spin_unlock(&journal->j_list_lock);
2191 if (inode_trans == commit_trans) {
2192 ret = filemap_fdatawrite_range(jinode->i_vfs_inode->i_mapping,
2193 new_size, LLONG_MAX);
2194 if (ret)
2195 jbd2_journal_abort(journal, ret);