/*
 * linux/fs/jbd2/commit.c
 *
 * Written by Stephen C. Tweedie <sct@redhat.com>, 1998
 *
 * Copyright 1998 Red Hat corp --- All Rights Reserved
 *
 * This file is part of the Linux kernel and is made available under
 * the terms of the GNU General Public License, version 2, or at your
 * option, any later version, incorporated herein by reference.
 *
 * Journal commit routines for the generic filesystem journaling code;
 * part of the ext2fs journaling system.
 */

#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>

/*
 * Default IO end handler for temporary BJ_IO buffer_heads.
 */
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
	BUFFER_TRACE(bh, "");
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
}

/*
 * When an ext3-ordered file is truncated, it is possible that many pages are
 * not successfully freed, because they are attached to a committing
 * transaction.  After the transaction commits, these pages are left on the
 * LRU, with no ->mapping, and with attached buffers.  These pages are
 * trivially reclaimable by the VM, but their apparent absence upsets the VM
 * accounting, and it makes the numbers in /proc/meminfo look odd.
 *
 * So here, we have a buffer which has just come off the forget list.  Look to
 * see if we can strip all buffers from the backing page.
 *
 * Called under lock_journal(), and possibly under journal_datalist_lock.  The
 * caller provided us with a ref against the buffer, and we drop that here.
 */
static void release_buffer_page(struct buffer_head *bh)
{
	struct page *page;

	if (buffer_dirty(bh))
		goto nope;
	if (atomic_read(&bh->b_count) != 1)
		goto nope;
	page = bh->b_page;
	if (!page)
		goto nope;
	if (page->mapping)
		goto nope;

	/* OK, it's a truncated page */
	if (TestSetPageLocked(page))
		goto nope;

	page_cache_get(page);
	__brelse(bh);
	try_to_free_buffers(page);
	unlock_page(page);
	page_cache_release(page);
	return;

nope:
	__brelse(bh);
}

/*
 * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is
 * already held.  The lock ranking takes jbd_lock_bh_state() before
 * j_list_lock, so we must trylock here.  If we lose, schedule away and
 * return 0; j_list_lock is dropped in that case.
 */
static int inverted_lock(journal_t *journal, struct buffer_head *bh)
{
	if (!jbd_trylock_bh_state(bh)) {
		spin_unlock(&journal->j_list_lock);
		schedule();
		return 0;
	}
	return 1;
}

/*
 * Done it all: now submit the commit record.  We should have
 * cleaned up our previous buffers by now, so if we are in abort
 * mode we can now just skip the rest of the journal write
 * entirely.
 *
 * Returns 1 if the journal needs to be aborted or 0 on success
 */
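/*
 * The commit record is a single journal block holding a struct
 * commit_header: the JBD2 magic number, the JBD2_COMMIT_BLOCK block
 * type and the committing transaction's tid, plus, when the
 * JBD2_FEATURE_COMPAT_CHECKSUM feature is set, a crc32 over the
 * transaction's blocks so that recovery can verify that the whole
 * transaction actually reached the disk.
 */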
static int journal_submit_commit_record(journal_t *journal,
					transaction_t *commit_transaction,
					struct buffer_head **cbh,
					__u32 crc32_sum)
{
	struct journal_head *descriptor;
	struct commit_header *tmp;
	struct buffer_head *bh;
	int ret;
	int barrier_done = 0;

	if (is_journal_aborted(journal))
		return 0;

	descriptor = jbd2_journal_get_descriptor_buffer(journal);
	if (!descriptor)
		return 1;

	bh = jh2bh(descriptor);

	tmp = (struct commit_header *)bh->b_data;
	tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
	tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
	tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);

	if (JBD2_HAS_COMPAT_FEATURE(journal,
				    JBD2_FEATURE_COMPAT_CHECKSUM)) {
		tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
		tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
		tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
	}

	JBUFFER_TRACE(descriptor, "submit commit block");
	lock_buffer(bh);
	get_bh(bh);
	set_buffer_dirty(bh);
	set_buffer_uptodate(bh);
	bh->b_end_io = journal_end_buffer_io_sync;

	if (journal->j_flags & JBD2_BARRIER &&
	    !JBD2_HAS_INCOMPAT_FEATURE(journal,
				       JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		set_buffer_ordered(bh);
		barrier_done = 1;
	}
	ret = submit_bh(WRITE, bh);
	if (barrier_done)
		clear_buffer_ordered(bh);

	/* is it possible for another commit to fail at roughly
	 * the same time as this one?  If so, we don't want to
	 * trust the barrier flag in the super, but instead want
	 * to remember if we sent a barrier request
	 */
	if (ret == -EOPNOTSUPP && barrier_done) {
		char b[BDEVNAME_SIZE];

		printk(KERN_WARNING
		       "JBD: barrier-based sync failed on %s - "
		       "disabling barriers\n",
		       bdevname(journal->j_dev, b));
		spin_lock(&journal->j_state_lock);
		journal->j_flags &= ~JBD2_BARRIER;
		spin_unlock(&journal->j_state_lock);

		/* And try again, without the barrier */
		set_buffer_uptodate(bh);
		set_buffer_dirty(bh);
		ret = submit_bh(WRITE, bh);
	}
	*cbh = bh;
	return ret;
}

/*
 * This function along with journal_submit_commit_record
 * allows writing the commit record asynchronously.
 */
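/*
 * With JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT the commit record is
 * submitted without first waiting for the rest of the transaction's
 * IO to complete; recovery then relies on the transaction checksum
 * rather than on the commit block strictly following the data onto
 * the disk.
 */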
static int journal_wait_on_commit_record(struct buffer_head *bh)
{
	int ret = 0;

	clear_buffer_dirty(bh);
	wait_on_buffer(bh);

	if (unlikely(!buffer_uptodate(bh)))
		ret = -EIO;
	put_bh(bh);		/* One for getblk() */
	jbd2_journal_put_journal_head(bh2jh(bh));

	return ret;
}

/*
 * Wait for all submitted IO to complete.
 */
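/*
 * Walk the committing transaction's t_locked_list backwards, waiting
 * on each buffer still under IO, unfiling it from BJ_Locked once its
 * write has completed, and returning -EIO if any buffer came back
 * not uptodate.
 */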
static int journal_wait_on_locked_list(journal_t *journal,
				       transaction_t *commit_transaction)
{
	int ret = 0;
	struct journal_head *jh;

	while (commit_transaction->t_locked_list) {
		struct buffer_head *bh;

		jh = commit_transaction->t_locked_list->b_tprev;
		bh = jh2bh(jh);
		get_bh(bh);
		if (buffer_locked(bh)) {
			spin_unlock(&journal->j_list_lock);
			wait_on_buffer(bh);
			if (unlikely(!buffer_uptodate(bh)))
				ret = -EIO;
			spin_lock(&journal->j_list_lock);
		}
		if (!inverted_lock(journal, bh)) {
			put_bh(bh);
			spin_lock(&journal->j_list_lock);
			continue;
		}
		if (buffer_jbd(bh) && jh->b_jlist == BJ_Locked) {
			__jbd2_journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			jbd2_journal_remove_journal_head(bh);
			put_bh(bh);
		} else {
			jbd_unlock_bh_state(bh);
		}
		put_bh(bh);
		cond_resched_lock(&journal->j_list_lock);
	}
	return ret;
}

static void journal_do_submit_data(struct buffer_head **wbuf, int bufs)
{
	int i;

	for (i = 0; i < bufs; i++) {
		wbuf[i]->b_end_io = end_buffer_write_sync;
		/* We use up our safety reference in submit_bh() */
		submit_bh(WRITE, wbuf[i]);
	}
}

/*
 * Submit all the data buffers to disk
 */
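/*
 * For each buffer on t_sync_datalist: a dirty buffer is locked
 * (trylock first, so we can drop j_list_lock for a blocking lock if
 * needed), moved to the BJ_Locked list and batched into wbuf[] for
 * submission; a buffer already under IO is simply refiled to
 * BJ_Locked so the wait loop will find it; and a clean, unlocked
 * buffer has finished writeout and is unfiled and released.
 */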
static void journal_submit_data_buffers(journal_t *journal,
					transaction_t *commit_transaction)
{
	struct journal_head *jh;
	struct buffer_head *bh;
	int locked;
	int bufs = 0;
	struct buffer_head **wbuf = journal->j_wbuf;

	/*
	 * Whenever we unlock the journal and sleep, things can get added
	 * onto ->t_sync_datalist, so we have to keep looping back to
	 * write_out_data until we *know* that the list is empty.
	 *
	 * Cleanup any flushed data buffers from the data list.  Even in
	 * abort mode, we want to flush this out as soon as possible.
	 */
write_out_data:
	cond_resched();
	spin_lock(&journal->j_list_lock);

	while (commit_transaction->t_sync_datalist) {
		jh = commit_transaction->t_sync_datalist;
		bh = jh2bh(jh);
		locked = 0;

		/* Get reference just to make sure buffer does not disappear
		 * when we are forced to drop various locks */
		get_bh(bh);
		/* If the buffer is dirty, we need to submit IO and hence
		 * we need the buffer lock.  We try to lock the buffer without
		 * blocking.  If we fail, we need to drop j_list_lock and do
		 * blocking lock_buffer().
		 */
		if (buffer_dirty(bh)) {
			if (test_set_buffer_locked(bh)) {
				BUFFER_TRACE(bh, "needs blocking lock");
				spin_unlock(&journal->j_list_lock);
				/* Write out all data to prevent deadlocks */
				journal_do_submit_data(wbuf, bufs);
				bufs = 0;
				lock_buffer(bh);
				spin_lock(&journal->j_list_lock);
			}
			locked = 1;
		}
		/* We have to get bh_state lock.  Again out of order, sigh. */
		if (!inverted_lock(journal, bh)) {
			jbd_lock_bh_state(bh);
			spin_lock(&journal->j_list_lock);
		}
		/* Someone already cleaned up the buffer? */
		if (!buffer_jbd(bh)
			|| jh->b_transaction != commit_transaction
			|| jh->b_jlist != BJ_SyncData) {
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			BUFFER_TRACE(bh, "already cleaned up");
			put_bh(bh);
			continue;
		}
		if (locked && test_clear_buffer_dirty(bh)) {
			BUFFER_TRACE(bh, "needs writeout, adding to array");
			wbuf[bufs++] = bh;
			__jbd2_journal_file_buffer(jh, commit_transaction,
						   BJ_Locked);
			jbd_unlock_bh_state(bh);
			if (bufs == journal->j_wbufsize) {
				spin_unlock(&journal->j_list_lock);
				journal_do_submit_data(wbuf, bufs);
				bufs = 0;
				goto write_out_data;
			}
		} else if (!locked && buffer_locked(bh)) {
			__jbd2_journal_file_buffer(jh, commit_transaction,
						   BJ_Locked);
			jbd_unlock_bh_state(bh);
			put_bh(bh);
		} else {
			BUFFER_TRACE(bh, "writeout complete: unfile");
			__jbd2_journal_unfile_buffer(jh);
			jbd_unlock_bh_state(bh);
			if (locked)
				unlock_buffer(bh);
			jbd2_journal_remove_journal_head(bh);
			/* Once for our safety reference, once for
			 * jbd2_journal_remove_journal_head() */
			put_bh(bh);
			put_bh(bh);
		}

		if (need_resched() || spin_needbreak(&journal->j_list_lock)) {
			spin_unlock(&journal->j_list_lock);
			goto write_out_data;
		}
	}
	spin_unlock(&journal->j_list_lock);
	journal_do_submit_data(wbuf, bufs);
}

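/*
 * Fold a buffer's contents into the running transaction checksum.
 * kmap_atomic() is needed because the buffer's page may live in
 * highmem.
 */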
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
	struct page *page = bh->b_page;
	char *addr;
	__u32 checksum;

	addr = kmap_atomic(page, KM_USER0);
	checksum = crc32_be(crc32_sum,
		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
	kunmap_atomic(addr, KM_USER0);

	return checksum;
}

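/*
 * Record a buffer's on-disk block number in a descriptor tag.  The
 * low 32 bits always go in t_blocknr; with 64-bit tags (tag_bytes >
 * JBD2_TAG_SIZE32) the high bits go in t_blocknr_high.  The shift is
 * split into two steps so the expression remains well-defined even
 * if the block number type is ever only 32 bits wide.
 */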
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
			    unsigned long long block)
{
	tag->t_blocknr = cpu_to_be32(block & (u32)~0);
	if (tag_bytes > JBD2_TAG_SIZE32)
		tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}

/*
 * jbd2_journal_commit_transaction
 *
 * The primary function for committing a transaction to the log.  This
 * function is called by the journal thread to begin a complete commit.
 */
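/*
 * Commit runs in phases, matching the jbd_debug messages below: lock
 * down the running transaction and wait for its updates to finish;
 * flush the data buffers and write the revoke records; write the
 * metadata (via temporary BJ_IO buffers) together with descriptor
 * blocks; wait for all that IO; write and wait on the commit record;
 * and finally move any still-dirty buffers onto the checkpoint list
 * and wake up everyone waiting for the commit to finish.
 */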
void jbd2_journal_commit_transaction(journal_t *journal)
{
	struct transaction_stats_s stats;
	transaction_t *commit_transaction;
	struct journal_head *jh, *new_jh, *descriptor;
	struct buffer_head **wbuf = journal->j_wbuf;
	int bufs;
	int flags;
	int err;
	unsigned long long blocknr;
	char *tagp = NULL;
	journal_header_t *header;
	journal_block_tag_t *tag = NULL;
	int space_left = 0;
	int first_tag = 0;
	int tag_flag;
	int i;
	int tag_bytes = journal_tag_bytes(journal);
	struct buffer_head *cbh = NULL; /* For transactional checksums */
	__u32 crc32_sum = ~0;

	/*
	 * First job: lock down the current transaction and wait for
	 * all outstanding updates to complete.
	 */

#ifdef COMMIT_STATS
	spin_lock(&journal->j_list_lock);
	summarise_journal_usage(journal);
	spin_unlock(&journal->j_list_lock);
#endif

	/* Do we need to erase the effects of a prior jbd2_journal_flush? */
	if (journal->j_flags & JBD2_FLUSHED) {
		jbd_debug(3, "super block updated\n");
		jbd2_journal_update_superblock(journal, 1);
	} else {
		jbd_debug(3, "superblock not updated\n");
	}

	J_ASSERT(journal->j_running_transaction != NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);

	commit_transaction = journal->j_running_transaction;
	J_ASSERT(commit_transaction->t_state == T_RUNNING);

	jbd_debug(1, "JBD: starting commit of transaction %d\n",
			commit_transaction->t_tid);

	spin_lock(&journal->j_state_lock);
	commit_transaction->t_state = T_LOCKED;

	stats.u.run.rs_wait = commit_transaction->t_max_wait;
	stats.u.run.rs_locked = jiffies;
	stats.u.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
						stats.u.run.rs_locked);

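	/*
	 * New handles cannot be started against a transaction once it
	 * is T_LOCKED, so when t_updates drains to zero below the set
	 * of buffers in this transaction is fixed.
	 */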
	spin_lock(&commit_transaction->t_handle_lock);
	while (commit_transaction->t_updates) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_updates, &wait,
				TASK_UNINTERRUPTIBLE);
		if (commit_transaction->t_updates) {
			spin_unlock(&commit_transaction->t_handle_lock);
			spin_unlock(&journal->j_state_lock);
			schedule();
			spin_lock(&journal->j_state_lock);
			spin_lock(&commit_transaction->t_handle_lock);
		}
		finish_wait(&journal->j_wait_updates, &wait);
	}
	spin_unlock(&commit_transaction->t_handle_lock);

	J_ASSERT(commit_transaction->t_outstanding_credits <=
			journal->j_max_transaction_buffers);

	/*
	 * First thing we are allowed to do is to discard any remaining
	 * BJ_Reserved buffers.  Note, it is _not_ permissible to assume
	 * that there are no such buffers: if a large filesystem
	 * operation like a truncate needs to split itself over multiple
	 * transactions, then it may try to do a jbd2_journal_restart() while
	 * there are still BJ_Reserved buffers outstanding.  These must
	 * be released cleanly from the current transaction.
	 *
	 * In this case, the filesystem must still reserve write access
	 * again before modifying the buffer in the new transaction, but
	 * we do not require it to remember exactly which old buffers it
	 * has reserved.  This is consistent with the existing behaviour
	 * that multiple jbd2_journal_get_write_access() calls to the same
	 * buffer are perfectly permissible.
	 */
	while (commit_transaction->t_reserved_list) {
		jh = commit_transaction->t_reserved_list;
		JBUFFER_TRACE(jh, "reserved, unused: refile");
		/*
		 * A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer()
		 * may leave undo-committed data.
		 */
		if (jh->b_committed_data) {
			struct buffer_head *bh = jh2bh(jh);

			jbd_lock_bh_state(bh);
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			jbd_unlock_bh_state(bh);
		}
		jbd2_journal_refile_buffer(journal, jh);
	}

	/*
	 * Now try to drop any written-back buffers from the journal's
	 * checkpoint lists.  We do this *before* commit because it potentially
	 * frees some memory
	 */
	spin_lock(&journal->j_list_lock);
	__jbd2_journal_clean_checkpoint_list(journal);
	spin_unlock(&journal->j_list_lock);

	jbd_debug(3, "JBD: commit phase 1\n");

	/*
	 * Switch to a new revoke table.
	 */
	jbd2_journal_switch_revoke_table(journal);

	stats.u.run.rs_flushing = jiffies;
	stats.u.run.rs_locked = jbd2_time_diff(stats.u.run.rs_locked,
					       stats.u.run.rs_flushing);

	commit_transaction->t_state = T_FLUSH;
	journal->j_committing_transaction = commit_transaction;
	journal->j_running_transaction = NULL;
	commit_transaction->t_log_start = journal->j_head;
	wake_up(&journal->j_wait_transaction_locked);
	spin_unlock(&journal->j_state_lock);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * Now start flushing things to disk, in the order they appear
	 * on the transaction lists.  Data blocks go first.
	 */
	err = 0;
	journal_submit_data_buffers(journal, commit_transaction);

	/*
	 * Wait for all previously submitted IO to complete if commit
	 * record is to be written synchronously.
	 */
	spin_lock(&journal->j_list_lock);
	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
		JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
		err = journal_wait_on_locked_list(journal,
						  commit_transaction);

	spin_unlock(&journal->j_list_lock);

	if (err)
		jbd2_journal_abort(journal, err);

	jbd2_journal_write_revoke_records(journal, commit_transaction);

	jbd_debug(3, "JBD: commit phase 2\n");

	/*
	 * If we found any dirty or locked buffers, then we should have
	 * looped back up to the write_out_data label.  If there weren't
	 * any then journal_clean_data_list should have wiped the list
	 * clean by now, so check that it is in fact empty.
	 */
	J_ASSERT(commit_transaction->t_sync_datalist == NULL);

	jbd_debug(3, "JBD: commit phase 3\n");

	/*
	 * Way to go: we have now written out all of the data for a
	 * transaction!  Now comes the tricky part: we need to write out
	 * metadata.  Loop over the transaction's entire buffer list:
	 */
	commit_transaction->t_state = T_COMMIT;

	stats.u.run.rs_logging = jiffies;
	stats.u.run.rs_flushing = jbd2_time_diff(stats.u.run.rs_flushing,
						 stats.u.run.rs_logging);
	stats.u.run.rs_blocks = commit_transaction->t_outstanding_credits;
	stats.u.run.rs_blocks_logged = 0;

	descriptor = NULL;
	bufs = 0;
	while (commit_transaction->t_buffers) {

		/* Find the next buffer to be journaled... */

		jh = commit_transaction->t_buffers;

		/* If we're in abort mode, we just un-journal the buffer and
		   release it for background writing. */

		if (is_journal_aborted(journal)) {
			JBUFFER_TRACE(jh, "journal is aborting: refile");
			jbd2_journal_refile_buffer(journal, jh);
			/* If that was the last one, we need to clean up
			 * any descriptor buffers which may have been
			 * already allocated, even if we are now
			 * aborting. */
			if (!commit_transaction->t_buffers)
				goto start_journal_io;
			continue;
		}

		/* Make sure we have a descriptor block in which to
		   record the metadata buffer. */

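		/*
		 * A descriptor block starts with a journal_header_t
		 * (magic, JBD2_DESCRIPTOR_BLOCK, tid) and is then
		 * filled with journal_block_tag_t entries, the first
		 * of which is followed by the journal's 16-byte UUID;
		 * the final tag gets marked JBD2_FLAG_LAST_TAG below.
		 */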
		if (!descriptor) {
			struct buffer_head *bh;

			J_ASSERT(bufs == 0);

			jbd_debug(4, "JBD: get descriptor\n");

			descriptor = jbd2_journal_get_descriptor_buffer(journal);
			if (!descriptor) {
				jbd2_journal_abort(journal, -EIO);
				continue;
			}

			bh = jh2bh(descriptor);
			jbd_debug(4, "JBD: got buffer %llu (%p)\n",
				(unsigned long long)bh->b_blocknr, bh->b_data);
			header = (journal_header_t *)&bh->b_data[0];
			header->h_magic     = cpu_to_be32(JBD2_MAGIC_NUMBER);
			header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
			header->h_sequence  = cpu_to_be32(commit_transaction->t_tid);

			tagp = &bh->b_data[sizeof(journal_header_t)];
			space_left = bh->b_size - sizeof(journal_header_t);
			first_tag = 1;
			set_buffer_jwrite(bh);
			set_buffer_dirty(bh);
			wbuf[bufs++] = bh;

			/* Record it so that we can wait for IO
			   completion later */
			BUFFER_TRACE(bh, "ph3: file as descriptor");
			jbd2_journal_file_buffer(descriptor, commit_transaction,
						 BJ_LogCtl);
		}

		/* Where is the buffer to be written? */

		err = jbd2_journal_next_log_block(journal, &blocknr);
		/* If the block mapping failed, just abandon the buffer
		   and repeat this loop: we'll fall into the
		   refile-on-abort condition above. */
		if (err) {
			jbd2_journal_abort(journal, err);
			continue;
		}

		/*
		 * start_this_handle() uses t_outstanding_credits to determine
		 * the free space in the log, but this counter is changed
		 * by jbd2_journal_next_log_block() also.
		 */
		commit_transaction->t_outstanding_credits--;

		/* Bump b_count to prevent truncate from stumbling over
		   the shadowed buffer!  @@@ This can go if we ever get
		   rid of the BJ_IO/BJ_Shadow pairing of buffers. */
		atomic_inc(&jh2bh(jh)->b_count);

		/* Make a temporary IO buffer with which to write it out
		   (this will requeue both the metadata buffer and the
		   temporary IO buffer). new_bh goes on BJ_IO */

		set_bit(BH_JWrite, &jh2bh(jh)->b_state);
		/*
		 * akpm: jbd2_journal_write_metadata_buffer() sets
		 * new_bh->b_transaction to commit_transaction.
		 * We need to clean this up before we release new_bh
		 * (which is of type BJ_IO)
		 */
		JBUFFER_TRACE(jh, "ph3: write metadata");
		flags = jbd2_journal_write_metadata_buffer(commit_transaction,
						jh, &new_jh, blocknr);
		set_bit(BH_JWrite, &jh2bh(new_jh)->b_state);
		wbuf[bufs++] = jh2bh(new_jh);

		/* Record the new block's tag in the current descriptor
		   buffer */

		tag_flag = 0;
		if (flags & 1)
			tag_flag |= JBD2_FLAG_ESCAPE;
		if (!first_tag)
			tag_flag |= JBD2_FLAG_SAME_UUID;

		tag = (journal_block_tag_t *) tagp;
		write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
		tag->t_flags = cpu_to_be32(tag_flag);
		tagp += tag_bytes;
		space_left -= tag_bytes;

		if (first_tag) {
			memcpy(tagp, journal->j_uuid, 16);
			tagp += 16;
			space_left -= 16;
			first_tag = 0;
		}

		/* If there's no more to do, or if the descriptor is full,
		   let the IO rip! */

		if (bufs == journal->j_wbufsize ||
		    commit_transaction->t_buffers == NULL ||
		    space_left < tag_bytes + 16) {

			jbd_debug(4, "JBD: Submit %d IOs\n", bufs);

			/* Write an end-of-descriptor marker before
			   submitting the IOs.  "tag" still points to
			   the last tag we set up. */

			tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG);

start_journal_io:
			for (i = 0; i < bufs; i++) {
				struct buffer_head *bh = wbuf[i];
				/*
				 * Compute checksum.
				 */
				if (JBD2_HAS_COMPAT_FEATURE(journal,
					JBD2_FEATURE_COMPAT_CHECKSUM)) {
					crc32_sum =
					    jbd2_checksum_data(crc32_sum, bh);
				}

				lock_buffer(bh);
				clear_buffer_dirty(bh);
				set_buffer_uptodate(bh);
				bh->b_end_io = journal_end_buffer_io_sync;
				submit_bh(WRITE, bh);
			}
			cond_resched();
			stats.u.run.rs_blocks_logged += bufs;

			/* Force a new descriptor to be generated next
			   time round the loop. */
			descriptor = NULL;
			bufs = 0;
		}
	}

	/* Done it all: now write the commit record asynchronously. */

	if (JBD2_HAS_INCOMPAT_FEATURE(journal,
		JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);

		spin_lock(&journal->j_list_lock);
		err = journal_wait_on_locked_list(journal,
						  commit_transaction);
		spin_unlock(&journal->j_list_lock);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}

	/* Lo and behold: we have just managed to send a transaction to
	   the log.  Before we can commit it, wait for the IO so far to
	   complete.  Control buffers being written are on the
	   transaction's t_log_list queue, and metadata buffers are on
	   the t_iobuf_list queue.

	   Wait for the buffers in reverse order.  That way we are
	   less likely to be woken up until all IOs have completed, and
	   so we incur less scheduling load.
	 */

	jbd_debug(3, "JBD: commit phase 4\n");

	/*
	 * akpm: these are BJ_IO, and j_list_lock is not needed.
	 * See __journal_try_to_free_buffer.
	 */
wait_for_iobuf:
	while (commit_transaction->t_iobuf_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_iobuf_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_iobuf;
		}
		if (cond_resched())
			goto wait_for_iobuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		clear_buffer_jwrite(bh);

		JBUFFER_TRACE(jh, "ph4: unfile after journal write");
		jbd2_journal_unfile_buffer(journal, jh);

		/*
		 * ->t_iobuf_list should contain only dummy buffer_heads
		 * which were created by jbd2_journal_write_metadata_buffer().
		 */
		BUFFER_TRACE(bh, "dumping temporary bh");
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);
		J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
		free_buffer_head(bh);

		/* We also have to unlock and free the corresponding
		   shadowed buffer */
		jh = commit_transaction->t_shadow_list->b_tprev;
		bh = jh2bh(jh);
		clear_bit(BH_JWrite, &bh->b_state);
		J_ASSERT_BH(bh, buffer_jbddirty(bh));

		/* The metadata is now released for reuse, but we need
		   to remember it against this transaction so that when
		   we finally commit, we can do any checkpointing
		   required. */
		JBUFFER_TRACE(jh, "file as BJ_Forget");
		jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
		/* Wake up any transactions which were waiting for this
		   IO to complete */
		wake_up_bit(&bh->b_state, BH_Unshadow);
		JBUFFER_TRACE(jh, "brelse shadowed buffer");
		__brelse(bh);
	}

	J_ASSERT(commit_transaction->t_shadow_list == NULL);

	jbd_debug(3, "JBD: commit phase 5\n");

	/* Here we wait for the revoke record and descriptor record buffers */
wait_for_ctlbuf:
	while (commit_transaction->t_log_list != NULL) {
		struct buffer_head *bh;

		jh = commit_transaction->t_log_list->b_tprev;
		bh = jh2bh(jh);
		if (buffer_locked(bh)) {
			wait_on_buffer(bh);
			goto wait_for_ctlbuf;
		}
		if (cond_resched())
			goto wait_for_ctlbuf;

		if (unlikely(!buffer_uptodate(bh)))
			err = -EIO;

		BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
		clear_buffer_jwrite(bh);
		jbd2_journal_unfile_buffer(journal, jh);
		jbd2_journal_put_journal_head(jh);
		__brelse(bh);		/* One for getblk */
		/* AKPM: bforget here */
	}

	jbd_debug(3, "JBD: commit phase 6\n");

	if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
		JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
		err = journal_submit_commit_record(journal, commit_transaction,
						   &cbh, crc32_sum);
		if (err)
			__jbd2_journal_abort_hard(journal);
	}
	if (!err && !is_journal_aborted(journal))
		err = journal_wait_on_commit_record(cbh);

	if (err)
		jbd2_journal_abort(journal, err);

	/* End of a transaction!  Finally, we can do checkpoint
	   processing: any buffers committed as a result of this
	   transaction can be removed from any checkpoint list it was on
	   before. */

	jbd_debug(3, "JBD: commit phase 7\n");

	J_ASSERT(commit_transaction->t_sync_datalist == NULL);
	J_ASSERT(commit_transaction->t_buffers == NULL);
	J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
	J_ASSERT(commit_transaction->t_iobuf_list == NULL);
	J_ASSERT(commit_transaction->t_shadow_list == NULL);
	J_ASSERT(commit_transaction->t_log_list == NULL);

restart_loop:
	/*
	 * As there are other places (journal_unmap_buffer()) adding buffers
	 * to this list we have to be careful and hold the j_list_lock.
	 */
	spin_lock(&journal->j_list_lock);
	while (commit_transaction->t_forget) {
		transaction_t *cp_transaction;
		struct buffer_head *bh;

		jh = commit_transaction->t_forget;
		spin_unlock(&journal->j_list_lock);
		bh = jh2bh(jh);
		jbd_lock_bh_state(bh);
		J_ASSERT_JH(jh,	jh->b_transaction == commit_transaction ||
			jh->b_transaction == journal->j_running_transaction);

		/*
		 * If there is undo-protected committed data against
		 * this buffer, then we can remove it now.  If it is a
		 * buffer needing such protection, the old frozen_data
		 * field now points to a committed version of the
		 * buffer, so rotate that field to the new committed
		 * data.
		 *
		 * Otherwise, we can just throw away the frozen data now.
		 */
		if (jh->b_committed_data) {
			jbd2_free(jh->b_committed_data, bh->b_size);
			jh->b_committed_data = NULL;
			if (jh->b_frozen_data) {
				jh->b_committed_data = jh->b_frozen_data;
				jh->b_frozen_data = NULL;
			}
		} else if (jh->b_frozen_data) {
			jbd2_free(jh->b_frozen_data, bh->b_size);
			jh->b_frozen_data = NULL;
		}

		spin_lock(&journal->j_list_lock);
		cp_transaction = jh->b_cp_transaction;
		if (cp_transaction) {
			JBUFFER_TRACE(jh, "remove from old cp transaction");
			cp_transaction->t_chp_stats.cs_dropped++;
			__jbd2_journal_remove_checkpoint(jh);
		}

		/* Only re-checkpoint the buffer_head if it is marked
		 * dirty.  If the buffer was added to the BJ_Forget list
		 * by jbd2_journal_forget, it may no longer be dirty and
		 * there's no point in keeping a checkpoint record for
		 * it. */

		/* A buffer which has been freed while still being
		 * journaled by a previous transaction may end up still
		 * being dirty here, but we want to avoid writing back
		 * that buffer in the future now that the last use has
		 * been committed.  That's not only a performance gain,
		 * it also stops aliasing problems if the buffer is left
		 * behind for writeback and gets reallocated for another
		 * use in a different page. */
		if (buffer_freed(bh)) {
			clear_buffer_freed(bh);
			clear_buffer_jbddirty(bh);
		}

		if (buffer_jbddirty(bh)) {
			JBUFFER_TRACE(jh, "add to new checkpointing trans");
			__jbd2_journal_insert_checkpoint(jh, commit_transaction);
			JBUFFER_TRACE(jh, "refile for checkpoint writeback");
			__jbd2_journal_refile_buffer(jh);
			jbd_unlock_bh_state(bh);
		} else {
			J_ASSERT_BH(bh, !buffer_dirty(bh));
			/* The buffer on BJ_Forget list and not jbddirty means
			 * it has been freed by this transaction and hence it
			 * could not have been reallocated until this
			 * transaction has committed. *BUT* it could be
			 * reallocated once we have written all the data to
			 * disk and before we process the buffer on BJ_Forget
			 * list. */
			JBUFFER_TRACE(jh, "refile or unfile freed buffer");
			__jbd2_journal_refile_buffer(jh);
			if (!jh->b_transaction) {
				jbd_unlock_bh_state(bh);
				/* needs a brelse */
				jbd2_journal_remove_journal_head(bh);
				release_buffer_page(bh);
			} else
				jbd_unlock_bh_state(bh);
		}
		cond_resched_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	/*
	 * This is a bit sleazy.  We use j_list_lock to protect transition
	 * of a transaction into T_FINISHED state and calling
	 * __jbd2_journal_drop_transaction().  Otherwise we could race with
	 * other checkpointing code processing the transaction...
	 */
	spin_lock(&journal->j_state_lock);
	spin_lock(&journal->j_list_lock);
	/*
	 * Now recheck if some buffers did not get attached to the transaction
	 * while the lock was dropped...
	 */
	if (commit_transaction->t_forget) {
		spin_unlock(&journal->j_list_lock);
		spin_unlock(&journal->j_state_lock);
		goto restart_loop;
	}

	/* Done with this transaction! */

	jbd_debug(3, "JBD: commit phase 8\n");

	J_ASSERT(commit_transaction->t_state == T_COMMIT);

	commit_transaction->t_start = jiffies;
	stats.u.run.rs_logging = jbd2_time_diff(stats.u.run.rs_logging,
						commit_transaction->t_start);

	/*
	 * File the transaction for history
	 */
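	/*
	 * j_history is a fixed-size ring buffer: j_history_cur wraps
	 * back to slot 0 once it reaches j_history_max.
	 */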
	stats.ts_type = JBD2_STATS_RUN;
	stats.ts_tid = commit_transaction->t_tid;
	stats.u.run.rs_handle_count = commit_transaction->t_handle_count;
	spin_lock(&journal->j_history_lock);
	memcpy(journal->j_history + journal->j_history_cur, &stats,
			sizeof(stats));
	if (++journal->j_history_cur == journal->j_history_max)
		journal->j_history_cur = 0;

	/*
	 * Calculate overall stats
	 */
	journal->j_stats.ts_tid++;
	journal->j_stats.u.run.rs_wait += stats.u.run.rs_wait;
	journal->j_stats.u.run.rs_running += stats.u.run.rs_running;
	journal->j_stats.u.run.rs_locked += stats.u.run.rs_locked;
	journal->j_stats.u.run.rs_flushing += stats.u.run.rs_flushing;
	journal->j_stats.u.run.rs_logging += stats.u.run.rs_logging;
	journal->j_stats.u.run.rs_handle_count += stats.u.run.rs_handle_count;
	journal->j_stats.u.run.rs_blocks += stats.u.run.rs_blocks;
	journal->j_stats.u.run.rs_blocks_logged += stats.u.run.rs_blocks_logged;
	spin_unlock(&journal->j_history_lock);

	commit_transaction->t_state = T_FINISHED;
	J_ASSERT(commit_transaction == journal->j_committing_transaction);
	journal->j_commit_sequence = commit_transaction->t_tid;
	journal->j_committing_transaction = NULL;
	spin_unlock(&journal->j_state_lock);

	if (commit_transaction->t_checkpoint_list == NULL &&
	    commit_transaction->t_checkpoint_io_list == NULL) {
		__jbd2_journal_drop_transaction(journal, commit_transaction);
	} else {
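		/*
		 * Splice the transaction onto the circular
		 * t_cpnext/t_cpprev list of transactions waiting for
		 * checkpointing.
		 */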
		if (journal->j_checkpoint_transactions == NULL) {
			journal->j_checkpoint_transactions = commit_transaction;
			commit_transaction->t_cpnext = commit_transaction;
			commit_transaction->t_cpprev = commit_transaction;
		} else {
			commit_transaction->t_cpnext =
				journal->j_checkpoint_transactions;
			commit_transaction->t_cpprev =
				commit_transaction->t_cpnext->t_cpprev;
			commit_transaction->t_cpnext->t_cpprev =
				commit_transaction;
			commit_transaction->t_cpprev->t_cpnext =
				commit_transaction;
		}
	}
	spin_unlock(&journal->j_list_lock);

	jbd_debug(1, "JBD: commit %d complete, head %d\n",
		  journal->j_commit_sequence, journal->j_tail_sequence);

	wake_up(&journal->j_wait_done_commit);
}